VOL-1985 Migrate voltha-simonu-adapter to go mod

Change-Id: I0dedc208a64e7478c4ef6adc0287430e7b7d6702
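
For context, the module migration named in the subject line introduces a top-level go.mod for the adapter and re-populates vendor/ from it; the hunks below only show the resulting churn in the vendored github.com/pierrec/lz4 package. A minimal go.mod sketch is included here purely for illustration — the module path, Go directive, and the lz4 pin are assumptions and are not part of this change:

    module github.com/opencord/voltha-simonu-adapter

    go 1.12

    // Hypothetical pin: the actual requirement is whatever `go mod tidy`
    // selected when this change was generated.
    require github.com/pierrec/lz4 v2.0.5+incompatible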
diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml
index 658910d..b2c806d 100644
--- a/vendor/github.com/pierrec/lz4/.travis.yml
+++ b/vendor/github.com/pierrec/lz4/.travis.yml
@@ -1,13 +1,9 @@
 language: go
 
-env:
-  - GO111MODULE=off
-  - GO111MODULE=on
-
 go:
+  - 1.8.x
   - 1.9.x
   - 1.10.x
-  - 1.11.x
   - master
 
 matrix:
@@ -20,5 +16,3 @@
 script: 
  - go test -v -cpu=2
  - go test -v -cpu=2 -race
- - go test -v -cpu=2 -tags noasm
- - go test -v -cpu=2 -race -tags noasm
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
index d96e0e7..ef24f17 100644
--- a/vendor/github.com/pierrec/lz4/block.go
+++ b/vendor/github.com/pierrec/lz4/block.go
@@ -30,17 +30,75 @@
 // The destination buffer must be sized appropriately.
 //
 // An error is returned if the source data is invalid or the destination buffer is too small.
-func UncompressBlock(src, dst []byte) (di int, err error) {
+func UncompressBlock(src, dst []byte) (si int, err error) {
+	defer func() {
+		// It is now faster to let the runtime panic and recover on out of bound slice access
+		// than checking indices as we go along.
+		if recover() != nil {
+			err = ErrInvalidSourceShortBuffer
+		}
+	}()
 	sn := len(src)
 	if sn == 0 {
 		return 0, nil
 	}
+	var di int
 
-	di = decodeBlock(dst, src)
-	if di < 0 {
-		return 0, ErrInvalidSourceShortBuffer
+	for {
+		// Literals and match lengths (token).
+		b := int(src[si])
+		si++
+
+		// Literals.
+		if lLen := b >> 4; lLen > 0 {
+			if lLen == 0xF {
+				for src[si] == 0xFF {
+					lLen += 0xFF
+					si++
+				}
+				lLen += int(src[si])
+				si++
+			}
+			i := si
+			si += lLen
+			di += copy(dst[di:], src[i:si])
+
+			if si >= sn {
+				return di, nil
+			}
+		}
+
+		si++
+		_ = src[si] // Bound check elimination.
+		offset := int(src[si-1]) | int(src[si])<<8
+		si++
+
+		// Match.
+		mLen := b & 0xF
+		if mLen == 0xF {
+			for src[si] == 0xFF {
+				mLen += 0xFF
+				si++
+			}
+			mLen += int(src[si])
+			si++
+		}
+		mLen += minMatch
+
+		// Copy the match.
+		i := di - offset
+		if offset > 0 && mLen >= offset {
+			// Efficiently copy the match dst[di-offset:di] into the dst slice.
+			bytesToCopy := offset * (mLen / offset)
+			expanded := dst[i:]
+			for n := offset; n <= bytesToCopy+offset; n *= 2 {
+				copy(expanded[n:], expanded[:n])
+			}
+			di += bytesToCopy
+			mLen -= bytesToCopy
+		}
+		di += copy(dst[di:], dst[i:i+mLen])
 	}
-	return di, nil
 }
 
 // CompressBlock compresses the source buffer into the destination one.
@@ -128,7 +186,7 @@
 		di++
 
 		// Literals.
-		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		copy(dst[di:], src[anchor:anchor+lLen])
 		di += lLen + 2
 		anchor = si
 
@@ -172,7 +230,7 @@
 		// Incompressible.
 		return 0, nil
 	}
-	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	di += copy(dst[di:], src[anchor:])
 	return di, nil
 }
 
@@ -228,7 +286,7 @@
 			for ml < sn-si && src[next+ml] == src[si+ml] {
 				ml++
 			}
-			if ml < minMatch || ml <= mLen {
+			if ml+1 < minMatch || ml <= mLen {
 				// Match too small (<minMatch) or smaller than the current match.
 				continue
 			}
@@ -289,7 +347,7 @@
 		di++
 
 		// Literals.
-		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		copy(dst[di:], src[anchor:anchor+lLen])
 		di += lLen
 		anchor = si
 
@@ -334,6 +392,6 @@
 		// Incompressible.
 		return 0, nil
 	}
-	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	di += copy(dst[di:], src[anchor:])
 	return di, nil
 }
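
For reference, the block-decoding API touched above would be exercised roughly as follows by a caller that already knows the decompressed size (for example from an enclosing frame header). This is a minimal sketch, not part of the change; the compressed input and its decompressed size are placeholders:

    package main

    import (
        "fmt"

        "github.com/pierrec/lz4"
    )

    func main() {
        // Placeholders: a real caller would hold one compressed LZ4 block and
        // would know its decompressed size out of band, since UncompressBlock
        // requires dst to be sized appropriately (per its doc comment).
        var compressed []byte
        decompressedSize := 0

        dst := make([]byte, decompressedSize)
        n, err := lz4.UncompressBlock(compressed, dst)
        if err != nil {
            // ErrInvalidSourceShortBuffer is returned when the source data
            // is invalid or the destination buffer is too small.
            fmt.Println("decode failed:", err)
            return
        }
        fmt.Printf("decoded %d bytes\n", n)
        _ = dst[:n]
    }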
diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go
deleted file mode 100644
index 43cc14f..0000000
--- a/vendor/github.com/pierrec/lz4/decode_amd64.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-package lz4
-
-//go:noescape
-func decodeBlock(dst, src []byte) int
diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s
deleted file mode 100644
index 20fef39..0000000
--- a/vendor/github.com/pierrec/lz4/decode_amd64.s
+++ /dev/null
@@ -1,375 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-#include "textflag.h"
-
-// AX scratch
-// BX scratch
-// CX scratch
-// DX token
-//
-// DI &dst
-// SI &src
-// R8 &dst + len(dst)
-// R9 &src + len(src)
-// R11 &dst
-// R12 short output end
-// R13 short input end
-// func decodeBlock(dst, src []byte) int
-// using 50 bytes of stack currently
-TEXT ·decodeBlock(SB), NOSPLIT, $64-56
-	MOVQ dst_base+0(FP), DI
-	MOVQ DI, R11
-	MOVQ dst_len+8(FP), R8
-	ADDQ DI, R8
-
-	MOVQ src_base+24(FP), SI
-	MOVQ src_len+32(FP), R9
-	ADDQ SI, R9
-
-	// shortcut ends
-	// short output end
-	MOVQ R8, R12
-	SUBQ $32, R12
-	// short input end
-	MOVQ R9, R13
-	SUBQ $16, R13
-
-loop:
-	// for si < len(src)
-	CMPQ SI, R9
-	JGE end
-
-	// token := uint32(src[si])
-	MOVBQZX (SI), DX
-	INCQ SI
-
-	// lit_len = token >> 4
-	// if lit_len > 0
-	// CX = lit_len
-	MOVQ DX, CX
-	SHRQ $4, CX
-
-	// if lit_len != 0xF
-	CMPQ CX, $0xF
-	JEQ lit_len_loop_pre
-	CMPQ DI, R12
-	JGE lit_len_loop_pre
-	CMPQ SI, R13
-	JGE lit_len_loop_pre
-
-	// copy shortcut
-
-	// A two-stage shortcut for the most common case:
-	// 1) If the literal length is 0..14, and there is enough space,
-	// enter the shortcut and copy 16 bytes on behalf of the literals
-	// (in the fast mode, only 8 bytes can be safely copied this way).
-	// 2) Further if the match length is 4..18, copy 18 bytes in a similar
-	// manner; but we ensure that there's enough space in the output for
-	// those 18 bytes earlier, upon entering the shortcut (in other words,
-	// there is a combined check for both stages).
-
-	// copy literal
-	MOVOU (SI), X0
-	MOVOU X0, (DI)
-	ADDQ CX, DI
-	ADDQ CX, SI
-
-	MOVQ DX, CX
-	ANDQ $0xF, CX
-
-	// The second stage: prepare for match copying, decode full info.
-	// If it doesn't work out, the info won't be wasted.
-	// offset := uint16(data[:2])
-	MOVWQZX (SI), DX
-	ADDQ $2, SI
-
-	MOVQ DI, AX
-	SUBQ DX, AX
-	CMPQ AX, DI
-	JGT err_short_buf
-
-	// if we can't do the second stage then jump straight to read the
-	// match length, we already have the offset.
-	CMPQ CX, $0xF
-	JEQ match_len_loop_pre
-	CMPQ DX, $8
-	JLT match_len_loop_pre
-	CMPQ AX, R11
-	JLT err_short_buf
-
-	// memcpy(op + 0, match + 0, 8);
-	MOVQ (AX), BX
-	MOVQ BX, (DI)
-	// memcpy(op + 8, match + 8, 8);
-	MOVQ 8(AX), BX
-	MOVQ BX, 8(DI)
-	// memcpy(op +16, match +16, 2);
-	MOVW 16(AX), BX
-	MOVW BX, 16(DI)
-
-	ADDQ $4, DI // minmatch
-	ADDQ CX, DI
-
-	// shortcut complete, load next token
-	JMP loop
-
-lit_len_loop_pre:
-	// if lit_len > 0
-	CMPQ CX, $0
-	JEQ offset
-	CMPQ CX, $0xF
-	JNE copy_literal
-
-lit_len_loop:
-	// for src[si] == 0xFF
-	CMPB (SI), $0xFF
-	JNE lit_len_finalise
-
-	// bounds check src[si+1]
-	MOVQ SI, AX
-	ADDQ $1, AX
-	CMPQ AX, R9
-	JGT err_short_buf
-
-	// lit_len += 0xFF
-	ADDQ $0xFF, CX
-	INCQ SI
-	JMP lit_len_loop
-
-lit_len_finalise:
-	// lit_len += int(src[si])
-	// si++
-	MOVBQZX (SI), AX
-	ADDQ AX, CX
-	INCQ SI
-
-copy_literal:
-	// bounds check src and dst
-	MOVQ SI, AX
-	ADDQ CX, AX
-	CMPQ AX, R9
-	JGT err_short_buf
-
-	MOVQ DI, AX
-	ADDQ CX, AX
-	CMPQ AX, R8
-	JGT err_short_buf
-
-	// whats a good cut off to call memmove?
-	CMPQ CX, $16
-	JGT memmove_lit
-
-	// if len(dst[di:]) < 16
-	MOVQ R8, AX
-	SUBQ DI, AX
-	CMPQ AX, $16
-	JLT memmove_lit
-
-	// if len(src[si:]) < 16
-	MOVQ R9, AX
-	SUBQ SI, AX
-	CMPQ AX, $16
-	JLT memmove_lit
-
-	MOVOU (SI), X0
-	MOVOU X0, (DI)
-
-	JMP finish_lit_copy
-
-memmove_lit:
-	// memmove(to, from, len)
-	MOVQ DI, 0(SP)
-	MOVQ SI, 8(SP)
-	MOVQ CX, 16(SP)
-	// spill
-	MOVQ DI, 24(SP)
-	MOVQ SI, 32(SP)
-	MOVQ CX, 40(SP) // need len to inc SI, DI after
-	MOVB DX, 48(SP)
-	CALL runtime·memmove(SB)
-
-	// restore registers
-	MOVQ 24(SP), DI
-	MOVQ 32(SP), SI
-	MOVQ 40(SP), CX
-	MOVB 48(SP), DX
-
-	// recalc initial values
-	MOVQ dst_base+0(FP), R8
-	MOVQ R8, R11
-	ADDQ dst_len+8(FP), R8
-	MOVQ src_base+24(FP), R9
-	ADDQ src_len+32(FP), R9
-	MOVQ R8, R12
-	SUBQ $32, R12
-	MOVQ R9, R13
-	SUBQ $16, R13
-
-finish_lit_copy:
-	ADDQ CX, SI
-	ADDQ CX, DI
-
-	CMPQ SI, R9
-	JGE end
-
-offset:
-	// CX := mLen
-	// free up DX to use for offset
-	MOVQ DX, CX
-
-	MOVQ SI, AX
-	ADDQ $2, AX
-	CMPQ AX, R9
-	JGT err_short_buf
-
-	// offset
-	// DX := int(src[si]) | int(src[si+1])<<8
-	MOVWQZX (SI), DX
-	ADDQ $2, SI
-
-	// 0 offset is invalid
-	CMPQ DX, $0
-	JEQ err_corrupt
-
-	ANDB $0xF, CX
-
-match_len_loop_pre:
-	// if mlen != 0xF
-	CMPB CX, $0xF
-	JNE copy_match
-
-match_len_loop:
-	// for src[si] == 0xFF
-	// lit_len += 0xFF
-	CMPB (SI), $0xFF
-	JNE match_len_finalise
-
-	// bounds check src[si+1]
-	MOVQ SI, AX
-	ADDQ $1, AX
-	CMPQ AX, R9
-	JGT err_short_buf
-
-	ADDQ $0xFF, CX
-	INCQ SI
-	JMP match_len_loop
-
-match_len_finalise:
-	// lit_len += int(src[si])
-	// si++
-	MOVBQZX (SI), AX
-	ADDQ AX, CX
-	INCQ SI
-
-copy_match:
-	// mLen += minMatch
-	ADDQ $4, CX
-
-	// check we have match_len bytes left in dst
-	// di+match_len < len(dst)
-	MOVQ DI, AX
-	ADDQ CX, AX
-	CMPQ AX, R8
-	JGT err_short_buf
-
-	// DX = offset
-	// CX = match_len
-	// BX = &dst + (di - offset)
-	MOVQ DI, BX
-	SUBQ DX, BX
-
-	// check BX is within dst
-	// if BX < &dst
-	CMPQ BX, R11
-	JLT err_short_buf
-
-	// if offset + match_len < di
-	MOVQ BX, AX
-	ADDQ CX, AX
-	CMPQ DI, AX
-	JGT copy_interior_match
-
-	// AX := len(dst[:di])
-	// MOVQ DI, AX
-	// SUBQ R11, AX
-
-	// copy 16 bytes at a time
-	// if di-offset < 16 copy 16-(di-offset) bytes to di
-	// then do the remaining
-
-copy_match_loop:
-	// for match_len >= 0
-	// dst[di] = dst[i]
-	// di++
-	// i++
-	MOVB (BX), AX
-	MOVB AX, (DI)
-	INCQ DI
-	INCQ BX
-	DECQ CX
-
-	CMPQ CX, $0
-	JGT copy_match_loop
-
-	JMP loop
-
-copy_interior_match:
-	CMPQ CX, $16
-	JGT memmove_match
-
-	// if len(dst[di:]) < 16
-	MOVQ R8, AX
-	SUBQ DI, AX
-	CMPQ AX, $16
-	JLT memmove_match
-
-	MOVOU (BX), X0
-	MOVOU X0, (DI)
-
-	ADDQ CX, DI
-	JMP loop
-
-memmove_match:
-	// memmove(to, from, len)
-	MOVQ DI, 0(SP)
-	MOVQ BX, 8(SP)
-	MOVQ CX, 16(SP)
-	// spill
-	MOVQ DI, 24(SP)
-	MOVQ SI, 32(SP)
-	MOVQ CX, 40(SP) // need len to inc SI, DI after
-	CALL runtime·memmove(SB)
-
-	// restore registers
-	MOVQ 24(SP), DI
-	MOVQ 32(SP), SI
-	MOVQ 40(SP), CX
-
-	// recalc initial values
-	MOVQ dst_base+0(FP), R8
-	MOVQ R8, R11 // TODO: make these sensible numbers
-	ADDQ dst_len+8(FP), R8
-	MOVQ src_base+24(FP), R9
-	ADDQ src_len+32(FP), R9
-	MOVQ R8, R12
-	SUBQ $32, R12
-	MOVQ R9, R13
-	SUBQ $16, R13
-
-	ADDQ CX, DI
-	JMP loop
-
-err_corrupt:
-	MOVQ $-1, ret+48(FP)
-	RET
-
-err_short_buf:
-	MOVQ $-2, ret+48(FP)
-	RET
-
-end:
-	SUBQ R11, DI
-	MOVQ DI, ret+48(FP)
-	RET
diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go
deleted file mode 100644
index b83a19a..0000000
--- a/vendor/github.com/pierrec/lz4/decode_other.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// +build !amd64 appengine !gc noasm
-
-package lz4
-
-func decodeBlock(dst, src []byte) (ret int) {
-	defer func() {
-		// It is now faster to let the runtime panic and recover on out of bound slice access
-		// than checking indices as we go along.
-		if recover() != nil {
-			ret = -2
-		}
-	}()
-
-	var si, di int
-	for {
-		// Literals and match lengths (token).
-		b := int(src[si])
-		si++
-
-		// Literals.
-		if lLen := b >> 4; lLen > 0 {
-			switch {
-			case lLen < 0xF && di+18 < len(dst) && si+16 < len(src):
-				// Shortcut 1
-				// if we have enough room in src and dst, and the literals length
-				// is small enough (0..14) then copy all 16 bytes, even if not all
-				// are part of the literals.
-				copy(dst[di:], src[si:si+16])
-				si += lLen
-				di += lLen
-				if mLen := b & 0xF; mLen < 0xF {
-					// Shortcut 2
-					// if the match length (4..18) fits within the literals, then copy
-					// all 18 bytes, even if not all are part of the literals.
-					mLen += 4
-					if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset {
-						i := di - offset
-						copy(dst[di:], dst[i:i+18])
-						si += 2
-						di += mLen
-						continue
-					}
-				}
-			case lLen == 0xF:
-				for src[si] == 0xFF {
-					lLen += 0xFF
-					si++
-				}
-				lLen += int(src[si])
-				si++
-				fallthrough
-			default:
-				copy(dst[di:di+lLen], src[si:si+lLen])
-				si += lLen
-				di += lLen
-			}
-		}
-		if si >= len(src) {
-			return di
-		}
-
-		offset := int(src[si]) | int(src[si+1])<<8
-		if offset == 0 {
-			return -2
-		}
-		si += 2
-
-		// Match.
-		mLen := b & 0xF
-		if mLen == 0xF {
-			for src[si] == 0xFF {
-				mLen += 0xFF
-				si++
-			}
-			mLen += int(src[si])
-			si++
-		}
-		mLen += minMatch
-
-		// Copy the match.
-		expanded := dst[di-offset:]
-		if mLen > offset {
-			// Efficiently copy the match dst[di-offset:di] into the dst slice.
-			bytesToCopy := offset * (mLen / offset)
-			for n := offset; n <= bytesToCopy+offset; n *= 2 {
-				copy(expanded[n:], expanded[:n])
-			}
-			di += bytesToCopy
-			mLen -= bytesToCopy
-		}
-		di += copy(dst[di:di+mLen], expanded[:mLen])
-	}
-
-	return di
-}
diff --git a/vendor/github.com/pierrec/lz4/go.mod b/vendor/github.com/pierrec/lz4/go.mod
deleted file mode 100644
index f9f570a..0000000
--- a/vendor/github.com/pierrec/lz4/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/pierrec/lz4
-
-require github.com/pkg/profile v1.2.1
diff --git a/vendor/github.com/pierrec/lz4/go.sum b/vendor/github.com/pierrec/lz4/go.sum
deleted file mode 100644
index 6ca7598..0000000
--- a/vendor/github.com/pierrec/lz4/go.sum
+++ /dev/null
@@ -1,2 +0,0 @@
-github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE=
-github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=