[VOL-4290] Voltha go library updates for gRPC migration
Change-Id: I1aa2774beb6b7ed7419bc45aeb53fcae8a8ecda0
diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md
deleted file mode 100644
index 0982fd2..0000000
--- a/vendor/github.com/cespare/xxhash/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# xxhash
-
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
-high-quality hashing algorithm that is much faster than anything in the Go
-standard library.
-
-The API is very small, taking its cue from the other hashing packages in the
-standard library:
-
- $ go doc github.com/cespare/xxhash !
- package xxhash // import "github.com/cespare/xxhash"
-
- Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
- at http://cyan4973.github.io/xxHash/.
-
- func New() hash.Hash64
- func Sum64(b []byte) uint64
- func Sum64String(s string) uint64
-
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
-
-## Benchmarks
-
-Here are some quick benchmarks comparing the pure-Go and assembly
-implementations of Sum64 against another popular Go XXH64 implementation,
-[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash):
-
-| input size | OneOfOne | cespare (purego) | cespare |
-| --- | --- | --- | --- |
-| 5 B | 416 MB/s | 720 MB/s | 872 MB/s |
-| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s |
-| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s |
-| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s |
-
-These numbers were generated with:
-
-```
-$ go test -benchtime 10s -bench '/OneOfOne,'
-$ go test -tags purego -benchtime 10s -bench '/xxhash,'
-$ go test -benchtime 10s -bench '/xxhash,'
-```
-
-## Projects using this package
-
-- [InfluxDB](https://github.com/influxdata/influxdb)
-- [Prometheus](https://github.com/prometheus/prometheus)
diff --git a/vendor/github.com/cespare/xxhash/go.mod b/vendor/github.com/cespare/xxhash/go.mod
deleted file mode 100644
index 10605a6..0000000
--- a/vendor/github.com/cespare/xxhash/go.mod
+++ /dev/null
@@ -1,6 +0,0 @@
-module github.com/cespare/xxhash
-
-require (
- github.com/OneOfOne/xxhash v1.2.2
- github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72
-)
diff --git a/vendor/github.com/cespare/xxhash/go.sum b/vendor/github.com/cespare/xxhash/go.sum
deleted file mode 100644
index f6b5542..0000000
--- a/vendor/github.com/cespare/xxhash/go.sum
+++ /dev/null
@@ -1,4 +0,0 @@
-github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
diff --git a/vendor/github.com/cespare/xxhash/rotate.go b/vendor/github.com/cespare/xxhash/rotate.go
deleted file mode 100644
index f3eac5e..0000000
--- a/vendor/github.com/cespare/xxhash/rotate.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build !go1.9
-
-package xxhash
-
-// TODO(caleb): After Go 1.10 comes out, remove this fallback code.
-
-func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) }
-func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) }
-func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
-func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
-func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
-func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
-func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
-func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }
diff --git a/vendor/github.com/cespare/xxhash/rotate19.go b/vendor/github.com/cespare/xxhash/rotate19.go
deleted file mode 100644
index b99612b..0000000
--- a/vendor/github.com/cespare/xxhash/rotate19.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build go1.9
-
-package xxhash
-
-import "math/bits"
-
-func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
-func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
-func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
-func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
-func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
-func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
-func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
-func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml
new file mode 100644
index 0000000..c516ea8
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+ - "1.x"
+ - master
+env:
+ - TAGS=""
+ - TAGS="-tags purego"
+script: go test $TAGS -v ./...
diff --git a/vendor/github.com/cespare/xxhash/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
similarity index 100%
rename from vendor/github.com/cespare/xxhash/LICENSE.txt
rename to vendor/github.com/cespare/xxhash/v2/LICENSE.txt
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
new file mode 100644
index 0000000..2fd8693
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -0,0 +1,67 @@
+# xxhash
+
+[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
+[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
+
+xxhash is a Go implementation of the 64-bit
+[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+ func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
+
+This implementation provides a fast pure-Go implementation and an even faster
+assembly implementation for amd64.
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| --- | --- | --- |
+| 5 B | 979.66 MB/s | 1291.17 MB/s |
+| 100 B | 7475.26 MB/s | 7973.40 MB/s |
+| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
+| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+
+These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
+the following commands under Go 1.11.2:
+
+```
+$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
+$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [FreeCache](https://github.com/coocood/freecache)
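For orientation, here is a minimal sketch of how code depending on this vendored module would use the v2 API summarized in the README above (one-shot `Sum64` plus the exported `Digest` type that replaces v1's unexported `xxh`). The program and the hashed values are illustrative only and are not part of the vendored files:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, world")))

	// Streaming hashing through the exported Digest type, which replaces
	// v1's unexported xxh type and still satisfies hash.Hash64.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.Write([]byte("world"))
	fmt.Printf("%016x\n", d.Sum64())
}
```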
diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod
new file mode 100644
index 0000000..49f6760
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/go.mod
@@ -0,0 +1,3 @@
+module github.com/cespare/xxhash/v2
+
+go 1.11
diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/go.sum
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
new file mode 100644
index 0000000..db0b35f
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -0,0 +1,236 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+package xxhash
+
+import (
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+const (
+ prime1 uint64 = 11400714785074694791
+ prime2 uint64 = 14029467366897019727
+ prime3 uint64 = 1609587929392839161
+ prime4 uint64 = 9650029242287828579
+ prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var (
+ prime1v = prime1
+ prime2v = prime2
+ prime3v = prime3
+ prime4v = prime4
+ prime5v = prime5
+)
+
+// Digest implements hash.Hash64.
+type Digest struct {
+ v1 uint64
+ v2 uint64
+ v3 uint64
+ v4 uint64
+ total uint64
+ mem [32]byte
+ n int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+ var d Digest
+ d.Reset()
+ return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+ d.v1 = prime1v + prime2
+ d.v2 = prime2
+ d.v3 = 0
+ d.v4 = -prime1v
+ d.total = 0
+ d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+ n = len(b)
+ d.total += uint64(n)
+
+ if d.n+n < 32 {
+ // This new data doesn't even fill the current block.
+ copy(d.mem[d.n:], b)
+ d.n += n
+ return
+ }
+
+ if d.n > 0 {
+ // Finish off the partial block.
+ copy(d.mem[d.n:], b)
+ d.v1 = round(d.v1, u64(d.mem[0:8]))
+ d.v2 = round(d.v2, u64(d.mem[8:16]))
+ d.v3 = round(d.v3, u64(d.mem[16:24]))
+ d.v4 = round(d.v4, u64(d.mem[24:32]))
+ b = b[32-d.n:]
+ d.n = 0
+ }
+
+ if len(b) >= 32 {
+ // One or more full blocks left.
+ nw := writeBlocks(d, b)
+ b = b[nw:]
+ }
+
+ // Store any remaining partial block.
+ copy(d.mem[:], b)
+ d.n = len(b)
+
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+ s := d.Sum64()
+ return append(
+ b,
+ byte(s>>56),
+ byte(s>>48),
+ byte(s>>40),
+ byte(s>>32),
+ byte(s>>24),
+ byte(s>>16),
+ byte(s>>8),
+ byte(s),
+ )
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+ var h uint64
+
+ if d.total >= 32 {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = d.v3 + prime5
+ }
+
+ h += d.total
+
+ i, end := 0, d.n
+ for ; i+8 <= end; i += 8 {
+ k1 := round(0, u64(d.mem[i:i+8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if i+4 <= end {
+ h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ i += 4
+ }
+ for i < end {
+ h ^= uint64(d.mem[i]) * prime5
+ h = rol11(h) * prime1
+ i++
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+const (
+ magic = "xxh\x06"
+ marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ b = append(b, magic...)
+ b = appendUint64(b, d.v1)
+ b = appendUint64(b, d.v2)
+ b = appendUint64(b, d.v3)
+ b = appendUint64(b, d.v4)
+ b = appendUint64(b, d.total)
+ b = append(b, d.mem[:d.n]...)
+ b = b[:len(b)+len(d.mem)-d.n]
+ return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+ return errors.New("xxhash: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("xxhash: invalid hash state size")
+ }
+ b = b[len(magic):]
+ b, d.v1 = consumeUint64(b)
+ b, d.v2 = consumeUint64(b)
+ b, d.v3 = consumeUint64(b)
+ b, d.v4 = consumeUint64(b)
+ b, d.total = consumeUint64(b)
+ copy(d.mem[:], b)
+ b = b[len(d.mem):]
+ d.n = int(d.total % uint64(len(d.mem)))
+ return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.LittleEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ x := u64(b)
+ return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+ acc += input * prime2
+ acc = rol31(acc)
+ acc *= prime1
+ return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+ val = round(0, val)
+ acc ^= val
+ acc = acc*prime1 + prime4
+ return acc
+}
+
+func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
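The `Digest` added above also implements `encoding.BinaryMarshaler` and `encoding.BinaryUnmarshaler` via `MarshalBinary`/`UnmarshalBinary`, so intermediate hash state can be saved and resumed. A minimal sketch of that flow, with illustrative inputs and not part of the vendored file:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// Hash an input in two sessions by serializing the Digest state between them.
	d := xxhash.New()
	d.WriteString("first half of the input")

	state, err := d.MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Restore the saved state into a fresh Digest and finish the stream.
	resumed := xxhash.New()
	if err := resumed.UnmarshalBinary(state); err != nil {
		panic(err)
	}
	resumed.WriteString(" and the second half")
	fmt.Printf("%016x\n", resumed.Sum64())
}
```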
diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
similarity index 74%
rename from vendor/github.com/cespare/xxhash/xxhash_amd64.go
rename to vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
index d617652..ad14b80 100644
--- a/vendor/github.com/cespare/xxhash/xxhash_amd64.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
@@ -9,4 +9,5 @@
//go:noescape
func Sum64(b []byte) uint64
-func writeBlocks(x *xxh, b []byte) []byte
+//go:noescape
+func writeBlocks(d *Digest, b []byte) int
diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
similarity index 78%
rename from vendor/github.com/cespare/xxhash/xxhash_amd64.s
rename to vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
index 757f201..d580e32 100644
--- a/vendor/github.com/cespare/xxhash/xxhash_amd64.s
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
@@ -170,23 +170,22 @@
RET
// writeBlocks uses the same registers as above except that it uses AX to store
-// the x pointer.
+// the d pointer.
-// func writeBlocks(x *xxh, b []byte) []byte
-TEXT ·writeBlocks(SB), NOSPLIT, $0-56
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT, $0-40
// Load fixed primes needed for round.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
// Load slice.
MOVQ b_base+8(FP), CX
- MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below
MOVQ b_len+16(FP), DX
LEAQ (CX)(DX*1), BX
SUBQ $32, BX
- // Load vN from x.
- MOVQ x+0(FP), AX
+ // Load vN from d.
+ MOVQ d+0(FP), AX
MOVQ 0(AX), R8 // v1
MOVQ 8(AX), R9 // v2
MOVQ 16(AX), R10 // v3
@@ -203,31 +202,14 @@
CMPQ CX, BX
JLE blockLoop
- // Copy vN back to x.
+ // Copy vN back to d.
MOVQ R8, 0(AX)
MOVQ R9, 8(AX)
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
- // Construct return slice.
- // NOTE: It's important that we don't construct a slice that has a base
- // pointer off the end of the original slice, as in Go 1.7+ this will
- // cause runtime crashes. (See discussion in, for example,
- // https://github.com/golang/go/issues/16772.)
- // Therefore, we calculate the length/cap first, and if they're zero, we
- // keep the old base. This is what the compiler does as well if you
- // write code like
- // b = b[len(b):]
-
- // New length is 32 - (CX - BX) -> BX+32 - CX.
- ADDQ $32, BX
- SUBQ CX, BX
- JZ afterSetBase
-
- MOVQ CX, ret_base+32(FP)
-
-afterSetBase:
- MOVQ BX, ret_len+40(FP)
- MOVQ BX, ret_cap+48(FP) // set cap == len
+ // The number of bytes written is CX minus the old base pointer.
+ SUBQ b_base+8(FP), CX
+ MOVQ CX, ret+32(FP)
RET
diff --git a/vendor/github.com/cespare/xxhash/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
similarity index 85%
rename from vendor/github.com/cespare/xxhash/xxhash_other.go
rename to vendor/github.com/cespare/xxhash/v2/xxhash_other.go
index c68d13f..4a5a821 100644
--- a/vendor/github.com/cespare/xxhash/xxhash_other.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -5,9 +5,9 @@
// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
// A simpler version would be
- // x := New()
- // x.Write(b)
- // return x.Sum64()
+ // d := New()
+ // d.Write(b)
+ // return d.Sum64()
// but this is faster, particularly for small inputs.
n := len(b)
@@ -61,8 +61,9 @@
return h
}
-func writeBlocks(x *xxh, b []byte) []byte {
- v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
+func writeBlocks(d *Digest, b []byte) int {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ n := len(b)
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
@@ -70,6 +71,6 @@
v4 = round(v4, u64(b[24:32:len(b)]))
b = b[32:len(b):len(b)]
}
- x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4
- return b
+ d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+ return n - len(b)
}
diff --git a/vendor/github.com/cespare/xxhash/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
similarity index 60%
rename from vendor/github.com/cespare/xxhash/xxhash_safe.go
rename to vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
index dfa15ab..fc9bea7 100644
--- a/vendor/github.com/cespare/xxhash/xxhash_safe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -8,3 +8,8 @@
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ return d.Write([]byte(s))
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
new file mode 100644
index 0000000..53bf76e
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -0,0 +1,46 @@
+// +build !appengine
+
+// This file encapsulates usage of unsafe.
+// xxhash_safe.go contains the safe implementations.
+
+package xxhash
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// Notes:
+//
+// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
+// for some discussion about these unsafe conversions.
+//
+// In the future it's possible that compiler optimizations will make these
+// unsafe operations unnecessary: https://golang.org/issue/2205.
+//
+// Both of these wrapper functions still incur function call overhead since they
+// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
+// for strings to squeeze out a bit more speed. Mid-stack inlining should
+// eventually fix this.
+
+// Sum64String computes the 64-bit xxHash digest of s.
+// It may be faster than Sum64([]byte(s)) by avoiding a copy.
+func Sum64String(s string) uint64 {
+ var b []byte
+ bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+ bh.Len = len(s)
+ bh.Cap = len(s)
+ return Sum64(b)
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+// It may be faster than Write([]byte(s)) by avoiding a copy.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ var b []byte
+ bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+ bh.Len = len(s)
+ bh.Cap = len(s)
+ return d.Write(b)
+}
diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go
deleted file mode 100644
index f896bd2..0000000
--- a/vendor/github.com/cespare/xxhash/xxhash.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
-// at http://cyan4973.github.io/xxHash/.
-package xxhash
-
-import (
- "encoding/binary"
- "hash"
-)
-
-const (
- prime1 uint64 = 11400714785074694791
- prime2 uint64 = 14029467366897019727
- prime3 uint64 = 1609587929392839161
- prime4 uint64 = 9650029242287828579
- prime5 uint64 = 2870177450012600261
-)
-
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
-
-type xxh struct {
- v1 uint64
- v2 uint64
- v3 uint64
- v4 uint64
- total int
- mem [32]byte
- n int // how much of mem is used
-}
-
-// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm.
-func New() hash.Hash64 {
- var x xxh
- x.Reset()
- return &x
-}
-
-func (x *xxh) Reset() {
- x.n = 0
- x.total = 0
- x.v1 = prime1v + prime2
- x.v2 = prime2
- x.v3 = 0
- x.v4 = -prime1v
-}
-
-func (x *xxh) Size() int { return 8 }
-func (x *xxh) BlockSize() int { return 32 }
-
-// Write adds more data to x. It always returns len(b), nil.
-func (x *xxh) Write(b []byte) (n int, err error) {
- n = len(b)
- x.total += len(b)
-
- if x.n+len(b) < 32 {
- // This new data doesn't even fill the current block.
- copy(x.mem[x.n:], b)
- x.n += len(b)
- return
- }
-
- if x.n > 0 {
- // Finish off the partial block.
- copy(x.mem[x.n:], b)
- x.v1 = round(x.v1, u64(x.mem[0:8]))
- x.v2 = round(x.v2, u64(x.mem[8:16]))
- x.v3 = round(x.v3, u64(x.mem[16:24]))
- x.v4 = round(x.v4, u64(x.mem[24:32]))
- b = b[32-x.n:]
- x.n = 0
- }
-
- if len(b) >= 32 {
- // One or more full blocks left.
- b = writeBlocks(x, b)
- }
-
- // Store any remaining partial block.
- copy(x.mem[:], b)
- x.n = len(b)
-
- return
-}
-
-func (x *xxh) Sum(b []byte) []byte {
- s := x.Sum64()
- return append(
- b,
- byte(s>>56),
- byte(s>>48),
- byte(s>>40),
- byte(s>>32),
- byte(s>>24),
- byte(s>>16),
- byte(s>>8),
- byte(s),
- )
-}
-
-func (x *xxh) Sum64() uint64 {
- var h uint64
-
- if x.total >= 32 {
- v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
- h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- h = mergeRound(h, v1)
- h = mergeRound(h, v2)
- h = mergeRound(h, v3)
- h = mergeRound(h, v4)
- } else {
- h = x.v3 + prime5
- }
-
- h += uint64(x.total)
-
- i, end := 0, x.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(x.mem[i:i+8]))
- h ^= k1
- h = rol27(h)*prime1 + prime4
- }
- if i+4 <= end {
- h ^= uint64(u32(x.mem[i:i+4])) * prime1
- h = rol23(h)*prime2 + prime3
- i += 4
- }
- for i < end {
- h ^= uint64(x.mem[i]) * prime5
- h = rol11(h) * prime1
- i++
- }
-
- h ^= h >> 33
- h *= prime2
- h ^= h >> 29
- h *= prime3
- h ^= h >> 32
-
- return h
-}
-
-func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
-func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
-
-func round(acc, input uint64) uint64 {
- acc += input * prime2
- acc = rol31(acc)
- acc *= prime1
- return acc
-}
-
-func mergeRound(acc, val uint64) uint64 {
- val = round(0, val)
- acc ^= val
- acc = acc*prime1 + prime4
- return acc
-}
diff --git a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go
deleted file mode 100644
index d2b64e8..0000000
--- a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build !appengine
-
-// This file encapsulates usage of unsafe.
-// xxhash_safe.go contains the safe implementations.
-
-package xxhash
-
-import (
- "reflect"
- "unsafe"
-)
-
-// Sum64String computes the 64-bit xxHash digest of s.
-// It may be faster than Sum64([]byte(s)) by avoiding a copy.
-//
-// TODO(caleb): Consider removing this if an optimization is ever added to make
-// it unnecessary: https://golang.org/issue/2205.
-//
-// TODO(caleb): We still have a function call; we could instead write Go/asm
-// copies of Sum64 for strings to squeeze out a bit more speed.
-func Sum64String(s string) uint64 {
- // See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
- // for some discussion about this unsafe conversion.
- var b []byte
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
- bh.Len = len(s)
- bh.Cap = len(s)
- return Sum64(b)
-}