
vendor: Move back to upstream KCP (fixes #4407)

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4614
Audrius Butkevicius 8 years ago
commit 72172d853c
32 changed files with 3426 additions and 46 deletions
  1. lib/connections/kcp_dial.go (+1 −1)
  2. lib/connections/kcp_listen.go (+1 −1)
  3. lib/connections/kcp_misc.go (+1 −1)
  4. lib/protocol/benchmark_test.go (+1 −1)
  5. vendor/github.com/klauspost/reedsolomon/LICENSE (+23 −0)
  6. vendor/github.com/klauspost/reedsolomon/examples/simple-decoder.go (+125 −0)
  7. vendor/github.com/klauspost/reedsolomon/examples/simple-encoder.go (+112 −0)
  8. vendor/github.com/klauspost/reedsolomon/examples/stream-decoder.go (+165 −0)
  9. vendor/github.com/klauspost/reedsolomon/examples/stream-encoder.go (+142 −0)
  10. vendor/github.com/klauspost/reedsolomon/galois.go (+65 −0)
  11. vendor/github.com/klauspost/reedsolomon/galois_amd64.go (+91 −0)
  12. vendor/github.com/klauspost/reedsolomon/galois_amd64.s (+236 −0)
  13. vendor/github.com/klauspost/reedsolomon/galois_arm64.go (+48 −0)
  14. vendor/github.com/klauspost/reedsolomon/galois_arm64.s (+141 −0)
  15. vendor/github.com/klauspost/reedsolomon/galois_noasm.go (+27 −0)
  16. vendor/github.com/klauspost/reedsolomon/gentables.go (+132 −0)
  17. vendor/github.com/klauspost/reedsolomon/inversion_tree.go (+160 −0)
  18. vendor/github.com/klauspost/reedsolomon/matrix.go (+279 −0)
  19. vendor/github.com/klauspost/reedsolomon/options.go (+111 −0)
  20. vendor/github.com/klauspost/reedsolomon/reedsolomon.go (+884 −0)
  21. vendor/github.com/klauspost/reedsolomon/streaming.go (+584 −0)
  22. vendor/github.com/xtaci/kcp-go/LICENSE (+0 −0)
  23. vendor/github.com/xtaci/kcp-go/blacklist.go (+13 −0)
  24. vendor/github.com/xtaci/kcp-go/crypt.go (+0 −0)
  25. vendor/github.com/xtaci/kcp-go/fec.go (+36 −28)
  26. vendor/github.com/xtaci/kcp-go/kcp.go (+2 −3)
  27. vendor/github.com/xtaci/kcp-go/rand.go (+25 −0)
  28. vendor/github.com/xtaci/kcp-go/sess.go (+5 −3)
  29. vendor/github.com/xtaci/kcp-go/snmp.go (+0 −0)
  30. vendor/github.com/xtaci/kcp-go/updater.go (+0 −0)
  31. vendor/github.com/xtaci/kcp-go/xor.go (+0 −0)
  32. vendor/manifest (+16 −8)

+ 1 - 1
lib/connections/kcp_dial.go

@@ -11,9 +11,9 @@ import (
 	"net/url"
 	"time"
 
-	"github.com/AudriusButkevicius/kcp-go"
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/protocol"
+	"github.com/xtaci/kcp-go"
 	"github.com/xtaci/smux"
 )
 

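Since this commit only swaps the import path from the fork back to upstream, the calling code above is otherwise unchanged. For context, here is a minimal sketch of how the two upstream packages are typically combined; this is not Syncthing's actual dialer, and the address and FEC shard counts are made up. kcp-go supplies the reliable UDP session, and smux multiplexes streams over it:

package main

import (
	"log"

	"github.com/xtaci/kcp-go"
	"github.com/xtaci/smux"
)

func main() {
	// Dial a KCP session: nil block cipher, 10 data + 3 parity FEC shards.
	conn, err := kcp.DialWithOptions("127.0.0.1:22020", nil, 10, 3)
	if err != nil {
		log.Fatal(err)
	}
	// Multiplex logical streams over the single session; nil selects
	// smux's default configuration.
	session, err := smux.Client(conn, nil)
	if err != nil {
		log.Fatal(err)
	}
	stream, err := session.OpenStream()
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close()
	stream.Write([]byte("hello"))
}
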
+ 1 - 1
lib/connections/kcp_listen.go

@@ -15,9 +15,9 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/AudriusButkevicius/kcp-go"
 	"github.com/AudriusButkevicius/pfilter"
 	"github.com/ccding/go-stun/stun"
+	"github.com/xtaci/kcp-go"
 	"github.com/xtaci/smux"
 
 	"github.com/syncthing/syncthing/lib/config"

+ 1 - 1
lib/connections/kcp_misc.go

@@ -15,8 +15,8 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/AudriusButkevicius/kcp-go"
 	"github.com/AudriusButkevicius/pfilter"
+	"github.com/xtaci/kcp-go"
 	"github.com/xtaci/smux"
 )
 

+ 1 - 1
lib/protocol/benchmark_test.go

@@ -8,8 +8,8 @@ import (
 	"net"
 	"testing"
 
-	"github.com/AudriusButkevicius/kcp-go"
 	"github.com/syncthing/syncthing/lib/dialer"
+	"github.com/xtaci/kcp-go"
 )
 
 func BenchmarkRequestsRawTCP(b *testing.B) {

+ 23 - 0
vendor/github.com/klauspost/reedsolomon/LICENSE

@@ -0,0 +1,23 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+Copyright (c) 2015 Backblaze
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

+ 125 - 0
vendor/github.com/klauspost/reedsolomon/examples/simple-decoder.go

@@ -0,0 +1,125 @@
+//+build ignore
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+//
+// Simple decoder example.
+//
+// The decoder reverses the process of "simple-encoder.go"
+//
+// To build an executable use:
+//
+// go build simple-decoder.go
+//
+// Simple Encoder/Decoder Shortcomings:
+// * If the file size of the input isn't divisible by the number of data shards
+//   the output will contain extra zeroes
+//
+// * If the shard numbers aren't the same for the decoder as for the
+//   encoder, invalid output will be generated.
+//
+// * If values have changed in a shard, it cannot be reconstructed.
+//
+// * If two shards have been swapped, reconstruction will always fail.
+//   You need to supply the shards in the same order as they were given to you.
+//
+// The solution for this is to save a metadata file containing:
+//
+// * File size.
+// * The number of data/parity shards.
+// * HASH of each shard.
+// * Order of the shards.
+//
+// If you save these properties, you should be able to detect file corruption
+// in a shard and be able to reconstruct your data if you have the needed number of shards left.
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/klauspost/reedsolomon"
+)
+
+var dataShards = flag.Int("data", 4, "Number of shards to split the data into")
+var parShards = flag.Int("par", 2, "Number of parity shards")
+var outFile = flag.String("out", "", "Alternative output path/file")
+
+func init() {
+	flag.Usage = func() {
+		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+		fmt.Fprintf(os.Stderr, "  simple-decoder [-flags] basefile.ext\nDo not add the number to the filename.\n")
+		fmt.Fprintf(os.Stderr, "Valid flags:\n")
+		flag.PrintDefaults()
+	}
+}
+
+func main() {
+	// Parse flags
+	flag.Parse()
+	args := flag.Args()
+	if len(args) != 1 {
+		fmt.Fprintf(os.Stderr, "Error: No filenames given\n")
+		flag.Usage()
+		os.Exit(1)
+	}
+	fname := args[0]
+
+	// Create matrix
+	enc, err := reedsolomon.New(*dataShards, *parShards)
+	checkErr(err)
+
+	// Create shards and load the data.
+	shards := make([][]byte, *dataShards+*parShards)
+	for i := range shards {
+		infn := fmt.Sprintf("%s.%d", fname, i)
+		fmt.Println("Opening", infn)
+		shards[i], err = ioutil.ReadFile(infn)
+		if err != nil {
+			fmt.Println("Error reading file", err)
+			shards[i] = nil
+		}
+	}
+
+	// Verify the shards
+	ok, err := enc.Verify(shards)
+	if ok {
+		fmt.Println("No reconstruction needed")
+	} else {
+		fmt.Println("Verification failed. Reconstructing data")
+		err = enc.Reconstruct(shards)
+		if err != nil {
+			fmt.Println("Reconstruct failed -", err)
+			os.Exit(1)
+		}
+		ok, err = enc.Verify(shards)
+		if !ok {
+			fmt.Println("Verification failed after reconstruction, data likely corrupted.")
+			os.Exit(1)
+		}
+		checkErr(err)
+	}
+
+	// Join the shards and write them
+	outfn := *outFile
+	if outfn == "" {
+		outfn = fname
+	}
+
+	fmt.Println("Writing data to", outfn)
+	f, err := os.Create(outfn)
+	checkErr(err)
+
+	// We don't know the exact filesize.
+	err = enc.Join(f, shards, len(shards[0])**dataShards)
+	checkErr(err)
+}
+
+func checkErr(err error) {
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
+		os.Exit(2)
+	}
+}

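All four shortcomings listed in the header comment come down to state the shards themselves do not carry. Below is a hypothetical metadata record sketching what that comment suggests persisting alongside the shards; the type and field names are illustrative and not part of the reedsolomon package:

// shardMeta captures what the example comments recommend saving.
type shardMeta struct {
	FileSize     int64    // exact size, so padding zeros can be trimmed on Join
	DataShards   int      // must match the decoder's -data flag
	ParityShards int      // must match the decoder's -par flag
	ShardHashes  []string // e.g. hex SHA-256 per shard, in original shard order
}
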
+ 112 - 0
vendor/github.com/klauspost/reedsolomon/examples/simple-encoder.go

@@ -0,0 +1,112 @@
+//+build ignore
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+//
+// Simple encoder example
+//
+// The encoder encodes a single file into a number of shards
+// To reverse the process see "simple-decoder.go"
+//
+// To build an executable use:
+//
+// go build simple-encoder.go
+//
+// Simple Encoder/Decoder Shortcomings:
+// * If the file size of the input isn't divisible by the number of data shards
+//   the output will contain extra zeroes
+//
+// * If the shard numbers aren't the same for the decoder as for the
+//   encoder, invalid output will be generated.
+//
+// * If values have changed in a shard, it cannot be reconstructed.
+//
+// * If two shards have been swapped, reconstruction will always fail.
+//   You need to supply the shards in the same order as they were given to you.
+//
+// The solution for this is to save a metadata file containing:
+//
+// * File size.
+// * The number of data/parity shards.
+// * HASH of each shard.
+// * Order of the shards.
+//
+// If you save these properties, you should be able to detect file corruption
+// in a shard and be able to reconstruct your data if you have the needed number of shards left.
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/klauspost/reedsolomon"
+)
+
+var dataShards = flag.Int("data", 4, "Number of shards to split the data into, must be below 257.")
+var parShards = flag.Int("par", 2, "Number of parity shards")
+var outDir = flag.String("out", "", "Alternative output directory")
+
+func init() {
+	flag.Usage = func() {
+		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+		fmt.Fprintf(os.Stderr, "  simple-encoder [-flags] filename.ext\n\n")
+		fmt.Fprintf(os.Stderr, "Valid flags:\n")
+		flag.PrintDefaults()
+	}
+}
+
+func main() {
+	// Parse command line parameters.
+	flag.Parse()
+	args := flag.Args()
+	if len(args) != 1 {
+		fmt.Fprintf(os.Stderr, "Error: No input filename given\n")
+		flag.Usage()
+		os.Exit(1)
+	}
+	if *dataShards > 256 {
+		fmt.Fprintf(os.Stderr, "Error: Too many data shards\n")
+		os.Exit(1)
+	}
+	fname := args[0]
+
+	// Create encoding matrix.
+	enc, err := reedsolomon.New(*dataShards, *parShards)
+	checkErr(err)
+
+	fmt.Println("Opening", fname)
+	b, err := ioutil.ReadFile(fname)
+	checkErr(err)
+
+	// Split the file into equally sized shards.
+	shards, err := enc.Split(b)
+	checkErr(err)
+	fmt.Printf("File split into %d data+parity shards with %d bytes/shard.\n", len(shards), len(shards[0]))
+
+	// Encode parity
+	err = enc.Encode(shards)
+	checkErr(err)
+
+	// Write out the resulting files.
+	dir, file := filepath.Split(fname)
+	if *outDir != "" {
+		dir = *outDir
+	}
+	for i, shard := range shards {
+		outfn := fmt.Sprintf("%s.%d", file, i)
+
+		fmt.Println("Writing to", outfn)
+		err = ioutil.WriteFile(filepath.Join(dir, outfn), shard, os.ModePerm)
+		checkErr(err)
+	}
+}
+
+func checkErr(err error) {
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
+		os.Exit(2)
+	}
+}

+ 165 - 0
vendor/github.com/klauspost/reedsolomon/examples/stream-decoder.go

@@ -0,0 +1,165 @@
+//+build ignore
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+//
+// Stream decoder example.
+//
+// The decoder reverses the process of "stream-encoder.go"
+//
+// To build an executable use:
+//
+// go build stream-decoder.go
+//
+// Simple Encoder/Decoder Shortcomings:
+// * If the file size of the input isn't divisible by the number of data shards
+//   the output will contain extra zeroes
+//
+// * If the shard numbers aren't the same for the decoder as for the
+//   encoder, invalid output will be generated.
+//
+// * If values have changed in a shard, it cannot be reconstructed.
+//
+// * If two shards have been swapped, reconstruction will always fail.
+//   You need to supply the shards in the same order as they were given to you.
+//
+// The solution for this is to save a metadata file containing:
+//
+// * File size.
+// * The number of data/parity shards.
+// * HASH of each shard.
+// * Order of the shards.
+//
+// If you save these properties, you should be able to detect file corruption
+// in a shard and be able to reconstruct your data if you have the needed number of shards left.
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/klauspost/reedsolomon"
+)
+
+var dataShards = flag.Int("data", 4, "Number of shards to split the data into")
+var parShards = flag.Int("par", 2, "Number of parity shards")
+var outFile = flag.String("out", "", "Alternative output path/file")
+
+func init() {
+	flag.Usage = func() {
+		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+		fmt.Fprintf(os.Stderr, "  %s [-flags] basefile.ext\nDo not add the number to the filename.\n", os.Args[0])
+		fmt.Fprintf(os.Stderr, "Valid flags:\n")
+		flag.PrintDefaults()
+	}
+}
+
+func main() {
+	// Parse flags
+	flag.Parse()
+	args := flag.Args()
+	if len(args) != 1 {
+		fmt.Fprintf(os.Stderr, "Error: No filenames given\n")
+		flag.Usage()
+		os.Exit(1)
+	}
+	fname := args[0]
+
+	// Create matrix
+	enc, err := reedsolomon.NewStream(*dataShards, *parShards)
+	checkErr(err)
+
+	// Open the inputs
+	shards, size, err := openInput(*dataShards, *parShards, fname)
+	checkErr(err)
+
+	// Verify the shards
+	ok, err := enc.Verify(shards)
+	if ok {
+		fmt.Println("No reconstruction needed")
+	} else {
+		fmt.Println("Verification failed. Reconstructing data")
+		shards, size, err = openInput(*dataShards, *parShards, fname)
+		checkErr(err)
+		// Create out destination writers
+		out := make([]io.Writer, len(shards))
+		for i := range out {
+			if shards[i] == nil {
+				outfn := fmt.Sprintf("%s.%d", fname, i)
+				fmt.Println("Creating", outfn)
+				out[i], err = os.Create(outfn)
+				checkErr(err)
+			}
+		}
+		err = enc.Reconstruct(shards, out)
+		if err != nil {
+			fmt.Println("Reconstruct failed -", err)
+			os.Exit(1)
+		}
+		// Close output.
+		for i := range out {
+			if out[i] != nil {
+				err := out[i].(*os.File).Close()
+				checkErr(err)
+			}
+		}
+		shards, size, err = openInput(*dataShards, *parShards, fname)
+		checkErr(err)
+		ok, err = enc.Verify(shards)
+		if !ok {
+			fmt.Println("Verification failed after reconstruction, data likely corrupted:", err)
+			os.Exit(1)
+		}
+		checkErr(err)
+	}
+
+	// Join the shards and write them
+	outfn := *outFile
+	if outfn == "" {
+		outfn = fname
+	}
+
+	fmt.Println("Writing data to", outfn)
+	f, err := os.Create(outfn)
+	checkErr(err)
+
+	shards, size, err = openInput(*dataShards, *parShards, fname)
+	checkErr(err)
+
+	// We don't know the exact filesize.
+	err = enc.Join(f, shards, int64(*dataShards)*size)
+	checkErr(err)
+}
+
+func openInput(dataShards, parShards int, fname string) (r []io.Reader, size int64, err error) {
+	// Create shards and load the data.
+	shards := make([]io.Reader, dataShards+parShards)
+	for i := range shards {
+		infn := fmt.Sprintf("%s.%d", fname, i)
+		fmt.Println("Opening", infn)
+		f, err := os.Open(infn)
+		if err != nil {
+			fmt.Println("Error reading file", err)
+			shards[i] = nil
+			continue
+		} else {
+			shards[i] = f
+		}
+		stat, err := f.Stat()
+		checkErr(err)
+		if stat.Size() > 0 {
+			size = stat.Size()
+		} else {
+			shards[i] = nil
+		}
+	}
+	return shards, size, nil
+}
+
+func checkErr(err error) {
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
+		os.Exit(2)
+	}
+}

+ 142 - 0
vendor/github.com/klauspost/reedsolomon/examples/stream-encoder.go

@@ -0,0 +1,142 @@
+//+build ignore
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+//
+// Simple stream encoder example
+//
+// The encoder encodes a single file into a number of shards
+// To reverse the process see "stream-decoder.go"
+//
+// To build an executable use:
+//
+// go build stream-encoder.go
+//
+// Simple Encoder/Decoder Shortcomings:
+// * If the file size of the input isn't divisible by the number of data shards
+//   the output will contain extra zeroes
+//
+// * If the shard numbers aren't the same for the decoder as for the
+//   encoder, invalid output will be generated.
+//
+// * If values have changed in a shard, it cannot be reconstructed.
+//
+// * If two shards have been swapped, reconstruction will always fail.
+//   You need to supply the shards in the same order as they were given to you.
+//
+// The solution for this is to save a metadata file containing:
+//
+// * File size.
+// * The number of data/parity shards.
+// * HASH of each shard.
+// * Order of the shards.
+//
+// If you save these properties, you should be able to detect file corruption
+// in a shard and be able to reconstruct your data if you have the needed number of shards left.
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"io"
+
+	"github.com/klauspost/reedsolomon"
+)
+
+var dataShards = flag.Int("data", 4, "Number of shards to split the data into, must be below 257.")
+var parShards = flag.Int("par", 2, "Number of parity shards")
+var outDir = flag.String("out", "", "Alternative output directory")
+
+func init() {
+	flag.Usage = func() {
+		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+		fmt.Fprintf(os.Stderr, "  %s [-flags] filename.ext\n\n", os.Args[0])
+		fmt.Fprintf(os.Stderr, "Valid flags:\n")
+		flag.PrintDefaults()
+	}
+}
+
+func main() {
+	// Parse command line parameters.
+	flag.Parse()
+	args := flag.Args()
+	if len(args) != 1 {
+		fmt.Fprintf(os.Stderr, "Error: No input filename given\n")
+		flag.Usage()
+		os.Exit(1)
+	}
+	if *dataShards > 256 {
+		fmt.Fprintf(os.Stderr, "Error: Too many data shards\n")
+		os.Exit(1)
+	}
+	fname := args[0]
+
+	// Create encoding matrix.
+	enc, err := reedsolomon.NewStream(*dataShards, *parShards)
+	checkErr(err)
+
+	fmt.Println("Opening", fname)
+	f, err := os.Open(fname)
+	checkErr(err)
+
+	instat, err := f.Stat()
+	checkErr(err)
+
+	shards := *dataShards + *parShards
+	out := make([]*os.File, shards)
+
+	// Create the resulting files.
+	dir, file := filepath.Split(fname)
+	if *outDir != "" {
+		dir = *outDir
+	}
+	for i := range out {
+		outfn := fmt.Sprintf("%s.%d", file, i)
+		fmt.Println("Creating", outfn)
+		out[i], err = os.Create(filepath.Join(dir, outfn))
+		checkErr(err)
+	}
+
+	// Split into files.
+	data := make([]io.Writer, *dataShards)
+	for i := range data {
+		data[i] = out[i]
+	}
+	// Do the split
+	err = enc.Split(f, data, instat.Size())
+	checkErr(err)
+
+	// Close and re-open the files.
+	input := make([]io.Reader, *dataShards)
+
+	for i := range data {
+		out[i].Close()
+		f, err := os.Open(out[i].Name())
+		checkErr(err)
+		input[i] = f
+		defer f.Close()
+	}
+
+	// Create parity output writers
+	parity := make([]io.Writer, *parShards)
+	for i := range parity {
+		parity[i] = out[*dataShards+i]
+		defer out[*dataShards+i].Close()
+	}
+
+	// Encode parity
+	err = enc.Encode(input, parity)
+	checkErr(err)
+	fmt.Printf("File split into %d data + %d parity shards.\n", *dataShards, *parShards)
+
+}
+
+func checkErr(err error) {
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
+		os.Exit(2)
+	}
+}

File diff suppressed because it is too large
+ 65 - 0
vendor/github.com/klauspost/reedsolomon/galois.go


+ 91 - 0
vendor/github.com/klauspost/reedsolomon/galois_amd64.go

@@ -0,0 +1,91 @@
+//+build !noasm
+//+build !appengine
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+
+package reedsolomon
+
+//go:noescape
+func galMulSSSE3(low, high, in, out []byte)
+
+//go:noescape
+func galMulSSSE3Xor(low, high, in, out []byte)
+
+//go:noescape
+func galMulAVX2Xor(low, high, in, out []byte)
+
+//go:noescape
+func galMulAVX2(low, high, in, out []byte)
+
+//go:noescape
+func sSE2XorSlice(in, out []byte)
+
+// This is what the assembler routines do in blocks of 16 bytes:
+/*
+func galMulSSSE3(low, high, in, out []byte) {
+	for n, input := range in {
+		l := input & 0xf
+		h := input >> 4
+		out[n] = low[l] ^ high[h]
+	}
+}
+
+func galMulSSSE3Xor(low, high, in, out []byte) {
+	for n, input := range in {
+		l := input & 0xf
+		h := input >> 4
+		out[n] ^= low[l] ^ high[h]
+	}
+}
+*/
+
+func galMulSlice(c byte, in, out []byte, ssse3, avx2 bool) {
+	var done int
+	if avx2 {
+		galMulAVX2(mulTableLow[c][:], mulTableHigh[c][:], in, out)
+		done = (len(in) >> 5) << 5
+	} else if ssse3 {
+		galMulSSSE3(mulTableLow[c][:], mulTableHigh[c][:], in, out)
+		done = (len(in) >> 4) << 4
+	}
+	remain := len(in) - done
+	if remain > 0 {
+		mt := mulTable[c]
+		for i := done; i < len(in); i++ {
+			out[i] = mt[in[i]]
+		}
+	}
+}
+
+func galMulSliceXor(c byte, in, out []byte, ssse3, avx2 bool) {
+	var done int
+	if avx2 {
+		galMulAVX2Xor(mulTableLow[c][:], mulTableHigh[c][:], in, out)
+		done = (len(in) >> 5) << 5
+	} else if ssse3 {
+		galMulSSSE3Xor(mulTableLow[c][:], mulTableHigh[c][:], in, out)
+		done = (len(in) >> 4) << 4
+	}
+	remain := len(in) - done
+	if remain > 0 {
+		mt := mulTable[c]
+		for i := done; i < len(in); i++ {
+			out[i] ^= mt[in[i]]
+		}
+	}
+}
+
+// slice galois add
+func sliceXor(in, out []byte, sse2 bool) {
+	var done int
+	if sse2 {
+		sSE2XorSlice(in, out)
+		done = (len(in) >> 4) << 4
+	}
+	remain := len(in) - done
+	if remain > 0 {
+		for i := done; i < len(in); i++ {
+			out[i] ^= in[i]
+		}
+	}
+}

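In both dispatch helpers the shift pair rounds the input length down to a whole number of SIMD blocks: (len(in) >> 5) << 5 clears the low five bits, yielding the largest multiple of 32 (the AVX2 register width in bytes) not exceeding the length, and the scalar table loop handles the tail. A quick worked check of that arithmetic:

n := 100
done := (n >> 5) << 5 // 96: three full 32-byte AVX2 blocks
remain := n - done    // 4 tail bytes fall through to the mulTable loop
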
+ 236 - 0
vendor/github.com/klauspost/reedsolomon/galois_amd64.s

@@ -0,0 +1,236 @@
+//+build !noasm !appengine
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+
+// Based on http://www.snia.org/sites/default/files2/SDC2013/presentations/NewThinking/EthanMiller_Screaming_Fast_Galois_Field%20Arithmetic_SIMD%20Instructions.pdf
+// and http://jerasure.org/jerasure/gf-complete/tree/master
+
+// func galMulSSSE3Xor(low, high, in, out []byte)
+TEXT ·galMulSSSE3Xor(SB), 7, $0
+	MOVQ   low+0(FP), SI     // SI: &low
+	MOVQ   high+24(FP), DX   // DX: &high
+	MOVOU  (SI), X6          // X6 low
+	MOVOU  (DX), X7          // X7: high
+	MOVQ   $15, BX           // BX: low mask
+	MOVQ   BX, X8
+	PXOR   X5, X5
+	MOVQ   in+48(FP), SI     // R11: &in
+	MOVQ   in_len+56(FP), R9 // R9: len(in)
+	MOVQ   out+72(FP), DX    // DX: &out
+	PSHUFB X5, X8            // X8: lomask (unpacked)
+	SHRQ   $4, R9            // len(in) / 16
+	MOVQ   SI, AX
+	MOVQ   DX, BX
+	ANDQ   $15, AX
+	ANDQ   $15, BX
+	CMPQ   R9, $0
+	JEQ    done_xor
+	ORQ    AX, BX
+	CMPQ   BX, $0
+	JNZ    loopback_xor
+
+loopback_xor_aligned:
+	MOVOA  (SI), X0             // in[x]
+	MOVOA  (DX), X4             // out[x]
+	MOVOA  X0, X1               // in[x]
+	MOVOA  X6, X2               // low copy
+	MOVOA  X7, X3               // high copy
+	PSRLQ  $4, X1               // X1: high input
+	PAND   X8, X0               // X0: low input
+	PAND   X8, X1               // X0: high input
+	PSHUFB X0, X2               // X2: mul low part
+	PSHUFB X1, X3               // X3: mul high part
+	PXOR   X2, X3               // X3: Result
+	PXOR   X4, X3               // X3: Result xor existing out
+	MOVOA  X3, (DX)             // Store
+	ADDQ   $16, SI              // in+=16
+	ADDQ   $16, DX              // out+=16
+	SUBQ   $1, R9
+	JNZ    loopback_xor_aligned
+	JMP    done_xor
+
+loopback_xor:
+	MOVOU  (SI), X0     // in[x]
+	MOVOU  (DX), X4     // out[x]
+	MOVOU  X0, X1       // in[x]
+	MOVOU  X6, X2       // low copy
+	MOVOU  X7, X3       // high copy
+	PSRLQ  $4, X1       // X1: high input
+	PAND   X8, X0       // X0: low input
+	PAND   X8, X1       // X0: high input
+	PSHUFB X0, X2       // X2: mul low part
+	PSHUFB X1, X3       // X3: mul high part
+	PXOR   X2, X3       // X3: Result
+	PXOR   X4, X3       // X3: Result xor existing out
+	MOVOU  X3, (DX)     // Store
+	ADDQ   $16, SI      // in+=16
+	ADDQ   $16, DX      // out+=16
+	SUBQ   $1, R9
+	JNZ    loopback_xor
+
+done_xor:
+	RET
+
+// func galMulSSSE3(low, high, in, out []byte)
+TEXT ·galMulSSSE3(SB), 7, $0
+	MOVQ   low+0(FP), SI     // SI: &low
+	MOVQ   high+24(FP), DX   // DX: &high
+	MOVOU  (SI), X6          // X6 low
+	MOVOU  (DX), X7          // X7: high
+	MOVQ   $15, BX           // BX: low mask
+	MOVQ   BX, X8
+	PXOR   X5, X5
+	MOVQ   in+48(FP), SI     // R11: &in
+	MOVQ   in_len+56(FP), R9 // R9: len(in)
+	MOVQ   out+72(FP), DX    // DX: &out
+	PSHUFB X5, X8            // X8: lomask (unpacked)
+	MOVQ   SI, AX
+	MOVQ   DX, BX
+	SHRQ   $4, R9            // len(in) / 16
+	ANDQ   $15, AX
+	ANDQ   $15, BX
+	CMPQ   R9, $0
+	JEQ    done
+	ORQ    AX, BX
+	CMPQ   BX, $0
+	JNZ    loopback
+
+loopback_aligned:
+	MOVOA  (SI), X0         // in[x]
+	MOVOA  X0, X1           // in[x]
+	MOVOA  X6, X2           // low copy
+	MOVOA  X7, X3           // high copy
+	PSRLQ  $4, X1           // X1: high input
+	PAND   X8, X0           // X0: low input
+	PAND   X8, X1           // X0: high input
+	PSHUFB X0, X2           // X2: mul low part
+	PSHUFB X1, X3           // X3: mul high part
+	PXOR   X2, X3           // X3: Result
+	MOVOA  X3, (DX)         // Store
+	ADDQ   $16, SI          // in+=16
+	ADDQ   $16, DX          // out+=16
+	SUBQ   $1, R9
+	JNZ    loopback_aligned
+	JMP    done
+
+loopback:
+	MOVOU  (SI), X0 // in[x]
+	MOVOU  X0, X1   // in[x]
+	MOVOA  X6, X2   // low copy
+	MOVOA  X7, X3   // high copy
+	PSRLQ  $4, X1   // X1: high input
+	PAND   X8, X0   // X0: low input
+	PAND   X8, X1   // X0: high input
+	PSHUFB X0, X2   // X2: mul low part
+	PSHUFB X1, X3   // X3: mul high part
+	PXOR   X2, X3   // X3: Result
+	MOVOU  X3, (DX) // Store
+	ADDQ   $16, SI  // in+=16
+	ADDQ   $16, DX  // out+=16
+	SUBQ   $1, R9
+	JNZ    loopback
+
+done:
+	RET
+
+// func galMulAVX2Xor(low, high, in, out []byte)
+TEXT ·galMulAVX2Xor(SB), 7, $0
+	MOVQ  low+0(FP), SI     // SI: &low
+	MOVQ  high+24(FP), DX   // DX: &high
+	MOVQ  $15, BX           // BX: low mask
+	MOVQ  BX, X5
+	MOVOU (SI), X6          // X6: low
+	MOVOU (DX), X7          // X7: high
+	MOVQ  in_len+56(FP), R9 // R9: len(in)
+
+	VINSERTI128  $1, X6, Y6, Y6 // low
+	VINSERTI128  $1, X7, Y7, Y7 // high
+	VPBROADCASTB X5, Y8         // Y8: lomask (unpacked)
+
+	SHRQ  $5, R9         // len(in) / 32
+	MOVQ  out+72(FP), DX // DX: &out
+	MOVQ  in+48(FP), SI  // SI: &in
+	TESTQ R9, R9
+	JZ    done_xor_avx2
+
+loopback_xor_avx2:
+	VMOVDQU (SI), Y0
+	VMOVDQU (DX), Y4
+	VPSRLQ  $4, Y0, Y1 // Y1: high input
+	VPAND   Y8, Y0, Y0 // Y0: low input
+	VPAND   Y8, Y1, Y1 // Y1: high input
+	VPSHUFB Y0, Y6, Y2 // Y2: mul low part
+	VPSHUFB Y1, Y7, Y3 // Y3: mul high part
+	VPXOR   Y3, Y2, Y3 // Y3: Result
+	VPXOR   Y4, Y3, Y4 // Y4: Result
+	VMOVDQU Y4, (DX)
+
+	ADDQ $32, SI           // in+=32
+	ADDQ $32, DX           // out+=32
+	SUBQ $1, R9
+	JNZ  loopback_xor_avx2
+
+done_xor_avx2:
+	VZEROUPPER
+	RET
+
+// func galMulAVX2(low, high, in, out []byte)
+TEXT ·galMulAVX2(SB), 7, $0
+	MOVQ  low+0(FP), SI     // SI: &low
+	MOVQ  high+24(FP), DX   // DX: &high
+	MOVQ  $15, BX           // BX: low mask
+	MOVQ  BX, X5
+	MOVOU (SI), X6          // X6: low
+	MOVOU (DX), X7          // X7: high
+	MOVQ  in_len+56(FP), R9 // R9: len(in)
+
+	VINSERTI128  $1, X6, Y6, Y6 // low
+	VINSERTI128  $1, X7, Y7, Y7 // high
+	VPBROADCASTB X5, Y8         // Y8: lomask (unpacked)
+
+	SHRQ  $5, R9         // len(in) / 32
+	MOVQ  out+72(FP), DX // DX: &out
+	MOVQ  in+48(FP), SI  // SI: &in
+	TESTQ R9, R9
+	JZ    done_avx2
+
+loopback_avx2:
+	VMOVDQU (SI), Y0
+	VPSRLQ  $4, Y0, Y1 // Y1: high input
+	VPAND   Y8, Y0, Y0 // Y0: low input
+	VPAND   Y8, Y1, Y1 // Y1: high input
+	VPSHUFB Y0, Y6, Y2 // Y2: mul low part
+	VPSHUFB Y1, Y7, Y3 // Y3: mul high part
+	VPXOR   Y3, Y2, Y4 // Y4: Result
+	VMOVDQU Y4, (DX)
+
+	ADDQ $32, SI       // in+=32
+	ADDQ $32, DX       // out+=32
+	SUBQ $1, R9
+	JNZ  loopback_avx2
+
+done_avx2:
+	VZEROUPPER
+	RET
+
+// func sSE2XorSlice(in, out []byte)
+TEXT ·sSE2XorSlice(SB), 7, $0
+	MOVQ in+0(FP), SI     // SI: &in
+	MOVQ in_len+8(FP), R9 // R9: len(in)
+	MOVQ out+24(FP), DX   // DX: &out
+	SHRQ $4, R9           // len(in) / 16
+	CMPQ R9, $0
+	JEQ  done_xor_sse2
+
+loopback_xor_sse2:
+	MOVOU (SI), X0          // in[x]
+	MOVOU (DX), X1          // out[x]
+	PXOR  X0, X1
+	MOVOU X1, (DX)
+	ADDQ  $16, SI           // in+=16
+	ADDQ  $16, DX           // out+=16
+	SUBQ  $1, R9
+	JNZ   loopback_xor_sse2
+
+done_xor_sse2:
+	RET

+ 48 - 0
vendor/github.com/klauspost/reedsolomon/galois_arm64.go

@@ -0,0 +1,48 @@
+//+build !noasm
+//+build !appengine
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+// Copyright 2017, Minio, Inc.
+
+package reedsolomon
+
+//go:noescape
+func galMulNEON(c uint64, in, out []byte)
+
+//go:noescape
+func galMulXorNEON(c uint64, in, out []byte)
+
+func galMulSlice(c byte, in, out []byte, ssse3, avx2 bool) {
+	var done int
+	galMulNEON(uint64(c), in, out)
+	done = (len(in) >> 5) << 5
+
+	remain := len(in) - done
+	if remain > 0 {
+		mt := mulTable[c]
+		for i := done; i < len(in); i++ {
+			out[i] = mt[in[i]]
+		}
+	}
+}
+
+func galMulSliceXor(c byte, in, out []byte, ssse3, avx2 bool) {
+	var done int
+	galMulXorNEON(uint64(c), in, out)
+	done = (len(in) >> 5) << 5
+
+	remain := len(in) - done
+	if remain > 0 {
+		mt := mulTable[c]
+		for i := done; i < len(in); i++ {
+			out[i] ^= mt[in[i]]
+		}
+	}
+}
+
+// slice galois add
+func sliceXor(in, out []byte, sse2 bool) {
+	for n, input := range in {
+		out[n] ^= input
+	}
+}

+ 141 - 0
vendor/github.com/klauspost/reedsolomon/galois_arm64.s

@@ -0,0 +1,141 @@
+//+build !noasm !appengine
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+// Copyright 2017, Minio, Inc.
+
+// Use github.com/minio/asm2plan9s on this file to assemble ARM instructions to
+// the opcodes of their Plan9 equivalents
+
+// polynomial multiplication
+#define POLYNOMIAL_MULTIPLICATION \
+	WORD $0x0e3ce340 \ // pmull  v0.8h,v26.8b,v28.8b
+	WORD $0x4e3ce346 \ // pmull2 v6.8h,v26.16b,v28.16b
+	WORD $0x0e3ce36c \ // pmull  v12.8h,v27.8b,v28.8b
+	WORD $0x4e3ce372 // pmull2 v18.8h,v27.16b,v28.16b
+
+// first reduction
+#define FIRST_REDUCTION \
+	WORD $0x0f088402 \ // shrn  v2.8b, v0.8h, #8
+	WORD $0x0f0884c8 \ // shrn  v8.8b, v6.8h, #8
+	WORD $0x0f08858e \ // shrn  v14.8b, v12.8h, #8
+	WORD $0x0f088654 \ // shrn  v20.8b, v18.8h, #8
+	WORD $0x0e22e3c3 \ // pmull v3.8h,v30.8b,v2.8b
+	WORD $0x0e28e3c9 \ // pmull v9.8h,v30.8b,v8.8b
+	WORD $0x0e2ee3cf \ // pmull v15.8h,v30.8b,v14.8b
+	WORD $0x0e34e3d5 \ // pmull v21.8h,v30.8b,v20.8b
+	WORD $0x6e201c60 \ // eor   v0.16b,v3.16b,v0.16b
+	WORD $0x6e261d26 \ // eor   v6.16b,v9.16b,v6.16b
+	WORD $0x6e2c1dec \ // eor   v12.16b,v15.16b,v12.16b
+	WORD $0x6e321eb2 // eor   v18.16b,v21.16b,v18.16b
+
+// second reduction
+#define SECOND_REDUCTION \
+	WORD $0x0f088404 \ // shrn  v4.8b, v0.8h, #8
+	WORD $0x0f0884ca \ // shrn  v10.8b, v6.8h, #8
+	WORD $0x0f088590 \ // shrn  v16.8b, v12.8h, #8
+	WORD $0x0f088656 \ // shrn  v22.8b, v18.8h, #8
+	WORD $0x6e241c44 \ // eor   v4.16b,v2.16b,v4.16b
+	WORD $0x6e2a1d0a \ // eor   v10.16b,v8.16b,v10.16b
+	WORD $0x6e301dd0 \ // eor   v16.16b,v14.16b,v16.16b
+	WORD $0x6e361e96 \ // eor   v22.16b,v20.16b,v22.16b
+	WORD $0x0e24e3c5 \ // pmull v5.8h,v30.8b,v4.8b
+	WORD $0x0e2ae3cb \ // pmull v11.8h,v30.8b,v10.8b
+	WORD $0x0e30e3d1 \ // pmull v17.8h,v30.8b,v16.8b
+	WORD $0x0e36e3d7 \ // pmull v23.8h,v30.8b,v22.8b
+	WORD $0x6e201ca0 \ // eor   v0.16b,v5.16b,v0.16b
+	WORD $0x6e261d61 \ // eor   v1.16b,v11.16b,v6.16b
+	WORD $0x6e2c1e22 \ // eor   v2.16b,v17.16b,v12.16b
+	WORD $0x6e321ee3 // eor   v3.16b,v23.16b,v18.16b
+
+// func galMulNEON(c uint64, in, out []byte)
+TEXT ·galMulNEON(SB), 7, $0
+	MOVD c+0(FP), R0
+	MOVD in_base+8(FP), R1
+	MOVD in_len+16(FP), R2   // length of message
+	MOVD out_base+32(FP), R5
+	SUBS $32, R2
+	BMI  complete
+
+	// Load constants table pointer
+	MOVD $·constants(SB), R3
+
+	// and load constants into v30 & v31
+	WORD $0x4c40a07e // ld1    {v30.16b-v31.16b}, [x3]
+
+	WORD $0x4e010c1c // dup    v28.16b, w0
+
+loop:
+	// Main loop
+	WORD $0x4cdfa83a // ld1   {v26.4s-v27.4s}, [x1], #32
+
+	POLYNOMIAL_MULTIPLICATION
+
+	FIRST_REDUCTION
+
+	SECOND_REDUCTION
+
+	// combine results
+	WORD $0x4e1f2000 // tbl v0.16b,{v0.16b,v1.16b},v31.16b
+	WORD $0x4e1f2041 // tbl v1.16b,{v2.16b,v3.16b},v31.16b
+
+	// Store result
+	WORD $0x4c9faca0 // st1    {v0.2d-v1.2d}, [x5], #32
+
+	SUBS $32, R2
+	BPL  loop
+
+complete:
+	RET
+
+// func galMulXorNEON(c uint64, in, out []byte)
+TEXT ·galMulXorNEON(SB), 7, $0
+	MOVD c+0(FP), R0
+	MOVD in_base+8(FP), R1
+	MOVD in_len+16(FP), R2   // length of message
+	MOVD out_base+32(FP), R5
+	SUBS $32, R2
+	BMI  completeXor
+
+	// Load constants table pointer
+	MOVD $·constants(SB), R3
+
+	// and load constants into v30 & v31
+	WORD $0x4c40a07e // ld1    {v30.16b-v31.16b}, [x3]
+
+	WORD $0x4e010c1c // dup    v28.16b, w0
+
+loopXor:
+	// Main loop
+	WORD $0x4cdfa83a // ld1   {v26.4s-v27.4s}, [x1], #32
+	WORD $0x4c40a8b8 // ld1   {v24.4s-v25.4s}, [x5]
+
+	POLYNOMIAL_MULTIPLICATION
+
+	FIRST_REDUCTION
+
+	SECOND_REDUCTION
+
+	// combine results
+	WORD $0x4e1f2000 // tbl v0.16b,{v0.16b,v1.16b},v31.16b
+	WORD $0x4e1f2041 // tbl v1.16b,{v2.16b,v3.16b},v31.16b
+
+	// Xor result and store
+	WORD $0x6e381c00 // eor v0.16b,v0.16b,v24.16b
+	WORD $0x6e391c21 // eor v1.16b,v1.16b,v25.16b
+	WORD $0x4c9faca0 // st1   {v0.2d-v1.2d}, [x5], #32
+
+	SUBS $32, R2
+	BPL  loopXor
+
+completeXor:
+	RET
+
+// Constants table
+//   generating polynomial is 29 (= 0x1d)
+DATA ·constants+0x0(SB)/8, $0x1d1d1d1d1d1d1d1d
+DATA ·constants+0x8(SB)/8, $0x1d1d1d1d1d1d1d1d
+//   constant for TBL instruction
+DATA ·constants+0x10(SB)/8, $0x0e0c0a0806040200
+DATA ·constants+0x18(SB)/8, $0x1e1c1a1816141210
+
+GLOBL ·constants(SB), 8, $32

+ 27 - 0
vendor/github.com/klauspost/reedsolomon/galois_noasm.go

@@ -0,0 +1,27 @@
+//+build !amd64 noasm appengine
+//+build !arm64 noasm appengine
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+
+package reedsolomon
+
+func galMulSlice(c byte, in, out []byte, ssse3, avx2 bool) {
+	mt := mulTable[c]
+	for n, input := range in {
+		out[n] = mt[input]
+	}
+}
+
+func galMulSliceXor(c byte, in, out []byte, ssse3, avx2 bool) {
+	mt := mulTable[c]
+	for n, input := range in {
+		out[n] ^= mt[input]
+	}
+}
+
+// slice galois add
+func sliceXor(in, out []byte, sse2 bool) {
+	for n, input := range in {
+		out[n] ^= input
+	}
+}

+ 132 - 0
vendor/github.com/klauspost/reedsolomon/gentables.go

@@ -0,0 +1,132 @@
+//+build ignore
+
+package main
+
+import (
+	"fmt"
+)
+
+var logTable = [fieldSize]int16{
+	-1, 0, 1, 25, 2, 50, 26, 198,
+	3, 223, 51, 238, 27, 104, 199, 75,
+	4, 100, 224, 14, 52, 141, 239, 129,
+	28, 193, 105, 248, 200, 8, 76, 113,
+	5, 138, 101, 47, 225, 36, 15, 33,
+	53, 147, 142, 218, 240, 18, 130, 69,
+	29, 181, 194, 125, 106, 39, 249, 185,
+	201, 154, 9, 120, 77, 228, 114, 166,
+	6, 191, 139, 98, 102, 221, 48, 253,
+	226, 152, 37, 179, 16, 145, 34, 136,
+	54, 208, 148, 206, 143, 150, 219, 189,
+	241, 210, 19, 92, 131, 56, 70, 64,
+	30, 66, 182, 163, 195, 72, 126, 110,
+	107, 58, 40, 84, 250, 133, 186, 61,
+	202, 94, 155, 159, 10, 21, 121, 43,
+	78, 212, 229, 172, 115, 243, 167, 87,
+	7, 112, 192, 247, 140, 128, 99, 13,
+	103, 74, 222, 237, 49, 197, 254, 24,
+	227, 165, 153, 119, 38, 184, 180, 124,
+	17, 68, 146, 217, 35, 32, 137, 46,
+	55, 63, 209, 91, 149, 188, 207, 205,
+	144, 135, 151, 178, 220, 252, 190, 97,
+	242, 86, 211, 171, 20, 42, 93, 158,
+	132, 60, 57, 83, 71, 109, 65, 162,
+	31, 45, 67, 216, 183, 123, 164, 118,
+	196, 23, 73, 236, 127, 12, 111, 246,
+	108, 161, 59, 82, 41, 157, 85, 170,
+	251, 96, 134, 177, 187, 204, 62, 90,
+	203, 89, 95, 176, 156, 169, 160, 81,
+	11, 245, 22, 235, 122, 117, 44, 215,
+	79, 174, 213, 233, 230, 231, 173, 232,
+	116, 214, 244, 234, 168, 80, 88, 175,
+}
+
+const (
+	// The number of elements in the field.
+	fieldSize = 256
+
+	// The polynomial used to generate the logarithm table.
+	//
+	// There are a number of polynomials that work to generate
+	// a Galois field of 256 elements.  The choice is arbitrary,
+	// and we just use the first one.
+	//
+	// The possibilities are: 29, 43, 45, 77, 95, 99, 101, 105,
+	// 113, 135, 141, 169, 195, 207, 231, and 245.
+	generatingPolynomial = 29
+)
+
+func main() {
+	t := generateExpTable()
+	fmt.Printf("var expTable = %#v\n", t)
+	//t2 := generateMulTableSplit(t)
+	//fmt.Printf("var mulTable = %#v\n", t2)
+	low, high := generateMulTableHalf(t)
+	fmt.Printf("var mulTableLow = %#v\n", low)
+	fmt.Printf("var mulTableHigh = %#v\n", high)
+}
+
+/**
+ * Generates the inverse log table.
+ */
+func generateExpTable() []byte {
+	result := make([]byte, fieldSize*2-2)
+	for i := 1; i < fieldSize; i++ {
+		log := logTable[i]
+		result[log] = byte(i)
+		result[log+fieldSize-1] = byte(i)
+	}
+	return result
+}
+
+func generateMulTable(expTable []byte) []byte {
+	result := make([]byte, 256*256)
+	for v := range result {
+		a := byte(v & 0xff)
+		b := byte(v >> 8)
+		if a == 0 || b == 0 {
+			result[v] = 0
+			continue
+		}
+		logA := int(logTable[a])
+		logB := int(logTable[b])
+		result[v] = expTable[logA+logB]
+	}
+	return result
+}
+
+func generateMulTableSplit(expTable []byte) [256][256]byte {
+	var result [256][256]byte
+	for a := range result {
+		for b := range result[a] {
+			if a == 0 || b == 0 {
+				result[a][b] = 0
+				continue
+			}
+			logA := int(logTable[a])
+			logB := int(logTable[b])
+			result[a][b] = expTable[logA+logB]
+		}
+	}
+	return result
+}
+
+func generateMulTableHalf(expTable []byte) (low [256][16]byte, high [256][16]byte) {
+	for a := range low {
+		for b := range low {
+			result := 0
+			if !(a == 0 || b == 0) {
+				logA := int(logTable[a])
+				logB := int(logTable[b])
+				result = int(expTable[logA+logB])
+			}
+			if (b & 0xf) == b {
+				low[a][b] = byte(result)
+			}
+			if (b & 0xf0) == b {
+				high[a][b>>4] = byte(result)
+			}
+		}
+	}
+	return
+}

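The generated tables turn GF(256) multiplication into addition of discrete logarithms: for non-zero a and b, a*b = exp[log(a) + log(b)]. generateExpTable writes each value twice (fieldSize*2-2 = 510 entries), so the summed index, at most 254 + 254 = 508, never needs a modulo-255 reduction. A minimal sketch of a lookup-based multiply built on these tables; gfMul is an illustrative name, not the package's own:

// gfMul multiplies two GF(2^8) elements via the log/exp tables above.
func gfMul(a, b byte) byte {
	if a == 0 || b == 0 {
		return 0 // zero has no logarithm; any product with zero is zero
	}
	// The doubled exp table makes this index safe without a mod 255.
	return expTable[int(logTable[a])+int(logTable[b])]
}
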
+ 160 - 0
vendor/github.com/klauspost/reedsolomon/inversion_tree.go

@@ -0,0 +1,160 @@
+/**
+ * A thread-safe tree which caches inverted matrices.
+ *
+ * Copyright 2016, Peter Collins
+ */
+
+package reedsolomon
+
+import (
+	"errors"
+	"sync"
+)
+
+// The tree uses a Reader-Writer mutex to make it thread-safe
+// when accessing cached matrices and inserting new ones.
+type inversionTree struct {
+	mutex *sync.RWMutex
+	root  inversionNode
+}
+
+type inversionNode struct {
+	matrix   matrix
+	children []*inversionNode
+}
+
+// newInversionTree initializes a tree for storing inverted matrices.
+// Note that the root node is the identity matrix as it implies
+// there were no errors with the original data.
+func newInversionTree(dataShards, parityShards int) inversionTree {
+	identity, _ := identityMatrix(dataShards)
+	root := inversionNode{
+		matrix:   identity,
+		children: make([]*inversionNode, dataShards+parityShards),
+	}
+	return inversionTree{
+		mutex: &sync.RWMutex{},
+		root:  root,
+	}
+}
+
+// GetInvertedMatrix returns the cached inverted matrix or nil if it
+// is not found in the tree keyed on the indices of invalid rows.
+func (t inversionTree) GetInvertedMatrix(invalidIndices []int) matrix {
+	// Lock the tree for reading before accessing the tree.
+	t.mutex.RLock()
+	defer t.mutex.RUnlock()
+
+	// If no invalid indices were given we should return the root
+	// identity matrix.
+	if len(invalidIndices) == 0 {
+		return t.root.matrix
+	}
+
+	// Recursively search for the inverted matrix in the tree, passing in
+	// 0 as the parent index as we start at the root of the tree.
+	return t.root.getInvertedMatrix(invalidIndices, 0)
+}
+
+// errAlreadySet is returned if the root node matrix is overwritten
+var errAlreadySet = errors.New("the root node identity matrix is already set")
+
+// InsertInvertedMatrix inserts a new inverted matrix into the tree
+// keyed by the indices of invalid rows.  The total number of shards
+// is required for creating the proper length lists of child nodes for
+// each node.
+func (t inversionTree) InsertInvertedMatrix(invalidIndices []int, matrix matrix, shards int) error {
+	// If no invalid indices were given then we are done because the
+	// root node is already set with the identity matrix.
+	if len(invalidIndices) == 0 {
+		return errAlreadySet
+	}
+
+	if !matrix.IsSquare() {
+		return errNotSquare
+	}
+
+	// Lock the tree for writing and reading before accessing the tree.
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	// Recursively create nodes for the inverted matrix in the tree until
+	// we reach the node to insert the matrix to.  We start by passing in
+	// 0 as the parent index as we start at the root of the tree.
+	t.root.insertInvertedMatrix(invalidIndices, matrix, shards, 0)
+
+	return nil
+}
+
+func (n inversionNode) getInvertedMatrix(invalidIndices []int, parent int) matrix {
+	// Get the child node to search next from the list of children.  The
+	// list of children starts relative to the parent index passed in
+	// because the indices of invalid rows is sorted (by default).  As we
+	// search recursively, the first invalid index gets popped off the list,
+	// so when searching through the list of children, use that first invalid
+	// index to find the child node.
+	firstIndex := invalidIndices[0]
+	node := n.children[firstIndex-parent]
+
+	// If the child node doesn't exist in the list yet, fail fast by
+	// returning, so we can construct and insert the proper inverted matrix.
+	if node == nil {
+		return nil
+	}
+
+	// If there's more than one invalid index left in the list we should
+	// keep searching recursively.
+	if len(invalidIndices) > 1 {
+		// Search recursively on the child node by passing in the invalid indices
+		// with the first index popped off the front.  Also the parent index to
+		// pass down is the first index plus one.
+		return node.getInvertedMatrix(invalidIndices[1:], firstIndex+1)
+	}
+	// If there aren't any more invalid indices to search, we've found our
+	// node.  Return it, however keep in mind that the matrix could still be
+	// nil because intermediary nodes in the tree are created sometimes with
+	// their inversion matrices uninitialized.
+	return node.matrix
+}
+
+func (n inversionNode) insertInvertedMatrix(invalidIndices []int, matrix matrix, shards, parent int) {
+	// As above, get the child node to search next from the list of children.
+	// The list of children starts relative to the parent index passed in
+	// because the indices of invalid rows is sorted (by default).  As we
+	// search recursively, the first invalid index gets popped off the list,
+	// so when searching through the list of children, use that first invalid
+	// index to find the child node.
+	firstIndex := invalidIndices[0]
+	node := n.children[firstIndex-parent]
+
+	// If the child node doesn't exist in the list yet, create a new
+	// node because we have the writer lock and add it to the list
+	// of children.
+	if node == nil {
+		// Make the length of the list of children equal to the number
+		// of shards minus the first invalid index because the list of
+		// invalid indices is sorted, so only this length of errors
+		// are possible in the tree.
+		node = &inversionNode{
+			children: make([]*inversionNode, shards-firstIndex),
+		}
+		// Insert the new node into the tree at the first index relative
+		// to the parent index that was given in this recursive call.
+		n.children[firstIndex-parent] = node
+	}
+
+	// If there's more than one invalid index left in the list we should
+	// keep searching recursively in order to find the node to add our
+	// matrix.
+	if len(invalidIndices) > 1 {
+		// As above, search recursively on the child node by passing in
+		// the invalid indices with the first index popped off the front.
+		// Also the total number of shards and parent index are passed down
+		// which is equal to the first index plus one.
+		node.insertInvertedMatrix(invalidIndices[1:], matrix, shards, firstIndex+1)
+	} else {
+		// If there aren't any more invalid indices to search, we've found our
+		// node.  Cache the inverted matrix in this node.
+		node.matrix = matrix
+	}
+}

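A minimal within-package sketch of the intended query-then-fill pattern; the shard counts are illustrative, and decodeMatrix is a hypothetical stand-in for whatever sub-matrix the caller needs inverted:

tree := newInversionTree(4, 2) // 4 data + 2 parity shards
invalid := []int{1, 3}         // indices of missing rows, sorted ascending

if cached := tree.GetInvertedMatrix(invalid); cached == nil {
	// Not cached yet: invert once, then remember it for the next
	// reconstruction with the same failure pattern.
	inverted, err := decodeMatrix.Invert() // decodeMatrix: hypothetical
	if err != nil {
		return err
	}
	_ = tree.InsertInvertedMatrix(invalid, inverted, 6) // 6 = total shards
}
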
+ 279 - 0
vendor/github.com/klauspost/reedsolomon/matrix.go

@@ -0,0 +1,279 @@
+/**
+ * Matrix Algebra over an 8-bit Galois Field
+ *
+ * Copyright 2015, Klaus Post
+ * Copyright 2015, Backblaze, Inc.
+ */
+
+package reedsolomon
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// byte[row][col]
+type matrix [][]byte
+
+// newMatrix returns a matrix of zeros.
+func newMatrix(rows, cols int) (matrix, error) {
+	if rows <= 0 {
+		return nil, errInvalidRowSize
+	}
+	if cols <= 0 {
+		return nil, errInvalidColSize
+	}
+
+	m := matrix(make([][]byte, rows))
+	for i := range m {
+		m[i] = make([]byte, cols)
+	}
+	return m, nil
+}
+
+// NewMatrixData initializes a matrix with the given row-major data.
+// Note that data is not copied from input.
+func newMatrixData(data [][]byte) (matrix, error) {
+	m := matrix(data)
+	err := m.Check()
+	if err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// IdentityMatrix returns an identity matrix of the given size.
+func identityMatrix(size int) (matrix, error) {
+	m, err := newMatrix(size, size)
+	if err != nil {
+		return nil, err
+	}
+	for i := range m {
+		m[i][i] = 1
+	}
+	return m, nil
+}
+
+// errInvalidRowSize will be returned if attempting to create a matrix with negative or zero row number.
+var errInvalidRowSize = errors.New("invalid row size")
+
+// errInvalidColSize will be returned if attempting to create a matrix with negative or zero column number.
+var errInvalidColSize = errors.New("invalid column size")
+
+// errColSizeMismatch is returned if the size of matrix columns mismatch.
+var errColSizeMismatch = errors.New("column size is not the same for all rows")
+
+func (m matrix) Check() error {
+	rows := len(m)
+	if rows <= 0 {
+		return errInvalidRowSize
+	}
+	cols := len(m[0])
+	if cols <= 0 {
+		return errInvalidColSize
+	}
+
+	for _, col := range m {
+		if len(col) != cols {
+			return errColSizeMismatch
+		}
+	}
+	return nil
+}
+
+// String returns a human-readable string of the matrix contents.
+//
+// Example: [[1, 2], [3, 4]]
+func (m matrix) String() string {
+	rowOut := make([]string, 0, len(m))
+	for _, row := range m {
+		colOut := make([]string, 0, len(row))
+		for _, col := range row {
+			colOut = append(colOut, strconv.Itoa(int(col)))
+		}
+		rowOut = append(rowOut, "["+strings.Join(colOut, ", ")+"]")
+	}
+	return "[" + strings.Join(rowOut, ", ") + "]"
+}
+
+// Multiply multiplies this matrix (the one on the left) by another
+// matrix (the one on the right) and returns a new matrix with the result.
+func (m matrix) Multiply(right matrix) (matrix, error) {
+	if len(m[0]) != len(right) {
+		return nil, fmt.Errorf("columns on left (%d) is different than rows on right (%d)", len(m[0]), len(right))
+	}
+	result, _ := newMatrix(len(m), len(right[0]))
+	for r, row := range result {
+		for c := range row {
+			var value byte
+			for i := range m[0] {
+				value ^= galMultiply(m[r][i], right[i][c])
+			}
+			result[r][c] = value
+		}
+	}
+	return result, nil
+}
+
+// Augment returns the concatenation of this matrix and the matrix on the right.
+func (m matrix) Augment(right matrix) (matrix, error) {
+	if len(m) != len(right) {
+		return nil, errMatrixSize
+	}
+
+	result, _ := newMatrix(len(m), len(m[0])+len(right[0]))
+	for r, row := range m {
+		for c := range row {
+			result[r][c] = m[r][c]
+		}
+		cols := len(m[0])
+		for c := range right[0] {
+			result[r][cols+c] = right[r][c]
+		}
+	}
+	return result, nil
+}
+
+// errMatrixSize is returned if the matrix dimensions don't match.
+var errMatrixSize = errors.New("matrix sizes do not match")
+
+func (m matrix) SameSize(n matrix) error {
+	if len(m) != len(n) {
+		return errMatrixSize
+	}
+	for i := range m {
+		if len(m[i]) != len(n[i]) {
+			return errMatrixSize
+		}
+	}
+	return nil
+}
+
+// Returns a part of this matrix. Data is copied.
+func (m matrix) SubMatrix(rmin, cmin, rmax, cmax int) (matrix, error) {
+	result, err := newMatrix(rmax-rmin, cmax-cmin)
+	if err != nil {
+		return nil, err
+	}
+	// OPTME: If used heavily, use copy function to copy slice
+	for r := rmin; r < rmax; r++ {
+		for c := cmin; c < cmax; c++ {
+			result[r-rmin][c-cmin] = m[r][c]
+		}
+	}
+	return result, nil
+}
+
+// SwapRows Exchanges two rows in the matrix.
+func (m matrix) SwapRows(r1, r2 int) error {
+	if r1 < 0 || len(m) <= r1 || r2 < 0 || len(m) <= r2 {
+		return errInvalidRowSize
+	}
+	m[r2], m[r1] = m[r1], m[r2]
+	return nil
+}
+
+// IsSquare will return true if the matrix is square
+// and false otherwise.
+func (m matrix) IsSquare() bool {
+	return len(m) == len(m[0])
+}
+
+// errSingular is returned if the matrix is singular and cannot be inverted
+var errSingular = errors.New("matrix is singular")
+
+// errNotSquare is returned if attempting to inverse a non-square matrix.
+var errNotSquare = errors.New("only square matrices can be inverted")
+
+// Invert returns the inverse of this matrix.
+// Returns errSingular when the matrix is singular and doesn't have an inverse.
+// The matrix must be square, otherwise errNotSquare is returned.
+func (m matrix) Invert() (matrix, error) {
+	if !m.IsSquare() {
+		return nil, errNotSquare
+	}
+
+	size := len(m)
+	work, _ := identityMatrix(size)
+	work, _ = m.Augment(work)
+
+	err := work.gaussianElimination()
+	if err != nil {
+		return nil, err
+	}
+
+	return work.SubMatrix(0, size, size, size*2)
+}
+
+func (m matrix) gaussianElimination() error {
+	rows := len(m)
+	columns := len(m[0])
+	// Clear out the part below the main diagonal and scale the main
+	// diagonal to be 1.
+	for r := 0; r < rows; r++ {
+		// If the element on the diagonal is 0, find a row below
+		// that has a non-zero and swap them.
+		if m[r][r] == 0 {
+			for rowBelow := r + 1; rowBelow < rows; rowBelow++ {
+				if m[rowBelow][r] != 0 {
+					m.SwapRows(r, rowBelow)
+					break
+				}
+			}
+		}
+		// If we couldn't find one, the matrix is singular.
+		if m[r][r] == 0 {
+			return errSingular
+		}
+		// Scale to 1.
+		if m[r][r] != 1 {
+			scale := galDivide(1, m[r][r])
+			for c := 0; c < columns; c++ {
+				m[r][c] = galMultiply(m[r][c], scale)
+			}
+		}
+		// Make everything below the 1 be a 0 by subtracting
+		// a multiple of it.  (Subtraction and addition are
+		// both exclusive or in the Galois field.)
+		for rowBelow := r + 1; rowBelow < rows; rowBelow++ {
+			if m[rowBelow][r] != 0 {
+				scale := m[rowBelow][r]
+				for c := 0; c < columns; c++ {
+					m[rowBelow][c] ^= galMultiply(scale, m[r][c])
+				}
+			}
+		}
+	}
+
+	// Now clear the part above the main diagonal.
+	for d := 0; d < rows; d++ {
+		for rowAbove := 0; rowAbove < d; rowAbove++ {
+			if m[rowAbove][d] != 0 {
+				scale := m[rowAbove][d]
+				for c := 0; c < columns; c++ {
+					m[rowAbove][c] ^= galMultiply(scale, m[d][c])
+				}
+
+			}
+		}
+	}
+	return nil
+}
+
+// Create a Vandermonde matrix, which is guaranteed to have the
+// property that any subset of rows that forms a square matrix
+// is invertible.
+func vandermonde(rows, cols int) (matrix, error) {
+	result, err := newMatrix(rows, cols)
+	if err != nil {
+		return nil, err
+	}
+	for r, row := range result {
+		for c := range row {
+			result[r][c] = galExp(byte(r), c)
+		}
+	}
+	return result, nil
+}

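Gaussian elimination over GF(2^8) behaves like the familiar real-valued algorithm, except that addition and subtraction are both XOR. A small within-package sketch exercising Invert, with values checked by hand:

// In GF(2^8) addition is XOR, so 1+1 = 0 and this matrix is its own inverse.
m, _ := newMatrixData([][]byte{
	{1, 1},
	{0, 1},
})
inv, err := m.Invert() // Gauss-Jordan on [m | I], then take the right half
if err != nil {
	// errSingular or errNotSquare
}
// inv equals [[1, 1], [0, 1]] again; inv.Multiply(m) yields the identity.
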
+ 111 - 0
vendor/github.com/klauspost/reedsolomon/options.go

@@ -0,0 +1,111 @@
+package reedsolomon
+
+import (
+	"runtime"
+
+	"github.com/klauspost/cpuid"
+)
+
+// Option allows to override processing parameters.
+type Option func(*options)
+
+type options struct {
+	maxGoroutines              int
+	minSplitSize               int
+	useAVX2, useSSSE3, useSSE2 bool
+	usePAR1Matrix              bool
+	useCauchy                  bool
+	shardSize                  int
+}
+
+var defaultOptions = options{
+	maxGoroutines: 384,
+	minSplitSize:  1024,
+}
+
+func init() {
+	if runtime.GOMAXPROCS(0) <= 1 {
+		defaultOptions.maxGoroutines = 1
+	}
+	// Detect CPU capabilities.
+	defaultOptions.useSSSE3 = cpuid.CPU.SSSE3()
+	defaultOptions.useAVX2 = cpuid.CPU.AVX2()
+	defaultOptions.useSSE2 = cpuid.CPU.SSE2()
+}
+
+// WithMaxGoroutines sets the maximum number of goroutines used for encoding & decoding.
+// Jobs will be split into this many parts, unless each goroutine would have to process
+// less than minSplitSize bytes (set with WithMinSplitSize).
+// For the best speed, keep this well above the GOMAXPROCS number for more fine grained
+// scheduling.
+// If n <= 0, it is ignored.
+func WithMaxGoroutines(n int) Option {
+	return func(o *options) {
+		if n > 0 {
+			o.maxGoroutines = n
+		}
+	}
+}
+
+// WithAutoGoroutines will adjust the number of goroutines for optimal speed with a
+// specific shard size.
+// Send in the shard size you expect to send. Other shard sizes will work, but may not
+// run at the optimal speed.
+// Overwrites WithMaxGoroutines.
+// If shardSize <= 0, it is ignored.
+func WithAutoGoroutines(shardSize int) Option {
+	return func(o *options) {
+		o.shardSize = shardSize
+	}
+}
+
+// WithMinSplitSize is the minimum encoding size in bytes per goroutine.
+// See WithMaxGoroutines on how jobs are split.
+// If n <= 0, it is ignored.
+func WithMinSplitSize(n int) Option {
+	return func(o *options) {
+		if n > 0 {
+			o.minSplitSize = n
+		}
+	}
+}
+
+func withSSE3(enabled bool) Option {
+	return func(o *options) {
+		o.useSSSE3 = enabled
+	}
+}
+
+func withAVX2(enabled bool) Option {
+	return func(o *options) {
+		o.useAVX2 = enabled
+	}
+}
+
+func withSSE2(enabled bool) Option {
+	return func(o *options) {
+		o.useSSE2 = enabled
+	}
+}
+
+// WithPAR1Matrix causes the encoder to build the matrix the way PARv1
+// does. Note that the method they use is buggy, and may lead to cases
+// where recovery is impossible, even if there are enough parity
+// shards.
+func WithPAR1Matrix() Option {
+	return func(o *options) {
+		o.usePAR1Matrix = true
+		o.useCauchy = false
+	}
+}
+
+// WithCauchyMatrix will make the encoder build a Cauchy style matrix.
+// The output of this is not compatible with the standard output.
+// A Cauchy matrix is faster to generate. This does not affect data throughput,
+// but will result in slightly faster start-up time.
+func WithCauchyMatrix() Option {
+	return func(o *options) {
+		o.useCauchy = true
+		o.usePAR1Matrix = false
+	}
+}

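These options compose at construction time. A hedged usage sketch, assuming New accepts variadic Options as the Option type suggests; the shard counts and values are illustrative:

enc, err := reedsolomon.New(10, 3,
	reedsolomon.WithMaxGoroutines(8), // cap encode/decode parallelism
	reedsolomon.WithCauchyMatrix(),   // faster matrix construction at startup
)
if err != nil {
	log.Fatal(err)
}
_ = enc // use enc.Encode / enc.Verify / enc.Reconstruct as usual
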
+ 884 - 0
vendor/github.com/klauspost/reedsolomon/reedsolomon.go

@@ -0,0 +1,884 @@
+/**
+ * Reed-Solomon Coding over 8-bit values.
+ *
+ * Copyright 2015, Klaus Post
+ * Copyright 2015, Backblaze, Inc.
+ */
+
+// Package reedsolomon enables Erasure Coding in Go
+//
+// For usage and examples, see https://github.com/klauspost/reedsolomon
+//
+package reedsolomon
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"runtime"
+	"sync"
+
+	"github.com/klauspost/cpuid"
+)
+
+// Encoder is an interface to encode Reed-Solomon parity sets for your data.
+type Encoder interface {
+	// Encodes parity for a set of data shards.
+	// Input is 'shards' containing data shards followed by parity shards.
+	// The number of shards must match the number given to New().
+	// Each shard is a byte array, and they must all be the same size.
+	// The parity shards will always be overwritten and the data shards
+	// will remain the same, so it is safe for you to read from the
+	// data shards while this is running.
+	Encode(shards [][]byte) error
+
+	// Verify returns true if the parity shards contain correct data.
+	// The data is the same format as Encode. No data is modified, so
+	// you are allowed to read from data while this is running.
+	Verify(shards [][]byte) (bool, error)
+
+	// Reconstruct will recreate the missing shards if possible.
+	//
+	// Given a list of shards, some of which contain data, fills in the
+	// ones that don't have data.
+	//
+	// The length of the array must be equal to the total number of shards.
+	// You indicate that a shard is missing by setting it to nil or zero-length.
+	// If a shard is zero-length but has sufficient capacity, that memory will
+	// be used, otherwise a new []byte will be allocated.
+	//
+	// If there are too few shards to reconstruct the missing
+	// ones, ErrTooFewShards will be returned.
+	//
+	// The reconstructed shard set is complete, but integrity is not verified.
+	// Use the Verify function to check if the data set is ok.
+	Reconstruct(shards [][]byte) error
+
+	// ReconstructData will recreate any missing data shards, if possible.
+	//
+	// Given a list of shards, some of which contain data, fills in the
+	// data shards that don't have data.
+	//
+	// The length of the array must be equal to Shards.
+	// You indicate that a shard is missing by setting it to nil or zero-length.
+	// If a shard is zero-length but has sufficient capacity, that memory will
+	// be used, otherwise a new []byte will be allocated.
+	//
+	// If there are too few shards to reconstruct the missing
+	// ones, ErrTooFewShards will be returned.
+	//
+	// As the reconstructed shard set may contain missing parity shards,
+	// calling the Verify function is likely to fail.
+	ReconstructData(shards [][]byte) error
+
+	// Update is used to change a few data shards and recompute the parity.
+	// Input 'newDatashards' contains the data shards that have changed.
+	// Input 'shards' contains the old data shards (a shard may be nil if it is unchanged) and the old parity shards.
+	// The new parity shards are written to shards[DataShards:].
+	// Update is very useful when DataShards is much larger than ParityShards and
+	// only a few data shards have changed: it is faster than Encode and does not
+	// need to read all data shards.
+	Update(shards [][]byte, newDatashards [][]byte) error
+
+	// Split a data slice into the number of shards given to the encoder,
+	// and create empty parity shards.
+	//
+	// The data will be split into equally sized shards.
+	// If the data size isn't divisible by the number of shards,
+	// the last shard will contain extra zeros.
+	//
+	// There must be at least 1 byte, otherwise ErrShortData will be
+	// returned.
+	//
+	// The data will not be copied, except for the last shard, so you
+	// should not modify the data of the input slice afterwards.
+	Split(data []byte) ([][]byte, error)
+
+	// Join the shards and write the data segment to dst.
+	//
+	// Only the data shards are considered.
+	// You must supply the exact output size you want.
+	// If there are too few shards given, ErrTooFewShards will be returned.
+	// If the total data size is less than outSize, ErrShortData will be returned.
+	Join(dst io.Writer, shards [][]byte, outSize int) error
+}
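
As a usage sketch of this interface (not part of this changeset; the payload and shard counts are arbitrary):

package main

import (
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// 4 data shards plus 2 parity shards: any 4 of the 6 can rebuild the data.
	enc, err := reedsolomon.New(4, 2)
	if err != nil {
		panic(err)
	}
	// Split pads and slices the input into 6 equally sized shards.
	shards, err := enc.Split([]byte("some bytes worth protecting against shard loss"))
	if err != nil {
		panic(err)
	}
	// Encode fills the parity shards; the data shards are left untouched.
	if err := enc.Encode(shards); err != nil {
		panic(err)
	}
	ok, err := enc.Verify(shards)
	fmt.Println(ok, err) // true <nil>
}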
+
+// reedSolomon contains a matrix for a specific
+// distribution of datashards and parity shards.
+// Construct it using New().
+type reedSolomon struct {
+	DataShards   int // Number of data shards, should not be modified.
+	ParityShards int // Number of parity shards, should not be modified.
+	Shards       int // Total number of shards. Calculated, and should not be modified.
+	m            matrix
+	tree         inversionTree
+	parity       [][]byte
+	o            options
+}
+
+// ErrInvShardNum will be returned by New, if you attempt to create
+// an Encoder where either data or parity shards is zero or less.
+var ErrInvShardNum = errors.New("cannot create Encoder with zero or less data/parity shards")
+
+// ErrMaxShardNum will be returned by New, if you attempt to create an
+// Encoder where data and parity shards are bigger than the order of
+// GF(2^8).
+var ErrMaxShardNum = errors.New("cannot create Encoder with more than 256 data+parity shards")
+
+// buildMatrix creates the matrix to use for encoding, given the
+// number of data shards and the number of total shards.
+//
+// The top square of the matrix is guaranteed to be an identity
+// matrix, which means that the data shards are unchanged after
+// encoding.
+func buildMatrix(dataShards, totalShards int) (matrix, error) {
+	// Start with a Vandermonde matrix.  This matrix would work,
+	// in theory, but doesn't have the property that the data
+	// shards are unchanged after encoding.
+	vm, err := vandermonde(totalShards, dataShards)
+	if err != nil {
+		return nil, err
+	}
+
+	// Multiply by the inverse of the top square of the matrix.
+	// This will make the top square be the identity matrix, but
+	// preserve the property that any square subset of rows is
+	// invertible.
+	top, err := vm.SubMatrix(0, 0, dataShards, dataShards)
+	if err != nil {
+		return nil, err
+	}
+
+	topInv, err := top.Invert()
+	if err != nil {
+		return nil, err
+	}
+
+	return vm.Multiply(topInv)
+}
+
+// buildMatrixPAR1 creates the matrix to use for encoding according to
+// the PARv1 spec, given the number of data shards and the number of
+// total shards. Note that the method they use is buggy, and may lead
+// to cases where recovery is impossible, even if there are enough
+// parity shards.
+//
+// The top square of the matrix is guaranteed to be an identity
+// matrix, which means that the data shards are unchanged after
+// encoding.
+func buildMatrixPAR1(dataShards, totalShards int) (matrix, error) {
+	result, err := newMatrix(totalShards, dataShards)
+	if err != nil {
+		return nil, err
+	}
+
+	for r, row := range result {
+		// The top portion of the matrix is the identity
+		// matrix, and the bottom is a transposed Vandermonde
+		// matrix starting at 1 instead of 0.
+		if r < dataShards {
+			result[r][r] = 1
+		} else {
+			for c := range row {
+				result[r][c] = galExp(byte(c+1), r-dataShards)
+			}
+		}
+	}
+	return result, nil
+}
+
+func buildMatrixCauchy(dataShards, totalShards int) (matrix, error) {
+	result, err := newMatrix(totalShards, dataShards)
+	if err != nil {
+		return nil, err
+	}
+
+	for r, row := range result {
+		// The top portion of the matrix is the identity
+		// matrix, and the bottom is a transposed Cauchy matrix.
+		if r < dataShards {
+			result[r][r] = 1
+		} else {
+			for c := range row {
+				result[r][c] = invTable[(byte(r ^ c))]
+			}
+		}
+	}
+	return result, nil
+}
+
+// New creates a new encoder and initializes it to
+// the number of data shards and parity shards that
+// you want to use. You can reuse this encoder.
+// Note that the maximum number of total shards is 256.
+// If no options are supplied, default options are used.
+func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
+	r := reedSolomon{
+		DataShards:   dataShards,
+		ParityShards: parityShards,
+		Shards:       dataShards + parityShards,
+		o:            defaultOptions,
+	}
+
+	for _, opt := range opts {
+		opt(&r.o)
+	}
+	if dataShards <= 0 || parityShards <= 0 {
+		return nil, ErrInvShardNum
+	}
+
+	if dataShards+parityShards > 256 {
+		return nil, ErrMaxShardNum
+	}
+
+	var err error
+	switch {
+	case r.o.useCauchy:
+		r.m, err = buildMatrixCauchy(dataShards, r.Shards)
+	case r.o.usePAR1Matrix:
+		r.m, err = buildMatrixPAR1(dataShards, r.Shards)
+	default:
+		r.m, err = buildMatrix(dataShards, r.Shards)
+	}
+	if err != nil {
+		return nil, err
+	}
+	if r.o.shardSize > 0 {
+		cacheSize := cpuid.CPU.Cache.L2
+		if cacheSize <= 0 {
+			// Set to 128K if undetectable.
+			cacheSize = 128 << 10
+		}
+		p := runtime.NumCPU()
+
+		// 1 input + parity must fit in cache, and we add one more to be safer.
+		shards := 1 + parityShards
+		g := (r.o.shardSize * shards) / (cacheSize - (cacheSize >> 4))
+
+		if cpuid.CPU.ThreadsPerCore > 1 {
+			// If multiple threads per core, make sure they don't contend for cache.
+			g *= cpuid.CPU.ThreadsPerCore
+		}
+		g *= 2
+		if g < p {
+			g = p
+		}
+
+		// Have g be multiple of p
+		g += p - 1
+		g -= g % p
+
+		r.o.maxGoroutines = g
+	}
+
+	// Inverted matrices are cached in a tree keyed by the indices
+	// of the invalid rows of the data to reconstruct.
+	// The inversion root node will have the identity matrix as
+	// its inversion matrix because it implies there are no errors
+	// with the original data.
+	r.tree = newInversionTree(dataShards, parityShards)
+
+	r.parity = make([][]byte, parityShards)
+	for i := range r.parity {
+		r.parity[i] = r.m[dataShards+i]
+	}
+
+	return &r, err
+}
+
+// ErrTooFewShards is returned if too few shards were given to
+// Encode/Verify/Reconstruct/Update. It will also be returned from Reconstruct
+// if there were too few shards to reconstruct the missing data.
+var ErrTooFewShards = errors.New("too few shards given")
+
+// Encodes parity for a set of data shards.
+// Input is an array 'shards' containing data shards followed by parity shards.
+// The number of shards must match the number given to New.
+// Each shard is a byte array, and they must all be the same size.
+// The parity shards will always be overwritten and the data shards
+// will remain the same.
+func (r reedSolomon) Encode(shards [][]byte) error {
+	if len(shards) != r.Shards {
+		return ErrTooFewShards
+	}
+
+	err := checkShards(shards, false)
+	if err != nil {
+		return err
+	}
+
+	// Get the slice of output buffers.
+	output := shards[r.DataShards:]
+
+	// Do the coding.
+	r.codeSomeShards(r.parity, shards[0:r.DataShards], output, r.ParityShards, len(shards[0]))
+	return nil
+}
+
+// ErrInvalidInput is returned if an invalid input parameter is passed to Update.
+var ErrInvalidInput = errors.New("invalid input")
+
+func (r reedSolomon) Update(shards [][]byte, newDatashards [][]byte) error {
+	if len(shards) != r.Shards {
+		return ErrTooFewShards
+	}
+
+	if len(newDatashards) != r.DataShards {
+		return ErrTooFewShards
+	}
+
+	err := checkShards(shards, true)
+	if err != nil {
+		return err
+	}
+
+	err = checkShards(newDatashards, true)
+	if err != nil {
+		return err
+	}
+
+	for i := range newDatashards {
+		if newDatashards[i] != nil && shards[i] == nil {
+			return ErrInvalidInput
+		}
+	}
+	for _, p := range shards[r.DataShards:] {
+		if p == nil {
+			return ErrInvalidInput
+		}
+	}
+
+	shardSize := shardSize(shards)
+
+	// Get the slice of output buffers.
+	output := shards[r.DataShards:]
+
+	// Do the coding.
+	r.updateParityShards(r.parity, shards[0:r.DataShards], newDatashards[0:r.DataShards], output, r.ParityShards, shardSize)
+	return nil
+}
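
A minimal sketch of the Update call pattern follows (not part of this changeset; sizes are arbitrary). Note, per the comments above, that the old data buffers are modified in place, so the caller installs the new shard afterwards:

package main

import "github.com/klauspost/reedsolomon"

func main() {
	enc, _ := reedsolomon.New(4, 2)
	shards, _ := enc.Split(make([]byte, 4096))
	_ = enc.Encode(shards)

	// Modify a copy of data shard 1.
	changed := append([]byte(nil), shards[1]...)
	changed[0] ^= 0xff

	// Only changed entries are non-nil; unchanged shards stay nil.
	newData := make([][]byte, 4)
	newData[1] = changed

	// Recompute parity from the delta, without reading shards 0, 2 and 3.
	if err := enc.Update(shards, newData); err != nil {
		panic(err)
	}
	// Update XORs the new data into the old buffer, so put the real
	// shard back before using the set.
	shards[1] = changed
}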
+
+func (r reedSolomon) updateParityShards(matrixRows, oldinputs, newinputs, outputs [][]byte, outputCount, byteCount int) {
+	if r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize {
+		r.updateParityShardsP(matrixRows, oldinputs, newinputs, outputs, outputCount, byteCount)
+		return
+	}
+
+	for c := 0; c < r.DataShards; c++ {
+		in := newinputs[c]
+		if in == nil {
+			continue
+		}
+		oldin := oldinputs[c]
+		// the oldinputs buffers are modified in place (XORed with the new data)
+		sliceXor(in, oldin, r.o.useSSE2)
+		for iRow := 0; iRow < outputCount; iRow++ {
+			galMulSliceXor(matrixRows[iRow][c], oldin, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
+		}
+	}
+}
+
+func (r reedSolomon) updateParityShardsP(matrixRows, oldinputs, newinputs, outputs [][]byte, outputCount, byteCount int) {
+	var wg sync.WaitGroup
+	do := byteCount / r.o.maxGoroutines
+	if do < r.o.minSplitSize {
+		do = r.o.minSplitSize
+	}
+	start := 0
+	for start < byteCount {
+		if start+do > byteCount {
+			do = byteCount - start
+		}
+		wg.Add(1)
+		go func(start, stop int) {
+			for c := 0; c < r.DataShards; c++ {
+				in := newinputs[c]
+				if in == nil {
+					continue
+				}
+				oldin := oldinputs[c]
+				// the oldinputs buffers are modified in place (XORed with the new data)
+				sliceXor(in[start:stop], oldin[start:stop], r.o.useSSE2)
+				for iRow := 0; iRow < outputCount; iRow++ {
+					galMulSliceXor(matrixRows[iRow][c], oldin[start:stop], outputs[iRow][start:stop], r.o.useSSSE3, r.o.useAVX2)
+				}
+			}
+			wg.Done()
+		}(start, start+do)
+		start += do
+	}
+	wg.Wait()
+}
+
+// Verify returns true if the parity shards contain the right data.
+// The data is the same format as Encode. No data is modified.
+func (r reedSolomon) Verify(shards [][]byte) (bool, error) {
+	if len(shards) != r.Shards {
+		return false, ErrTooFewShards
+	}
+	err := checkShards(shards, false)
+	if err != nil {
+		return false, err
+	}
+
+	// Slice of buffers being checked.
+	toCheck := shards[r.DataShards:]
+
+	// Do the checking.
+	return r.checkSomeShards(r.parity, shards[0:r.DataShards], toCheck, r.ParityShards, len(shards[0])), nil
+}
+
+// Multiplies a subset of rows from a coding matrix by a full set of
+// input shards to produce some output shards.
+// 'matrixRows' are the rows from the matrix to use.
+// 'inputs' is an array of byte arrays, each of which is one input shard.
+// The number of inputs used is determined by the length of each matrix row.
+// 'outputs' are the byte arrays where the computed shards are stored.
+// The number of outputs computed, and the number of matrix rows used,
+// is determined by 'outputCount', the number of outputs to compute.
+func (r reedSolomon) codeSomeShards(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
+	if r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize {
+		r.codeSomeShardsP(matrixRows, inputs, outputs, outputCount, byteCount)
+		return
+	}
+	for c := 0; c < r.DataShards; c++ {
+		in := inputs[c]
+		for iRow := 0; iRow < outputCount; iRow++ {
+			if c == 0 {
+				galMulSlice(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
+			} else {
+				galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
+			}
+		}
+	}
+}
+
+// Performs the same operation as codeSomeShards, but splits the workload
+// across several goroutines.
+func (r reedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
+	var wg sync.WaitGroup
+	do := byteCount / r.o.maxGoroutines
+	if do < r.o.minSplitSize {
+		do = r.o.minSplitSize
+	}
+	// Make sizes divisible by 16
+	do = (do + 15) & (^15)
+	start := 0
+	for start < byteCount {
+		if start+do > byteCount {
+			do = byteCount - start
+		}
+		wg.Add(1)
+		go func(start, stop int) {
+			for c := 0; c < r.DataShards; c++ {
+				in := inputs[c]
+				for iRow := 0; iRow < outputCount; iRow++ {
+					if c == 0 {
+						galMulSlice(matrixRows[iRow][c], in[start:stop], outputs[iRow][start:stop], r.o.useSSSE3, r.o.useAVX2)
+					} else {
+						galMulSliceXor(matrixRows[iRow][c], in[start:stop], outputs[iRow][start:stop], r.o.useSSSE3, r.o.useAVX2)
+					}
+				}
+			}
+			wg.Done()
+		}(start, start+do)
+		start += do
+	}
+	wg.Wait()
+}
+
+// checkSomeShards is mostly the same as codeSomeShards,
+// except this will check values and return
+// as soon as a difference is found.
+func (r reedSolomon) checkSomeShards(matrixRows, inputs, toCheck [][]byte, outputCount, byteCount int) bool {
+	if r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize {
+		return r.checkSomeShardsP(matrixRows, inputs, toCheck, outputCount, byteCount)
+	}
+	outputs := make([][]byte, len(toCheck))
+	for i := range outputs {
+		outputs[i] = make([]byte, byteCount)
+	}
+	for c := 0; c < r.DataShards; c++ {
+		in := inputs[c]
+		for iRow := 0; iRow < outputCount; iRow++ {
+			galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
+		}
+	}
+
+	for i, calc := range outputs {
+		if !bytes.Equal(calc, toCheck[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+func (r reedSolomon) checkSomeShardsP(matrixRows, inputs, toCheck [][]byte, outputCount, byteCount int) bool {
+	same := true
+	var mu sync.RWMutex // protects 'same'
+
+	var wg sync.WaitGroup
+	do := byteCount / r.o.maxGoroutines
+	if do < r.o.minSplitSize {
+		do = r.o.minSplitSize
+	}
+	// Make sizes divisible by 16
+	do = (do + 15) & (^15)
+	start := 0
+	for start < byteCount {
+		if start+do > byteCount {
+			do = byteCount - start
+		}
+		wg.Add(1)
+		go func(start, do int) {
+			defer wg.Done()
+			outputs := make([][]byte, len(toCheck))
+			for i := range outputs {
+				outputs[i] = make([]byte, do)
+			}
+			for c := 0; c < r.DataShards; c++ {
+				mu.RLock()
+				if !same {
+					mu.RUnlock()
+					return
+				}
+				mu.RUnlock()
+				in := inputs[c][start : start+do]
+				for iRow := 0; iRow < outputCount; iRow++ {
+					galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
+				}
+			}
+
+			for i, calc := range outputs {
+				if !bytes.Equal(calc, toCheck[i][start:start+do]) {
+					mu.Lock()
+					same = false
+					mu.Unlock()
+					return
+				}
+			}
+		}(start, do)
+		start += do
+	}
+	wg.Wait()
+	return same
+}
+
+// ErrShardNoData will be returned if there are no shards,
+// or if the length of all shards is zero.
+var ErrShardNoData = errors.New("no shard data")
+
+// ErrShardSize is returned if shard length isn't the same for all
+// shards.
+var ErrShardSize = errors.New("shard sizes do not match")
+
+// checkShards will check if shards are the same size
+// or 0, if allowed. An error is returned if this fails.
+// An error is also returned if all shards are size 0.
+func checkShards(shards [][]byte, nilok bool) error {
+	size := shardSize(shards)
+	if size == 0 {
+		return ErrShardNoData
+	}
+	for _, shard := range shards {
+		if len(shard) != size {
+			if len(shard) != 0 || !nilok {
+				return ErrShardSize
+			}
+		}
+	}
+	return nil
+}
+
+// shardSize returns the size of a single shard.
+// The first non-zero size is returned,
+// or 0 if all shards are size 0.
+func shardSize(shards [][]byte) int {
+	for _, shard := range shards {
+		if len(shard) != 0 {
+			return len(shard)
+		}
+	}
+	return 0
+}
+
+// Reconstruct will recreate the missing shards, if possible.
+//
+// Given a list of shards, some of which contain data, fills in the
+// ones that don't have data.
+//
+// The length of the array must be equal to Shards.
+// You indicate that a shard is missing by setting it to nil or zero-length.
+// If a shard is zero-length but has sufficient capacity, that memory will
+// be used, otherwise a new []byte will be allocated.
+//
+// If there are too few shards to reconstruct the missing
+// ones, ErrTooFewShards will be returned.
+//
+// The reconstructed shard set is complete, but integrity is not verified.
+// Use the Verify function to check if the data set is ok.
+func (r reedSolomon) Reconstruct(shards [][]byte) error {
+	return r.reconstruct(shards, false)
+}
+
+// ReconstructData will recreate any missing data shards, if possible.
+//
+// Given a list of shards, some of which contain data, fills in the
+// data shards that don't have data.
+//
+// The length of the array must be equal to Shards.
+// You indicate that a shard is missing by setting it to nil or zero-length.
+// If a shard is zero-length but has sufficient capacity, that memory will
+// be used, otherwise a new []byte will be allocated.
+//
+// If there are too few shards to reconstruct the missing
+// ones, ErrTooFewShards will be returned.
+//
+// As the reconstructed shard set may contain missing parity shards,
+// calling the Verify function is likely to fail.
+func (r reedSolomon) ReconstructData(shards [][]byte) error {
+	return r.reconstruct(shards, true)
+}
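
For illustration, a sketch of recovering from shard loss with Reconstruct (not part of this changeset; with 4+2 shards, any two may be lost):

package main

import (
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	enc, _ := reedsolomon.New(4, 2)
	shards, _ := enc.Split([]byte("an example payload for reconstruction"))
	_ = enc.Encode(shards)

	// Simulate losing one data shard and one parity shard.
	shards[0] = nil
	shards[5] = nil

	// Rebuild both; ReconstructData(shards) would skip the parity shard.
	if err := enc.Reconstruct(shards); err != nil {
		panic(err)
	}
	ok, _ := enc.Verify(shards)
	fmt.Println("verified after reconstruction:", ok) // true
}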
+
+// reconstruct will recreate the missing data shards, and unless
+// dataOnly is true, also the missing parity shards
+//
+// The length of the array must be equal to Shards.
+// You indicate that a shard is missing by setting it to nil.
+//
+// If there are too few shards to reconstruct the missing
+// ones, ErrTooFewShards will be returned.
+func (r reedSolomon) reconstruct(shards [][]byte, dataOnly bool) error {
+	if len(shards) != r.Shards {
+		return ErrTooFewShards
+	}
+	// Check arguments.
+	err := checkShards(shards, true)
+	if err != nil {
+		return err
+	}
+
+	shardSize := shardSize(shards)
+
+	// Quick check: are all of the shards present?  If so, there's
+	// nothing to do.
+	numberPresent := 0
+	for i := 0; i < r.Shards; i++ {
+		if len(shards[i]) != 0 {
+			numberPresent++
+		}
+	}
+	if numberPresent == r.Shards {
+		// Cool.  All of the shards have data.  We don't
+		// need to do anything.
+		return nil
+	}
+
+	// More complete sanity check
+	if numberPresent < r.DataShards {
+		return ErrTooFewShards
+	}
+
+	// Pull out an array holding just the shards that
+	// correspond to the rows of the submatrix.  These shards
+	// will be the input to the decoding process that re-creates
+	// the missing data shards.
+	//
+	// Also, create an array of indices of the valid rows we do have
+	// and the invalid rows we don't have up until we have enough valid rows.
+	subShards := make([][]byte, r.DataShards)
+	validIndices := make([]int, r.DataShards)
+	invalidIndices := make([]int, 0)
+	subMatrixRow := 0
+	for matrixRow := 0; matrixRow < r.Shards && subMatrixRow < r.DataShards; matrixRow++ {
+		if len(shards[matrixRow]) != 0 {
+			subShards[subMatrixRow] = shards[matrixRow]
+			validIndices[subMatrixRow] = matrixRow
+			subMatrixRow++
+		} else {
+			invalidIndices = append(invalidIndices, matrixRow)
+		}
+	}
+
+	// Attempt to get the cached inverted matrix out of the tree
+	// based on the indices of the invalid rows.
+	dataDecodeMatrix := r.tree.GetInvertedMatrix(invalidIndices)
+
+	// If the inverted matrix isn't cached in the tree yet we must
+	// construct it ourselves and insert it into the tree for the
+	// future.  In this way the inversion tree is lazily loaded.
+	if dataDecodeMatrix == nil {
+		// Pull out the rows of the matrix that correspond to the
+		// shards that we have and build a square matrix.  This
+		// matrix could be used to generate the shards that we have
+		// from the original data.
+		subMatrix, _ := newMatrix(r.DataShards, r.DataShards)
+		for subMatrixRow, validIndex := range validIndices {
+			for c := 0; c < r.DataShards; c++ {
+				subMatrix[subMatrixRow][c] = r.m[validIndex][c]
+			}
+		}
+		// Invert the matrix, so we can go from the encoded shards
+		// back to the original data.  Then pull out the row that
+		// generates the shard that we want to decode.  Note that
+		// since this matrix maps back to the original data, it can
+		// be used to create a data shard, but not a parity shard.
+		dataDecodeMatrix, err = subMatrix.Invert()
+		if err != nil {
+			return err
+		}
+
+		// Cache the inverted matrix in the tree for future use keyed on the
+		// indices of the invalid rows.
+		err = r.tree.InsertInvertedMatrix(invalidIndices, dataDecodeMatrix, r.Shards)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Re-create any data shards that were missing.
+	//
+	// The input to the coding is all of the shards we actually
+	// have, and the output is the missing data shards.  The computation
+	// is done using the special decode matrix we just built.
+	outputs := make([][]byte, r.ParityShards)
+	matrixRows := make([][]byte, r.ParityShards)
+	outputCount := 0
+
+	for iShard := 0; iShard < r.DataShards; iShard++ {
+		if len(shards[iShard]) == 0 {
+			if cap(shards[iShard]) >= shardSize {
+				shards[iShard] = shards[iShard][0:shardSize]
+			} else {
+				shards[iShard] = make([]byte, shardSize)
+			}
+			outputs[outputCount] = shards[iShard]
+			matrixRows[outputCount] = dataDecodeMatrix[iShard]
+			outputCount++
+		}
+	}
+	r.codeSomeShards(matrixRows, subShards, outputs[:outputCount], outputCount, shardSize)
+
+	if dataOnly {
+		// Exit out early if we are only interested in the data shards
+		return nil
+	}
+
+	// Now that we have all of the data shards intact, we can
+	// compute any of the parity that is missing.
+	//
+	// The input to the coding is ALL of the data shards, including
+	// any that we just calculated.  The output is whichever of the
+	// parity shards were missing.
+	outputCount = 0
+	for iShard := r.DataShards; iShard < r.Shards; iShard++ {
+		if len(shards[iShard]) == 0 {
+			if cap(shards[iShard]) >= shardSize {
+				shards[iShard] = shards[iShard][0:shardSize]
+			} else {
+				shards[iShard] = make([]byte, shardSize)
+			}
+			outputs[outputCount] = shards[iShard]
+			matrixRows[outputCount] = r.parity[iShard-r.DataShards]
+			outputCount++
+		}
+	}
+	r.codeSomeShards(matrixRows, shards[:r.DataShards], outputs[:outputCount], outputCount, shardSize)
+	return nil
+}
+
+// ErrShortData will be returned by Split(), if there isn't enough data
+// to fill the number of shards.
+var ErrShortData = errors.New("not enough data to fill the number of requested shards")
+
+// Split a data slice into the number of shards given to the encoder,
+// and create empty parity shards if necessary.
+//
+// The data will be split into equally sized shards.
+// If the data size isn't divisible by the number of shards,
+// the last shard will contain extra zeros.
+//
+// There must be at least 1 byte, otherwise ErrShortData will be
+// returned.
+//
+// The data will not be copied, except for the last shard, so you
+// should not modify the data of the input slice afterwards.
+func (r reedSolomon) Split(data []byte) ([][]byte, error) {
+	if len(data) == 0 {
+		return nil, ErrShortData
+	}
+	// Calculate number of bytes per data shard.
+	perShard := (len(data) + r.DataShards - 1) / r.DataShards
+
+	if cap(data) > len(data) {
+		data = data[:cap(data)]
+	}
+
+	// Only allocate memory if necessary
+	if len(data) < (r.Shards * perShard) {
+		// Pad data to r.Shards*perShard.
+		padding := make([]byte, (r.Shards*perShard)-len(data))
+		data = append(data, padding...)
+	}
+
+	// Split into equal-length shards.
+	dst := make([][]byte, r.Shards)
+	for i := range dst {
+		dst[i] = data[:perShard]
+		data = data[perShard:]
+	}
+
+	return dst, nil
+}
+
+// ErrReconstructRequired is returned if too few data shards are intact and a
+// reconstruction is required before you can successfully join the shards.
+var ErrReconstructRequired = errors.New("reconstruction required as one or more required data shards are nil")
+
+// Join the shards and write the data segment to dst.
+//
+// Only the data shards are considered.
+// You must supply the exact output size you want.
+//
+// If there are too few shards given, ErrTooFewShards will be returned.
+// If the total data size is less than outSize, ErrShortData will be returned.
+// If one or more required data shards are nil, ErrReconstructRequired will be returned.
+func (r reedSolomon) Join(dst io.Writer, shards [][]byte, outSize int) error {
+	// Do we have enough shards?
+	if len(shards) < r.DataShards {
+		return ErrTooFewShards
+	}
+	shards = shards[:r.DataShards]
+
+	// Do we have enough data?
+	size := 0
+	for _, shard := range shards {
+		if shard == nil {
+			return ErrReconstructRequired
+		}
+		size += len(shard)
+
+		// Do we have enough data already?
+		if size >= outSize {
+			break
+		}
+	}
+	if size < outSize {
+		return ErrShortData
+	}
+
+	// Copy data to dst
+	write := outSize
+	for _, shard := range shards {
+		if write < len(shard) {
+			_, err := dst.Write(shard[:write])
+			return err
+		}
+		n, err := dst.Write(shard)
+		if err != nil {
+			return err
+		}
+		write -= n
+	}
+	return nil
+}
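
A round-trip sketch through Split, Encode and Join (not part of this changeset); outSize must be the original length so the zero padding added by Split is discarded:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	data := []byte("round-trip through Split and Join")
	enc, _ := reedsolomon.New(4, 2)

	shards, _ := enc.Split(data)
	_ = enc.Encode(shards)

	// Join writes exactly len(data) bytes from the data shards.
	var buf bytes.Buffer
	if err := enc.Join(&buf, shards, len(data)); err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(buf.Bytes(), data)) // true
}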

+ 584 - 0
vendor/github.com/klauspost/reedsolomon/streaming.go

@@ -0,0 +1,584 @@
+/**
+ * Reed-Solomon Coding over 8-bit values.
+ *
+ * Copyright 2015, Klaus Post
+ * Copyright 2015, Backblaze, Inc.
+ */
+
+package reedsolomon
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+)
+
+// StreamEncoder is an interface to encode Reed-Solomon parity sets for your data.
+// It provides a fully streaming interface, and processes data in blocks of up to 4MB.
+//
+// For small shard sizes, 10MB and below, it is recommended to use the in-memory interface,
+// since the streaming interface has a start up overhead.
+//
+// For all operations, readers and writers should not assume any order/size of
+// individual reads/writes.
+//
+// For usage examples, see "stream-encoder.go" and "stream-decoder.go" in the examples
+// folder.
+type StreamEncoder interface {
+	// Encodes parity shards for a set of data shards.
+	//
+	// Input is 'data' containing readers for the data shards, and 'parity'
+	// containing writers that receive the parity shards.
+	//
+	// The number of shards must match the number given to NewStream().
+	//
+	// Each reader must supply the same number of bytes.
+	//
+	// The parity shards will be written to the writer.
+	// The number of bytes written will match the input size.
+	//
+	// If a data stream returns an error, a StreamReadError type error
+	// will be returned. If a parity writer returns an error, a
+	// StreamWriteError will be returned.
+	Encode(data []io.Reader, parity []io.Writer) error
+
+	// Verify returns true if the parity shards contain correct data.
+	//
+	// The number of shards must match the total number of data+parity shards
+	// given to NewStream().
+	//
+	// Each reader must supply the same number of bytes.
+	// If a shard stream returns an error, a StreamReadError type error
+	// will be returned.
+	Verify(shards []io.Reader) (bool, error)
+
+	// Reconstruct will recreate the missing shards if possible.
+	//
+	// Given a list of valid shards (to read) and invalid shards (to write)
+	//
+	// You indicate that a shard is missing by setting it to nil in the 'valid'
+	// slice and at the same time setting a non-nil writer at the same index in 'fill'.
+	// An index cannot contain both a non-nil 'valid' and 'fill' entry;
+	// if both are provided, 'ErrReconstructMismatch' is returned.
+	//
+	// If there are too few shards to reconstruct the missing
+	// ones, ErrTooFewShards will be returned.
+	//
+	// The reconstructed shard set is complete, but integrity is not verified.
+	// Use the Verify function to check if the data set is ok.
+	Reconstruct(valid []io.Reader, fill []io.Writer) error
+
+	// Split an input stream into the number of shards given to the encoder.
+	//
+	// The data will be split into equally sized shards.
+	// If the data size isn't divisible by the number of shards,
+	// the last shard will contain extra zeros.
+	//
+	// You must supply the total size of your input.
+	// 'ErrShortData' will be returned if it is unable to retrieve the
+	// number of bytes indicated.
+	Split(data io.Reader, dst []io.Writer, size int64) (err error)
+
+	// Join the shards and write the data segment to dst.
+	//
+	// Only the data shards are considered.
+	//
+	// You must supply the exact output size you want.
+	// If there are too few shards given, ErrTooFewShards will be returned.
+	// If the total data size is less than outSize, ErrShortData will be returned.
+	Join(dst io.Writer, shards []io.Reader, outSize int64) error
+}
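
A streaming sketch against this interface (not part of this changeset); every data reader must supply the same number of bytes, 1 KiB here, chosen arbitrarily:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/reedsolomon"
)

func main() {
	enc, err := reedsolomon.NewStream(4, 2)
	if err != nil {
		panic(err)
	}

	// Four equally sized data shards, presented as streams.
	shard := bytes.Repeat([]byte{0xaa}, 1024)
	data := make([]io.Reader, 4)
	for i := range data {
		data[i] = bytes.NewReader(shard)
	}

	// Two parity shards, collected into in-memory buffers.
	bufs := make([]bytes.Buffer, 2)
	parity := make([]io.Writer, 2)
	for i := range parity {
		parity[i] = &bufs[i]
	}

	if err := enc.Encode(data, parity); err != nil {
		panic(err)
	}
	fmt.Println(bufs[0].Len(), bufs[1].Len()) // 1024 1024
}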
+
+// StreamReadError is returned when a read error is encountered
+// that relates to a supplied stream.
+// This will allow you to find out which reader has failed.
+type StreamReadError struct {
+	Err    error // The error
+	Stream int   // The stream number on which the error occurred
+}
+
+// Error returns the error as a string
+func (s StreamReadError) Error() string {
+	return fmt.Sprintf("error reading stream %d: %s", s.Stream, s.Err)
+}
+
+// String returns the error as a string
+func (s StreamReadError) String() string {
+	return s.Error()
+}
+
+// StreamWriteError is returned when a write error is encountered
+// that relates to a supplied stream. This will allow you to
+// find out which reader has failed.
+type StreamWriteError struct {
+	Err    error // The error
+	Stream int   // The stream number on which the error occurred
+}
+
+// Error returns the error as a string
+func (s StreamWriteError) Error() string {
+	return fmt.Sprintf("error writing stream %d: %s", s.Stream, s.Err)
+}
+
+// String returns the error as a string
+func (s StreamWriteError) String() string {
+	return s.Error()
+}
+
+// rsStream contains a matrix for a specific
+// distribution of datashards and parity shards.
+// Construct it using NewStream().
+type rsStream struct {
+	r  *reedSolomon
+	bs int // Block size
+	// Shard reader
+	readShards func(dst [][]byte, in []io.Reader) error
+	// Shard writer
+	writeShards func(out []io.Writer, in [][]byte) error
+	creads      bool
+	cwrites     bool
+}
+
+// NewStream creates a new encoder and initializes it to
+// the number of data shards and parity shards that
+// you want to use. You can reuse this encoder.
+// Note that the maximum number of total shards is 256.
+func NewStream(dataShards, parityShards int, o ...Option) (StreamEncoder, error) {
+	enc, err := New(dataShards, parityShards, o...)
+	if err != nil {
+		return nil, err
+	}
+	rs := enc.(*reedSolomon)
+	r := rsStream{r: rs, bs: 4 << 20}
+	r.readShards = readShards
+	r.writeShards = writeShards
+	return &r, err
+}
+
+// NewStreamC creates a new encoder and initializes it to
+// the number of data shards and parity shards given.
+//
+// This functions as 'NewStream', but allows you to enable CONCURRENT reads and writes.
+func NewStreamC(dataShards, parityShards int, conReads, conWrites bool, o ...Option) (StreamEncoder, error) {
+	enc, err := New(dataShards, parityShards, o...)
+	if err != nil {
+		return nil, err
+	}
+	rs := enc.(*reedSolomon)
+	r := rsStream{r: rs, bs: 4 << 20}
+	r.readShards = readShards
+	r.writeShards = writeShards
+	if conReads {
+		r.readShards = cReadShards
+	}
+	if conWrites {
+		r.writeShards = cWriteShards
+	}
+	return &r, err
+}
+
+func createSlice(n, length int) [][]byte {
+	out := make([][]byte, n)
+	for i := range out {
+		out[i] = make([]byte, length)
+	}
+	return out
+}
+
+// Encodes parity shards for a set of data shards.
+//
+// Input is 'data' containing readers for the data shards, and 'parity'
+// containing writers that receive the parity shards.
+//
+// The number of shards must match the number given to NewStream().
+//
+// Each reader must supply the same number of bytes.
+//
+// The parity shards will be written to the writer.
+// The number of bytes written will match the input size.
+//
+// If a data stream returns an error, a StreamReadError type error
+// will be returned. If a parity writer returns an error, a
+// StreamWriteError will be returned.
+func (r rsStream) Encode(data []io.Reader, parity []io.Writer) error {
+	if len(data) != r.r.DataShards {
+		return ErrTooFewShards
+	}
+
+	if len(parity) != r.r.ParityShards {
+		return ErrTooFewShards
+	}
+
+	all := createSlice(r.r.Shards, r.bs)
+	in := all[:r.r.DataShards]
+	out := all[r.r.DataShards:]
+	read := 0
+
+	for {
+		err := r.readShards(in, data)
+		switch err {
+		case nil:
+		case io.EOF:
+			if read == 0 {
+				return ErrShardNoData
+			}
+			return nil
+		default:
+			return err
+		}
+		out = trimShards(out, shardSize(in))
+		read += shardSize(in)
+		err = r.r.Encode(all)
+		if err != nil {
+			return err
+		}
+		err = r.writeShards(parity, out)
+		if err != nil {
+			return err
+		}
+	}
+}
+
+// Trim the shards so they are all the same size
+func trimShards(in [][]byte, size int) [][]byte {
+	for i := range in {
+		if in[i] != nil {
+			in[i] = in[i][0:size]
+		}
+		if len(in[i]) < size {
+			in[i] = nil
+		}
+	}
+	return in
+}
+
+func readShards(dst [][]byte, in []io.Reader) error {
+	if len(in) != len(dst) {
+		panic("internal error: in and dst size do not match")
+	}
+	size := -1
+	for i := range in {
+		if in[i] == nil {
+			dst[i] = nil
+			continue
+		}
+		n, err := io.ReadFull(in[i], dst[i])
+		// The error is EOF only if no bytes were read.
+		// If an EOF happens after reading some but not all the bytes,
+		// ReadFull returns ErrUnexpectedEOF.
+		switch err {
+		case io.ErrUnexpectedEOF, io.EOF:
+			if size < 0 {
+				size = n
+			} else if n != size {
+				// Shard sizes must match.
+				return ErrShardSize
+			}
+			dst[i] = dst[i][0:n]
+		case nil:
+			continue
+		default:
+			return StreamReadError{Err: err, Stream: i}
+		}
+	}
+	if size == 0 {
+		return io.EOF
+	}
+	return nil
+}
+
+func writeShards(out []io.Writer, in [][]byte) error {
+	if len(out) != len(in) {
+		panic("internal error: in and out size do not match")
+	}
+	for i := range in {
+		if out[i] == nil {
+			continue
+		}
+		n, err := out[i].Write(in[i])
+		if err != nil {
+			return StreamWriteError{Err: err, Stream: i}
+		}
+		// A short write counts as an error.
+		if n != len(in[i]) {
+			return StreamWriteError{Err: io.ErrShortWrite, Stream: i}
+		}
+	}
+	return nil
+}
+
+type readResult struct {
+	n    int
+	size int
+	err  error
+}
+
+// cReadShards reads shards concurrently
+func cReadShards(dst [][]byte, in []io.Reader) error {
+	if len(in) != len(dst) {
+		panic("internal error: in and dst size do not match")
+	}
+	var wg sync.WaitGroup
+	wg.Add(len(in))
+	res := make(chan readResult, len(in))
+	for i := range in {
+		if in[i] == nil {
+			dst[i] = nil
+			wg.Done()
+			continue
+		}
+		go func(i int) {
+			defer wg.Done()
+			n, err := io.ReadFull(in[i], dst[i])
+			// The error is EOF only if no bytes were read.
+			// If an EOF happens after reading some but not all the bytes,
+			// ReadFull returns ErrUnexpectedEOF.
+			res <- readResult{size: n, err: err, n: i}
+
+		}(i)
+	}
+	wg.Wait()
+	close(res)
+	size := -1
+	for r := range res {
+		switch r.err {
+		case io.ErrUnexpectedEOF, io.EOF:
+			if size < 0 {
+				size = r.size
+			} else if r.size != size {
+				// Shard sizes must match.
+				return ErrShardSize
+			}
+			dst[r.n] = dst[r.n][0:r.size]
+		case nil:
+		default:
+			return StreamReadError{Err: r.err, Stream: r.n}
+		}
+	}
+	if size == 0 {
+		return io.EOF
+	}
+	return nil
+}
+
+// cWriteShards writes shards concurrently
+func cWriteShards(out []io.Writer, in [][]byte) error {
+	if len(out) != len(in) {
+		panic("internal error: in and out size do not match")
+	}
+	var errs = make(chan error, len(out))
+	var wg sync.WaitGroup
+	wg.Add(len(out))
+	for i := range in {
+		go func(i int) {
+			defer wg.Done()
+			if out[i] == nil {
+				errs <- nil
+				return
+			}
+			n, err := out[i].Write(in[i])
+			if err != nil {
+				errs <- StreamWriteError{Err: err, Stream: i}
+				return
+			}
+			if n != len(in[i]) {
+				errs <- StreamWriteError{Err: io.ErrShortWrite, Stream: i}
+			}
+		}(i)
+	}
+	wg.Wait()
+	close(errs)
+	for err := range errs {
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Verify returns true if the parity shards contain correct data.
+//
+// The number of shards must match the total number of data+parity shards
+// given to NewStream().
+//
+// Each reader must supply the same number of bytes.
+// If a shard stream returns an error, a StreamReadError type error
+// will be returned.
+func (r rsStream) Verify(shards []io.Reader) (bool, error) {
+	if len(shards) != r.r.Shards {
+		return false, ErrTooFewShards
+	}
+
+	read := 0
+	all := createSlice(r.r.Shards, r.bs)
+	for {
+		err := r.readShards(all, shards)
+		if err == io.EOF {
+			if read == 0 {
+				return false, ErrShardNoData
+			}
+			return true, nil
+		}
+		if err != nil {
+			return false, err
+		}
+		read += shardSize(all)
+		ok, err := r.r.Verify(all)
+		if !ok || err != nil {
+			return ok, err
+		}
+	}
+}
+
+// ErrReconstructMismatch is returned by the StreamEncoder, if you supply
+// "valid" and "fill" streams on the same index.
+// In that case it is impossible to tell whether you consider the shard valid
+// or would like to have it reconstructed.
+var ErrReconstructMismatch = errors.New("valid shards and fill shards are mutually exclusive")
+
+// Reconstruct will recreate the missing shards if possible.
+//
+// Given a list of valid shards (to read) and invalid shards (to write)
+//
+// You indicate that a shard is missing by setting it to nil in the 'valid'
+// slice and at the same time setting a non-nil writer at the same index in 'fill'.
+// An index cannot contain both a non-nil 'valid' and 'fill' entry.
+//
+// If there are too few shards to reconstruct the missing
+// ones, ErrTooFewShards will be returned.
+//
+// The reconstructed shard set is complete only when all missing shards are asked for.
+// However, its integrity is not automatically verified.
+// Use the Verify function to check whether the data set is ok.
+func (r rsStream) Reconstruct(valid []io.Reader, fill []io.Writer) error {
+	if len(valid) != r.r.Shards {
+		return ErrTooFewShards
+	}
+	if len(fill) != r.r.Shards {
+		return ErrTooFewShards
+	}
+
+	all := createSlice(r.r.Shards, r.bs)
+	reconDataOnly := true
+	for i := range valid {
+		if valid[i] != nil && fill[i] != nil {
+			return ErrReconstructMismatch
+		}
+		if i >= r.r.DataShards && fill[i] != nil {
+			reconDataOnly = false
+		}
+	}
+
+	read := 0
+	for {
+		err := r.readShards(all, valid)
+		if err == io.EOF {
+			if read == 0 {
+				return ErrShardNoData
+			}
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+		read += shardSize(all)
+		all = trimShards(all, shardSize(all))
+
+		if reconDataOnly {
+			err = r.r.ReconstructData(all) // just reconstruct missing data shards
+		} else {
+			err = r.r.Reconstruct(all) //  reconstruct all missing shards
+		}
+		if err != nil {
+			return err
+		}
+		err = r.writeShards(fill, all)
+		if err != nil {
+			return err
+		}
+	}
+}
+
+// Join the shards and write the data segment to dst.
+//
+// Only the data shards are considered.
+//
+// You must supply the exact output size you want.
+// If there are too few shards given, ErrTooFewShards will be returned.
+// If the total data size is less than outSize, ErrShortData will be returned.
+func (r rsStream) Join(dst io.Writer, shards []io.Reader, outSize int64) error {
+	// Do we have enough shards?
+	if len(shards) < r.r.DataShards {
+		return ErrTooFewShards
+	}
+
+	// Trim off parity shards if any
+	shards = shards[:r.r.DataShards]
+	for i := range shards {
+		if shards[i] == nil {
+			return StreamReadError{Err: ErrShardNoData, Stream: i}
+		}
+	}
+	// Join all shards
+	src := io.MultiReader(shards...)
+
+	// Copy data to dst
+	n, err := io.CopyN(dst, src, outSize)
+	if err == io.EOF {
+		return ErrShortData
+	}
+	if err != nil {
+		return err
+	}
+	if n != outSize {
+		return ErrShortData
+	}
+	return nil
+}
+
+// Split an input stream into the number of shards given to the encoder.
+//
+// The data will be split into equally sized shards.
+// If the data size isn't divisible by the number of shards,
+// the last shard will contain extra zeros.
+//
+// You must supply the total size of your input.
+// 'ErrShortData' will be returned if it is unable to retrieve the
+// number of bytes indicated.
+func (r rsStream) Split(data io.Reader, dst []io.Writer, size int64) error {
+	if size == 0 {
+		return ErrShortData
+	}
+	if len(dst) != r.r.DataShards {
+		return ErrInvShardNum
+	}
+
+	for i := range dst {
+		if dst[i] == nil {
+			return StreamWriteError{Err: ErrShardNoData, Stream: i}
+		}
+	}
+
+	// Calculate number of bytes per shard.
+	perShard := (size + int64(r.r.DataShards) - 1) / int64(r.r.DataShards)
+
+	// Pad data to r.Shards*perShard.
+	padding := make([]byte, (int64(r.r.Shards)*perShard)-size)
+	data = io.MultiReader(data, bytes.NewBuffer(padding))
+
+	// Split into equal-length shards and copy.
+	for i := range dst {
+		n, err := io.CopyN(dst[i], data, perShard)
+		if err != io.EOF && err != nil {
+			return err
+		}
+		if n != perShard {
+			return ErrShortData
+		}
+	}
+
+	return nil
+}

+ 0 - 0
vendor/github.com/AudriusButkevicius/kcp-go/LICENSE → vendor/github.com/xtaci/kcp-go/LICENSE


+ 13 - 0
vendor/github.com/AudriusButkevicius/kcp-go/blacklist.go → vendor/github.com/xtaci/kcp-go/blacklist.go

@@ -6,6 +6,19 @@ import (
 )
 
 var (
+	// BlacklistDuration sets a duration for which a session is blacklisted
+	// once it's established. This is similar to the TIME_WAIT state in TCP, whereby
+	// any connection attempt with the same session parameters is ignored for
+	// some amount of time.
+	//
+	// This is only useful when dial attempts happen from a pre-determined port,
+	// for example when you are dialing from the same connection you are listening on
+	// to punch through NAT, and helps with the fact that KCP is stateless.
+	// This helps better deal with scenarios where a process on one side (A)
+	// gets restarted, and stray packets from the other side (B) make it look
+	// as if someone is trying to connect to A. Even if the session dies on B,
+	// new stray reply packets from A resurrect the session on B, causing the
+	// session to stay alive forever.
 	BlacklistDuration time.Duration
 	blacklist         = blacklistMap{
 		entries: make(map[sessionKey]time.Time),

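As a hedged sketch (not part of this changeset), a caller could tune this exported variable before listening or dialing; the one-minute value below is purely illustrative, not a recommendation:

package main

import (
	"time"

	"github.com/xtaci/kcp-go"
)

func init() {
	// Quarantine dead sessions for a TIME_WAIT-like period so stray
	// packets cannot resurrect them. One minute is an arbitrary choice.
	kcp.BlacklistDuration = time.Minute
}

func main() {
	// ... set up KCP listeners/dialers as usual ...
}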
+ 0 - 0
vendor/github.com/AudriusButkevicius/kcp-go/crypt.go → vendor/github.com/xtaci/kcp-go/crypt.go


+ 36 - 28
vendor/github.com/AudriusButkevicius/kcp-go/fec.go → vendor/github.com/xtaci/kcp-go/fec.go

@@ -4,7 +4,7 @@ import (
 	"encoding/binary"
 	"sync/atomic"
 
-	"github.com/templexxx/reedsolomon"
+	"github.com/klauspost/reedsolomon"
 )
 
 const (
@@ -34,6 +34,9 @@ type (
 		decodeCache [][]byte
 		flagCache   []bool
 
+		// zeros
+		zeros []byte
+
 		// RS decoder
 		codec reedsolomon.Encoder
 	}
@@ -47,19 +50,20 @@ func newFECDecoder(rxlimit, dataShards, parityShards int) *fecDecoder {
 		return nil
 	}
 
-	fec := new(fecDecoder)
-	fec.rxlimit = rxlimit
-	fec.dataShards = dataShards
-	fec.parityShards = parityShards
-	fec.shardSize = dataShards + parityShards
-	enc, err := reedsolomon.New(dataShards, parityShards)
+	dec := new(fecDecoder)
+	dec.rxlimit = rxlimit
+	dec.dataShards = dataShards
+	dec.parityShards = parityShards
+	dec.shardSize = dataShards + parityShards
+	codec, err := reedsolomon.New(dataShards, parityShards)
 	if err != nil {
 		return nil
 	}
-	fec.codec = enc
-	fec.decodeCache = make([][]byte, fec.shardSize)
-	fec.flagCache = make([]bool, fec.shardSize)
-	return fec
+	dec.codec = codec
+	dec.decodeCache = make([][]byte, dec.shardSize)
+	dec.flagCache = make([]bool, dec.shardSize)
+	dec.zeros = make([]byte, mtuLimit)
+	return dec
 }
 
 // decodeBytes a fec packet
@@ -154,7 +158,7 @@ func (dec *fecDecoder) decode(pkt fecPacket) (recovered [][]byte) {
 				if shards[k] != nil {
 					dlen := len(shards[k])
 					shards[k] = shards[k][:maxlen]
-					xorBytes(shards[k][dlen:], shards[k][dlen:], shards[k][dlen:])
+					copy(shards[k][dlen:], dec.zeros)
 				}
 			}
 			if err := dec.codec.ReconstructData(shards); err == nil {
@@ -209,6 +213,9 @@ type (
 		shardCache  [][]byte
 		encodeCache [][]byte
 
+		// zeros
+		zeros []byte
+
 		// RS encoder
 		codec reedsolomon.Encoder
 	}
@@ -218,27 +225,28 @@ func newFECEncoder(dataShards, parityShards, offset int) *fecEncoder {
 	if dataShards <= 0 || parityShards <= 0 {
 		return nil
 	}
-	fec := new(fecEncoder)
-	fec.dataShards = dataShards
-	fec.parityShards = parityShards
-	fec.shardSize = dataShards + parityShards
-	fec.paws = (0xffffffff/uint32(fec.shardSize) - 1) * uint32(fec.shardSize)
-	fec.headerOffset = offset
-	fec.payloadOffset = fec.headerOffset + fecHeaderSize
-
-	enc, err := reedsolomon.New(dataShards, parityShards)
+	enc := new(fecEncoder)
+	enc.dataShards = dataShards
+	enc.parityShards = parityShards
+	enc.shardSize = dataShards + parityShards
+	enc.paws = (0xffffffff/uint32(enc.shardSize) - 1) * uint32(enc.shardSize)
+	enc.headerOffset = offset
+	enc.payloadOffset = enc.headerOffset + fecHeaderSize
+
+	codec, err := reedsolomon.New(dataShards, parityShards)
 	if err != nil {
 		return nil
 	}
-	fec.codec = enc
+	enc.codec = codec
 
 	// caches
-	fec.encodeCache = make([][]byte, fec.shardSize)
-	fec.shardCache = make([][]byte, fec.shardSize)
-	for k := range fec.shardCache {
-		fec.shardCache[k] = make([]byte, mtuLimit)
+	enc.encodeCache = make([][]byte, enc.shardSize)
+	enc.shardCache = make([][]byte, enc.shardSize)
+	for k := range enc.shardCache {
+		enc.shardCache[k] = make([]byte, mtuLimit)
 	}
-	return fec
+	enc.zeros = make([]byte, mtuLimit)
+	return enc
 }
 
 // encode the packet, output parity shards if we have enough datashards
@@ -264,7 +272,7 @@ func (enc *fecEncoder) encode(b []byte) (ps [][]byte) {
 		for i := 0; i < enc.dataShards; i++ {
 			shard := enc.shardCache[i]
 			slen := len(shard)
-			xorBytes(shard[slen:enc.maxSize], shard[slen:enc.maxSize], shard[slen:enc.maxSize])
+			copy(shard[slen:enc.maxSize], enc.zeros)
 		}
 
 		// construct equal-sized slice with stripped header

+ 2 - 3
vendor/github.com/AudriusButkevicius/kcp-go/kcp.go → vendor/github.com/xtaci/kcp-go/kcp.go

@@ -539,10 +539,11 @@ func (kcp *KCP) Input(data []byte, regular, ackNoDelay bool) int {
 			if flag == 0 {
 				flag = 1
 				maxack = sn
+				lastackts = ts
 			} else if _itimediff(sn, maxack) > 0 {
 				maxack = sn
+				lastackts = ts
 			}
-			lastackts = ts
 		} else if cmd == IKCP_CMD_PUSH {
 			if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
 				kcp.ack_push(sn, ts)
@@ -610,8 +611,6 @@ func (kcp *KCP) Input(data []byte, regular, ackNoDelay bool) int {
 
 	if ackNoDelay && len(kcp.acklist) > 0 { // ack immediately
 		kcp.flush(true)
-	} else if kcp.rmt_wnd == 0 && len(kcp.acklist) > 0 { // window zero
-		kcp.flush(true)
 	}
 	return 0
 }

+ 25 - 0
vendor/github.com/xtaci/kcp-go/rand.go

@@ -0,0 +1,25 @@
+package kcp
+
+import (
+	"crypto/md5"
+	"crypto/rand"
+	"io"
+)
+
+// nonceMD5 is a nonce generator for each packet header
+// which takes advantage of both MD5 and a CSPRNG (like /dev/urandom).
+// Benchmarks show it is faster than the previous CSPRNG-only method.
+type nonceMD5 struct {
+	data [md5.Size]byte
+}
+
+// Fill writes a nonce into the provided slice, using no more than md5.Size bytes.
+// The entropy is refreshed whenever the leading byte is 0.
+func (n *nonceMD5) Fill(nonce []byte) {
+	if n.data[0] == 0 { // 1/256 chance for entropy update
+		io.ReadFull(rand.Reader, n.data[:])
+	}
+	n.data = md5.Sum(n.data[:])
+	copy(nonce, n.data[:])
+	return
+}

+ 5 - 3
vendor/github.com/AudriusButkevicius/kcp-go/sess.go → vendor/github.com/xtaci/kcp-go/sess.go

@@ -4,7 +4,6 @@ import (
 	"crypto/rand"
 	"encoding/binary"
 	"hash/crc32"
-	"io"
 	"net"
 	"sync"
 	"sync/atomic"
@@ -97,6 +96,9 @@ type (
 		chWriteEvent chan struct{} // notify Write() can be called without blocking
 		chErrorEvent chan error    // notify Read() have an error
 
+		// nonce generator
+		nonce nonceMD5
+
 		isClosed bool // flag the session has Closed
 		mu       sync.Mutex
 	}
@@ -478,13 +480,13 @@ func (s *UDPSession) output(buf []byte) {
 
 	// 2&3. crc32 & encryption
 	if s.block != nil {
-		io.ReadFull(rand.Reader, ext[:nonceSize])
+		s.nonce.Fill(ext[:nonceSize])
 		checksum := crc32.ChecksumIEEE(ext[cryptHeaderSize:])
 		binary.LittleEndian.PutUint32(ext[nonceSize:], checksum)
 		s.block.Encrypt(ext, ext)
 
 		for k := range ecc {
-			io.ReadFull(rand.Reader, ecc[k][:nonceSize])
+			s.nonce.Fill(ecc[k][:nonceSize])
 			checksum := crc32.ChecksumIEEE(ecc[k][cryptHeaderSize:])
 			binary.LittleEndian.PutUint32(ecc[k][nonceSize:], checksum)
 			s.block.Encrypt(ecc[k], ecc[k])

+ 0 - 0
vendor/github.com/AudriusButkevicius/kcp-go/snmp.go → vendor/github.com/xtaci/kcp-go/snmp.go


+ 0 - 0
vendor/github.com/AudriusButkevicius/kcp-go/updater.go → vendor/github.com/xtaci/kcp-go/updater.go


+ 0 - 0
vendor/github.com/AudriusButkevicius/kcp-go/xor.go → vendor/github.com/xtaci/kcp-go/xor.go


+ 16 - 8
vendor/manifest

@@ -17,14 +17,6 @@
 			"branch": "master",
 			"notests": true
 		},
-		{
-			"importpath": "github.com/AudriusButkevicius/kcp-go",
-			"repository": "https://github.com/AudriusButkevicius/kcp-go",
-			"vcs": "git",
-			"revision": "8ae5f528469c6ab76110f41eb7a51341b7efb946",
-			"branch": "master",
-			"notests": true
-		},
 		{
 			"importpath": "github.com/AudriusButkevicius/pfilter",
 			"repository": "https://github.com/AudriusButkevicius/pfilter",
@@ -264,6 +256,14 @@
 			"branch": "master",
 			"notests": true
 		},
+		{
+			"importpath": "github.com/klauspost/reedsolomon",
+			"repository": "https://github.com/klauspost/reedsolomon",
+			"vcs": "git",
+			"revision": "0b30fa71cc8e4e9010c9aba6d0320e2e5b163b29",
+			"branch": "master",
+			"notests": true
+		},
 		{
 			"importpath": "github.com/lib/pq",
 			"repository": "https://github.com/lib/pq",
@@ -437,6 +437,14 @@
 			"path": "/qr",
 			"notests": true
 		},
+		{
+			"importpath": "github.com/xtaci/kcp-go",
+			"repository": "https://github.com/xtaci/kcp-go",
+			"vcs": "git",
+			"revision": "86eebd5cadb519b7c9306082c7eb3bcee2c49a7b",
+			"branch": "master",
+			"notests": true
+		},
 		{
 			"importpath": "github.com/xtaci/smux",
 			"repository": "https://github.com/xtaci/smux",

Some files were not shown because too many files changed in this diff