فهرست منبع

Dependency update

Jakob Borg 10 سال پیش
والد
کامیت
eba98717c9
21 فایل تغییر یافته به همراه 2097 افزوده شده و 340 حذف شده
  1. 12 8
      Godeps/Godeps.json
  2. 3 0
      Godeps/_workspace/src/github.com/juju/ratelimit/LICENSE
  3. 7 1
      Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go
  4. 119 18
      Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go
  5. 7 7
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
  6. 170 2
      Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go
  7. 84 0
      Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go
  8. 30 0
      Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go
  9. 152 49
      Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go
  10. 7 0
      Godeps/_workspace/src/github.com/thejerf/suture/.travis.yml
  11. 19 0
      Godeps/_workspace/src/github.com/thejerf/suture/LICENSE
  12. 45 0
      Godeps/_workspace/src/github.com/thejerf/suture/README.md
  13. 11 0
      Godeps/_workspace/src/github.com/thejerf/suture/pre-commit
  14. 650 0
      Godeps/_workspace/src/github.com/thejerf/suture/suture.go
  15. 49 0
      Godeps/_workspace/src/github.com/thejerf/suture/suture_simple_test.go
  16. 592 0
      Godeps/_workspace/src/github.com/thejerf/suture/suture_test.go
  17. 0 23
      Godeps/_workspace/src/golang.org/x/text/unicode/norm/Makefile
  18. 77 131
      Godeps/_workspace/src/golang.org/x/text/unicode/norm/maketables.go
  19. 3 0
      Godeps/_workspace/src/golang.org/x/text/unicode/norm/normalize.go
  20. 1 3
      Godeps/_workspace/src/golang.org/x/text/unicode/norm/tables.go
  21. 59 98
      Godeps/_workspace/src/golang.org/x/text/unicode/norm/ucd_test.go

+ 12 - 8
Godeps/Godeps.json

@@ -23,11 +23,11 @@
 		},
 		{
 			"ImportPath": "github.com/juju/ratelimit",
-			"Rev": "f9f36d11773655c0485207f0ad30dc2655f69d56"
+			"Rev": "c5abe513796336ee2869745bff0638508450e9c5"
 		},
 		{
 			"ImportPath": "github.com/kardianos/osext",
-			"Rev": "91292666f7e40f03185cdd1da7d85633c973eca7"
+			"Rev": "efacde03154693404c65e7aa7d461ac9014acd0c"
 		},
 		{
 			"ImportPath": "github.com/syncthing/protocol",
@@ -35,11 +35,15 @@
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-			"Rev": "e3f32eb300aa1e514fe8ba58d008da90a062273d"
+			"Rev": "87e4e645d80ae9c537e8f2dee52b28036a5dd75e"
 		},
 		{
 			"ImportPath": "github.com/syndtr/gosnappy/snappy",
-			"Rev": "ce8acff4829e0c2458a67ead32390ac0a381c862"
+			"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
+		},
+		{
+			"ImportPath": "github.com/thejerf/suture",
+			"Rev": "ff19fb384c3fe30f42717967eaa69da91e5f317c"
 		},
 		{
 			"ImportPath": "github.com/vitrun/qart/coding",
@@ -55,19 +59,19 @@
 		},
 		{
 			"ImportPath": "golang.org/x/crypto/bcrypt",
-			"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300"
+			"Rev": "c57d4a71915a248dbad846d60825145062b4c18e"
 		},
 		{
 			"ImportPath": "golang.org/x/crypto/blowfish",
-			"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300"
+			"Rev": "c57d4a71915a248dbad846d60825145062b4c18e"
 		},
 		{
 			"ImportPath": "golang.org/x/text/transform",
-			"Rev": "c980adc4a823548817b9c47d38c6ca6b7d7d8b6a"
+			"Rev": "2076e9cab4147459c82bc81169e46c139d358547"
 		},
 		{
 			"ImportPath": "golang.org/x/text/unicode/norm",
-			"Rev": "c980adc4a823548817b9c47d38c6ca6b7d7d8b6a"
+			"Rev": "2076e9cab4147459c82bc81169e46c139d358547"
 		}
 	]
 }

+ 3 - 0
Godeps/_workspace/src/github.com/juju/ratelimit/LICENSE

@@ -1,3 +1,6 @@
+This package contains an efficient token-bucket-based rate limiter.
+Copyright (C) 2015 Canonical Ltd.
+
 This software is licensed under the LGPLv3, included below.
 
 As a special exception to the GNU Lesser General Public License version 3

+ 7 - 1
Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go

@@ -11,12 +11,18 @@ import (
 	"fmt"
 	"os"
 	"runtime"
+	"strings"
 )
 
 func executable() (string, error) {
 	switch runtime.GOOS {
 	case "linux":
-		return os.Readlink("/proc/self/exe")
+		const deletedSuffix = " (deleted)"
+		execpath, err := os.Readlink("/proc/self/exe")
+		if err != nil {
+			return execpath, err
+		}
+		return strings.TrimSuffix(execpath, deletedSuffix), nil
 	case "netbsd":
 		return os.Readlink("/proc/curproc/exe")
 	case "openbsd", "dragonfly":

+ 119 - 18
Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go

@@ -7,35 +7,42 @@
 package osext
 
 import (
+	"bytes"
 	"fmt"
+	"io"
 	"os"
-	oexec "os/exec"
+	"os/exec"
 	"path/filepath"
 	"runtime"
 	"testing"
 )
 
-const execPath_EnvVar = "OSTEST_OUTPUT_EXECPATH"
+const (
+	executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE"
 
-func TestExecPath(t *testing.T) {
+	executableEnvValueMatch  = "match"
+	executableEnvValueDelete = "delete"
+)
+
+func TestExecutableMatch(t *testing.T) {
 	ep, err := Executable()
 	if err != nil {
-		t.Fatalf("ExecPath failed: %v", err)
+		t.Fatalf("Executable failed: %v", err)
 	}
-	// we want fn to be of the form "dir/prog"
+
+	// fullpath to be of the form "dir/prog".
 	dir := filepath.Dir(filepath.Dir(ep))
-	fn, err := filepath.Rel(dir, ep)
+	fullpath, err := filepath.Rel(dir, ep)
 	if err != nil {
 		t.Fatalf("filepath.Rel: %v", err)
 	}
-	cmd := &oexec.Cmd{}
-	// make child start with a relative program path
-	cmd.Dir = dir
-	cmd.Path = fn
-	// forge argv[0] for child, so that we can verify we could correctly
-	// get real path of the executable without influenced by argv[0].
-	cmd.Args = []string{"-", "-test.run=XXXX"}
-	cmd.Env = []string{fmt.Sprintf("%s=1", execPath_EnvVar)}
+	// Make child start with a relative program path.
+	// Alter argv[0] for child to verify getting real path without argv[0].
+	cmd := &exec.Cmd{
+		Dir:  dir,
+		Path: fullpath,
+		Env:  []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)},
+	}
 	out, err := cmd.CombinedOutput()
 	if err != nil {
 		t.Fatalf("exec(self) failed: %v", err)
@@ -49,6 +56,63 @@ func TestExecPath(t *testing.T) {
 	}
 }
 
+func TestExecutableDelete(t *testing.T) {
+	if runtime.GOOS != "linux" {
+		t.Skip()
+	}
+	fpath, err := Executable()
+	if err != nil {
+		t.Fatalf("Executable failed: %v", err)
+	}
+
+	r, w := io.Pipe()
+	stderrBuff := &bytes.Buffer{}
+	stdoutBuff := &bytes.Buffer{}
+	cmd := &exec.Cmd{
+		Path:   fpath,
+		Env:    []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)},
+		Stdin:  r,
+		Stderr: stderrBuff,
+		Stdout: stdoutBuff,
+	}
+	err = cmd.Start()
+	if err != nil {
+		t.Fatalf("exec(self) start failed: %v", err)
+	}
+
+	tempPath := fpath + "_copy"
+	_ = os.Remove(tempPath)
+
+	err = copyFile(tempPath, fpath)
+	if err != nil {
+		t.Fatalf("copy file failed: %v", err)
+	}
+	err = os.Remove(fpath)
+	if err != nil {
+		t.Fatalf("remove running test file failed: %v", err)
+	}
+	err = os.Rename(tempPath, fpath)
+	if err != nil {
+		t.Fatalf("rename copy to previous name failed: %v", err)
+	}
+
+	w.Write([]byte{0})
+	w.Close()
+
+	err = cmd.Wait()
+	if err != nil {
+		t.Fatalf("exec wait failed: %v", err)
+	}
+
+	childPath := stderrBuff.String()
+	if !filepath.IsAbs(childPath) {
+		t.Fatalf("Child returned %q, want an absolute path", childPath)
+	}
+	if !sameFile(childPath, fpath) {
+		t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath)
+	}
+}
+
 func sameFile(fn1, fn2 string) bool {
 	fi1, err := os.Stat(fn1)
 	if err != nil {
@@ -60,10 +124,30 @@ func sameFile(fn1, fn2 string) bool {
 	}
 	return os.SameFile(fi1, fi2)
 }
+func copyFile(dest, src string) error {
+	df, err := os.Create(dest)
+	if err != nil {
+		return err
+	}
+	defer df.Close()
+
+	sf, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer sf.Close()
 
-func init() {
-	if e := os.Getenv(execPath_EnvVar); e != "" {
-		// first chdir to another path
+	_, err = io.Copy(df, sf)
+	return err
+}
+
+func TestMain(m *testing.M) {
+	env := os.Getenv(executableEnvVar)
+	switch env {
+	case "":
+		os.Exit(m.Run())
+	case executableEnvValueMatch:
+		// First chdir to another path.
 		dir := "/"
 		if runtime.GOOS == "windows" {
 			dir = filepath.VolumeName(".")
@@ -74,6 +158,23 @@ func init() {
 		} else {
 			fmt.Fprint(os.Stderr, ep)
 		}
-		os.Exit(0)
+	case executableEnvValueDelete:
+		bb := make([]byte, 1)
+		var err error
+		n, err := os.Stdin.Read(bb)
+		if err != nil {
+			fmt.Fprint(os.Stderr, "ERROR: ", err)
+			os.Exit(2)
+		}
+		if n != 1 {
+			fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n)
+			os.Exit(2)
+		}
+		if ep, err := Executable(); err != nil {
+			fmt.Fprint(os.Stderr, "ERROR: ", err)
+		} else {
+			fmt.Fprint(os.Stderr, ep)
+		}
 	}
+	os.Exit(0)
 }

+ 7 - 7
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go

@@ -153,7 +153,7 @@ type Options struct {
 	BlockCacher Cacher
 
 	// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
-	// Use -1 for zero, this has same effect with specifying NoCacher to BlockCacher.
+	// Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher.
 	//
 	// The default value is 8MiB.
 	BlockCacheCapacity int
@@ -308,7 +308,7 @@ type Options struct {
 	OpenFilesCacher Cacher
 
 	// OpenFilesCacheCapacity defines the capacity of the open files caching.
-	// Use -1 for zero, this has same effect with specifying NoCacher to OpenFilesCacher.
+	// Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher.
 	//
 	// The default value is 500.
 	OpenFilesCacheCapacity int
@@ -355,9 +355,9 @@ func (o *Options) GetBlockCacher() Cacher {
 }
 
 func (o *Options) GetBlockCacheCapacity() int {
-	if o == nil || o.BlockCacheCapacity <= 0 {
+	if o == nil || o.BlockCacheCapacity == 0 {
 		return DefaultBlockCacheCapacity
-	} else if o.BlockCacheCapacity == -1 {
+	} else if o.BlockCacheCapacity < 0 {
 		return 0
 	}
 	return o.BlockCacheCapacity
@@ -497,7 +497,7 @@ func (o *Options) GetMaxMemCompationLevel() int {
 	if o != nil {
 		if o.MaxMemCompationLevel > 0 {
 			level = o.MaxMemCompationLevel
-		} else if o.MaxMemCompationLevel == -1 {
+		} else if o.MaxMemCompationLevel < 0 {
 			level = 0
 		}
 	}
@@ -525,9 +525,9 @@ func (o *Options) GetOpenFilesCacher() Cacher {
 }
 
 func (o *Options) GetOpenFilesCacheCapacity() int {
-	if o == nil || o.OpenFilesCacheCapacity <= 0 {
+	if o == nil || o.OpenFilesCacheCapacity == 0 {
 		return DefaultOpenFilesCacheCapacity
-	} else if o.OpenFilesCacheCapacity == -1 {
+	} else if o.OpenFilesCacheCapacity < 0 {
 		return 0
 	}
 	return o.OpenFilesCacheCapacity

+ 170 - 2
Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go

@@ -7,10 +7,15 @@ package snappy
 import (
 	"encoding/binary"
 	"errors"
+	"io"
 )
 
-// ErrCorrupt reports that the input is invalid.
-var ErrCorrupt = errors.New("snappy: corrupt input")
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("snappy: corrupt input")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("snappy: unsupported input")
+)
 
 // DecodedLen returns the length of the decoded block.
 func DecodedLen(src []byte) (int, error) {
@@ -122,3 +127,166 @@ func Decode(dst, src []byte) ([]byte, error) {
 	}
 	return dst[:d], nil
 }
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r:       r,
+		decoded: make([]byte, maxUncompressedChunkLen),
+		buf:     make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
+	}
+}
+
+// Reader is an io.Reader than can read Snappy-compressed bytes.
+type Reader struct {
+	r       io.Reader
+	err     error
+	decoded []byte
+	buf     []byte
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j       int
+	readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	for {
+		if r.i < r.j {
+			n := copy(p, r.decoded[r.i:r.j])
+			r.i += n
+			return n, nil
+		}
+		if !r.readFull(r.buf[:4]) {
+			return 0, r.err
+		}
+		chunkType := r.buf[0]
+		if !r.readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+
+		// The chunk types are specified at
+		// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			n, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if _, err := Decode(r.decoded, buf); err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeUncompressedData:
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n := chunkLen - checksumSize
+			if !r.readFull(r.decoded[:n]) {
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)]) {
+				return 0, r.err
+			}
+			for i := 0; i < len(magicBody); i++ {
+				if r.buf[i] != magicBody[i] {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			r.err = ErrUnsupported
+			return 0, r.err
+
+		} else {
+			// Section 4.4 Padding (chunk type 0xfe).
+			// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+			if !r.readFull(r.buf[:chunkLen]) {
+				return 0, r.err
+			}
+		}
+	}
+}

+ 84 - 0
Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go

@@ -6,6 +6,7 @@ package snappy
 
 import (
 	"encoding/binary"
+	"io"
 )
 
 // We limit how far copy back-references can go, the same as the C++ code.
@@ -172,3 +173,86 @@ func MaxEncodedLen(srcLen int) int {
 	// This last factor dominates the blowup, so the final estimate is:
 	return 32 + srcLen + srcLen/6
 }
+
+// NewWriter returns a new Writer that compresses to w, using the framing
+// format described at
+// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:   w,
+		enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
+	}
+}
+
+// Writer is an io.Writer than can write Snappy-compressed bytes.
+type Writer struct {
+	w           io.Writer
+	err         error
+	enc         []byte
+	buf         [checksumSize + chunkHeaderSize]byte
+	wroteHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	w.w = writer
+	w.err = nil
+	w.wroteHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (n int, errRet error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	if !w.wroteHeader {
+		copy(w.enc, magicChunk)
+		if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
+			w.err = err
+			return n, err
+		}
+		w.wroteHeader = true
+	}
+	for len(p) > 0 {
+		var uncompressed []byte
+		if len(p) > maxUncompressedChunkLen {
+			uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
+		} else {
+			uncompressed, p = p, nil
+		}
+		checksum := crc(uncompressed)
+
+		// Compress the buffer, discarding the result if the improvement
+		// isn't at least 12.5%.
+		chunkType := uint8(chunkTypeCompressedData)
+		chunkBody, err := Encode(w.enc, uncompressed)
+		if err != nil {
+			w.err = err
+			return n, err
+		}
+		if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
+			chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
+		}
+
+		chunkLen := 4 + len(chunkBody)
+		w.buf[0] = chunkType
+		w.buf[1] = uint8(chunkLen >> 0)
+		w.buf[2] = uint8(chunkLen >> 8)
+		w.buf[3] = uint8(chunkLen >> 16)
+		w.buf[4] = uint8(checksum >> 0)
+		w.buf[5] = uint8(checksum >> 8)
+		w.buf[6] = uint8(checksum >> 16)
+		w.buf[7] = uint8(checksum >> 24)
+		if _, err = w.w.Write(w.buf[:]); err != nil {
+			w.err = err
+			return n, err
+		}
+		if _, err = w.w.Write(chunkBody); err != nil {
+			w.err = err
+			return n, err
+		}
+		n += len(uncompressed)
+	}
+	return n, nil
+}

+ 30 - 0
Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go

@@ -8,6 +8,10 @@
 // The C++ snappy implementation is at http://code.google.com/p/snappy/
 package snappy
 
+import (
+	"hash/crc32"
+)
+
 /*
 Each encoded block begins with the varint-encoded length of the decoded data,
 followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
@@ -36,3 +40,29 @@ const (
 	tagCopy2   = 0x02
 	tagCopy4   = 0x03
 )
+
+const (
+	checksumSize    = 4
+	chunkHeaderSize = 4
+	magicChunk      = "\xff\x06\x00\x00" + magicBody
+	magicBody       = "sNaPpY"
+	// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
+	maxUncompressedChunkLen = 65536
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}

+ 152 - 49
Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go

@@ -18,7 +18,10 @@ import (
 	"testing"
 )
 
-var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
+var (
+	download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
+	testdata = flag.String("testdata", "testdata", "Directory containing the test data")
+)
 
 func roundtrip(b, ebuf, dbuf []byte) error {
 	e, err := Encode(ebuf, b)
@@ -55,11 +58,11 @@ func TestSmallCopy(t *testing.T) {
 }
 
 func TestSmallRand(t *testing.T) {
-	rand.Seed(27354294)
+	rng := rand.New(rand.NewSource(27354294))
 	for n := 1; n < 20000; n += 23 {
 		b := make([]byte, n)
-		for i, _ := range b {
-			b[i] = uint8(rand.Uint32())
+		for i := range b {
+			b[i] = uint8(rng.Uint32())
 		}
 		if err := roundtrip(b, nil, nil); err != nil {
 			t.Fatal(err)
@@ -70,7 +73,7 @@ func TestSmallRand(t *testing.T) {
 func TestSmallRegular(t *testing.T) {
 	for n := 1; n < 20000; n += 23 {
 		b := make([]byte, n)
-		for i, _ := range b {
+		for i := range b {
 			b[i] = uint8(i%10 + 'a')
 		}
 		if err := roundtrip(b, nil, nil); err != nil {
@@ -79,6 +82,120 @@ func TestSmallRegular(t *testing.T) {
 	}
 }
 
+func cmp(a, b []byte) error {
+	if len(a) != len(b) {
+		return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
+	}
+	for i := range a {
+		if a[i] != b[i] {
+			return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
+		}
+	}
+	return nil
+}
+
+func TestFramingFormat(t *testing.T) {
+	// src is comprised of alternating 1e5-sized sequences of random
+	// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
+	// because it is larger than maxUncompressedChunkLen (64k).
+	src := make([]byte, 1e6)
+	rng := rand.New(rand.NewSource(1))
+	for i := 0; i < 10; i++ {
+		if i%2 == 0 {
+			for j := 0; j < 1e5; j++ {
+				src[1e5*i+j] = uint8(rng.Intn(256))
+			}
+		} else {
+			for j := 0; j < 1e5; j++ {
+				src[1e5*i+j] = uint8(i)
+			}
+		}
+	}
+
+	buf := new(bytes.Buffer)
+	if _, err := NewWriter(buf).Write(src); err != nil {
+		t.Fatalf("Write: encoding: %v", err)
+	}
+	dst, err := ioutil.ReadAll(NewReader(buf))
+	if err != nil {
+		t.Fatalf("ReadAll: decoding: %v", err)
+	}
+	if err := cmp(dst, src); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestReaderReset(t *testing.T) {
+	gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
+	buf := new(bytes.Buffer)
+	if _, err := NewWriter(buf).Write(gold); err != nil {
+		t.Fatalf("Write: %v", err)
+	}
+	encoded, invalid, partial := buf.String(), "invalid", "partial"
+	r := NewReader(nil)
+	for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
+		if s == partial {
+			r.Reset(strings.NewReader(encoded))
+			if _, err := r.Read(make([]byte, 101)); err != nil {
+				t.Errorf("#%d: %v", i, err)
+				continue
+			}
+			continue
+		}
+		r.Reset(strings.NewReader(s))
+		got, err := ioutil.ReadAll(r)
+		switch s {
+		case encoded:
+			if err != nil {
+				t.Errorf("#%d: %v", i, err)
+				continue
+			}
+			if err := cmp(got, gold); err != nil {
+				t.Errorf("#%d: %v", i, err)
+				continue
+			}
+		case invalid:
+			if err == nil {
+				t.Errorf("#%d: got nil error, want non-nil", i)
+				continue
+			}
+		}
+	}
+}
+
+func TestWriterReset(t *testing.T) {
+	gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
+	var gots, wants [][]byte
+	const n = 20
+	w, failed := NewWriter(nil), false
+	for i := 0; i <= n; i++ {
+		buf := new(bytes.Buffer)
+		w.Reset(buf)
+		want := gold[:len(gold)*i/n]
+		if _, err := w.Write(want); err != nil {
+			t.Errorf("#%d: Write: %v", i, err)
+			failed = true
+			continue
+		}
+		got, err := ioutil.ReadAll(NewReader(buf))
+		if err != nil {
+			t.Errorf("#%d: ReadAll: %v", i, err)
+			failed = true
+			continue
+		}
+		gots = append(gots, got)
+		wants = append(wants, want)
+	}
+	if failed {
+		return
+	}
+	for i := range gots {
+		if err := cmp(gots[i], wants[i]); err != nil {
+			t.Errorf("#%d: %v", i, err)
+		}
+	}
+}
+
 func benchDecode(b *testing.B, src []byte) {
 	encoded, err := Encode(nil, src)
 	if err != nil {
@@ -102,7 +219,7 @@ func benchEncode(b *testing.B, src []byte) {
 	}
 }
 
-func readFile(b *testing.B, filename string) []byte {
+func readFile(b testing.TB, filename string) []byte {
 	src, err := ioutil.ReadFile(filename)
 	if err != nil {
 		b.Fatalf("failed reading %s: %s", filename, err)
@@ -144,7 +261,7 @@ func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
 func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
 
 // testFiles' values are copied directly from
-// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc.
+// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
 // The label field is unused in snappy-go.
 var testFiles = []struct {
 	label    string
@@ -152,29 +269,36 @@ var testFiles = []struct {
 }{
 	{"html", "html"},
 	{"urls", "urls.10K"},
-	{"jpg", "house.jpg"},
-	{"pdf", "mapreduce-osdi-1.pdf"},
+	{"jpg", "fireworks.jpeg"},
+	{"jpg_200", "fireworks.jpeg"},
+	{"pdf", "paper-100k.pdf"},
 	{"html4", "html_x_4"},
-	{"cp", "cp.html"},
-	{"c", "fields.c"},
-	{"lsp", "grammar.lsp"},
-	{"xls", "kennedy.xls"},
 	{"txt1", "alice29.txt"},
 	{"txt2", "asyoulik.txt"},
 	{"txt3", "lcet10.txt"},
 	{"txt4", "plrabn12.txt"},
-	{"bin", "ptt5"},
-	{"sum", "sum"},
-	{"man", "xargs.1"},
 	{"pb", "geo.protodata"},
 	{"gaviota", "kppkn.gtb"},
 }
 
 // The test data files are present at this canonical URL.
-const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/"
+const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
 
 func downloadTestdata(basename string) (errRet error) {
-	filename := filepath.Join("testdata", basename)
+	filename := filepath.Join(*testdata, basename)
+	if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
+		return nil
+	}
+
+	if !*download {
+		return fmt.Errorf("test data not found; skipping benchmark without the -download flag")
+	}
+	// Download the official snappy C++ implementation reference test data
+	// files for benchmarking.
+	if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
+		return fmt.Errorf("failed to create testdata: %s", err)
+	}
+
 	f, err := os.Create(filename)
 	if err != nil {
 		return fmt.Errorf("failed to create %s: %s", filename, err)
@@ -185,36 +309,27 @@ func downloadTestdata(basename string) (errRet error) {
 			os.Remove(filename)
 		}
 	}()
-	resp, err := http.Get(baseURL + basename)
+	url := baseURL + basename
+	resp, err := http.Get(url)
 	if err != nil {
-		return fmt.Errorf("failed to download %s: %s", baseURL+basename, err)
+		return fmt.Errorf("failed to download %s: %s", url, err)
 	}
 	defer resp.Body.Close()
+	if s := resp.StatusCode; s != http.StatusOK {
+		return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
+	}
 	_, err = io.Copy(f, resp.Body)
 	if err != nil {
-		return fmt.Errorf("failed to write %s: %s", filename, err)
+		return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
 	}
 	return nil
 }
 
 func benchFile(b *testing.B, n int, decode bool) {
-	filename := filepath.Join("testdata", testFiles[n].filename)
-	if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 {
-		if !*download {
-			b.Fatal("test data not found; skipping benchmark without the -download flag")
-		}
-		// Download the official snappy C++ implementation reference test data
-		// files for benchmarking.
-		if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) {
-			b.Fatalf("failed to create testdata: %s", err)
-		}
-		for _, tf := range testFiles {
-			if err := downloadTestdata(tf.filename); err != nil {
-				b.Fatalf("failed to download testdata: %s", err)
-			}
-		}
+	if err := downloadTestdata(testFiles[n].filename); err != nil {
+		b.Fatalf("failed to download testdata: %s", err)
 	}
-	data := readFile(b, filename)
+	data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
 	if decode {
 		benchDecode(b, data)
 	} else {
@@ -235,12 +350,6 @@ func Benchmark_UFlat8(b *testing.B)  { benchFile(b, 8, true) }
 func Benchmark_UFlat9(b *testing.B)  { benchFile(b, 9, true) }
 func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
 func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
-func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) }
-func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) }
-func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) }
-func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) }
-func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) }
-func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) }
 func Benchmark_ZFlat0(b *testing.B)  { benchFile(b, 0, false) }
 func Benchmark_ZFlat1(b *testing.B)  { benchFile(b, 1, false) }
 func Benchmark_ZFlat2(b *testing.B)  { benchFile(b, 2, false) }
@@ -253,9 +362,3 @@ func Benchmark_ZFlat8(b *testing.B)  { benchFile(b, 8, false) }
 func Benchmark_ZFlat9(b *testing.B)  { benchFile(b, 9, false) }
 func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
 func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
-func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) }
-func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) }
-func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) }
-func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) }
-func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) }
-func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) }

+ 7 - 0
Godeps/_workspace/src/github.com/thejerf/suture/.travis.yml

@@ -0,0 +1,7 @@
+language: go
+go:
+  - 1.1
+  - 1.2
+  - 1.3
+  - 1.4
+  - tip

+ 19 - 0
Godeps/_workspace/src/github.com/thejerf/suture/LICENSE

@@ -0,0 +1,19 @@
+Copyright (c) 2014 Barracuda Networks, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 45 - 0
Godeps/_workspace/src/github.com/thejerf/suture/README.md

@@ -0,0 +1,45 @@
+Suture
+======
+
+[![Build Status](https://travis-ci.org/thejerf/suture.png?branch=master)](https://travis-ci.org/thejerf/suture)
+
+Suture provides Erlang-ish supervisor trees for Go. "Supervisor trees" ->
+"sutree" -> "suture" -> holds your code together when it's trying to die.
+
+This is intended to be a production-quality library going into code that I
+will be very early on the phone tree to support when it goes down. However,
+it has not been deployed into something quite that serious yet. (I will
+update this statement when that changes.)
+
+It is intended to deal gracefully with the real failure cases that can
+occur with supervision trees (such as burning all your CPU time endlessly
+restarting dead services), while also making no unnecessary demands on the
+"service" code, and providing hooks to perform adequate logging with in a
+production environment.
+
+[A blog post describing the design decisions](http://www.jerf.org/iri/post/2930)
+is available.
+
+This module is fully covered with [godoc](http://godoc.org/github.com/thejerf/suture),
+including an example, usage, and everything else you might expect from a
+README.md on GitHub. (DRY.)
+
+This is not currently tagged with particular git tags for Go as this is
+currently considered to be alpha code. As I move this into production and
+feel more confident about it, I'll give it relevant tags.
+
+Code Signing
+------------
+
+Starting with the commit after ac7cf8591b, I will be signing this repository
+with the ["jerf" keybase account](https://keybase.io/jerf).
+
+Aspiration
+----------
+
+One of the big wins the Erlang community has with their pervasive OTP
+support is that it makes it easy for them to distribute libraries that
+easily fit into the OTP paradigm. It ought to someday be considered a good
+idea to distribute libraries that provide some sort of supervisor tree
+functionality out of the box. It is possible to provide this functionality
+without explicitly depending on the Suture library.

+ 11 - 0
Godeps/_workspace/src/github.com/thejerf/suture/pre-commit

@@ -0,0 +1,11 @@
+#!/bin/bash
+
+GOLINTOUT=$(golint *go)
+
+if [ ! -z "$GOLINTOUT" -o "$?" != 0 ]; then
+    echo golint failed:
+    echo $GOLINTOUT
+    exit 1
+fi
+
+go test

+ 650 - 0
Godeps/_workspace/src/github.com/thejerf/suture/suture.go

@@ -0,0 +1,650 @@
+/*
+
+Package suture provides Erlang-like supervisor trees.
+
+This implements Erlang-esque supervisor trees, as adapted for Go. This is
+intended to be an industrial-strength implementation, but it has not yet
+been deployed in a hostile environment. (It's headed there, though.)
+
+Supervisor Tree -> SuTree -> suture -> holds your code together when it's
+trying to fall apart.
+
+Why use Suture?
+
+ * You want to write bullet-resistant services that will remain available
+   despite unforeseen failure.
+ * You need the code to be smart enough not to consume 100% of the CPU
+   restarting things.
+ * You want to easily compose multiple such services in one program.
+ * You want the Erlang programmers to stop lording their supervision
+   trees over you.
+
+Suture has 100% test coverage, and is golint clean. This doesn't prove it
+free of bugs, but it shows I care.
+
+A blog post describing the design decisions is available at
+http://www.jerf.org/iri/post/2930 .
+
+Using Suture
+
+To idiomatically use Suture, create a Supervisor which is your top level
+"application" supervisor. This will often occur in your program's "main"
+function.
+
+Create "Service"s, which implement the Service interface. .Add() them
+to your Supervisor. Supervisors are also services, so you can create a
+tree structure here, depending on the exact combination of restarts
+you want to create.
+
+Finally, as what is probably the last line of your main() function, call
+.Serve() on your top level supervisor. This will start all the services
+you've defined.
+
+See the Example for an example, using a simple service that serves out
+incrementing integers.
+
+*/
+package suture
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"math"
+	"runtime"
+	"sync/atomic"
+	"time"
+)
+
+const (
+	notRunning = iota
+	normal
+	paused
+)
+
+type supervisorID uint32
+type serviceID uint32
+
+var currentSupervisorID uint32
+
+// ErrWrongSupervisor is returned by the (*Supervisor).Remove method
+// if you pass a ServiceToken from the wrong Supervisor.
+var ErrWrongSupervisor = errors.New("wrong supervisor for this service token, no service removed")
+
+// ServiceToken is an opaque identifier that can be used to terminate a service that
+// has been Add()ed to a Supervisor.
+type ServiceToken struct {
+	id uint64
+}
+
+/*
+Supervisor is the core type of the module that represents a Supervisor.
+
+Supervisors should be constructed either by New or NewSimple.
+
+Once constructed, a Supervisor should be started in one of three ways:
+
+ 1. Calling .Serve().
+ 2. Calling .ServeBackground().
+ 3. Adding it to an existing Supervisor.
+
+Calling Serve will cause the supervisor to run until it is shut down by
+an external user calling Stop() on it. If that never happens, it simply
+runs forever. I suggest creating your services in Supervisors, then making
+a Serve() call on your top-level Supervisor be the last line of your main
+func.
+
+Calling ServeBackground will CORRECTLY start the supervisor running in a
+new goroutine. You do not want to just:
+
+  go supervisor.Serve()
+
+because that will briefly create a race condition as it starts up, if you
+try to .Add() services immediately afterward.
+
+*/
+type Supervisor struct {
+	Name string
+	id   supervisorID
+
+	failureDecay     float64
+	failureThreshold float64
+	failureBackoff   time.Duration
+	timeout          time.Duration
+	log              func(string)
+	services         map[serviceID]Service
+	lastFail         time.Time
+	failures         float64
+	restartQueue     []serviceID
+	state            uint8
+	serviceCounter   serviceID
+	control          chan supervisorMessage
+	resumeTimer      <-chan time.Time
+
+	// The testing uses the ability to grab these individual logging functions
+	// and get inside of suture's handling at a deep level.
+	// If you ever come up with some need to get into these, submit a pull
+	// request to make them public and some smidge of justification, and
+	// I'll happily do it.
+	logBadStop func(Service)
+	logFailure func(service Service, currentFailures float64, failureThreshold float64, restarting bool, error interface{}, stacktrace []byte)
+	logBackoff func(*Supervisor, bool)
+
+	// avoid a dependency on github.com/thejerf/abtime by just implementing
+	// a minimal chunk.
+	getNow    func() time.Time
+	getResume func(time.Duration) <-chan time.Time
+}
+
+// Spec is used to pass arguments to the New function to create a
+// supervisor. See the New function for full documentation.
+type Spec struct {
+	Log              func(string)
+	FailureDecay     float64
+	FailureThreshold float64
+	FailureBackoff   time.Duration
+	Timeout          time.Duration
+}
+
+/*
+
+New is the full constructor function for a supervisor.
+
+The name is a friendly human name for the supervisor, used in logging. Suture
+does not care if this is unique, but it is good for your sanity if it is.
+
+If not set, the following values are used:
+
+ * Log:               A function is created that uses log.Print.
+ * FailureDecay:      30 seconds
+ * FailureThreshold:  5 failures
+ * FailureBackoff:    15 seconds
+ * Timeout:           10 seconds
+
+The Log function will be called when errors occur. Suture will log the
+following:
+
+ * When a service has failed, with a descriptive message about the
+   current backoff status, and whether it was immediately restarted
+ * When the supervisor has gone into its backoff mode, and when it
+   exits it
+ * When a service fails to stop
+
+The failureRate, failureThreshold, and failureBackoff controls how failures
+are handled, in order to avoid the supervisor failure case where the
+program does nothing but restarting failed services. If you do not
+care how failures behave, the default values should be fine for the
+vast majority of services, but if you want the details:
+
+The supervisor tracks the number of failures that have occurred, with an
+exponential decay on the count. Every FailureDecay seconds, the number of
+failures that have occurred is cut in half. (This is done smoothly with an
+exponential function.) When a failure occurs, the number of failures
+is incremented by one. When the number of failures passes the
+FailureThreshold, the entire service waits for FailureBackoff seconds
+before attempting any further restarts, at which point it resets its
+failure count to zero.
+
+Timeout is how long Suture will wait for a service to properly terminate.
+
+*/
+func New(name string, spec Spec) (s *Supervisor) {
+	s = new(Supervisor)
+
+	s.Name = name
+	s.id = supervisorID(atomic.AddUint32(&currentSupervisorID, 1))
+
+	if spec.Log == nil {
+		s.log = func(msg string) {
+			log.Print(fmt.Sprintf("Supervisor %s: %s", s.Name, msg))
+		}
+	} else {
+		s.log = spec.Log
+	}
+
+	if spec.FailureDecay == 0 {
+		s.failureDecay = 30
+	} else {
+		s.failureDecay = spec.FailureDecay
+	}
+	if spec.FailureThreshold == 0 {
+		s.failureThreshold = 5
+	} else {
+		s.failureThreshold = spec.FailureThreshold
+	}
+	if spec.FailureBackoff == 0 {
+		s.failureBackoff = time.Second * 15
+	} else {
+		s.failureBackoff = spec.FailureBackoff
+	}
+	if spec.Timeout == 0 {
+		s.timeout = time.Second * 10
+	} else {
+		s.timeout = spec.Timeout
+	}
+
+	// overriding these allows for testing the threshold behavior
+	s.getNow = time.Now
+	s.getResume = time.After
+
+	s.control = make(chan supervisorMessage)
+	s.services = make(map[serviceID]Service)
+	s.restartQueue = make([]serviceID, 0, 1)
+	s.resumeTimer = make(chan time.Time)
+
+	// set up the default logging handlers
+	s.logBadStop = func(service Service) {
+		s.log(fmt.Sprintf("Service %s failed to terminate in a timely manner", serviceName(service)))
+	}
+	s.logFailure = func(service Service, failures float64, threshold float64, restarting bool, err interface{}, st []byte) {
+		var errString string
+
+		e, canError := err.(error)
+		if canError {
+			errString = e.Error()
+		} else {
+			errString = fmt.Sprintf("%#v", err)
+		}
+
+		s.log(fmt.Sprintf("Failed service '%s' (%f failures of %f), restarting: %#v, error: %s, stacktrace: %s", serviceName(service), failures, threshold, restarting, errString, string(st)))
+	}
+	s.logBackoff = func(s *Supervisor, entering bool) {
+		if entering {
+			s.log("Entering the backoff state.")
+		} else {
+			s.log("Exiting backoff state.")
+		}
+	}
+
+	return
+}
+
+func serviceName(service Service) (serviceName string) {
+	stringer, canStringer := service.(fmt.Stringer)
+	if canStringer {
+		serviceName = stringer.String()
+	} else {
+		serviceName = fmt.Sprintf("%#v", service)
+	}
+	return
+}
+
+// NewSimple is a convenience function to create a service with just a name
+// and the sensible defaults.
+func NewSimple(name string) *Supervisor {
+	return New(name, Spec{})
+}
+
+/*
+Service is the interface that describes a service to a Supervisor.
+
+Serve Method
+
+The Serve method is called by a Supervisor to start the service.
+The service should execute within the goroutine that this is
+called in. If this function either returns or panics, the Supervisor
+will call it again.
+
+A Serve method SHOULD do as much cleanup of the state as possible,
+to prevent any corruption in the previous state from crashing the
+service again.
+
+Stop Method
+
+This method is used by the supervisor to stop the service. Calling this
+directly on a Service given to a Supervisor will simply result in the
+Service being restarted; use the Supervisor's .Remove(ServiceToken) method
+to stop a service. A supervisor will call .Stop() only once. Thus, it may
+be as destructive as it likes to get the service to stop.
+
+Once Stop has been called on a Service, the Service SHOULD NOT be
+reused in any other supervisor! Because of the impossibility of
+guaranteeing that the service has actually stopped in Go, you can't
+prove that you won't be starting two goroutines using the exact
+same memory to store state, causing completely unpredictable behavior.
+
+Stop should not return until the service has actually stopped.
+"Stopped" here is defined as "the service will stop servicing any
+further requests in the future". For instance, a common implementation
+is to receive a message on a dedicated "stop" channel and immediately
+returning. Once the stop command has been processed, the service is
+stopped.
+
+Another common Stop implementation is to forcibly close an open socket
+or other resource, which will cause detectable errors to manifest in the
+service code. Bear in mind that to perfectly correctly use this
+approach requires a bit more work to handle the chance of a Stop
+command coming in before the resource has been created.
+
+If a service does not Stop within the supervisor's timeout duration, a log
+entry will be made with a descriptive string to that effect. This does
+not guarantee that the service is hung; it may still get around to being
+properly stopped in the future. Until the service is fully stopped,
+both the service and the spawned goroutine trying to stop it will be
+"leaked".
+
+Stringer Interface
+
+It is not mandatory to implement the fmt.Stringer interface on your
+service, but if your Service does happen to implement that, the log
+messages that describe your service will use that when naming the
+service. Otherwise, you'll see the GoString of your service object,
+obtained via fmt.Sprintf("%#v", service).
+
+*/
+type Service interface {
+	Serve()
+	Stop()
+}
+
+/*
+Add adds a service to this supervisor.
+
+If the supervisor is currently running, the service will be started
+immediately. If the supervisor is not currently running, the service
+will be started when the supervisor is.
+
+The returned ServiceID may be passed to the Remove method of the Supervisor
+to terminate the service.
+*/
+func (s *Supervisor) Add(service Service) ServiceToken {
+	if s == nil {
+		panic("can't add service to nil *suture.Supervisor")
+	}
+
+	if s.state == notRunning {
+		id := s.serviceCounter
+		s.serviceCounter++
+
+		s.services[id] = service
+		s.restartQueue = append(s.restartQueue, id)
+
+		return ServiceToken{uint64(s.id)<<32 | uint64(id)}
+	}
+
+	response := make(chan serviceID)
+	s.control <- addService{service, response}
+	return ServiceToken{uint64(s.id)<<32 | uint64(<-response)}
+}
+
+// ServeBackground starts running a supervisor in its own goroutine. This
+// method does not return until it is safe to use .Add() on the Supervisor.
+func (s *Supervisor) ServeBackground() {
+	go s.Serve()
+	s.sync()
+}
+
+/*
+Serve starts the supervisor. You should call this on the top-level supervisor,
+but nothing else.
+*/
+func (s *Supervisor) Serve() {
+	if s == nil {
+		panic("Can't serve with a nil *suture.Supervisor")
+	}
+	if s.id == 0 {
+		panic("Can't call Serve on an incorrectly-constructed *suture.Supervisor")
+	}
+
+	defer func() {
+		s.state = notRunning
+	}()
+
+	if s.state != notRunning {
+		// FIXME: Don't explain why I don't need a semaphore, just use one
+		// This doesn't use a semaphore because it's just a sanity check.
+		panic("Running a supervisor while it is already running?")
+	}
+
+	s.state = normal
+
+	// for all the services I currently know about, start them
+	for _, id := range s.restartQueue {
+		service, present := s.services[id]
+		if present {
+			s.runService(service, id)
+		}
+	}
+	s.restartQueue = make([]serviceID, 0, 1)
+
+	for {
+		select {
+		case m := <-s.control:
+			switch msg := m.(type) {
+			case serviceFailed:
+				s.handleFailedService(msg.id, msg.err, msg.stacktrace)
+			case serviceEnded:
+				service, monitored := s.services[msg.id]
+				if monitored {
+					s.handleFailedService(msg.id, fmt.Sprintf("%s returned unexpectedly", service), []byte("[unknown stack trace]"))
+				}
+			case addService:
+				id := s.serviceCounter
+				s.serviceCounter++
+
+				s.services[id] = msg.service
+				s.runService(msg.service, id)
+
+				msg.response <- id
+			case removeService:
+				s.removeService(msg.id)
+			case stopSupervisor:
+				for id := range s.services {
+					s.removeService(id)
+				}
+				return
+			case listServices:
+				services := []Service{}
+				for _, service := range s.services {
+					services = append(services, service)
+				}
+				msg.c <- services
+			case syncSupervisor:
+				// this does nothing on purpose; its sole purpose is to
+				// introduce a sync point via the channel receive
+			case panicSupervisor:
+				// used only by tests
+				panic("Panicking as requested!")
+			}
+		case _ = <-s.resumeTimer:
+			// We're resuming normal operation after a pause due to
+			// excessive thrashing
+			// FIXME: Ought to permit some spacing of these functions, rather
+			// than simply hammering through them
+			s.state = normal
+			s.failures = 0
+			s.logBackoff(s, false)
+			for _, id := range s.restartQueue {
+				service, present := s.services[id]
+				if present {
+					s.runService(service, id)
+				}
+			}
+			s.restartQueue = make([]serviceID, 0, 1)
+		}
+	}
+}
+
+func (s *Supervisor) handleFailedService(id serviceID, err interface{}, stacktrace []byte) {
+	now := s.getNow()
+
+	if s.lastFail.IsZero() {
+		s.lastFail = now
+		s.failures = 1.0
+	} else {
+		sinceLastFail := now.Sub(s.lastFail).Seconds()
+		intervals := sinceLastFail / s.failureDecay
+		s.failures = s.failures*math.Pow(.5, intervals) + 1
+	}
+
+	if s.failures > s.failureThreshold {
+		s.state = paused
+		s.logBackoff(s, true)
+		s.resumeTimer = s.getResume(s.failureBackoff)
+	}
+
+	s.lastFail = now
+
+	failedService, monitored := s.services[id]
+
+	// It is possible for a service to be no longer monitored
+	// by the time we get here. In that case, just ignore it.
+	if monitored {
+		if s.state == normal {
+			s.runService(failedService, id)
+			s.logFailure(failedService, s.failures, s.failureThreshold, true, err, stacktrace)
+		} else {
+			// FIXME: When restarting, check that the service still
+			// exists (it may have been stopped in the meantime)
+			s.restartQueue = append(s.restartQueue, id)
+			s.logFailure(failedService, s.failures, s.failureThreshold, false, err, stacktrace)
+		}
+	}
+}
+
+func (s *Supervisor) runService(service Service, id serviceID) {
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				buf := make([]byte, 65535, 65535)
+				written := runtime.Stack(buf, false)
+				buf = buf[:written]
+				s.fail(id, r, buf)
+			}
+		}()
+
+		service.Serve()
+
+		s.serviceEnded(id)
+	}()
+}
+
+func (s *Supervisor) removeService(id serviceID) {
+	service, present := s.services[id]
+	if present {
+		delete(s.services, id)
+		go func() {
+			successChan := make(chan bool)
+			go func() {
+				service.Stop()
+				successChan <- true
+			}()
+
+			failChan := s.getResume(s.timeout)
+
+			select {
+			case <-successChan:
+				// Life is good!
+			case <-failChan:
+				s.logBadStop(service)
+			}
+		}()
+	}
+}
+
+// String implements the fmt.Stringer interface.
+func (s *Supervisor) String() string {
+	return s.Name
+}
+
+// sum type pattern for type-safe message passing; see
+// http://www.jerf.org/iri/post/2917
+
+type supervisorMessage interface {
+	isSupervisorMessage()
+}
+
+/*
+Remove will remove the given service from the Supervisor, and attempt to Stop() it.
+The ServiceID token comes from the Add() call.
+*/
+func (s *Supervisor) Remove(id ServiceToken) error {
+	sID := supervisorID(id.id >> 32)
+	if sID != s.id {
+		return ErrWrongSupervisor
+	}
+	s.control <- removeService{serviceID(id.id & 0xffffffff)}
+	return nil
+}
+
+/*
+
+Services returns a []Service containing a snapshot of the services this
+Supervisor is managing.
+
+*/
+func (s *Supervisor) Services() []Service {
+	ls := listServices{make(chan []Service)}
+	s.control <- ls
+	return <-ls.c
+}
+
+type listServices struct {
+	c chan []Service
+}
+
+func (ls listServices) isSupervisorMessage() {}
+
+type removeService struct {
+	id serviceID
+}
+
+func (rs removeService) isSupervisorMessage() {}
+
+func (s *Supervisor) sync() {
+	s.control <- syncSupervisor{}
+}
+
+type syncSupervisor struct {
+}
+
+func (ss syncSupervisor) isSupervisorMessage() {}
+
+func (s *Supervisor) fail(id serviceID, err interface{}, stacktrace []byte) {
+	s.control <- serviceFailed{id, err, stacktrace}
+}
+
+type serviceFailed struct {
+	id         serviceID
+	err        interface{}
+	stacktrace []byte
+}
+
+func (sf serviceFailed) isSupervisorMessage() {}
+
+func (s *Supervisor) serviceEnded(id serviceID) {
+	s.control <- serviceEnded{id}
+}
+
+type serviceEnded struct {
+	id serviceID
+}
+
+func (s serviceEnded) isSupervisorMessage() {}
+
+// added by the Add() method
+type addService struct {
+	service  Service
+	response chan serviceID
+}
+
+func (as addService) isSupervisorMessage() {}
+
+// Stop stops the Supervisor.
+func (s *Supervisor) Stop() {
+	s.control <- stopSupervisor{}
+}
+
+type stopSupervisor struct {
+}
+
+func (ss stopSupervisor) isSupervisorMessage() {}
+
+func (s *Supervisor) panic() {
+	s.control <- panicSupervisor{}
+}
+
+type panicSupervisor struct {
+}
+
+func (ps panicSupervisor) isSupervisorMessage() {}

+ 49 - 0
Godeps/_workspace/src/github.com/thejerf/suture/suture_simple_test.go

@@ -0,0 +1,49 @@
+package suture
+
+import "fmt"
+
+type Incrementor struct {
+	current int
+	next    chan int
+	stop    chan bool
+}
+
+func (i *Incrementor) Stop() {
+	fmt.Println("Stopping the service")
+	i.stop <- true
+}
+
+func (i *Incrementor) Serve() {
+	for {
+		select {
+		case i.next <- i.current:
+			i.current += 1
+		case <-i.stop:
+			// We sync here just to guarantee the output of "Stopping the service",
+			// so this passes the test reliably.
+			// Most services would simply "return" here.
+			i.stop <- true
+			return
+		}
+	}
+}
+
+func ExampleNew_simple() {
+	supervisor := NewSimple("Supervisor")
+	service := &Incrementor{0, make(chan int), make(chan bool)}
+	supervisor.Add(service)
+
+	go supervisor.ServeBackground()
+
+	fmt.Println("Got:", <-service.next)
+	fmt.Println("Got:", <-service.next)
+	supervisor.Stop()
+
+	// We sync here just to guarantee the output of "Stopping the service"
+	<-service.stop
+
+	// Output:
+	// Got: 0
+	// Got: 1
+	// Stopping the service
+}

+ 592 - 0
Godeps/_workspace/src/github.com/thejerf/suture/suture_test.go

@@ -0,0 +1,592 @@
+package suture
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+	"testing"
+	"time"
+)
+
+const (
+	Happy = iota
+	Fail
+	Panic
+	Hang
+	UseStopChan
+)
+
+var everMultistarted = false
+
+// Test that supervisors work perfectly when everything is hunky dory.
+func TestTheHappyCase(t *testing.T) {
+	t.Parallel()
+
+	s := NewSimple("A")
+	if s.String() != "A" {
+		t.Fatal("Can't get name from a supervisor")
+	}
+	service := NewService("B")
+
+	s.Add(service)
+
+	go s.Serve()
+
+	<-service.started
+
+	// If we stop the service, it just gets restarted
+	service.Stop()
+	<-service.started
+
+	// And it is shut down when we stop the supervisor
+	service.take <- UseStopChan
+	s.Stop()
+	<-service.stop
+}
+
+// Test that adding to a running supervisor does indeed start the service.
+func TestAddingToRunningSupervisor(t *testing.T) {
+	t.Parallel()
+
+	s := NewSimple("A1")
+
+	s.ServeBackground()
+	defer s.Stop()
+
+	service := NewService("B1")
+	s.Add(service)
+
+	<-service.started
+
+	services := s.Services()
+	if !reflect.DeepEqual([]Service{service}, services) {
+		t.Fatal("Can't get list of services as expected.")
+	}
+}
+
+// Test what happens when services fail.
+func TestFailures(t *testing.T) {
+	t.Parallel()
+
+	s := NewSimple("A2")
+	s.failureThreshold = 3.5
+
+	go s.Serve()
+	defer func() {
+		// to avoid deadlocks during shutdown, we have to not try to send
+		// things out on channels while we're shutting down (this undoes the
+		// logFailure override about 25 lines down)
+		s.logFailure = func(Service, float64, float64, bool, interface{}, []byte) {}
+		s.Stop()
+	}()
+	s.sync()
+
+	service1 := NewService("B2")
+	service2 := NewService("C2")
+
+	s.Add(service1)
+	<-service1.started
+	s.Add(service2)
+	<-service2.started
+
+	nowFeeder := NewNowFeeder()
+	pastVal := time.Unix(1000000, 0)
+	nowFeeder.appendTimes(pastVal)
+	s.getNow = nowFeeder.getter
+
+	resumeChan := make(chan time.Time)
+	s.getResume = func(d time.Duration) <-chan time.Time {
+		return resumeChan
+	}
+
+	failNotify := make(chan bool)
+	// use this to synchronize on here
+	s.logFailure = func(s Service, cf float64, ft float64, r bool, error interface{}, stacktrace []byte) {
+		failNotify <- r
+	}
+
+	// All that setup was for this: Service1, please return now.
+	service1.take <- Fail
+	restarted := <-failNotify
+	<-service1.started
+
+	if !restarted || s.failures != 1 || s.lastFail != pastVal {
+		t.Fatal("Did not fail in the expected manner")
+	}
+	// Getting past this means the service was restarted.
+	service1.take <- Happy
+
+	// Service2, your turn.
+	service2.take <- Fail
+	nowFeeder.appendTimes(pastVal)
+	restarted = <-failNotify
+	<-service2.started
+	if !restarted || s.failures != 2 || s.lastFail != pastVal {
+		t.Fatal("Did not fail in the expected manner")
+	}
+	// And you're back. (That is, the correct service was restarted.)
+	service2.take <- Happy
+
+	// Now, one failureDecay later, is everything working correctly?
+	oneDecayLater := time.Unix(1000030, 0)
+	nowFeeder.appendTimes(oneDecayLater)
+	service2.take <- Fail
+	restarted = <-failNotify
+	<-service2.started
+	// playing a bit fast and loose here with floating point, but...
+	// we get 2 by taking the current failure value of 2, decaying it
+	// by one interval, which cuts it in half to 1, then adding 1 again,
+	// all of which "should" be precise
+	if !restarted || s.failures != 2 || s.lastFail != oneDecayLater {
+		t.Fatal("Did not decay properly", s.lastFail, oneDecayLater)
+	}
+
+	// For a change of pace, service1 would you be so kind as to panic?
+	nowFeeder.appendTimes(oneDecayLater)
+	service1.take <- Panic
+	restarted = <-failNotify
+	<-service1.started
+	if !restarted || s.failures != 3 || s.lastFail != oneDecayLater {
+		t.Fatal("Did not correctly recover from a panic")
+	}
+
+	nowFeeder.appendTimes(oneDecayLater)
+	backingoff := make(chan bool)
+	s.logBackoff = func(s *Supervisor, backingOff bool) {
+		backingoff <- backingOff
+	}
+
+	// And with this failure, we trigger the backoff code.
+	service1.take <- Fail
+	backoff := <-backingoff
+	restarted = <-failNotify
+
+	if !backoff || restarted || s.failures != 4 {
+		t.Fatal("Broke past the threshold but did not log correctly", s.failures)
+	}
+	if service1.existing != 0 {
+		t.Fatal("service1 still exists according to itself?")
+	}
+
+	// service2 is still running, because we don't shut anything down in a
+	// backoff, we just stop restarting.
+	service2.take <- Happy
+
+	var correct bool
+	timer := time.NewTimer(time.Millisecond * 10)
+	// verify the service has not been restarted
+	// hard to get around race conditions here without simply using a timer...
+	select {
+	case service1.take <- Happy:
+		correct = false
+	case <-timer.C:
+		correct = true
+	}
+	if !correct {
+		t.Fatal("Restarted the service during the backoff interval")
+	}
+
+	// tell the supervisor the restart interval has passed
+	resumeChan <- time.Time{}
+	backoff = <-backingoff
+	<-service1.started
+	s.sync()
+	if s.failures != 0 {
+		t.Fatal("Did not reset failure count after coming back from timeout.")
+	}
+
+	nowFeeder.appendTimes(oneDecayLater)
+	service1.take <- Fail
+	restarted = <-failNotify
+	<-service1.started
+	if !restarted || backoff {
+		t.Fatal("For some reason, got that we were backing off again.", restarted, backoff)
+	}
+}
+
+func TestRunningAlreadyRunning(t *testing.T) {
+	t.Parallel()
+
+	s := NewSimple("A3")
+	go s.Serve()
+	defer s.Stop()
+
+	// ensure the supervisor has made it to its main loop
+	s.sync()
+	var errored bool
+	func() {
+		defer func() {
+			if r := recover(); r != nil {
+				errored = true
+			}
+		}()
+
+		s.Serve()
+	}()
+	if !errored {
+		t.Fatal("Supervisor failed to prevent itself from double-running.")
+	}
+}
+
+func TestFullConstruction(t *testing.T) {
+	t.Parallel()
+
+	s := New("Moo", Spec{
+		Log:              func(string) {},
+		FailureDecay:     1,
+		FailureThreshold: 2,
+		FailureBackoff:   3,
+		Timeout:          time.Second * 29,
+	})
+	if s.String() != "Moo" || s.failureDecay != 1 || s.failureThreshold != 2 || s.failureBackoff != 3 || s.timeout != time.Second*29 {
+		t.Fatal("Full construction failed somehow")
+	}
+}
+
+// This is mostly for coverage testing.
+func TestDefaultLogging(t *testing.T) {
+	t.Parallel()
+
+	s := NewSimple("A4")
+
+	service := NewService("B4")
+	s.Add(service)
+
+	s.failureThreshold = .5
+	s.failureBackoff = time.Millisecond * 25
+	go s.Serve()
+	s.sync()
+
+	<-service.started
+
+	resumeChan := make(chan time.Time)
+	s.getResume = func(d time.Duration) <-chan time.Time {
+		return resumeChan
+	}
+
+	service.take <- UseStopChan
+	service.take <- Fail
+	<-service.stop
+	resumeChan <- time.Time{}
+
+	<-service.started
+
+	service.take <- Happy
+
+	serviceName(&BarelyService{})
+
+	s.logBadStop(service)
+	s.logFailure(service, 1, 1, true, errors.New("test error"), []byte{})
+
+	s.Stop()
+}
+
+func TestNestedSupervisors(t *testing.T) {
+	t.Parallel()
+
+	super1 := NewSimple("Top5")
+	super2 := NewSimple("Nested5")
+	service := NewService("Service5")
+
+	super1.Add(super2)
+	super2.Add(service)
+
+	go super1.Serve()
+	super1.sync()
+
+	<-service.started
+	service.take <- Happy
+
+	super1.Stop()
+}
+
+func TestStoppingSupervisorStopsServices(t *testing.T) {
+	t.Parallel()
+
+	s := NewSimple("Top6")
+	service := NewService("Service 6")
+
+	s.Add(service)
+
+	go s.Serve()
+	s.sync()
+
+	<-service.started
+
+	service.take <- UseStopChan
+
+	s.Stop()
+	<-service.stop
+}
+
+func TestStoppingStillWorksWithHungServices(t *testing.T) {
+	t.Parallel()
+
+	s := NewSimple("Top7")
+	service := NewService("Service WillHang7")
+
+	s.Add(service)
+
+	go s.Serve()
+
+	<-service.started
+
+	service.take <- UseStopChan
+	service.take <- Hang
+
+	resumeChan := make(chan time.Time)
+	s.getResume = func(d time.Duration) <-chan time.Time {
+		return resumeChan
+	}
+	failNotify := make(chan struct{})
+	s.logBadStop = func(s Service) {
+		failNotify <- struct{}{}
+	}
+
+	s.Stop()
+
+	resumeChan <- time.Time{}
+	<-failNotify
+	service.release <- true
+	<-service.stop
+}
+
+func TestRemoveService(t *testing.T) {
+	t.Parallel()
+
+	s := NewSimple("Top")
+	service := NewService("ServiceToRemove8")
+
+	id := s.Add(service)
+
+	go s.Serve()
+
+	<-service.started
+	service.take <- UseStopChan
+
+	err := s.Remove(id)
+	if err != nil {
+		t.Fatal("Removing service somehow failed")
+	}
+	<-service.stop
+
+	err = s.Remove(ServiceToken{1<<36 + 1})
+	if err != ErrWrongSupervisor {
+		t.Fatal("Did not detect that the ServiceToken was wrong")
+	}
+}
+
+func TestFailureToConstruct(t *testing.T) {
+	t.Parallel()
+
+	var s *Supervisor
+
+	panics(func() {
+		s.Serve()
+	})
+
+	s = new(Supervisor)
+	panics(func() {
+		s.Serve()
+	})
+}
+
+func TestFailingSupervisors(t *testing.T) {
+	t.Parallel()
+
+	// This is a bit of a complicated test, so let me explain what
+	// all this is doing:
+	// 1. Set up a top-level supervisor with a hair-trigger backoff.
+	// 2. Add a supervisor to that.
+	// 3. To that supervisor, add a service.
+	// 4. Panic the supervisor in the middle, sending the top-level into
+	//    backoff.
+	// 5. Kill the lower level service too.
+	// 6. Verify that when the top-level service comes out of backoff,
+	//    the service ends up restarted as expected.
+
+	// Ultimately, we can't have more than a best-effort recovery here.
+	// A panic'ed supervisor can't really be trusted to have consistent state,
+	// and without *that*, we can't trust it to do anything sensible with
+	// the children it may have been running. So unlike Erlang, we can't
+	// can't really expect to be able to safely restart them or anything.
+	// Really, the "correct" answer is that the Supervisor must never panic,
+	// but in the event that it does, this verifies that it at least tries
+	// to get on with life.
+
+	// This also tests that if a Supervisor itself panics, and one of its
+	// monitored services goes down in the meantime, that the monitored
+	// service also gets correctly restarted when the supervisor does.
+
+	s1 := NewSimple("Top9")
+	s2 := NewSimple("Nested9")
+	service := NewService("Service9")
+
+	s1.Add(s2)
+	s2.Add(service)
+
+	go s1.Serve()
+	<-service.started
+
+	s1.failureThreshold = .5
+
+	// let us control precisely when s1 comes back
+	resumeChan := make(chan time.Time)
+	s1.getResume = func(d time.Duration) <-chan time.Time {
+		return resumeChan
+	}
+	failNotify := make(chan string)
+	// use this to synchronize on here
+	s1.logFailure = func(s Service, cf float64, ft float64, r bool, error interface{}, stacktrace []byte) {
+		failNotify <- fmt.Sprintf("%s", s)
+	}
+
+	s2.panic()
+
+	failing := <-failNotify
+	// that's enough sync to guarantee this:
+	if failing != "Nested9" || s1.state != paused {
+		t.Fatal("Top-level supervisor did not go into backoff as expected")
+	}
+
+	service.take <- Fail
+
+	resumeChan <- time.Time{}
+	<-service.started
+}
+
+func TestNilSupervisorAdd(t *testing.T) {
+	t.Parallel()
+
+	var s *Supervisor
+
+	defer func() {
+		if r := recover(); r == nil {
+			t.Fatal("did not panic as expected on nil add")
+		}
+	}()
+
+	s.Add(s)
+}
+
+// http://golangtutorials.blogspot.com/2011/10/gotest-unit-testing-and-benchmarking-go.html
+// claims test function are run in the same order as the source file...
+// I'm not sure if this is part of the contract, though. Especially in the
+// face of "t.Parallel()"...
+func TestEverMultistarted(t *testing.T) {
+	if everMultistarted {
+		t.Fatal("Seem to have multistarted a service at some point, bummer.")
+	}
+}
+
+// A test service that can be induced to fail, panic, or hang on demand.
+func NewService(name string) *FailableService {
+	return &FailableService{name, make(chan bool), make(chan int),
+		make(chan bool, 1), make(chan bool), make(chan bool), 0}
+}
+
+type FailableService struct {
+	name     string
+	started  chan bool
+	take     chan int
+	shutdown chan bool
+	release  chan bool
+	stop     chan bool
+	existing int
+}
+
+func (s *FailableService) Serve() {
+	if s.existing != 0 {
+		everMultistarted = true
+		panic("Multi-started the same service! " + s.name)
+	}
+	s.existing += 1
+
+	s.started <- true
+
+	useStopChan := false
+
+	for {
+		select {
+		case val := <-s.take:
+			switch val {
+			case Happy:
+				// Do nothing on purpose. Life is good!
+			case Fail:
+				s.existing -= 1
+				if useStopChan {
+					s.stop <- true
+				}
+				return
+			case Panic:
+				s.existing -= 1
+				panic("Panic!")
+			case Hang:
+				// or more specifically, "hang until I release you"
+				<-s.release
+			case UseStopChan:
+				useStopChan = true
+			}
+		case <-s.shutdown:
+			s.existing -= 1
+			if useStopChan {
+				s.stop <- true
+			}
+			return
+		}
+	}
+}
+
+func (s *FailableService) String() string {
+	return s.name
+}
+
+func (s *FailableService) Stop() {
+	s.shutdown <- true
+}
+
+type NowFeeder struct {
+	values []time.Time
+	getter func() time.Time
+	m      sync.Mutex
+}
+
+// This is used to test serviceName; it's a service without a Stringer.
+type BarelyService struct{}
+
+func (bs *BarelyService) Serve() {}
+func (bs *BarelyService) Stop()  {}
+
+func NewNowFeeder() (nf *NowFeeder) {
+	nf = new(NowFeeder)
+	nf.getter = func() time.Time {
+		nf.m.Lock()
+		defer nf.m.Unlock()
+		if len(nf.values) > 0 {
+			ret := nf.values[0]
+			nf.values = nf.values[1:]
+			return ret
+		}
+		panic("Ran out of values for NowFeeder")
+	}
+	return
+}
+
+func (nf *NowFeeder) appendTimes(t ...time.Time) {
+	nf.m.Lock()
+	defer nf.m.Unlock()
+	nf.values = append(nf.values, t...)
+}
+
+func panics(doesItPanic func()) (panics bool) {
+	defer func() {
+		if r := recover(); r != nil {
+			panics = true
+		}
+	}()
+
+	doesItPanic()
+
+	return
+}

+ 0 - 23
Godeps/_workspace/src/golang.org/x/text/unicode/norm/Makefile

@@ -1,23 +0,0 @@
-# Copyright 2011 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-maketables: maketables.go triegen.go
-	go build $^
-
-normregtest: normregtest.go
-	go build $^
-
-tables:	maketables
-	./maketables > tables.go
-	gofmt -w tables.go
-
-# Downloads from www.unicode.org, so not part
-# of standard test scripts.
-test: testtables regtest
-
-testtables: maketables
-	./maketables -test > data_test.go && go test -tags=test
-
-regtest: normregtest
-	./normregtest

+ 77 - 131
Godeps/_workspace/src/golang.org/x/text/unicode/norm/maketables.go

@@ -16,20 +16,17 @@ import (
 	"fmt"
 	"io"
 	"log"
-	"net/http"
-	"os"
-	"regexp"
 	"sort"
 	"strconv"
 	"strings"
-	"unicode"
 
+	"golang.org/x/text/internal/gen"
 	"golang.org/x/text/internal/triegen"
 	"golang.org/x/text/internal/ucd"
 )
 
 func main() {
-	flag.Parse()
+	gen.Init()
 	loadUnicodeData()
 	compactCCC()
 	loadCompositionExclusions()
@@ -46,24 +43,18 @@ func main() {
 	}
 }
 
-var url = flag.String("url",
-	"http://www.unicode.org/Public/"+unicode.Version+"/ucd/",
-	"URL of Unicode database directory")
-var tablelist = flag.String("tables",
-	"all",
-	"comma-separated list of which tables to generate; "+
-		"can be 'decomp', 'recomp', 'info' and 'all'")
-var test = flag.Bool("test",
-	false,
-	"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
-var verbose = flag.Bool("verbose",
-	false,
-	"write data to stdout as it is parsed")
-var localFiles = flag.Bool("local",
-	false,
-	"data files have been copied to the current directory; for debugging only")
-
-var logger = log.New(os.Stderr, "", log.Lshortfile)
+var (
+	tablelist = flag.String("tables",
+		"all",
+		"comma-separated list of which tables to generate; "+
+			"can be 'decomp', 'recomp', 'info' and 'all'")
+	test = flag.Bool("test",
+		false,
+		"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
+	verbose = flag.Bool("verbose",
+		false,
+		"write data to stdout as it is parsed")
+)
 
 const MaxChar = 0x10FFFF // anything above this shouldn't exist
 
@@ -189,27 +180,6 @@ func (f FormInfo) String() string {
 
 type Decomposition []rune
 
-func openReader(file string) (input io.ReadCloser) {
-	if *localFiles {
-		f, err := os.Open(file)
-		if err != nil {
-			logger.Fatal(err)
-		}
-		input = f
-	} else {
-		path := *url + file
-		resp, err := http.Get(path)
-		if err != nil {
-			logger.Fatal(err)
-		}
-		if resp.StatusCode != 200 {
-			logger.Fatal("bad GET status for "+file, resp.Status)
-		}
-		input = resp.Body
-	}
-	return
-}
-
 func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
 	decomp := strings.Split(s, " ")
 	if len(decomp) > 0 && skipfirst {
@@ -226,7 +196,7 @@ func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
 }
 
 func loadUnicodeData() {
-	f := openReader("UnicodeData.txt")
+	f := gen.OpenUCDFile("UnicodeData.txt")
 	defer f.Close()
 	p := ucd.New(f)
 	for p.Next() {
@@ -242,7 +212,7 @@ func loadUnicodeData() {
 			if len(decmap) > 0 {
 				exp, err = parseDecomposition(decmap, true)
 				if err != nil {
-					logger.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
+					log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
 				}
 				isCompat = true
 			}
@@ -261,7 +231,7 @@ func loadUnicodeData() {
 		}
 	}
 	if err := p.Err(); err != nil {
-		logger.Fatal(err)
+		log.Fatal(err)
 	}
 }
 
@@ -296,18 +266,18 @@ func compactCCC() {
 // 0958    # ...
 // See http://unicode.org/reports/tr44/ for full explanation
 func loadCompositionExclusions() {
-	f := openReader("CompositionExclusions.txt")
+	f := gen.OpenUCDFile("CompositionExclusions.txt")
 	defer f.Close()
 	p := ucd.New(f)
 	for p.Next() {
 		c := &chars[p.Rune(0)]
 		if c.excludeInComp {
-			logger.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
+			log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
 		}
 		c.excludeInComp = true
 	}
 	if e := p.Err(); e != nil {
-		logger.Fatal(e)
+		log.Fatal(e)
 	}
 }
 
@@ -542,19 +512,19 @@ func computeNonStarterCounts() {
 	}
 }
 
-func printBytes(b []byte, name string) {
-	fmt.Printf("// %s: %d bytes\n", name, len(b))
-	fmt.Printf("var %s = [...]byte {", name)
+func printBytes(w io.Writer, b []byte, name string) {
+	fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
+	fmt.Fprintf(w, "var %s = [...]byte {", name)
 	for i, c := range b {
 		switch {
 		case i%64 == 0:
-			fmt.Printf("\n// Bytes %x - %x\n", i, i+63)
+			fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
 		case i%8 == 0:
-			fmt.Printf("\n")
+			fmt.Fprintf(w, "\n")
 		}
-		fmt.Printf("0x%.2X, ", c)
+		fmt.Fprintf(w, "0x%.2X, ", c)
 	}
-	fmt.Print("\n}\n\n")
+	fmt.Fprint(w, "\n}\n\n")
 }
 
 // See forminfo.go for format.
@@ -610,13 +580,13 @@ func (m *decompSet) insert(key int, s string) {
 	m[key][s] = true
 }
 
-func printCharInfoTables() int {
+func printCharInfoTables(w io.Writer) int {
 	mkstr := func(r rune, f *FormInfo) (int, string) {
 		d := f.expandedDecomp
 		s := string([]rune(d))
 		if max := 1 << 6; len(s) >= max {
 			const msg = "%U: too many bytes in decomposition: %d >= %d"
-			logger.Fatalf(msg, r, len(s), max)
+			log.Fatalf(msg, r, len(s), max)
 		}
 		head := uint8(len(s))
 		if f.quickCheck[MComposed] != QCYes {
@@ -631,11 +601,11 @@ func printCharInfoTables() int {
 		tccc := ccc(d[len(d)-1])
 		cc := ccc(r)
 		if cc != 0 && lccc == 0 && tccc == 0 {
-			logger.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
+			log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
 		}
 		if tccc < lccc && lccc != 0 {
 			const msg = "%U: lccc (%d) must be <= tcc (%d)"
-			logger.Fatalf(msg, r, lccc, tccc)
+			log.Fatalf(msg, r, lccc, tccc)
 		}
 		index := normalDecomp
 		nTrail := chars[r].nTrailingNonStarters
@@ -652,13 +622,13 @@ func printCharInfoTables() int {
 			if lccc > 0 {
 				s += string([]byte{lccc})
 				if index == firstCCC {
-					logger.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
+					log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
 				}
 				index = firstLeadingCCC
 			}
 			if cc != lccc {
 				if cc != 0 {
-					logger.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
+					log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
 				}
 				index = firstCCCZeroExcept
 			}
@@ -680,7 +650,7 @@ func printCharInfoTables() int {
 				continue
 			}
 			if f.combinesBackward {
-				logger.Fatalf("%U: combinesBackward and decompose", c.codePoint)
+				log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
 			}
 			index, s := mkstr(c.codePoint, &f)
 			decompSet.insert(index, s)
@@ -691,7 +661,7 @@ func printCharInfoTables() int {
 	size := 0
 	positionMap := make(map[string]uint16)
 	decompositions.WriteString("\000")
-	fmt.Println("const (")
+	fmt.Fprintln(w, "const (")
 	for i, m := range decompSet {
 		sa := []string{}
 		for s := range m {
@@ -704,13 +674,13 @@ func printCharInfoTables() int {
 			positionMap[s] = uint16(p)
 		}
 		if cname[i] != "" {
-			fmt.Printf("%s = 0x%X\n", cname[i], decompositions.Len())
+			fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
 		}
 	}
-	fmt.Println("maxDecomp = 0x8000")
-	fmt.Println(")")
+	fmt.Fprintln(w, "maxDecomp = 0x8000")
+	fmt.Fprintln(w, ")")
 	b := decompositions.Bytes()
-	printBytes(b, "decomps")
+	printBytes(w, b, "decomps")
 	size += len(b)
 
 	varnames := []string{"nfc", "nfkc"}
@@ -726,7 +696,7 @@ func printCharInfoTables() int {
 				if c.ccc != ccc(d[0]) {
 					// We assume the lead ccc of a decomposition !=0 in this case.
 					if ccc(d[0]) == 0 {
-						logger.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
+						log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
 					}
 				}
 			} else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
@@ -737,9 +707,9 @@ func printCharInfoTables() int {
 				trie.Insert(c.codePoint, uint64(0x8000|v))
 			}
 		}
-		sz, err := trie.Gen(os.Stdout, triegen.Compact(&normCompacter{name: varnames[i]}))
+		sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
 		if err != nil {
-			logger.Fatal(err)
+			log.Fatal(err)
 		}
 		size += sz
 	}
@@ -755,30 +725,9 @@ func contains(sa []string, s string) bool {
 	return false
 }
 
-// Extract the version number from the URL.
-func version() string {
-	// From http://www.unicode.org/standard/versions/#Version_Numbering:
-	// for the later Unicode versions, data files are located in
-	// versioned directories.
-	fields := strings.Split(*url, "/")
-	for _, f := range fields {
-		if match, _ := regexp.MatchString(`[0-9]\.[0-9]\.[0-9]`, f); match {
-			return f
-		}
-	}
-	logger.Fatal("unknown version")
-	return "Unknown"
-}
-
-const fileHeader = `// Generated by running
-//	maketables --tables=%s --url=%s
-// DO NOT EDIT
-
-package norm
-
-`
-
 func makeTables() {
+	w := &bytes.Buffer{}
+
 	size := 0
 	if *tablelist == "" {
 		return
@@ -787,7 +736,6 @@ func makeTables() {
 	if *tablelist == "all" {
 		list = []string{"recomp", "info"}
 	}
-	fmt.Printf(fileHeader, *tablelist, *url)
 
 	// Compute maximum decomposition size.
 	max := 0
@@ -797,30 +745,30 @@ func makeTables() {
 		}
 	}
 
-	fmt.Println("const (")
-	fmt.Println("\t// Version is the Unicode edition from which the tables are derived.")
-	fmt.Printf("\tVersion = %q\n", version())
-	fmt.Println()
-	fmt.Println("\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
-	fmt.Println("\t// may need to write atomically for any Form. Making a destination buffer at")
-	fmt.Println("\t// least this size ensures that Transform can always make progress and that")
-	fmt.Println("\t// the user does not need to grow the buffer on an ErrShortDst.")
-	fmt.Printf("\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
-	fmt.Println(")\n")
+	fmt.Fprintln(w, "const (")
+	fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
+	fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
+	fmt.Fprintln(w)
+	fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
+	fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
+	fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
+	fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
+	fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
+	fmt.Fprintln(w, ")\n")
 
 	// Print the CCC remap table.
 	size += len(cccMap)
-	fmt.Printf("var ccc = [%d]uint8{", len(cccMap))
+	fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
 	for i := 0; i < len(cccMap); i++ {
 		if i%8 == 0 {
-			fmt.Println()
+			fmt.Fprintln(w)
 		}
-		fmt.Printf("%3d, ", cccMap[uint8(i)])
+		fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
 	}
-	fmt.Println("\n}\n")
+	fmt.Fprintln(w, "\n}\n")
 
 	if contains(list, "info") {
-		size += printCharInfoTables()
+		size += printCharInfoTables(w)
 	}
 
 	if contains(list, "recomp") {
@@ -842,20 +790,21 @@ func makeTables() {
 		}
 		sz := nrentries * 8
 		size += sz
-		fmt.Printf("// recompMap: %d bytes (entries only)\n", sz)
-		fmt.Println("var recompMap = map[uint32]rune{")
+		fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
+		fmt.Fprintln(w, "var recompMap = map[uint32]rune{")
 		for i, c := range chars {
 			f := c.forms[FCanonical]
 			d := f.decomp
 			if !f.isOneWay && len(d) > 0 {
 				key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
-				fmt.Printf("0x%.8X: 0x%.4X,\n", key, i)
+				fmt.Fprintf(w, "0x%.8X: 0x%.4X,\n", key, i)
 			}
 		}
-		fmt.Printf("}\n\n")
+		fmt.Fprintf(w, "}\n\n")
 	}
 
-	fmt.Printf("// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
+	fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
+	gen.WriteGoFile("tables.go", "norm", w.Bytes())
 }
 
 func printChars() {
@@ -901,7 +850,7 @@ func verifyComputed() {
 		nfc := c.forms[FCanonical]
 		nfkc := c.forms[FCompatibility]
 		if nfc.combinesBackward != nfkc.combinesBackward {
-			logger.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
+			log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
 		}
 	}
 }
@@ -913,7 +862,7 @@ func verifyComputed() {
 // 0374          ; NFD_QC; N # ...
 // See http://unicode.org/reports/tr44/ for full explanation
 func testDerived() {
-	f := openReader("DerivedNormalizationProps.txt")
+	f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
 	defer f.Close()
 	p := ucd.New(f)
 	for p.Next() {
@@ -946,12 +895,12 @@ func testDerived() {
 			log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
 		}
 		if got := c.forms[ftype].quickCheck[mode]; got != qr {
-			logger.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
+			log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
 		}
 		c.forms[ftype].verified[mode] = true
 	}
 	if err := p.Err(); err != nil {
-		logger.Fatal(err)
+		log.Fatal(err)
 	}
 	// Any unspecified value must be QCYes. Verify this.
 	for i, c := range chars {
@@ -959,20 +908,14 @@ func testDerived() {
 			for k, qr := range fd.quickCheck {
 				if !fd.verified[k] && qr != QCYes {
 					m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
-					logger.Printf(m, i, j, k, qr, c.name)
+					log.Printf(m, i, j, k, qr, c.name)
 				}
 			}
 		}
 	}
 }
 
-var testHeader = `// Generated by running
-//   maketables --test --url=%s
-// +build test
-
-package norm
-
-const (
+var testHeader = `const (
 	Yes = iota
 	No
 	Maybe
@@ -1010,8 +953,10 @@ func printTestdata() {
 		nTrail uint8
 		f      string
 	}
+
 	last := lastInfo{}
-	fmt.Printf(testHeader, *url)
+	w := &bytes.Buffer{}
+	fmt.Fprintf(w, testHeader)
 	for r, c := range chars {
 		f := c.forms[FCanonical]
 		qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
@@ -1025,9 +970,10 @@ func printTestdata() {
 		}
 		current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
 		if last != current {
-			fmt.Printf("\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
+			fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
 			last = current
 		}
 	}
-	fmt.Println("}")
+	fmt.Fprintln(w, "}")
+	gen.WriteGoFile("data_test.go", "norm", w.Bytes())
 }

+ 3 - 0
Godeps/_workspace/src/golang.org/x/text/unicode/norm/normalize.go

@@ -2,6 +2,9 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:generate go run maketables.go triegen.go
+//go:generate go run maketables.go triegen.go -test
+
 // Package norm contains types and functions for normalizing Unicode strings.
 package norm
 

+ 1 - 3
Godeps/_workspace/src/golang.org/x/text/unicode/norm/tables.go

@@ -1,6 +1,4 @@
-// Generated by running
-//	maketables --tables=all --url=http://www.unicode.org/Public/7.0.0/ucd/
-// DO NOT EDIT
+// This file was generated by go generate; DO NOT EDIT
 
 package norm
 

+ 59 - 98
Godeps/_workspace/src/golang.org/x/text/unicode/norm/normregtest.go → Godeps/_workspace/src/golang.org/x/text/unicode/norm/ucd_test.go

@@ -2,51 +2,36 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build ignore
-
-package main
+package norm
 
 import (
 	"bufio"
 	"bytes"
 	"flag"
 	"fmt"
-	"log"
-	"net/http"
-	"os"
-	"path"
 	"regexp"
 	"runtime"
 	"strconv"
 	"strings"
+	"sync"
+	"testing"
 	"time"
-	"unicode"
 	"unicode/utf8"
 
-	"golang.org/x/text/unicode/norm"
+	"golang.org/x/text/internal/gen"
 )
 
-func main() {
-	flag.Parse()
-	loadTestData()
-	CharacterByCharacterTests()
-	StandardTests()
-	PerformanceTest()
-	if errorCount == 0 {
-		fmt.Println("PASS")
-	}
-}
-
-const file = "NormalizationTest.txt"
+var long = flag.Bool("long", false,
+	"run time-consuming tests, such as tests that fetch data online")
 
-var url = flag.String("url",
-	"http://www.unicode.org/Public/"+unicode.Version+"/ucd/"+file,
-	"URL of Unicode database directory")
-var localFiles = flag.Bool("local",
-	false,
-	"data files have been copied to the current directory; for debugging only")
+var once sync.Once
 
-var logger = log.New(os.Stderr, "", log.Lshortfile)
+func skipShort(t *testing.T) {
+	if !gen.IsLocal() && !*long {
+		t.Skip("skipping test to prevent downloading; to run use -long or use -local to specify a local source")
+	}
+	once.Do(func() { loadTestData(t) })
+}
 
 // This regression test runs the test set in NormalizationTest.txt
 // (taken from http://www.unicode.org/Public/<unicode.Version>/ucd/).
@@ -124,22 +109,8 @@ var testRe = regexp.MustCompile(`^` + strings.Repeat(`([\dA-F ]+);`, 5) + ` # (.
 var counter int
 
 // Load the data form NormalizationTest.txt
-func loadTestData() {
-	if *localFiles {
-		pwd, _ := os.Getwd()
-		*url = "file://" + path.Join(pwd, file)
-	}
-	t := &http.Transport{}
-	t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
-	c := &http.Client{Transport: t}
-	resp, err := c.Get(*url)
-	if err != nil {
-		logger.Fatal(err)
-	}
-	if resp.StatusCode != 200 {
-		logger.Fatal("bad GET status for "+file, resp.Status)
-	}
-	f := resp.Body
+func loadTestData(t *testing.T) {
+	f := gen.OpenUCDFile("NormalizationTest.txt")
 	defer f.Close()
 	scanner := bufio.NewScanner(f)
 	for scanner.Scan() {
@@ -150,11 +121,11 @@ func loadTestData() {
 		m := partRe.FindStringSubmatch(line)
 		if m != nil {
 			if len(m) < 3 {
-				logger.Fatal("Failed to parse Part: ", line)
+				t.Fatal("Failed to parse Part: ", line)
 			}
 			i, err := strconv.Atoi(m[1])
 			if err != nil {
-				logger.Fatal(err)
+				t.Fatal(err)
 			}
 			name := m[2]
 			part = append(part, Part{name: name[:len(name)-1], number: i})
@@ -162,7 +133,7 @@ func loadTestData() {
 		}
 		m = testRe.FindStringSubmatch(line)
 		if m == nil || len(m) < 7 {
-			logger.Fatalf(`Failed to parse: "%s" result: %#v`, line, m)
+			t.Fatalf(`Failed to parse: "%s" result: %#v`, line, m)
 		}
 		test := Test{name: m[6], partnr: len(part) - 1, number: counter}
 		counter++
@@ -170,7 +141,7 @@ func loadTestData() {
 			for _, split := range strings.Split(m[j], " ") {
 				r, err := strconv.ParseUint(split, 16, 64)
 				if err != nil {
-					logger.Fatal(err)
+					t.Fatal(err)
 				}
 				if test.r == 0 {
 					// save for CharacterByCharacterTests
@@ -185,50 +156,38 @@ func loadTestData() {
 		part.tests = append(part.tests, test)
 	}
 	if scanner.Err() != nil {
-		logger.Fatal(scanner.Err())
+		t.Fatal(scanner.Err())
 	}
 }
 
-var fstr = []string{"NFC", "NFD", "NFKC", "NFKD"}
-
-var errorCount int
-
-func cmpResult(t *Test, name string, f norm.Form, gold, test, result string) {
+func cmpResult(t *testing.T, tc *Test, name string, f Form, gold, test, result string) {
 	if gold != result {
-		errorCount++
-		if errorCount > 20 {
-			return
-		}
-		logger.Printf("%s:%s: %s(%+q)=%+q; want %+q: %s",
-			t.Name(), name, fstr[f], test, result, gold, t.name)
+		t.Errorf("%s:%s: %s(%+q)=%+q; want %+q: %s",
+			tc.Name(), name, fstr[f], test, result, gold, tc.name)
 	}
 }
 
-func cmpIsNormal(t *Test, name string, f norm.Form, test string, result, want bool) {
+func cmpIsNormal(t *testing.T, tc *Test, name string, f Form, test string, result, want bool) {
 	if result != want {
-		errorCount++
-		if errorCount > 20 {
-			return
-		}
-		logger.Printf("%s:%s: %s(%+q)=%v; want %v", t.Name(), name, fstr[f], test, result, want)
+		t.Errorf("%s:%s: %s(%+q)=%v; want %v", tc.Name(), name, fstr[f], test, result, want)
 	}
 }
 
-func doTest(t *Test, f norm.Form, gold, test string) {
+func doTest(t *testing.T, tc *Test, f Form, gold, test string) {
 	testb := []byte(test)
 	result := f.Bytes(testb)
-	cmpResult(t, "Bytes", f, gold, test, string(result))
+	cmpResult(t, tc, "Bytes", f, gold, test, string(result))
 
 	sresult := f.String(test)
-	cmpResult(t, "String", f, gold, test, sresult)
+	cmpResult(t, tc, "String", f, gold, test, sresult)
 
 	acc := []byte{}
-	i := norm.Iter{}
+	i := Iter{}
 	i.InitString(f, test)
 	for !i.Done() {
 		acc = append(acc, i.Next()...)
 	}
-	cmpResult(t, "Iter.Next", f, gold, test, string(acc))
+	cmpResult(t, tc, "Iter.Next", f, gold, test, string(acc))
 
 	buf := make([]byte, 128)
 	acc = nil
@@ -237,32 +196,33 @@ func doTest(t *Test, f norm.Form, gold, test string) {
 		acc = append(acc, buf[:nDst]...)
 		p += nSrc
 	}
-	cmpResult(t, "Transform", f, gold, test, string(acc))
+	cmpResult(t, tc, "Transform", f, gold, test, string(acc))
 
 	for i := range test {
 		out := f.Append(f.Bytes([]byte(test[:i])), []byte(test[i:])...)
-		cmpResult(t, fmt.Sprintf(":Append:%d", i), f, gold, test, string(out))
+		cmpResult(t, tc, fmt.Sprintf(":Append:%d", i), f, gold, test, string(out))
 	}
-	cmpIsNormal(t, "IsNormal", f, test, f.IsNormal([]byte(test)), test == gold)
-	cmpIsNormal(t, "IsNormalString", f, test, f.IsNormalString(test), test == gold)
+	cmpIsNormal(t, tc, "IsNormal", f, test, f.IsNormal([]byte(test)), test == gold)
+	cmpIsNormal(t, tc, "IsNormalString", f, test, f.IsNormalString(test), test == gold)
 }
 
-func doConformanceTests(t *Test, partn int) {
+func doConformanceTests(t *testing.T, tc *Test, partn int) {
 	for i := 0; i <= 2; i++ {
-		doTest(t, norm.NFC, t.cols[1], t.cols[i])
-		doTest(t, norm.NFD, t.cols[2], t.cols[i])
-		doTest(t, norm.NFKC, t.cols[3], t.cols[i])
-		doTest(t, norm.NFKD, t.cols[4], t.cols[i])
+		doTest(t, tc, NFC, tc.cols[1], tc.cols[i])
+		doTest(t, tc, NFD, tc.cols[2], tc.cols[i])
+		doTest(t, tc, NFKC, tc.cols[3], tc.cols[i])
+		doTest(t, tc, NFKD, tc.cols[4], tc.cols[i])
 	}
 	for i := 3; i <= 4; i++ {
-		doTest(t, norm.NFC, t.cols[3], t.cols[i])
-		doTest(t, norm.NFD, t.cols[4], t.cols[i])
-		doTest(t, norm.NFKC, t.cols[3], t.cols[i])
-		doTest(t, norm.NFKD, t.cols[4], t.cols[i])
+		doTest(t, tc, NFC, tc.cols[3], tc.cols[i])
+		doTest(t, tc, NFD, tc.cols[4], tc.cols[i])
+		doTest(t, tc, NFKC, tc.cols[3], tc.cols[i])
+		doTest(t, tc, NFKD, tc.cols[4], tc.cols[i])
 	}
 }
 
-func CharacterByCharacterTests() {
+func TestCharacterByCharacter(t *testing.T) {
+	skipShort(t)
 	tests := part[1].tests
 	var last rune = 0
 	for i := 0; i <= len(tests); i++ { // last one is special case
@@ -274,37 +234,39 @@ func CharacterByCharacterTests() {
 		}
 		for last++; last < r; last++ {
 			// Check all characters that were not explicitly listed in the test.
-			t := &Test{partnr: 1, number: -1}
+			tc := &Test{partnr: 1, number: -1}
 			char := string(last)
-			doTest(t, norm.NFC, char, char)
-			doTest(t, norm.NFD, char, char)
-			doTest(t, norm.NFKC, char, char)
-			doTest(t, norm.NFKD, char, char)
+			doTest(t, tc, NFC, char, char)
+			doTest(t, tc, NFD, char, char)
+			doTest(t, tc, NFKC, char, char)
+			doTest(t, tc, NFKD, char, char)
 		}
 		if i < len(tests) {
-			doConformanceTests(&tests[i], 1)
+			doConformanceTests(t, &tests[i], 1)
 		}
 	}
 }
 
-func StandardTests() {
+func TestStandardTests(t *testing.T) {
+	skipShort(t)
 	for _, j := range []int{0, 2, 3} {
 		for _, test := range part[j].tests {
-			doConformanceTests(&test, j)
+			doConformanceTests(t, &test, j)
 		}
 	}
 }
 
-// PerformanceTest verifies that normalization is O(n). If any of the
+// TestPerformance verifies that normalization is O(n). If any of the
 // code does not properly check for maxCombiningChars, normalization
 // may exhibit O(n**2) behavior.
-func PerformanceTest() {
+func TestPerformance(t *testing.T) {
+	skipShort(t)
 	runtime.GOMAXPROCS(2)
 	success := make(chan bool, 1)
 	go func() {
 		buf := bytes.Repeat([]byte("\u035D"), 1024*1024)
 		buf = append(buf, "\u035B"...)
-		norm.NFC.Append(nil, buf...)
+		NFC.Append(nil, buf...)
 		success <- true
 	}()
 	timeout := time.After(1 * time.Second)
@@ -312,7 +274,6 @@ func PerformanceTest() {
 	case <-success:
 		// test completed before the timeout
 	case <-timeout:
-		errorCount++
-		logger.Printf(`unexpectedly long time to complete PerformanceTest`)
+		t.Errorf(`unexpectedly long time to complete PerformanceTest`)
 	}
 }