Jakob Borg 11 years ago
parent
commit
9a549a853b
24 changed files with 446 additions and 175 deletions
  1. Godeps/Godeps.json (+2 -2)
  2. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go (+45 -15)
  3. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go (+71 -8)
  4. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go (+18 -1)
  5. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go (+67 -56)
  6. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go (+2 -1)
  7. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go (+1 -7)
  8. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go (+2 -11)
  9. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go (+2 -8)
  10. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go (+14 -2)
  11. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go (+4 -1)
  12. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go (+21 -0)
  13. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go (+11 -1)
  14. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go (+1 -0)
  15. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go (+86 -46)
  16. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go (+1 -7)
  17. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go (+1 -1)
  18. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go (+5 -0)
  19. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go (+21 -0)
  20. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go (+42 -6)
  21. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go (+1 -0)
  22. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go (+14 -0)
  23. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go (+4 -0)
  24. Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go (+10 -2)

+ 2 - 2
Godeps/Godeps.json

@@ -1,6 +1,6 @@
 {
 	"ImportPath": "github.com/syncthing/syncthing",
-	"GoVersion": "go1.3.3",
+	"GoVersion": "go1.4rc1",
 	"Packages": [
 		"./cmd/..."
 	],
@@ -51,7 +51,7 @@
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-			"Rev": "d8d1d2a5cc2d34c950dffa2f554525415d59f737"
+			"Rev": "97e257099d2ab9578151ba85e2641e2cd14d3ca8"
 		},
 		{
 			"ImportPath": "github.com/syndtr/gosnappy/snappy",

+ 45 - 15
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go

@@ -14,6 +14,7 @@ import (
 	"testing"
 
 	"github.com/syndtr/goleveldb/leveldb/cache"
+	"github.com/syndtr/goleveldb/leveldb/filter"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 	"github.com/syndtr/goleveldb/leveldb/storage"
 )
@@ -96,21 +97,22 @@ func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) {
 	}
 }
 
-func (h *dbCorruptHarness) corrupt(ft storage.FileType, offset, n int) {
+func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
 	p := &h.dbHarness
 	t := p.t
 
-	var file storage.File
 	ff, _ := p.stor.GetFiles(ft)
-	for _, f := range ff {
-		if file == nil || f.Num() > file.Num() {
-			file = f
-		}
+	sff := files(ff)
+	sff.sort()
+	if fi < 0 {
+		fi = len(sff) - 1
 	}
-	if file == nil {
-		t.Fatalf("no such file with type %q", ft)
+	if fi >= len(sff) {
+		t.Fatalf("no such file with type %q with index %d", ft, fi)
 	}
 
+	file := sff[fi]
+
 	r, err := file.Open()
 	if err != nil {
 		t.Fatal("cannot open file: ", err)
@@ -225,8 +227,8 @@ func TestCorruptDB_Journal(t *testing.T) {
 	h.build(100)
 	h.check(100, 100)
 	h.closeDB()
-	h.corrupt(storage.TypeJournal, 19, 1)
-	h.corrupt(storage.TypeJournal, 32*1024+1000, 1)
+	h.corrupt(storage.TypeJournal, -1, 19, 1)
+	h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1)
 
 	h.openDB()
 	h.check(36, 36)
@@ -242,7 +244,7 @@ func TestCorruptDB_Table(t *testing.T) {
 	h.compactRangeAt(0, "", "")
 	h.compactRangeAt(1, "", "")
 	h.closeDB()
-	h.corrupt(storage.TypeTable, 100, 1)
+	h.corrupt(storage.TypeTable, -1, 100, 1)
 
 	h.openDB()
 	h.check(99, 99)
@@ -256,7 +258,7 @@ func TestCorruptDB_TableIndex(t *testing.T) {
 	h.build(10000)
 	h.compactMem()
 	h.closeDB()
-	h.corrupt(storage.TypeTable, -2000, 500)
+	h.corrupt(storage.TypeTable, -1, -2000, 500)
 
 	h.openDB()
 	h.check(5000, 9999)
@@ -355,7 +357,7 @@ func TestCorruptDB_CorruptedManifest(t *testing.T) {
 	h.compactMem()
 	h.compactRange("", "")
 	h.closeDB()
-	h.corrupt(storage.TypeManifest, 0, 1000)
+	h.corrupt(storage.TypeManifest, -1, 0, 1000)
 	h.openAssert(false)
 
 	h.recover()
@@ -370,7 +372,7 @@ func TestCorruptDB_CompactionInputError(t *testing.T) {
 	h.build(10)
 	h.compactMem()
 	h.closeDB()
-	h.corrupt(storage.TypeTable, 100, 1)
+	h.corrupt(storage.TypeTable, -1, 100, 1)
 
 	h.openDB()
 	h.check(9, 9)
@@ -387,7 +389,7 @@ func TestCorruptDB_UnrelatedKeys(t *testing.T) {
 	h.build(10)
 	h.compactMem()
 	h.closeDB()
-	h.corrupt(storage.TypeTable, 100, 1)
+	h.corrupt(storage.TypeTable, -1, 100, 1)
 
 	h.openDB()
 	h.put(string(tkey(1000)), string(tval(1000, ctValSize)))
@@ -470,3 +472,31 @@ func TestCorruptDB_MissingTableFiles(t *testing.T) {
 
 	h.close()
 }
+
+func TestCorruptDB_RecoverTable(t *testing.T) {
+	h := newDbCorruptHarnessWopt(t, &opt.Options{
+		WriteBuffer:         112 * opt.KiB,
+		CompactionTableSize: 90 * opt.KiB,
+		Filter:              filter.NewBloomFilter(10),
+	})
+
+	h.build(1000)
+	h.compactMem()
+	h.compactRangeAt(0, "", "")
+	h.compactRangeAt(1, "", "")
+	seq := h.db.seq
+	h.closeDB()
+	h.corrupt(storage.TypeTable, 0, 1000, 1)
+	h.corrupt(storage.TypeTable, 3, 10000, 1)
+	// Corrupted filter shouldn't affect recovery.
+	h.corrupt(storage.TypeTable, 3, 113888, 10)
+	h.corrupt(storage.TypeTable, -1, 20000, 1)
+
+	h.recover()
+	if h.db.seq != seq {
+		t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq)
+	}
+	h.check(985, 985)
+
+	h.close()
+}
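
The harness's corrupt helper now takes a file index as well as a file type: the files of that type are sorted, fi picks the one to damage, and -1 preserves the old behaviour of hitting the last (highest-numbered) file. A minimal sketch of the new calling convention inside a harness-based test (offsets are illustrative, taken from the new recovery test above):

	// corrupt(fileType, fileIndex, offset, length)
	h.corrupt(storage.TypeTable, 0, 1000, 1)   // first table file after sorting
	h.corrupt(storage.TypeTable, 3, 10000, 1)  // fourth table file
	h.corrupt(storage.TypeTable, -1, 20000, 1) // -1 selects the last table file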

+ 71 - 8
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go

@@ -269,7 +269,7 @@ func recoverTable(s *session, o *opt.Options) error {
 	tableFiles.sort()
 
 	var (
-		mSeq                                                              uint64
+		maxSeq                                                            uint64
 		recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int
 
 		// We will drop corrupted table.
@@ -324,7 +324,12 @@ func recoverTable(s *session, o *opt.Options) error {
 		if err != nil {
 			return err
 		}
-		defer reader.Close()
+		var closed bool
+		defer func() {
+			if !closed {
+				reader.Close()
+			}
+		}()
 
 		// Get file size.
 		size, err := reader.Seek(0, 2)
@@ -392,14 +397,15 @@ func recoverTable(s *session, o *opt.Options) error {
 				if err != nil {
 					return err
 				}
+				closed = true
 				reader.Close()
 				if err := file.Replace(tmp); err != nil {
 					return err
 				}
 				size = newSize
 			}
-			if tSeq > mSeq {
-				mSeq = tSeq
+			if tSeq > maxSeq {
+				maxSeq = tSeq
 			}
 			recoveredKey += tgoodKey
 			// Add table to level 0.
@@ -426,11 +432,11 @@ func recoverTable(s *session, o *opt.Options) error {
 			}
 		}
 
-		s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, mSeq)
+		s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq)
 	}
 
 	// Set sequence number.
-	rec.setSeqNum(mSeq + 1)
+	rec.setSeqNum(maxSeq)
 
 	// Create new manifest.
 	if err := s.create(); err != nil {
@@ -625,7 +631,7 @@ func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, er
 	}
 
 	v := db.s.version()
-	value, cSched, err := v.get(ikey, ro)
+	value, cSched, err := v.get(ikey, ro, false)
 	v.release()
 	if cSched {
 		// Trigger table compaction.
@@ -634,8 +640,51 @@ func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, er
 	return
 }
 
+func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
+	ikey := newIkey(key, seq, ktSeek)
+
+	em, fm := db.getMems()
+	for _, m := range [...]*memDB{em, fm} {
+		if m == nil {
+			continue
+		}
+		defer m.decref()
+
+		mk, _, me := m.mdb.Find(ikey)
+		if me == nil {
+			ukey, _, kt, kerr := parseIkey(mk)
+			if kerr != nil {
+				// Shouldn't have had happen.
+				panic(kerr)
+			}
+			if db.s.icmp.uCompare(ukey, key) == 0 {
+				if kt == ktDel {
+					return false, nil
+				}
+				return true, nil
+			}
+		} else if me != ErrNotFound {
+			return false, me
+		}
+	}
+
+	v := db.s.version()
+	_, cSched, err := v.get(ikey, ro, true)
+	v.release()
+	if cSched {
+		// Trigger table compaction.
+		db.compSendTrigger(db.tcompCmdC)
+	}
+	if err == nil {
+		ret = true
+	} else if err == ErrNotFound {
+		err = nil
+	}
+	return
+}
+
 // Get gets the value for the given key. It returns ErrNotFound if the
-// DB does not contain the key.
+// DB does not contains the key.
 //
 // The returned slice is its own copy, it is safe to modify the contents
 // of the returned slice.
@@ -651,6 +700,20 @@ func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
 	return db.get(key, se.seq, ro)
 }
 
+// Has returns true if the DB does contains the given key.
+//
+// It is safe to modify the contents of the argument after Get returns.
+func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+	err = db.ok()
+	if err != nil {
+		return
+	}
+
+	se := db.acquireSnapshot()
+	defer db.releaseSnapshot(se)
+	return db.has(key, se.seq, ro)
+}
+
 // NewIterator returns an iterator for the latest snapshot of the
 // uderlying DB.
 // The returned iterator is not goroutine-safe, but it is safe to use

+ 18 - 1
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go

@@ -90,7 +90,7 @@ func (db *DB) newSnapshot() *Snapshot {
 }
 
 // Get gets the value for the given key. It returns ErrNotFound if
-// the DB does not contain the key.
+// the DB does not contains the key.
 //
 // The caller should not modify the contents of the returned slice, but
 // it is safe to modify the contents of the argument after Get returns.
@@ -108,6 +108,23 @@ func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err er
 	return snap.db.get(key, snap.elem.seq, ro)
 }
 
+// Has returns true if the DB does contains the given key.
+//
+// It is safe to modify the contents of the argument after Get returns.
+func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+	err = snap.db.ok()
+	if err != nil {
+		return
+	}
+	snap.mu.RLock()
+	defer snap.mu.RUnlock()
+	if snap.released {
+		err = ErrSnapshotReleased
+		return
+	}
+	return snap.db.has(key, snap.elem.seq, ro)
+}
+
 // NewIterator returns an iterator for the snapshot of the uderlying DB.
 // The returned iterator is not goroutine-safe, but it is safe to use
 // multiple iterators concurrently, with each in a dedicated goroutine.
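
Snapshot gains the same method, answering against the sequence number captured at GetSnapshot time. A short sketch, assuming db is an open *leveldb.DB as in the previous example:

	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	defer snap.Release()

	_ = db.Put([]byte("gamma"), []byte("1"), nil) // written after the snapshot was taken

	ok, _ := snap.Has([]byte("gamma"), nil)
	fmt.Println(ok) // false: the write is newer than the snapshot's sequence number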

+ 67 - 56
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go

@@ -530,7 +530,7 @@ func Test_FieldsAligned(t *testing.T) {
 	testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum))
 }
 
-func TestDb_Locking(t *testing.T) {
+func TestDB_Locking(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.stor.Close()
 	h.openAssert(false)
@@ -538,7 +538,7 @@ func TestDb_Locking(t *testing.T) {
 	h.openAssert(true)
 }
 
-func TestDb_Empty(t *testing.T) {
+func TestDB_Empty(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.get("foo", false)
 
@@ -547,7 +547,7 @@ func TestDb_Empty(t *testing.T) {
 	})
 }
 
-func TestDb_ReadWrite(t *testing.T) {
+func TestDB_ReadWrite(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		h.getVal("foo", "v1")
@@ -562,7 +562,7 @@ func TestDb_ReadWrite(t *testing.T) {
 	})
 }
 
-func TestDb_PutDeleteGet(t *testing.T) {
+func TestDB_PutDeleteGet(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		h.getVal("foo", "v1")
@@ -576,7 +576,7 @@ func TestDb_PutDeleteGet(t *testing.T) {
 	})
 }
 
-func TestDb_EmptyBatch(t *testing.T) {
+func TestDB_EmptyBatch(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -588,7 +588,7 @@ func TestDb_EmptyBatch(t *testing.T) {
 	h.get("foo", false)
 }
 
-func TestDb_GetFromFrozen(t *testing.T) {
+func TestDB_GetFromFrozen(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100})
 	defer h.close()
 
@@ -614,7 +614,7 @@ func TestDb_GetFromFrozen(t *testing.T) {
 	h.get("k2", true)
 }
 
-func TestDb_GetFromTable(t *testing.T) {
+func TestDB_GetFromTable(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		h.compactMem()
@@ -622,7 +622,7 @@ func TestDb_GetFromTable(t *testing.T) {
 	})
 }
 
-func TestDb_GetSnapshot(t *testing.T) {
+func TestDB_GetSnapshot(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		bar := strings.Repeat("b", 200)
 		h.put("foo", "v1")
@@ -656,7 +656,7 @@ func TestDb_GetSnapshot(t *testing.T) {
 	})
 }
 
-func TestDb_GetLevel0Ordering(t *testing.T) {
+func TestDB_GetLevel0Ordering(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		for i := 0; i < 4; i++ {
 			h.put("bar", fmt.Sprintf("b%d", i))
@@ -679,7 +679,7 @@ func TestDb_GetLevel0Ordering(t *testing.T) {
 	})
 }
 
-func TestDb_GetOrderedByLevels(t *testing.T) {
+func TestDB_GetOrderedByLevels(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		h.compactMem()
@@ -691,7 +691,7 @@ func TestDb_GetOrderedByLevels(t *testing.T) {
 	})
 }
 
-func TestDb_GetPicksCorrectFile(t *testing.T) {
+func TestDB_GetPicksCorrectFile(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		// Arrange to have multiple files in a non-level-0 level.
 		h.put("a", "va")
@@ -715,7 +715,7 @@ func TestDb_GetPicksCorrectFile(t *testing.T) {
 	})
 }
 
-func TestDb_GetEncountersEmptyLevel(t *testing.T) {
+func TestDB_GetEncountersEmptyLevel(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		// Arrange for the following to happen:
 		//   * sstable A in level 0
@@ -770,7 +770,7 @@ func TestDb_GetEncountersEmptyLevel(t *testing.T) {
 	})
 }
 
-func TestDb_IterMultiWithDelete(t *testing.T) {
+func TestDB_IterMultiWithDelete(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("a", "va")
 		h.put("b", "vb")
@@ -796,7 +796,7 @@ func TestDb_IterMultiWithDelete(t *testing.T) {
 	})
 }
 
-func TestDb_IteratorPinsRef(t *testing.T) {
+func TestDB_IteratorPinsRef(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -820,7 +820,7 @@ func TestDb_IteratorPinsRef(t *testing.T) {
 	iter.Release()
 }
 
-func TestDb_Recover(t *testing.T) {
+func TestDB_Recover(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		h.put("baz", "v5")
@@ -842,7 +842,7 @@ func TestDb_Recover(t *testing.T) {
 	})
 }
 
-func TestDb_RecoverWithEmptyJournal(t *testing.T) {
+func TestDB_RecoverWithEmptyJournal(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		h.put("foo", "v2")
@@ -856,7 +856,7 @@ func TestDb_RecoverWithEmptyJournal(t *testing.T) {
 	})
 }
 
-func TestDb_RecoverDuringMemtableCompaction(t *testing.T) {
+func TestDB_RecoverDuringMemtableCompaction(t *testing.T) {
 	truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) {
 
 		h.stor.DelaySync(storage.TypeTable)
@@ -872,7 +872,7 @@ func TestDb_RecoverDuringMemtableCompaction(t *testing.T) {
 	})
 }
 
-func TestDb_MinorCompactionsHappen(t *testing.T) {
+func TestDB_MinorCompactionsHappen(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000})
 	defer h.close()
 
@@ -896,7 +896,7 @@ func TestDb_MinorCompactionsHappen(t *testing.T) {
 	}
 }
 
-func TestDb_RecoverWithLargeJournal(t *testing.T) {
+func TestDB_RecoverWithLargeJournal(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -921,7 +921,7 @@ func TestDb_RecoverWithLargeJournal(t *testing.T) {
 	v.release()
 }
 
-func TestDb_CompactionsGenerateMultipleFiles(t *testing.T) {
+func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
 		WriteBuffer: 10000000,
 		Compression: opt.NoCompression,
@@ -959,7 +959,7 @@ func TestDb_CompactionsGenerateMultipleFiles(t *testing.T) {
 	}
 }
 
-func TestDb_RepeatedWritesToSameKey(t *testing.T) {
+func TestDB_RepeatedWritesToSameKey(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
 	defer h.close()
 
@@ -975,7 +975,7 @@ func TestDb_RepeatedWritesToSameKey(t *testing.T) {
 	}
 }
 
-func TestDb_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
+func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
 	defer h.close()
 
@@ -993,7 +993,7 @@ func TestDb_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
 	}
 }
 
-func TestDb_SparseMerge(t *testing.T) {
+func TestDB_SparseMerge(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
 	defer h.close()
 
@@ -1031,7 +1031,7 @@ func TestDb_SparseMerge(t *testing.T) {
 	h.maxNextLevelOverlappingBytes(20 * 1048576)
 }
 
-func TestDb_SizeOf(t *testing.T) {
+func TestDB_SizeOf(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
 		Compression: opt.NoCompression,
 		WriteBuffer: 10000000,
@@ -1081,7 +1081,7 @@ func TestDb_SizeOf(t *testing.T) {
 	}
 }
 
-func TestDb_SizeOf_MixOfSmallAndLarge(t *testing.T) {
+func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
 	defer h.close()
 
@@ -1119,7 +1119,7 @@ func TestDb_SizeOf_MixOfSmallAndLarge(t *testing.T) {
 	}
 }
 
-func TestDb_Snapshot(t *testing.T) {
+func TestDB_Snapshot(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		s1 := h.getSnapshot()
@@ -1148,7 +1148,7 @@ func TestDb_Snapshot(t *testing.T) {
 	})
 }
 
-func TestDb_SnapshotList(t *testing.T) {
+func TestDB_SnapshotList(t *testing.T) {
 	db := &DB{snapsList: list.New()}
 	e0a := db.acquireSnapshot()
 	e0b := db.acquireSnapshot()
@@ -1186,7 +1186,7 @@ func TestDb_SnapshotList(t *testing.T) {
 	}
 }
 
-func TestDb_HiddenValuesAreRemoved(t *testing.T) {
+func TestDB_HiddenValuesAreRemoved(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		s := h.db.s
 
@@ -1229,7 +1229,7 @@ func TestDb_HiddenValuesAreRemoved(t *testing.T) {
 	})
 }
 
-func TestDb_DeletionMarkers2(t *testing.T) {
+func TestDB_DeletionMarkers2(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 	s := h.db.s
@@ -1270,7 +1270,7 @@ func TestDb_DeletionMarkers2(t *testing.T) {
 	h.allEntriesFor("foo", "[ ]")
 }
 
-func TestDb_CompactionTableOpenError(t *testing.T) {
+func TestDB_CompactionTableOpenError(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{CachedOpenFiles: -1})
 	defer h.close()
 
@@ -1305,7 +1305,7 @@ func TestDb_CompactionTableOpenError(t *testing.T) {
 	}
 }
 
-func TestDb_OverlapInLevel0(t *testing.T) {
+func TestDB_OverlapInLevel0(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		if h.o.GetMaxMemCompationLevel() != 2 {
 			t.Fatal("fix test to reflect the config")
@@ -1348,7 +1348,7 @@ func TestDb_OverlapInLevel0(t *testing.T) {
 	})
 }
 
-func TestDb_L0_CompactionBug_Issue44_a(t *testing.T) {
+func TestDB_L0_CompactionBug_Issue44_a(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -1368,7 +1368,7 @@ func TestDb_L0_CompactionBug_Issue44_a(t *testing.T) {
 	h.getKeyVal("(a->v)")
 }
 
-func TestDb_L0_CompactionBug_Issue44_b(t *testing.T) {
+func TestDB_L0_CompactionBug_Issue44_b(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -1397,7 +1397,7 @@ func TestDb_L0_CompactionBug_Issue44_b(t *testing.T) {
 	h.getKeyVal("(->)(c->cv)")
 }
 
-func TestDb_SingleEntryMemCompaction(t *testing.T) {
+func TestDB_SingleEntryMemCompaction(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		for i := 0; i < 10; i++ {
 			h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer))
@@ -1414,7 +1414,7 @@ func TestDb_SingleEntryMemCompaction(t *testing.T) {
 	})
 }
 
-func TestDb_ManifestWriteError(t *testing.T) {
+func TestDB_ManifestWriteError(t *testing.T) {
 	for i := 0; i < 2; i++ {
 		func() {
 			h := newDbHarness(t)
@@ -1464,7 +1464,7 @@ func assertErr(t *testing.T, err error, wanterr bool) {
 	}
 }
 
-func TestDb_ClosedIsClosed(t *testing.T) {
+func TestDB_ClosedIsClosed(t *testing.T) {
 	h := newDbHarness(t)
 	db := h.db
 
@@ -1559,7 +1559,7 @@ func (p numberComparer) Compare(a, b []byte) int {
 func (numberComparer) Separator(dst, a, b []byte) []byte { return nil }
 func (numberComparer) Successor(dst, b []byte) []byte    { return nil }
 
-func TestDb_CustomComparer(t *testing.T) {
+func TestDB_CustomComparer(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
 		Comparer:    numberComparer{},
 		WriteBuffer: 1000,
@@ -1589,7 +1589,7 @@ func TestDb_CustomComparer(t *testing.T) {
 	}
 }
 
-func TestDb_ManualCompaction(t *testing.T) {
+func TestDB_ManualCompaction(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -1627,7 +1627,7 @@ func TestDb_ManualCompaction(t *testing.T) {
 	h.tablesPerLevel("0,0,1")
 }
 
-func TestDb_BloomFilter(t *testing.T) {
+func TestDB_BloomFilter(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
 		BlockCache: opt.NoCache,
 		Filter:     filter.NewBloomFilter(10),
@@ -1680,7 +1680,7 @@ func TestDb_BloomFilter(t *testing.T) {
 	h.stor.ReleaseSync(storage.TypeTable)
 }
 
-func TestDb_Concurrent(t *testing.T) {
+func TestDB_Concurrent(t *testing.T) {
 	const n, secs, maxkey = 4, 2, 1000
 
 	runtime.GOMAXPROCS(n)
@@ -1745,7 +1745,7 @@ func TestDb_Concurrent(t *testing.T) {
 	runtime.GOMAXPROCS(1)
 }
 
-func TestDb_Concurrent2(t *testing.T) {
+func TestDB_Concurrent2(t *testing.T) {
 	const n, n2 = 4, 4000
 
 	runtime.GOMAXPROCS(n*2 + 2)
@@ -1816,7 +1816,7 @@ func TestDb_Concurrent2(t *testing.T) {
 	runtime.GOMAXPROCS(1)
 }
 
-func TestDb_CreateReopenDbOnFile(t *testing.T) {
+func TestDB_CreateReopenDbOnFile(t *testing.T) {
 	dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid()))
 	if err := os.RemoveAll(dbpath); err != nil {
 		t.Fatal("cannot remove old db: ", err)
@@ -1844,7 +1844,7 @@ func TestDb_CreateReopenDbOnFile(t *testing.T) {
 	}
 }
 
-func TestDb_CreateReopenDbOnFile2(t *testing.T) {
+func TestDB_CreateReopenDbOnFile2(t *testing.T) {
 	dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid()))
 	if err := os.RemoveAll(dbpath); err != nil {
 		t.Fatal("cannot remove old db: ", err)
@@ -1865,7 +1865,7 @@ func TestDb_CreateReopenDbOnFile2(t *testing.T) {
 	}
 }
 
-func TestDb_DeletionMarkersOnMemdb(t *testing.T) {
+func TestDB_DeletionMarkersOnMemdb(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -1876,7 +1876,7 @@ func TestDb_DeletionMarkersOnMemdb(t *testing.T) {
 	h.getKeyVal("")
 }
 
-func TestDb_LeveldbIssue178(t *testing.T) {
+func TestDB_LeveldbIssue178(t *testing.T) {
 	nKeys := (opt.DefaultCompactionTableSize / 30) * 5
 	key1 := func(i int) string {
 		return fmt.Sprintf("my_key_%d", i)
@@ -1919,7 +1919,7 @@ func TestDb_LeveldbIssue178(t *testing.T) {
 	h.assertNumKeys(nKeys)
 }
 
-func TestDb_LeveldbIssue200(t *testing.T) {
+func TestDB_LeveldbIssue200(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -1946,7 +1946,7 @@ func TestDb_LeveldbIssue200(t *testing.T) {
 	assertBytes(t, []byte("5"), iter.Key())
 }
 
-func TestDb_GoleveldbIssue74(t *testing.T) {
+func TestDB_GoleveldbIssue74(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
 		WriteBuffer: 1 * opt.MiB,
 	})
@@ -2044,7 +2044,7 @@ func TestDb_GoleveldbIssue74(t *testing.T) {
 	wg.Wait()
 }
 
-func TestDb_GetProperties(t *testing.T) {
+func TestDB_GetProperties(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -2064,7 +2064,7 @@ func TestDb_GetProperties(t *testing.T) {
 	}
 }
 
-func TestDb_GoleveldbIssue72and83(t *testing.T) {
+func TestDB_GoleveldbIssue72and83(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
 		WriteBuffer:     1 * opt.MiB,
 		CachedOpenFiles: 3,
@@ -2077,12 +2077,13 @@ func TestDb_GoleveldbIssue72and83(t *testing.T) {
 
 	randomData := func(prefix byte, i int) []byte {
 		data := make([]byte, 1+4+32+64+32)
-		_, err := crand.Reader.Read(data[1 : len(data)-4])
+		_, err := crand.Reader.Read(data[1 : len(data)-8])
 		if err != nil {
 			panic(err)
 		}
 		data[0] = prefix
-		binary.LittleEndian.PutUint32(data[len(data)-4:], uint32(i))
+		binary.LittleEndian.PutUint32(data[len(data)-8:], uint32(i))
+		binary.LittleEndian.PutUint32(data[len(data)-4:], util.NewCRC(data[:len(data)-4]).Value())
 		return data
 	}
 
@@ -2131,12 +2132,22 @@ func TestDb_GoleveldbIssue72and83(t *testing.T) {
 				continue
 			}
 			iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
-			writei := int(snap.elem.seq/(n*2) - 1)
+			writei := int(seq/(n*2) - 1)
 			var k int
 			for ; iter.Next(); k++ {
 				k1 := iter.Key()
 				k2 := iter.Value()
-				kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-4:]))
+				k1checksum0 := binary.LittleEndian.Uint32(k1[len(k1)-4:])
+				k1checksum1 := util.NewCRC(k1[:len(k1)-4]).Value()
+				if k1checksum0 != k1checksum1 {
+					t.Fatalf("READER0 #%d.%d W#%d invalid K1 checksum: %#x != %#x", i, k, k1checksum0, k1checksum0)
+				}
+				k2checksum0 := binary.LittleEndian.Uint32(k2[len(k2)-4:])
+				k2checksum1 := util.NewCRC(k2[:len(k2)-4]).Value()
+				if k2checksum0 != k2checksum1 {
+					t.Fatalf("READER0 #%d.%d W#%d invalid K2 checksum: %#x != %#x", i, k, k2checksum0, k2checksum1)
+				}
+				kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-8:]))
 				if writei != kwritei {
 					t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei)
 				}
@@ -2186,7 +2197,7 @@ func TestDb_GoleveldbIssue72and83(t *testing.T) {
 	wg.Wait()
 }
 
-func TestDb_TransientError(t *testing.T) {
+func TestDB_TransientError(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
 		WriteBuffer:              128 * opt.KiB,
 		CachedOpenFiles:          3,
@@ -2299,7 +2310,7 @@ func TestDb_TransientError(t *testing.T) {
 	wg.Wait()
 }
 
-func TestDb_UkeyShouldntHopAcrossTable(t *testing.T) {
+func TestDB_UkeyShouldntHopAcrossTable(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
 		WriteBuffer:                 112 * opt.KiB,
 		CompactionTableSize:         90 * opt.KiB,
@@ -2388,7 +2399,7 @@ func TestDb_UkeyShouldntHopAcrossTable(t *testing.T) {
 	wg.Wait()
 }
 
-func TestDb_TableCompactionBuilder(t *testing.T) {
+func TestDB_TableCompactionBuilder(t *testing.T) {
 	stor := newTestStorage(t)
 	defer stor.Close()
 

+ 2 - 1
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go

@@ -19,11 +19,12 @@ var _ = testutil.Defer(func() {
 		o := &opt.Options{
 			BlockCache:           opt.NoCache,
 			BlockRestartInterval: 5,
-			BlockSize:            50,
+			BlockSize:            80,
 			Compression:          opt.NoCompression,
 			CachedOpenFiles:      -1,
 			Strict:               opt.StrictAll,
 			WriteBuffer:          1000,
+			CompactionTableSize:  2000,
 		}
 
 		Describe("write test", func() {

+ 1 - 7
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go

@@ -3,15 +3,9 @@ package iterator_test
 import (
 	"testing"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-
 	"github.com/syndtr/goleveldb/leveldb/testutil"
 )
 
 func TestIterator(t *testing.T) {
-	testutil.RunDefer()
-
-	RegisterFailHandler(Fail)
-	RunSpecs(t, "Iterator Suite")
+	testutil.RunSuite(t, "Iterator Suite")
 }

+ 2 - 11
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go

@@ -3,18 +3,9 @@ package leveldb
 import (
 	"testing"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-
 	"github.com/syndtr/goleveldb/leveldb/testutil"
 )
 
-func TestLeveldb(t *testing.T) {
-	testutil.RunDefer()
-
-	RegisterFailHandler(Fail)
-	RunSpecs(t, "Leveldb Suite")
-
-	RegisterTestingT(t)
-	testutil.RunDefer("teardown")
+func TestLevelDB(t *testing.T) {
+	testutil.RunSuite(t, "LevelDB Suite")
 }

+ 2 - 8
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go

@@ -3,15 +3,9 @@ package memdb
 import (
 	"testing"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-
 	"github.com/syndtr/goleveldb/leveldb/testutil"
 )
 
-func TestMemdb(t *testing.T) {
-	testutil.RunDefer()
-
-	RegisterFailHandler(Fail)
-	RunSpecs(t, "Memdb Suite")
+func TestMemDB(t *testing.T) {
+	testutil.RunSuite(t, "MemDB Suite")
 }

+ 14 - 2
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go

@@ -114,7 +114,7 @@ const (
 	StrictOverride
 
 	// StrictAll enables all strict flags.
-	StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader
+	StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery
 
 	// DefaultStrict is the default strict flags. Specify any strict flags
 	// will override default strict flags as whole (i.e. not OR'ed).
@@ -136,9 +136,14 @@ type Options struct {
 	// BlockCache provides per-block caching for LevelDB. Specify NoCache to
 	// disable block caching.
 	//
-	// By default LevelDB will create LRU-cache with capacity of 8MiB.
+	// By default LevelDB will create LRU-cache with capacity of BlockCacheSize.
 	BlockCache cache.Cache
 
+	// BlockCacheSize defines the capacity of the default 'block cache'.
+	//
+	// The default value is 8MiB.
+	BlockCacheSize int
+
 	// BlockRestartInterval is the number of keys between restart points for
 	// delta encoding of keys.
 	//
@@ -322,6 +327,13 @@ func (o *Options) GetBlockCache() cache.Cache {
 	return o.BlockCache
 }
 
+func (o *Options) GetBlockCacheSize() int {
+	if o == nil || o.BlockCacheSize <= 0 {
+		return DefaultBlockCacheSize
+	}
+	return o.BlockCacheSize
+}
+
 func (o *Options) GetBlockRestartInterval() int {
 	if o == nil || o.BlockRestartInterval <= 0 {
 		return DefaultBlockRestartInterval
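
BlockCacheSize only matters when BlockCache is left nil: in that case the session builds the default LRU cache with this capacity, and GetBlockCacheSize falls back to DefaultBlockCacheSize (8 MiB) for zero or negative values. A small sketch of sizing the default cache when opening a database (the 16 MiB figure and path are just examples; assumes the leveldb and leveldb/opt packages are imported as in the earlier sketch):

	o := &opt.Options{
		// Leave BlockCache nil so the library creates its own LRU cache,
		// but make that cache 16 MiB instead of the 8 MiB default.
		BlockCacheSize: 16 * opt.MiB,
	}
	db, err := leveldb.OpenFile("/tmp/cache-example.db", o)
	_, _ = db, err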

+ 4 - 1
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go

@@ -17,6 +17,9 @@ func dupOptions(o *opt.Options) *opt.Options {
 	if o != nil {
 		*newo = *o
 	}
+	if newo.Strict == 0 {
+		newo.Strict = opt.DefaultStrict
+	}
 	return newo
 }
 
@@ -32,7 +35,7 @@ func (s *session) setOptions(o *opt.Options) {
 	// Block cache.
 	switch o.GetBlockCache() {
 	case nil:
-		no.BlockCache = cache.NewLRUCache(opt.DefaultBlockCacheSize)
+		no.BlockCache = cache.NewLRUCache(o.GetBlockCacheSize())
 	case opt.NoCache:
 		no.BlockCache = nil
 	}

+ 21 - 0
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go

@@ -233,6 +233,23 @@ func (tf tsFile) Create() (w storage.Writer, err error) {
 	return
 }
 
+func (tf tsFile) Replace(newfile storage.File) (err error) {
+	ts := tf.ts
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	err = tf.checkOpen("replace")
+	if err != nil {
+		return
+	}
+	err = tf.File.Replace(newfile.(tsFile).File)
+	if err != nil {
+		ts.t.Errorf("E: cannot replace file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
+	} else {
+		ts.t.Logf("I: file replace, num=%d type=%v", tf.Num(), tf.Type())
+	}
+	return
+}
+
 func (tf tsFile) Remove() (err error) {
 	ts := tf.ts
 	ts.mu.Lock()
@@ -492,6 +509,10 @@ func newTestStorage(t *testing.T) *testStorage {
 						}
 						f.Close()
 					}
+					if t.Failed() {
+						t.Logf("testing failed, test DB preserved at %s", path)
+						return nil
+					}
 					if tsKeepFS {
 						return nil
 					}

+ 11 - 1
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go

@@ -373,7 +373,17 @@ func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []b
 		return nil, nil, err
 	}
 	defer ch.Release()
-	return ch.Value().(*table.Reader).Find(key, ro)
+	return ch.Value().(*table.Reader).Find(key, true, ro)
+}
+
+// Finds key that is greater than or equal to the given key.
+func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) {
+	ch, err := t.open(f)
+	if err != nil {
+		return nil, err
+	}
+	defer ch.Release()
+	return ch.Value().(*table.Reader).FindKey(key, true, ro)
 }
 
 // Returns approximate offset of the given key.

+ 1 - 0
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go

@@ -122,6 +122,7 @@ var _ = testutil.Defer(func() {
 							}
 
 							testutil.DoIteratorTesting(&t)
+							iter.Release()
 							done <- true
 						}
 					}

+ 86 - 46
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go

@@ -50,13 +50,6 @@ func max(x, y int) int {
 	return y
 }
 
-func verifyBlockChecksum(data []byte) bool {
-	n := len(data) - 4
-	checksum0 := binary.LittleEndian.Uint32(data[n:])
-	checksum1 := util.NewCRC(data[:n]).Value()
-	return checksum0 == checksum1
-}
-
 type block struct {
 	bpool          *util.BufferPool
 	bh             blockHandle
@@ -525,21 +518,24 @@ type Reader struct {
 	filter         filter.Filter
 	verifyChecksum bool
 
-	dataEnd           int64
-	indexBH, filterBH blockHandle
-	indexBlock        *block
-	filterBlock       *filterBlock
+	dataEnd                   int64
+	metaBH, indexBH, filterBH blockHandle
+	indexBlock                *block
+	filterBlock               *filterBlock
 }
 
 func (r *Reader) blockKind(bh blockHandle) string {
 	switch bh.offset {
+	case r.metaBH.offset:
+		return "meta-block"
 	case r.indexBH.offset:
 		return "index-block"
 	case r.filterBH.offset:
-		return "filter-block"
-	default:
-		return "data-block"
+		if r.filterBH.length > 0 {
+			return "filter-block"
+		}
 	}
+	return "data-block"
 }
 
 func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error {
@@ -565,10 +561,17 @@ func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, erro
 	if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF {
 		return nil, err
 	}
-	if verifyChecksum && !verifyBlockChecksum(data) {
-		r.bpool.Put(data)
-		return nil, r.newErrCorruptedBH(bh, "checksum mismatch")
+
+	if verifyChecksum {
+		n := bh.length + 1
+		checksum0 := binary.LittleEndian.Uint32(data[n:])
+		checksum1 := util.NewCRC(data[:n]).Value()
+		if checksum0 != checksum1 {
+			r.bpool.Put(data)
+			return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1))
+		}
 	}
+
 	switch data[bh.length] {
 	case blockTypeNoCompression:
 		data = data[:bh.length]
@@ -798,13 +801,7 @@ func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.It
 	return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader))
 }
 
-// Find finds key/value pair whose key is greater than or equal to the
-// given key. It returns ErrNotFound if the table doesn't contain
-// such pair.
-//
-// The caller should not modify the contents of the returned slice, but
-// it is safe to modify the contents of the argument after Find returns.
-func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err error) {
+func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) {
 	r.mu.RLock()
 	defer r.mu.RUnlock()
 
@@ -833,14 +830,17 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
 		r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
 		return
 	}
-	if r.filter != nil {
-		filterBlock, rel, ferr := r.getFilterBlock(true)
+	if filtered && r.filter != nil {
+		filterBlock, frel, ferr := r.getFilterBlock(true)
 		if ferr == nil {
 			if !filterBlock.contains(r.filter, dataBH.offset, key) {
-				rel.Release()
+				frel.Release()
 				return nil, nil, ErrNotFound
 			}
-			rel.Release()
+			frel.Release()
+		} else if !errors.IsCorrupted(ferr) {
+			err = ferr
+			return
 		}
 	}
 	data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache())
@@ -854,21 +854,52 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
 	}
 	// Don't use block buffer, no need to copy the buffer.
 	rkey = data.Key()
-	if r.bpool == nil {
-		value = data.Value()
-	} else {
-		// Use block buffer, and since the buffer will be recycled, the buffer
-		// need to be copied.
-		value = append([]byte{}, data.Value()...)
+	if !noValue {
+		if r.bpool == nil {
+			value = data.Value()
+		} else {
+			// Use block buffer, and since the buffer will be recycled, the buffer
+			// need to be copied.
+			value = append([]byte{}, data.Value()...)
+		}
 	}
 	return
 }
 
+// Find finds key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such pair.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such pair doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) {
+	return r.find(key, filtered, ro, false)
+}
+
+// Find finds key that is greater than or equal to the given key.
+// It returns ErrNotFound if the table doesn't contain such key.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such key doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) {
+	rkey, _, err = r.find(key, filtered, ro, true)
+	return
+}
+
 // Get gets the value for the given key. It returns errors.ErrNotFound
 // if the table does not contain the key.
 //
-// The caller should not modify the contents of the returned slice, but
-// it is safe to modify the contents of the argument after Get returns.
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
 func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
 	r.mu.RLock()
 	defer r.mu.RUnlock()
@@ -878,7 +909,7 @@ func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
 		return
 	}
 
-	rkey, value, err := r.Find(key, ro)
+	rkey, value, err := r.find(key, false, ro, false)
 	if err == nil && r.cmp.Compare(rkey, key) != 0 {
 		value = nil
 		err = ErrNotFound
@@ -950,6 +981,10 @@ func (r *Reader) Release() {
 //
 // The returned table reader instance is goroutine-safe.
 func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache cache.Namespace, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
+	if f == nil {
+		return nil, errors.New("leveldb/table: nil file")
+	}
+
 	r := &Reader{
 		fi:             fi,
 		reader:         f,
@@ -959,13 +994,12 @@ func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache cache.Name
 		cmp:            o.GetComparer(),
 		verifyChecksum: o.GetStrict(opt.StrictBlockChecksum),
 	}
-	if f == nil {
-		return nil, errors.New("leveldb/table: nil file")
-	}
+
 	if size < footerLen {
 		r.err = r.newErrCorrupted(0, size, "table", "too small")
 		return r, nil
 	}
+
 	footerPos := size - footerLen
 	var footer [footerLen]byte
 	if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF {
@@ -975,20 +1009,24 @@ func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache cache.Name
 		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number")
 		return r, nil
 	}
+
+	var n int
 	// Decode the metaindex block handle.
-	metaBH, n := decodeBlockHandle(footer[:])
+	r.metaBH, n = decodeBlockHandle(footer[:])
 	if n == 0 {
 		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle")
 		return r, nil
 	}
+
 	// Decode the index block handle.
 	r.indexBH, n = decodeBlockHandle(footer[n:])
 	if n == 0 {
 		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle")
 		return r, nil
 	}
+
 	// Read metaindex block.
-	metaBlock, err := r.readBlock(metaBH, true)
+	metaBlock, err := r.readBlock(r.metaBH, true)
 	if err != nil {
 		if errors.IsCorrupted(err) {
 			r.err = err
@@ -997,9 +1035,12 @@ func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache cache.Name
 			return nil, err
 		}
 	}
+
 	// Set data end.
-	r.dataEnd = int64(metaBH.offset)
-	metaIter := r.newBlockIter(metaBlock, nil, nil, false)
+	r.dataEnd = int64(r.metaBH.offset)
+
+	// Read metaindex.
+	metaIter := r.newBlockIter(metaBlock, nil, nil, true)
 	for metaIter.Next() {
 		key := string(metaIter.Key())
 		if !strings.HasPrefix(key, "filter.") {
@@ -1044,13 +1085,12 @@ func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache cache.Name
 		if r.filter != nil {
 			r.filterBlock, err = r.readFilterBlock(r.filterBH)
 			if err != nil {
-				if !errors.IsCorrupted(r.err) {
+				if !errors.IsCorrupted(err) {
 					return nil, err
 				}
 
 				// Don't use filter then.
 				r.filter = nil
-				r.filterBH = blockHandle{}
 			}
 		}
 	}
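
At the table level this is what the new filtered flag means: with filtered=true the candidate data block is first checked against the table's filter data and the lookup returns ErrNotFound straight away on a filter miss, while FindKey additionally skips copying the value out of the block buffer. A rough sketch, assuming r is an already-open *table.Reader (constructing one needs a storage.FileInfo, cache namespace and buffer pool, omitted here):

	key := []byte("some-key") // illustrative key being probed

	// Key-only probe that consults the bloom filter first:
	if _, err := r.FindKey(key, true, nil); err != nil {
		// err is ErrNotFound when the filter or the data block rules the key out
	}

	// Full key/value lookup that bypasses the filter check:
	rkey, value, err := r.Find(key, false, nil)
	_, _, _ = rkey, value, err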

+ 1 - 7
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go

@@ -3,15 +3,9 @@ package table
 import (
 	"testing"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-
 	"github.com/syndtr/goleveldb/leveldb/testutil"
 )
 
 func TestTable(t *testing.T) {
-	testutil.RunDefer()
-
-	RegisterFailHandler(Fail)
-	RunSpecs(t, "Table Suite")
+	testutil.RunSuite(t, "Table Suite")
 }

+ 1 - 1
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go

@@ -23,7 +23,7 @@ type tableWrapper struct {
 }
 
 func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) {
-	return t.Reader.Find(key, nil)
+	return t.Reader.Find(key, false, nil)
 }
 
 func (t tableWrapper) TestGet(key []byte) (value []byte, err error) {

+ 5 - 0
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go

@@ -35,6 +35,10 @@ type Get interface {
 	TestGet(key []byte) (value []byte, err error)
 }
 
+type Has interface {
+	TestHas(key []byte) (ret bool, err error)
+}
+
 type NewIterator interface {
 	TestNewIterator(slice *util.Range) iterator.Iterator
 }
@@ -213,5 +217,6 @@ func DoDBTesting(t *DBTesting) {
 		}
 
 		DoIteratorTesting(&it)
+		iter.Release()
 	}
 }

+ 21 - 0
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go

@@ -0,0 +1,21 @@
+package testutil
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+func RunSuite(t GinkgoTestingT, name string) {
+	RunDefer()
+
+	SynchronizedBeforeSuite(func() []byte {
+		RunDefer("setup")
+		return nil
+	}, func(data []byte) {})
+	SynchronizedAfterSuite(func() {
+		RunDefer("teardown")
+	}, func() {})
+
+	RegisterFailHandler(Fail)
+	RunSpecs(t, name)
+}

+ 42 - 6
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go

@@ -26,9 +26,11 @@ func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB,
 		BeforeEach(func() {
 			p = setup(kv)
 		})
-		AfterEach(func() {
-			teardown(p)
-		})
+		if teardown != nil {
+			AfterEach(func() {
+				teardown(p)
+			})
+		}
 	}
 
 	It("Should find all keys with Find", func() {
@@ -84,6 +86,26 @@ func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB,
 		}
 	})
 
+	It("Should only find present key with Has", func() {
+		if db, ok := p.(Has); ok {
+			ShuffledIndex(nil, kv.Len(), 1, func(i int) {
+				key_, key, _ := kv.IndexInexact(i)
+
+				// Using exact key.
+				ret, err := db.TestHas(key)
+				Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
+				Expect(ret).Should(BeTrue(), "False for key %q", key)
+
+				// Using inexact key.
+				if len(key_) > 0 {
+					ret, err = db.TestHas(key_)
+					Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_)
+					Expect(ret).ShouldNot(BeTrue(), "True for key %q", key)
+				}
+			})
+		}
+	})
+
 	TestIter := func(r *util.Range, _kv KeyValue) {
 		if db, ok := p.(NewIterator); ok {
 			iter := db.TestNewIterator(r)
@@ -95,6 +117,7 @@ func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB,
 			}
 
 			DoIteratorTesting(&t)
+			iter.Release()
 		}
 	}
 
@@ -103,7 +126,7 @@ func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB,
 		done <- true
 	}, 3.0)
 
-	RandomIndex(rnd, kv.Len(), kv.Len(), func(i int) {
+	RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) {
 		type slice struct {
 			r            *util.Range
 			start, limit int
@@ -121,7 +144,7 @@ func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB,
 		}
 	})
 
-	RandomRange(rnd, kv.Len(), kv.Len(), func(start, limit int) {
+	RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) {
 		It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) {
 			r := kv.Range(start, limit)
 			TestIter(&r, kv.Slice(start, limit))
@@ -134,10 +157,22 @@ func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown
 	Test := func(kv *KeyValue) func() {
 		return func() {
 			var p DB
+			if setup != nil {
+				Defer("setup", func() {
+					p = setup(*kv)
+				})
+			}
+			if teardown != nil {
+				Defer("teardown", func() {
+					teardown(p)
+				})
+			}
 			if body != nil {
 				p = body(*kv)
 			}
-			KeyValueTesting(rnd, *kv, p, setup, teardown)
+			KeyValueTesting(rnd, *kv, p, func(KeyValue) DB {
+				return p
+			}, nil)
 		}
 	}
 
@@ -148,4 +183,5 @@ func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown
 	Describe("with big value", Test(KeyValue_BigValue()))
 	Describe("with special key", Test(KeyValue_SpecialKey()))
 	Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue()))
+	Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120)))
 }

+ 1 - 0
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go

@@ -397,6 +397,7 @@ func (s *Storage) logI(format string, args ...interface{}) {
 
 func (s *Storage) Log(str string) {
 	s.log(1, "Log: "+str)
+	s.Storage.Log(str)
 }
 
 func (s *Storage) Lock() (r util.Releaser, err error) {

+ 14 - 0
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go

@@ -155,3 +155,17 @@ func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) {
 	}
 	return
 }
+
+func Max(x, y int) int {
+	if x > y {
+		return x
+	}
+	return y
+}
+
+func Min(x, y int) int {
+	if x < y {
+		return x
+	}
+	return y
+}

+ 4 - 0
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go

@@ -34,6 +34,10 @@ func (t *testingDB) TestGet(key []byte) (value []byte, err error) {
 	return t.Get(key, t.ro)
 }
 
+func (t *testingDB) TestHas(key []byte) (ret bool, err error) {
+	return t.Has(key, t.ro)
+}
+
 func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator {
 	return t.NewIterator(slice, t.ro)
 }

+ 10 - 2
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go

@@ -114,7 +114,7 @@ func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, l
 	}
 }
 
-func (v *version) get(ikey iKey, ro *opt.ReadOptions) (value []byte, tcomp bool, err error) {
+func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
 	ukey := ikey.ukey()
 
 	var (
@@ -142,7 +142,15 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions) (value []byte, tcomp bool,
 			}
 		}
 
-		fikey, fval, ferr := v.s.tops.find(t, ikey, ro)
+		var (
+			fikey, fval []byte
+			ferr        error
+		)
+		if noValue {
+			fikey, ferr = v.s.tops.findKey(t, ikey, ro)
+		} else {
+			fikey, fval, ferr = v.s.tops.find(t, ikey, ro)
+		}
 		switch ferr {
 		case nil:
 		case ErrNotFound: