@@ -2,7 +2,6 @@ package files_test
 
 import (
 	"crypto/rand"
-	"fmt"
 	"log"
 	"os"
 	"sync"
@@ -10,63 +9,40 @@ import (
 	"time"
 
 	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/opt"
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
-var items map[string][]byte
-var keys map[string]string
-
-const nItems = 10000
-
-func setupMaps() {
-
-	// Set up two simple maps, one "key" => data and one "indirect key" =>
-	// "key".
-
-	items = make(map[string][]byte, nItems)
-	keys = make(map[string]string, nItems)
+var keys [][]byte
 
+func init() {
 	for i := 0; i < nItems; i++ {
-		k1 := fmt.Sprintf("key%d", i)
-		data := make([]byte, 87)
-		_, err := rand.Reader.Read(data)
-		if err != nil {
-			panic(err)
-		}
-		items[k1] = data
-
-		k2 := fmt.Sprintf("indirect%d", i)
-		keys[k2] = k1
+		keys = append(keys, randomData(1))
 	}
 }
 
-func makeK1(s string) []byte {
-	k1 := make([]byte, 1+len(s))
-	k1[0] = 1
-	copy(k1[1:], []byte(s))
-	return k1
-}
+const nItems = 10000
 
-func makeK2(s string) []byte {
-	k2 := make([]byte, 1+len(s))
-	k2[0] = 2 // Only difference from makeK1
-	copy(k2[1:], []byte(s))
-	return k2
+func randomData(prefix byte) []byte {
+	data := make([]byte, 1+32+64+32)
+	_, err := rand.Reader.Read(data)
+	if err != nil {
+		panic(err)
+	}
+	return append([]byte{prefix}, data...)
 }
 
 func setItems(db *leveldb.DB) error {
-	snap, err := db.GetSnapshot()
-	if err != nil {
-		return err
+	batch := new(leveldb.Batch)
+	for _, k1 := range keys {
+		k2 := randomData(2)
+		// k2 -> data
+		batch.Put(k2, randomData(42))
+		// k1 -> k2
+		batch.Put(k1, k2)
 	}
-	defer snap.Release()
-
-	batch := &leveldb.Batch{}
-	for k2, k1 := range keys {
-		// Create k1 => item mapping first
-		batch.Put(makeK1(k1), items[k1])
-		// Then the k2 => k1 mapping
-		batch.Put(makeK2(k2), makeK1(k1))
+	if testing.Verbose() {
+		log.Printf("batch write (set) %p", batch)
 	}
 	return db.Write(batch, nil)
 }
@@ -78,77 +54,100 @@ func clearItems(db *leveldb.DB) error {
 	}
 	defer snap.Release()
 
-	// Iterate from the start of k2 space to the end
-	it := snap.NewIterator(&util.Range{Start: []byte{2}, Limit: []byte{2, 0xff, 0xff, 0xff, 0xff}}, nil)
+	// Iterate over the k1 => k2 mappings
+
+	it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
 	defer it.Release()
 
-	batch := &leveldb.Batch{}
+	batch := new(leveldb.Batch)
 	for it.Next() {
-		k2 := it.Key()
-		k1 := it.Value()
+		k1 := it.Key()
+		k2 := it.Value()
 
-		// k1 should exist
-		_, err := snap.Get(k1, nil)
+		// k2 should exist
+		_, err := snap.Get(k2, nil)
 		if err != nil {
 			return err
 		}
 
-		// Delete the k2 => k1 mapping first
-		batch.Delete(k2)
-		// Then the k1 => key mapping
+		// Delete the k1 => k2 mapping first
 		batch.Delete(k1)
+		// Then the k2 => data mapping
+		batch.Delete(k2)
+	}
+	if testing.Verbose() {
+		log.Printf("batch write (clear) %p", batch)
 	}
 	return db.Write(batch, nil)
 }
 
 func scanItems(db *leveldb.DB) error {
 	snap, err := db.GetSnapshot()
+	if testing.Verbose() {
+		log.Printf("snap create %p", snap)
+	}
 	if err != nil {
 		return err
 	}
-	defer snap.Release()
+	defer func() {
+		if testing.Verbose() {
+			log.Printf("snap release %p", snap)
+		}
+		snap.Release()
+	}()
 
 	// Iterate from the start of k2 space to the end
-	it := snap.NewIterator(&util.Range{Start: []byte{2}, Limit: []byte{2, 0xff, 0xff, 0xff, 0xff}}, nil)
+	it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
 	defer it.Release()
 
+	i := 0
 	for it.Next() {
 		// k2 => k1 => data
-		k2 := it.Key()
-		k1 := it.Value()
-		_, err := snap.Get(k1, nil)
+		k1 := it.Key()
+		k2 := it.Value()
+		_, err := snap.Get(k2, nil)
 		if err != nil {
-			log.Printf("k1: %q (%x)", k1, k1)
-			log.Printf("k2: %q (%x)", k2, k2)
+			log.Printf("k1: %x", k1)
+			log.Printf("k2: %x (missing)", k2)
 			return err
 		}
+		i++
+	}
+	if testing.Verbose() {
+		log.Println("scanned", i)
 	}
 	return nil
 }
 
-func TestConcurrent(t *testing.T) {
-	setupMaps()
+func TestConcurrentSetClear(t *testing.T) {
+	if testing.Short() {
+		return
+	}
 
-	dur := 2 * time.Second
+	dur := 30 * time.Second
 	t0 := time.Now()
 	var wg sync.WaitGroup
 
-	os.RemoveAll("testdata/global.db")
-	db, err := leveldb.OpenFile("testdata/global.db", nil)
+	os.RemoveAll("testdata/concurrent-set-clear.db")
+	db, err := leveldb.OpenFile("testdata/concurrent-set-clear.db", &opt.Options{CachedOpenFiles: 10})
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer os.RemoveAll("testdata/global.db")
+	defer os.RemoveAll("testdata/concurrent-set-clear.db")
+
+	errChan := make(chan error, 3)
 
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
 		for time.Since(t0) < dur {
 			if err := setItems(db); err != nil {
-				t.Fatal(err)
+				errChan <- err
+				return
 			}
 			if err := clearItems(db); err != nil {
-				t.Fatal(err)
+				errChan <- err
+				return
 			}
 		}
 	}()
@@ -158,11 +157,71 @@ func TestConcurrent(t *testing.T) {
 		defer wg.Done()
 		for time.Since(t0) < dur {
 			if err := scanItems(db); err != nil {
-				t.Fatal(err)
+				errChan <- err
+				return
 			}
 		}
 	}()
 
-	wg.Wait()
+	go func() {
+		wg.Wait()
+		errChan <- nil
+	}()
+
+	err = <-errChan
+	if err != nil {
+		t.Error(err)
+	}
 	db.Close()
 }
+
+func TestConcurrentSetOnly(t *testing.T) {
+	if testing.Short() {
+		return
+	}
+
+	dur := 30 * time.Second
+	t0 := time.Now()
+	var wg sync.WaitGroup
+
+	os.RemoveAll("testdata/concurrent-set-only.db")
+	db, err := leveldb.OpenFile("testdata/concurrent-set-only.db", &opt.Options{CachedOpenFiles: 10})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll("testdata/concurrent-set-only.db")
+
+	errChan := make(chan error, 3)
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for time.Since(t0) < dur {
+			if err := setItems(db); err != nil {
+				errChan <- err
+				return
+			}
+		}
+	}()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for time.Since(t0) < dur {
+			if err := scanItems(db); err != nil {
+				errChan <- err
+				return
+			}
+		}
+	}()
+
+	go func() {
+		wg.Wait()
+		errChan <- nil
+	}()
+
+	err = <-errChan
+	if err != nil {
+		t.Error(err)
+	}
+}
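
Note on the iterator ranges above: util.BytesPrefix (from github.com/syndtr/goleveldb/leveldb/util, already imported by this file) builds a Range covering every key that starts with the given prefix, so the patch no longer has to hand-build a Limit the way the removed &util.Range{Start: []byte{2}, Limit: []byte{2, 0xff, 0xff, 0xff, 0xff}} did. A minimal standalone sketch, separate from the patch, showing what the helper returns for the one-byte prefix used here:

	package main

	import (
		"fmt"

		"github.com/syndtr/goleveldb/leveldb/util"
	)

	func main() {
		// For the prefix {0x01}, BytesPrefix yields the half-open range
		// [{0x01}, {0x02}): Start is the prefix itself, Limit is the
		// prefix with its last byte incremented.
		r := util.BytesPrefix([]byte{1})
		fmt.Printf("start=%x limit=%x\n", r.Start, r.Limit)
	}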
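
The other recurring change, replacing t.Fatal(err) inside the worker goroutines with a send on errChan, is needed for correctness: t.Fatal calls t.FailNow, which must run on the goroutine executing the test function, not on goroutines it spawns. A minimal sketch of the same pattern in a standalone _test file, with a hypothetical doWork standing in for setItems/clearItems/scanItems:

	package example_test

	import "testing"

	// doWork is a placeholder for a fallible operation such as setItems.
	func doWork() error { return nil }

	func TestWorkerError(t *testing.T) {
		errChan := make(chan error, 1)
		go func() {
			// Report failures over the channel; never call t.Fatal here.
			if err := doWork(); err != nil {
				errChan <- err
				return
			}
			errChan <- nil
		}()
		if err := <-errChan; err != nil {
			t.Error(err) // back on the test goroutine
		}
	}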