
We should pass around db.Instance instead of leveldb.DB

We're going to need the db.Instance to keep some state, and for that to
work we need the same one passed around everywhere. Hence this moves the
leveldb-specific file opening stuff into the db package and exports the
dbInstance type.
Jakob Borg 10 years ago
commit 2a4fc28318
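
The practical upshot for callers: opening the database, including the corruption-recovery and reset logic that previously lived in cmd/syncthing/main.go, is now a db package concern, and everything downstream takes a *db.Instance. A minimal sketch of the new wiring, using the calls this commit introduces; the helper name, its arguments and the logging are illustrative and not part of the diff:

	package main

	import (
		"log"

		"github.com/syncthing/syncthing/lib/db"
	)

	// openAndClean opens (or recovers/reinitializes) the database via db.Open
	// and drops folders that are no longer wanted, mirroring what
	// syncthingMain now does with ldb.ListFolders and db.DropFolder.
	func openAndClean(dbPath string, wanted map[string]bool) (*db.Instance, error) {
		ldb, err := db.Open(dbPath)
		if err != nil {
			return nil, err
		}
		for _, folder := range ldb.ListFolders() {
			if !wanted[folder] {
				log.Printf("cleaning data for dropped folder %q", folder)
				db.DropFolder(ldb, folder)
			}
		}
		return ldb, nil
	}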

+ 4 - 70
cmd/syncthing/main.go

@@ -9,6 +9,7 @@ package main
 import (
 	"bytes"
 	"crypto/tls"
+	"errors"
 	"flag"
 	"fmt"
 	"io/ioutil"
@@ -42,9 +43,6 @@ import (
 	"github.com/syncthing/syncthing/lib/tlsutil"
 	"github.com/syncthing/syncthing/lib/upgrade"
 
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/errors"
-	"github.com/syndtr/goleveldb/leveldb/opt"
 	"github.com/thejerf/suture"
 )
 
@@ -371,7 +369,7 @@ func main() {
 
 		if doUpgrade {
 			// Use leveldb database locks to protect against concurrent upgrades
-			_, err = leveldb.OpenFile(locations[locDatabase], &opt.Options{OpenFilesCacheCapacity: 100})
+			_, err = db.Open(locations[locDatabase])
 			if err != nil {
 				l.Infoln("Attempting upgrade through running Syncthing...")
 				err = upgradeViaRest()
@@ -617,21 +615,7 @@ func syncthingMain() {
 	}
 
 	dbFile := locations[locDatabase]
-	dbOpts := dbOpts(cfg)
-	ldb, err := leveldb.OpenFile(dbFile, dbOpts)
-	if leveldbIsCorrupted(err) {
-		ldb, err = leveldb.RecoverFile(dbFile, dbOpts)
-	}
-	if leveldbIsCorrupted(err) {
-		// The database is corrupted, and we've tried to recover it but it
-		// didn't work. At this point there isn't much to do beyond dropping
-		// the database and reindexing...
-		l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
-		if err := resetDB(); err != nil {
-			l.Fatalln("Remove database:", err)
-		}
-		ldb, err = leveldb.OpenFile(dbFile, dbOpts)
-	}
+	ldb, err := db.Open(dbFile)
 	if err != nil {
 		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
 	}
@@ -642,7 +626,7 @@ func syncthingMain() {
 
 	// Remove database entries for folders that no longer exist in the config
 	folders := cfg.Folders()
-	for _, folder := range db.ListFolders(ldb) {
+	for _, folder := range ldb.ListFolders() {
 		if _, ok := folders[folder]; !ok {
 			l.Infof("Cleaning data for dropped folder %q", folder)
 			db.DropFolder(ldb, folder)
@@ -881,40 +865,6 @@ func loadConfig(cfgFile string) (*config.Wrapper, string, error) {
 	return cfg, myName, nil
 }
 
-func dbOpts(cfg *config.Wrapper) *opt.Options {
-	// Calculate a suitable database block cache capacity.
-
-	// Default is 8 MiB.
-	blockCacheCapacity := 8 << 20
-	// Increase block cache up to this maximum:
-	const maxCapacity = 64 << 20
-	// ... which we reach when the box has this much RAM:
-	const maxAtRAM = 8 << 30
-
-	if v := cfg.Options().DatabaseBlockCacheMiB; v != 0 {
-		// Use the value from the config, if it's set.
-		blockCacheCapacity = v << 20
-	} else if bytes, err := memorySize(); err == nil {
-		// We start at the default of 8 MiB and use larger values for machines
-		// with more memory.
-
-		if bytes > maxAtRAM {
-			// Cap the cache at maxCapacity when we reach maxAtRam amount of memory
-			blockCacheCapacity = maxCapacity
-		} else if bytes > maxAtRAM/maxCapacity*int64(blockCacheCapacity) {
-			// Grow from the default to maxCapacity at maxAtRam amount of memory
-			blockCacheCapacity = int(bytes * maxCapacity / maxAtRAM)
-		}
-		l.Infoln("Database block cache capacity", blockCacheCapacity/1024, "KiB")
-	}
-
-	return &opt.Options{
-		OpenFilesCacheCapacity: 100,
-		BlockCacheCapacity:     blockCacheCapacity,
-		WriteBuffer:            4 << 20,
-	}
-}
-
 func startAuditing(mainSvc *suture.Supervisor) {
 	auditFile := timestampedLoc(locAuditLog)
 	fd, err := os.OpenFile(auditFile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
@@ -1166,19 +1116,3 @@ func checkShortIDs(cfg *config.Wrapper) error {
 	}
 	return nil
 }
-
-// A "better" version of leveldb's errors.IsCorrupted.
-func leveldbIsCorrupted(err error) bool {
-	switch {
-	case err == nil:
-		return false
-
-	case errors.IsCorrupted(err):
-		return true
-
-	case strings.Contains(err.Error(), "corrupted"):
-		return true
-	}
-
-	return false
-}

+ 1 - 4
cmd/syncthing/main_test.go

@@ -14,9 +14,6 @@ import (
 	"github.com/syncthing/syncthing/lib/db"
 	"github.com/syncthing/syncthing/lib/model"
 	"github.com/syncthing/syncthing/lib/protocol"
-
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
 func TestFolderErrors(t *testing.T) {
@@ -38,7 +35,7 @@ func TestFolderErrors(t *testing.T) {
 		}
 	}
 
-	ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb := db.OpenMemory()
 
 	// Case 1 - new folder, directory and marker created
 

+ 0 - 2
lib/config/config_test.go

@@ -56,7 +56,6 @@ func TestDefaultValues(t *testing.T) {
 		ProgressUpdateIntervalS: 5,
 		SymlinksEnabled:         true,
 		LimitBandwidthInLan:     false,
-		DatabaseBlockCacheMiB:   0,
 		MinHomeDiskFreePct:      1,
 		URURL:                   "https://data.syncthing.net/newdata",
 		URInitialDelayS:         1800,
@@ -180,7 +179,6 @@ func TestOverriddenValues(t *testing.T) {
 		ProgressUpdateIntervalS: 10,
 		SymlinksEnabled:         false,
 		LimitBandwidthInLan:     true,
-		DatabaseBlockCacheMiB:   42,
 		MinHomeDiskFreePct:      5.2,
 		URURL:                   "https://localhost/newdata",
 		URInitialDelayS:         800,

+ 0 - 1
lib/config/optionsconfiguration.go

@@ -37,7 +37,6 @@ type OptionsConfiguration struct {
 	ProgressUpdateIntervalS int      `xml:"progressUpdateIntervalS" json:"progressUpdateIntervalS" default:"5"`
 	SymlinksEnabled         bool     `xml:"symlinksEnabled" json:"symlinksEnabled" default:"true"`
 	LimitBandwidthInLan     bool     `xml:"limitBandwidthInLan" json:"limitBandwidthInLan" default:"false"`
-	DatabaseBlockCacheMiB   int      `xml:"databaseBlockCacheMiB" json:"databaseBlockCacheMiB" default:"0"`
 	MinHomeDiskFreePct      float64  `xml:"minHomeDiskFreePct" json:"minHomeDiskFreePct" default:"1"`
 	ReleasesURL             string   `xml:"releasesURL" json:"releasesURL" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=30"`
 	AlwaysLocalNets         []string `xml:"alwaysLocalNet" json:"alwaysLocalNets"`

+ 4 - 4
lib/db/blockmap.go

@@ -29,11 +29,11 @@ var blockFinder *BlockFinder
 const maxBatchSize = 256 << 10
 
 type BlockMap struct {
-	db     *leveldb.DB
+	db     *Instance
 	folder string
 }
 
-func NewBlockMap(db *leveldb.DB, folder string) *BlockMap {
+func NewBlockMap(db *Instance, folder string) *BlockMap {
 	return &BlockMap{
 		db:     db,
 		folder: folder,
@@ -146,10 +146,10 @@ func (m *BlockMap) blockKeyInto(o, hash []byte, file string) []byte {
 }
 
 type BlockFinder struct {
-	db *leveldb.DB
+	db *Instance
 }
 
-func NewBlockFinder(db *leveldb.DB) *BlockFinder {
+func NewBlockFinder(db *Instance) *BlockFinder {
 	if blockFinder != nil {
 		return blockFinder
 	}

+ 3 - 9
lib/db/blockmap_test.go

@@ -10,9 +10,6 @@ import (
 	"testing"
 
 	"github.com/syncthing/syncthing/lib/protocol"
-
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
 func genBlocks(n int) []protocol.BlockInfo {
@@ -50,17 +47,14 @@ func init() {
 	}
 }
 
-func setup() (*leveldb.DB, *BlockFinder) {
+func setup() (*Instance, *BlockFinder) {
 	// Setup
 
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		panic(err)
-	}
+	db := OpenMemory()
 	return db, NewBlockFinder(db)
 }
 
-func dbEmpty(db *leveldb.DB) bool {
+func dbEmpty(db *Instance) bool {
 	iter := db.NewIterator(nil, nil)
 	defer iter.Release()
 	if iter.Next() {

+ 77 - 24
lib/db/leveldb_dbinstance.go

@@ -8,27 +8,64 @@ package db
 
 import (
 	"bytes"
+	"os"
 	"sort"
+	"strings"
 
 	"github.com/syncthing/syncthing/lib/protocol"
 	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/storage"
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
 type deletionHandler func(t readWriteTransaction, folder, device, name []byte, dbi iterator.Iterator) int64
 
-type dbInstance struct {
+type Instance struct {
 	*leveldb.DB
 }
 
-func newDBInstance(db *leveldb.DB) *dbInstance {
-	return &dbInstance{
+func Open(file string) (*Instance, error) {
+	opts := &opt.Options{
+		OpenFilesCacheCapacity: 100,
+		WriteBuffer:            4 << 20,
+	}
+
+	db, err := leveldb.OpenFile(file, opts)
+	if leveldbIsCorrupted(err) {
+		db, err = leveldb.RecoverFile(file, opts)
+	}
+	if leveldbIsCorrupted(err) {
+		// The database is corrupted, and we've tried to recover it but it
+		// didn't work. At this point there isn't much to do beyond dropping
+		// the database and reindexing...
+		l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
+		if err := os.RemoveAll(file); err != nil {
+			return nil, err
+		}
+		db, err = leveldb.OpenFile(file, opts)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return newDBInstance(db), nil
+}
+
+func OpenMemory() *Instance {
+	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	return newDBInstance(db)
+}
+
+func newDBInstance(db *leveldb.DB) *Instance {
+	return &Instance{
 		DB: db,
 	}
 }
 
-func (db *dbInstance) genericReplace(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker, deleteFn deletionHandler) int64 {
+func (db *Instance) genericReplace(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker, deleteFn deletionHandler) int64 {
 	sort.Sort(fileList(fs)) // sort list on name, same as in the database
 
 	start := db.deviceKey(folder, device, nil)                            // before all folder/device files
@@ -126,7 +163,7 @@ func (db *dbInstance) genericReplace(folder, device []byte, fs []protocol.FileIn
 	return maxLocalVer
 }
 
-func (db *dbInstance) replace(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker) int64 {
+func (db *Instance) replace(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker) int64 {
 	// TODO: Return the remaining maxLocalVer?
 	return db.genericReplace(folder, device, fs, localSize, globalSize, func(t readWriteTransaction, folder, device, name []byte, dbi iterator.Iterator) int64 {
 		// Database has a file that we are missing. Remove it.
@@ -137,7 +174,7 @@ func (db *dbInstance) replace(folder, device []byte, fs []protocol.FileInfo, loc
 	})
 }
 
-func (db *dbInstance) updateFiles(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker) int64 {
+func (db *Instance) updateFiles(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker) int64 {
 	t := db.newReadWriteTransaction()
 	defer t.close()
 
@@ -195,7 +232,7 @@ func (db *dbInstance) updateFiles(folder, device []byte, fs []protocol.FileInfo,
 	return maxLocalVer
 }
 
-func (db *dbInstance) withHave(folder, device []byte, truncate bool, fn Iterator) {
+func (db *Instance) withHave(folder, device []byte, truncate bool, fn Iterator) {
 	start := db.deviceKey(folder, device, nil)                            // before all folder/device files
 	limit := db.deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
 
@@ -216,7 +253,7 @@ func (db *dbInstance) withHave(folder, device []byte, truncate bool, fn Iterator
 	}
 }
 
-func (db *dbInstance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
+func (db *Instance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
 	start := db.deviceKey(folder, nil, nil)                                                  // before all folder/device files
 	limit := db.deviceKey(folder, protocol.LocalDeviceID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
 
@@ -249,11 +286,11 @@ func (db *dbInstance) withAllFolderTruncated(folder []byte, fn func(device []byt
 	}
 }
 
-func (db *dbInstance) getFile(folder, device, file []byte) (protocol.FileInfo, bool) {
+func (db *Instance) getFile(folder, device, file []byte) (protocol.FileInfo, bool) {
 	return getFile(db, db.deviceKey(folder, device, file))
 }
 
-func (db *dbInstance) getGlobal(folder, file []byte, truncate bool) (FileIntf, bool) {
+func (db *Instance) getGlobal(folder, file []byte, truncate bool) (FileIntf, bool) {
 	k := db.globalKey(folder, file)
 
 	t := db.newReadOnlyTransaction()
@@ -290,7 +327,7 @@ func (db *dbInstance) getGlobal(folder, file []byte, truncate bool) (FileIntf, b
 	return fi, true
 }
 
-func (db *dbInstance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) {
+func (db *Instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) {
 	t := db.newReadOnlyTransaction()
 	defer t.close()
 
@@ -333,7 +370,7 @@ func (db *dbInstance) withGlobal(folder, prefix []byte, truncate bool, fn Iterat
 	}
 }
 
-func (db *dbInstance) availability(folder, file []byte) []protocol.DeviceID {
+func (db *Instance) availability(folder, file []byte) []protocol.DeviceID {
 	k := db.globalKey(folder, file)
 	bs, err := db.Get(k, nil)
 	if err == leveldb.ErrNotFound {
@@ -361,7 +398,7 @@ func (db *dbInstance) availability(folder, file []byte) []protocol.DeviceID {
 	return devices
 }
 
-func (db *dbInstance) withNeed(folder, device []byte, truncate bool, fn Iterator) {
+func (db *Instance) withNeed(folder, device []byte, truncate bool, fn Iterator) {
 	start := db.globalKey(folder, nil)
 	limit := db.globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
 
@@ -452,7 +489,7 @@ nextFile:
 	}
 }
 
-func (db *dbInstance) listFolders() []string {
+func (db *Instance) ListFolders() []string {
 	t := db.newReadOnlyTransaction()
 	defer t.close()
 
@@ -476,7 +513,7 @@ func (db *dbInstance) listFolders() []string {
 	return folders
 }
 
-func (db *dbInstance) dropFolder(folder []byte) {
+func (db *Instance) dropFolder(folder []byte) {
 	t := db.newReadOnlyTransaction()
 	defer t.close()
 
@@ -501,7 +538,7 @@ func (db *dbInstance) dropFolder(folder []byte) {
 	dbi.Release()
 }
 
-func (db *dbInstance) checkGlobals(folder []byte, globalSize *sizeTracker) {
+func (db *Instance) checkGlobals(folder []byte, globalSize *sizeTracker) {
 	t := db.newReadWriteTransaction()
 	defer t.close()
 
@@ -560,11 +597,11 @@ func (db *dbInstance) checkGlobals(folder []byte, globalSize *sizeTracker) {
 //	   folder (64 bytes)
 //	   device (32 bytes)
 //	   name (variable size)
-func (db *dbInstance) deviceKey(folder, device, file []byte) []byte {
+func (db *Instance) deviceKey(folder, device, file []byte) []byte {
 	return db.deviceKeyInto(nil, folder, device, file)
 }
 
-func (db *dbInstance) deviceKeyInto(k []byte, folder, device, file []byte) []byte {
+func (db *Instance) deviceKeyInto(k []byte, folder, device, file []byte) []byte {
 	reqLen := 1 + 64 + 32 + len(file)
 	if len(k) < reqLen {
 		k = make([]byte, reqLen)
@@ -579,11 +616,11 @@ func (db *dbInstance) deviceKeyInto(k []byte, folder, device, file []byte) []byt
 	return k[:reqLen]
 }
 
-func (db *dbInstance) deviceKeyName(key []byte) []byte {
+func (db *Instance) deviceKeyName(key []byte) []byte {
 	return key[1+64+32:]
 }
 
-func (db *dbInstance) deviceKeyFolder(key []byte) []byte {
+func (db *Instance) deviceKeyFolder(key []byte) []byte {
 	folder := key[1 : 1+64]
 	izero := bytes.IndexByte(folder, 0)
 	if izero < 0 {
@@ -592,7 +629,7 @@ func (db *dbInstance) deviceKeyFolder(key []byte) []byte {
 	return folder[:izero]
 }
 
-func (db *dbInstance) deviceKeyDevice(key []byte) []byte {
+func (db *Instance) deviceKeyDevice(key []byte) []byte {
 	return key[1+64 : 1+64+32]
 }
 
@@ -600,7 +637,7 @@ func (db *dbInstance) deviceKeyDevice(key []byte) []byte {
 //	   keyTypeGlobal (1 byte)
 //	   folder (64 bytes)
 //	   name (variable size)
-func (db *dbInstance) globalKey(folder, file []byte) []byte {
+func (db *Instance) globalKey(folder, file []byte) []byte {
 	k := make([]byte, 1+64+len(file))
 	k[0] = KeyTypeGlobal
 	if len(folder) > 64 {
@@ -611,11 +648,11 @@ func (db *dbInstance) globalKey(folder, file []byte) []byte {
 	return k
 }
 
-func (db *dbInstance) globalKeyName(key []byte) []byte {
+func (db *Instance) globalKeyName(key []byte) []byte {
 	return key[1+64:]
 }
 
-func (db *dbInstance) globalKeyFolder(key []byte) []byte {
+func (db *Instance) globalKeyFolder(key []byte) []byte {
 	folder := key[1 : 1+64]
 	izero := bytes.IndexByte(folder, 0)
 	if izero < 0 {
@@ -635,3 +672,19 @@ func unmarshalTrunc(bs []byte, truncate bool) (FileIntf, error) {
 	err := tf.UnmarshalXDR(bs)
 	return tf, err
 }
+
+// A "better" version of leveldb's errors.IsCorrupted.
+func leveldbIsCorrupted(err error) bool {
+	switch {
+	case err == nil:
+		return false
+
+	case errors.IsCorrupted(err):
+		return true
+
+	case strings.Contains(err.Error(), "corrupted"):
+		return true
+	}
+
+	return false
+}
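
In-memory databases for tests now go through the new OpenMemory constructor instead of leveldb.Open(storage.NewMemStorage(), nil), which is what the test changes elsewhere in this commit amount to. A minimal sketch of a test using it, assuming it lives in package db; the test itself is illustrative, not taken from the diff:

	package db

	import "testing"

	func TestOpenMemorySmoke(t *testing.T) {
		// OpenMemory replaces leveldb.Open(storage.NewMemStorage(), nil) in tests.
		ldb := OpenMemory()
		if folders := ldb.ListFolders(); len(folders) != 0 {
			t.Errorf("expected no folders in a fresh database, got %v", folders)
		}
	}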

+ 2 - 2
lib/db/leveldb_test.go

@@ -16,7 +16,7 @@ func TestDeviceKey(t *testing.T) {
 	dev := []byte("device67890123456789012345678901")
 	name := []byte("name")
 
-	db := &dbInstance{}
+	db := &Instance{}
 
 	key := db.deviceKey(fld, dev, name)
 
@@ -38,7 +38,7 @@ func TestGlobalKey(t *testing.T) {
 	fld := []byte("folder6789012345678901234567890123456789012345678901234567890123")
 	name := []byte("name")
 
-	db := &dbInstance{}
+	db := &Instance{}
 
 	key := db.globalKey(fld, name)
 

+ 3 - 3
lib/db/leveldb_transactions.go

@@ -16,10 +16,10 @@ import (
 // A readOnlyTransaction represents a database snapshot.
 type readOnlyTransaction struct {
 	*leveldb.Snapshot
-	db *dbInstance
+	db *Instance
 }
 
-func (db *dbInstance) newReadOnlyTransaction() readOnlyTransaction {
+func (db *Instance) newReadOnlyTransaction() readOnlyTransaction {
 	snap, err := db.GetSnapshot()
 	if err != nil {
 		panic(err)
@@ -46,7 +46,7 @@ type readWriteTransaction struct {
 	*leveldb.Batch
 }
 
-func (db *dbInstance) newReadWriteTransaction() readWriteTransaction {
+func (db *Instance) newReadWriteTransaction() readWriteTransaction {
 	t := db.newReadOnlyTransaction()
 	return readWriteTransaction{
 		readOnlyTransaction: t,

+ 2 - 2
lib/db/namespaced.go

@@ -17,13 +17,13 @@ import (
 // NamespacedKV is a simple key-value store using a specific namespace within
 // a leveldb.
 type NamespacedKV struct {
-	db     *leveldb.DB
+	db     *Instance
 	prefix []byte
 }
 
 // NewNamespacedKV returns a new NamespacedKV that lives in the namespace
 // specified by the prefix.
-func NewNamespacedKV(db *leveldb.DB, prefix string) *NamespacedKV {
+func NewNamespacedKV(db *Instance, prefix string) *NamespacedKV {
 	return &NamespacedKV{
 		db:     db,
 		prefix: []byte(prefix),

+ 4 - 19
lib/db/namespaced_test.go

@@ -9,16 +9,10 @@ package db
 import (
 	"testing"
 	"time"
-
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
 func TestNamespacedInt(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := OpenMemory()
 
 	n1 := NewNamespacedKV(ldb, "foo")
 	n2 := NewNamespacedKV(ldb, "bar")
@@ -53,10 +47,7 @@ func TestNamespacedInt(t *testing.T) {
 }
 
 func TestNamespacedTime(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := OpenMemory()
 
 	n1 := NewNamespacedKV(ldb, "foo")
 
@@ -73,10 +64,7 @@ func TestNamespacedTime(t *testing.T) {
 }
 
 func TestNamespacedString(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := OpenMemory()
 
 	n1 := NewNamespacedKV(ldb, "foo")
 
@@ -92,10 +80,7 @@ func TestNamespacedString(t *testing.T) {
 }
 
 func TestNamespacedReset(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := OpenMemory()
 
 	n1 := NewNamespacedKV(ldb, "foo")
 

+ 5 - 13
lib/db/set.go

@@ -18,14 +18,13 @@ import (
 	"github.com/syncthing/syncthing/lib/osutil"
 	"github.com/syncthing/syncthing/lib/protocol"
 	"github.com/syncthing/syncthing/lib/sync"
-	"github.com/syndtr/goleveldb/leveldb"
 )
 
 type FileSet struct {
 	localVersion map[protocol.DeviceID]int64
 	mutex        sync.Mutex
 	folder       string
-	db           *dbInstance
+	db           *Instance
 	blockmap     *BlockMap
 	localSize    sizeTracker
 	globalSize   sizeTracker
@@ -93,11 +92,11 @@ func (s *sizeTracker) Size() (files, deleted int, bytes int64) {
 	return s.files, s.deleted, s.bytes
 }
 
-func NewFileSet(folder string, db *leveldb.DB) *FileSet {
+func NewFileSet(folder string, db *Instance) *FileSet {
 	var s = FileSet{
 		localVersion: make(map[protocol.DeviceID]int64),
 		folder:       folder,
-		db:           newDBInstance(db),
+		db:           db,
 		blockmap:     NewBlockMap(db, folder),
 		mutex:        sync.NewMutex(),
 	}
@@ -239,17 +238,10 @@ func (s *FileSet) GlobalSize() (files, deleted int, bytes int64) {
 	return s.globalSize.Size()
 }
 
-// ListFolders returns the folder IDs seen in the database.
-func ListFolders(db *leveldb.DB) []string {
-	i := newDBInstance(db)
-	return i.listFolders()
-}
-
 // DropFolder clears out all information related to the given folder from the
 // database.
-func DropFolder(db *leveldb.DB, folder string) {
-	i := newDBInstance(db)
-	i.dropFolder([]byte(folder))
+func DropFolder(db *Instance, folder string) {
+	db.dropFolder([]byte(folder))
 	bm := &BlockMap{
 		db:     db,
 		folder: folder,

+ 12 - 45
lib/db/set_test.go

@@ -15,8 +15,6 @@ import (
 
 	"github.com/syncthing/syncthing/lib/db"
 	"github.com/syncthing/syncthing/lib/protocol"
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
 var remoteDevice0, remoteDevice1 protocol.DeviceID
@@ -96,11 +94,7 @@ func (l fileList) String() string {
 }
 
 func TestGlobalSet(t *testing.T) {
-
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := db.OpenMemory()
 
 	m := db.NewFileSet("test", ldb)
 
@@ -303,10 +297,7 @@ func TestGlobalSet(t *testing.T) {
 }
 
 func TestNeedWithInvalid(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := db.OpenMemory()
 
 	s := db.NewFileSet("test", ldb)
 
@@ -343,10 +334,7 @@ func TestNeedWithInvalid(t *testing.T) {
 }
 
 func TestUpdateToInvalid(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := db.OpenMemory()
 
 	s := db.NewFileSet("test", ldb)
 
@@ -378,10 +366,7 @@ func TestUpdateToInvalid(t *testing.T) {
 }
 
 func TestInvalidAvailability(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := db.OpenMemory()
 
 	s := db.NewFileSet("test", ldb)
 
@@ -419,10 +404,7 @@ func TestInvalidAvailability(t *testing.T) {
 }
 
 func TestGlobalReset(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := db.OpenMemory()
 
 	m := db.NewFileSet("test", ldb)
 
@@ -460,10 +442,7 @@ func TestGlobalReset(t *testing.T) {
 }
 
 func TestNeed(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := db.OpenMemory()
 
 	m := db.NewFileSet("test", ldb)
 
@@ -501,10 +480,7 @@ func TestNeed(t *testing.T) {
 }
 
 func TestLocalVersion(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := db.OpenMemory()
 
 	m := db.NewFileSet("test", ldb)
 
@@ -534,10 +510,7 @@ func TestLocalVersion(t *testing.T) {
 }
 
 func TestListDropFolder(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := db.OpenMemory()
 
 	s0 := db.NewFileSet("test0", ldb)
 	local1 := []protocol.FileInfo{
@@ -558,7 +531,7 @@ func TestListDropFolder(t *testing.T) {
 	// Check that we have both folders and their data is in the global list
 
 	expectedFolderList := []string{"test0", "test1"}
-	if actualFolderList := db.ListFolders(ldb); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
+	if actualFolderList := ldb.ListFolders(); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
 		t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
 	}
 	if l := len(globalList(s0)); l != 3 {
@@ -573,7 +546,7 @@ func TestListDropFolder(t *testing.T) {
 	db.DropFolder(ldb, "test1")
 
 	expectedFolderList = []string{"test0"}
-	if actualFolderList := db.ListFolders(ldb); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
+	if actualFolderList := ldb.ListFolders(); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
 		t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
 	}
 	if l := len(globalList(s0)); l != 3 {
@@ -585,10 +558,7 @@ func TestListDropFolder(t *testing.T) {
 }
 
 func TestGlobalNeedWithInvalid(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := db.OpenMemory()
 
 	s := db.NewFileSet("test1", ldb)
 
@@ -625,10 +595,7 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
 }
 
 func TestLongPath(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := db.OpenMemory()
 
 	s := db.NewFileSet("test", ldb)
 

+ 1 - 3
lib/db/virtualmtime.go

@@ -9,8 +9,6 @@ package db
 import (
 	"fmt"
 	"time"
-
-	"github.com/syndtr/goleveldb/leveldb"
 )
 
 // This type encapsulates a repository of mtimes for platforms where file mtimes
@@ -25,7 +23,7 @@ type VirtualMtimeRepo struct {
 	ns *NamespacedKV
 }
 
-func NewVirtualMtimeRepo(ldb *leveldb.DB, folder string) *VirtualMtimeRepo {
+func NewVirtualMtimeRepo(ldb *Instance, folder string) *VirtualMtimeRepo {
 	prefix := string(KeyTypeVirtualMtime) + folder
 
 	return &VirtualMtimeRepo{

+ 1 - 7
lib/db/virtualmtime_test.go

@@ -9,16 +9,10 @@ package db
 import (
 	"testing"
 	"time"
-
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
 func TestVirtualMtimeRepo(t *testing.T) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ldb := OpenMemory()
 
 	// A few repos so we can ensure they don't pollute each other
 	repo1 := NewVirtualMtimeRepo(ldb, "folder1")

+ 2 - 3
lib/model/model.go

@@ -33,7 +33,6 @@ import (
 	"github.com/syncthing/syncthing/lib/symlinks"
 	"github.com/syncthing/syncthing/lib/sync"
 	"github.com/syncthing/syncthing/lib/versioner"
-	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/thejerf/suture"
 )
 
@@ -64,7 +63,7 @@ type Model struct {
 	*suture.Supervisor
 
 	cfg               *config.Wrapper
-	db                *leveldb.DB
+	db                *db.Instance
 	finder            *db.BlockFinder
 	progressEmitter   *ProgressEmitter
 	id                protocol.DeviceID
@@ -99,7 +98,7 @@ var (
 // NewModel creates and starts a new model. The model starts in read-only mode,
 // where it sends index information to connected peers and responds to requests
 // for file data without altering the local folder in any way.
-func NewModel(cfg *config.Wrapper, id protocol.DeviceID, deviceName, clientName, clientVersion string, ldb *leveldb.DB, protectedFiles []string) *Model {
+func NewModel(cfg *config.Wrapper, id protocol.DeviceID, deviceName, clientName, clientVersion string, ldb *db.Instance, protectedFiles []string) *Model {
 	m := &Model{
 		Supervisor: suture.New("model", suture.Spec{
 			Log: func(line string) {

+ 14 - 16
lib/model/model_test.go

@@ -22,8 +22,6 @@ import (
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/db"
 	"github.com/syncthing/syncthing/lib/protocol"
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
 var device1, device2 protocol.DeviceID
@@ -90,7 +88,7 @@ func init() {
 }
 
 func TestRequest(t *testing.T) {
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 
@@ -167,7 +165,7 @@ func BenchmarkIndex_100(b *testing.B) {
 }
 
 func benchmarkIndex(b *testing.B, nfiles int) {
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 	m.StartFolderRO("default")
@@ -196,7 +194,7 @@ func BenchmarkIndexUpdate_10000_1(b *testing.B) {
 }
 
 func benchmarkIndexUpdate(b *testing.B, nfiles, nufiles int) {
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 	m.StartFolderRO("default")
@@ -261,7 +259,7 @@ func (FakeConnection) Statistics() protocol.Statistics {
 }
 
 func BenchmarkRequest(b *testing.B) {
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 	m.ServeBackground()
@@ -317,7 +315,7 @@ func TestDeviceRename(t *testing.T) {
 	}
 	cfg := config.Wrap("tmpconfig.xml", rawCfg)
 
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 
 	fc := FakeConnection{
@@ -391,7 +389,7 @@ func TestClusterConfig(t *testing.T) {
 		},
 	}
 
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 
 	m := NewModel(config.Wrap("/tmp/test", cfg), protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(cfg.Folders[0])
@@ -463,7 +461,7 @@ func TestIgnores(t *testing.T) {
 	ioutil.WriteFile("testdata/.stfolder", nil, 0644)
 	ioutil.WriteFile("testdata/.stignore", []byte(".*\nquux\n"), 0644)
 
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 	m.StartFolderRO("default")
@@ -538,7 +536,7 @@ func TestIgnores(t *testing.T) {
 }
 
 func TestRefuseUnknownBits(t *testing.T) {
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 	m.ServeBackground()
@@ -576,7 +574,7 @@ func TestRefuseUnknownBits(t *testing.T) {
 }
 
 func TestROScanRecovery(t *testing.T) {
-	ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb := db.OpenMemory()
 	set := db.NewFileSet("default", ldb)
 	set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
 		{Name: "dummyfile"},
@@ -660,7 +658,7 @@ func TestROScanRecovery(t *testing.T) {
 }
 
 func TestRWScanRecovery(t *testing.T) {
-	ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb := db.OpenMemory()
 	set := db.NewFileSet("default", ldb)
 	set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
 		{Name: "dummyfile"},
@@ -744,7 +742,7 @@ func TestRWScanRecovery(t *testing.T) {
 }
 
 func TestGlobalDirectoryTree(t *testing.T) {
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 	m.ServeBackground()
@@ -994,7 +992,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
 }
 
 func TestGlobalDirectorySelfFixing(t *testing.T) {
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 	m.ServeBackground()
@@ -1168,7 +1166,7 @@ func BenchmarkTree_100_10(b *testing.B) {
 }
 
 func benchmarkTree(b *testing.B, n1, n2 int) {
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 	m.ServeBackground()
@@ -1186,7 +1184,7 @@ func benchmarkTree(b *testing.B, n1, n2 int) {
 }
 
 func TestIgnoreDelete(t *testing.T) {
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 
 	// This folder should ignore external deletes

+ 8 - 10
lib/model/rwfolder_test.go

@@ -12,12 +12,10 @@ import (
 	"testing"
 	"time"
 
+	"github.com/syncthing/syncthing/lib/db"
 	"github.com/syncthing/syncthing/lib/protocol"
 	"github.com/syncthing/syncthing/lib/scanner"
 	"github.com/syncthing/syncthing/lib/sync"
-
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
 func init() {
@@ -69,7 +67,7 @@ func TestHandleFile(t *testing.T) {
 	requiredFile := existingFile
 	requiredFile.Blocks = blocks[1:]
 
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 	// Update index
@@ -125,7 +123,7 @@ func TestHandleFileWithTemp(t *testing.T) {
 	requiredFile := existingFile
 	requiredFile.Blocks = blocks[1:]
 
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 	// Update index
@@ -187,7 +185,7 @@ func TestCopierFinder(t *testing.T) {
 	requiredFile.Blocks = blocks[1:]
 	requiredFile.Name = "file2"
 
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 	// Update index
@@ -264,7 +262,7 @@ func TestCopierCleanup(t *testing.T) {
 		return true
 	}
 
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 
@@ -313,7 +311,7 @@ func TestCopierCleanup(t *testing.T) {
 // Make sure that the copier routine hashes the content when asked, and pulls
 // if it fails to find the block.
 func TestLastResortPulling(t *testing.T) {
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 
@@ -387,7 +385,7 @@ func TestDeregisterOnFailInCopy(t *testing.T) {
 	}
 	defer os.Remove("testdata/" + defTempNamer.TempName("filex"))
 
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
@@ -480,7 +478,7 @@ func TestDeregisterOnFailInPull(t *testing.T) {
 	}
 	defer os.Remove("testdata/" + defTempNamer.TempName("filex"))
 
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := db.OpenMemory()
 	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
 	m.AddFolder(defaultFolderConfig)
 

+ 1 - 2
lib/stats/device.go

@@ -10,7 +10,6 @@ import (
 	"time"
 
 	"github.com/syncthing/syncthing/lib/db"
-	"github.com/syndtr/goleveldb/leveldb"
 )
 
 type DeviceStatistics struct {
@@ -22,7 +21,7 @@ type DeviceStatisticsReference struct {
 	device string
 }
 
-func NewDeviceStatisticsReference(ldb *leveldb.DB, device string) *DeviceStatisticsReference {
+func NewDeviceStatisticsReference(ldb *db.Instance, device string) *DeviceStatisticsReference {
 	prefix := string(db.KeyTypeDeviceStatistic) + device
 	return &DeviceStatisticsReference{
 		ns:     db.NewNamespacedKV(ldb, prefix),

+ 1 - 2
lib/stats/folder.go

@@ -10,7 +10,6 @@ import (
 	"time"
 
 	"github.com/syncthing/syncthing/lib/db"
-	"github.com/syndtr/goleveldb/leveldb"
 )
 
 type FolderStatistics struct {
@@ -28,7 +27,7 @@ type LastFile struct {
 	Deleted  bool      `json:"deleted"`
 }
 
-func NewFolderStatisticsReference(ldb *leveldb.DB, folder string) *FolderStatisticsReference {
+func NewFolderStatisticsReference(ldb *db.Instance, folder string) *FolderStatisticsReference {
 	prefix := string(db.KeyTypeFolderStatistic) + folder
 	return &FolderStatisticsReference{
 		ns:     db.NewNamespacedKV(ldb, prefix),