Merge pull request #1242 from syncthing/rename-set

Rename package internal/files to internal/db and type files.Set to db.FileSet
Audrius Butkevicius committed 11 years ago (commit ce86131d12)
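
For downstream code the change boils down to three things: the import switches from internal/files to internal/db, files.NewSet becomes db.NewFileSet, and local *leveldb.DB variables previously named db are renamed (ldb throughout this diff) so they no longer shadow the new package name. A minimal sketch of a call site after the rename, using an in-memory LevelDB handle as in the tests below; the folder name "default" is illustrative:

package main

import (
	"fmt"

	"github.com/syncthing/syncthing/internal/db"
	"github.com/syncthing/syncthing/internal/protocol"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	// Before the rename this read: db, _ := leveldb.Open(...); fs := files.NewSet("default", db).
	// The handle is named ldb so it does not shadow the db package.
	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		panic(err)
	}

	fs := db.NewFileSet("default", ldb)

	// Apart from the constructor and type names, the FileSet API is unchanged.
	fs.Update(protocol.LocalDeviceID, []protocol.FileInfo{{Name: "dummyfile"}})
	fmt.Println(fs.LocalVersion(protocol.LocalDeviceID))
}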

+ 1 - 1
build.go

@@ -301,7 +301,7 @@ func assets() {
 }
 
 func xdr() {
-	runPrint("go", "generate", "./internal/discover", "./internal/files", "./internal/protocol")
+	runPrint("go", "generate", "./internal/discover", "./internal/db", "./internal/protocol")
 }
 
 func translate() {

+ 7 - 7
cmd/stindex/main.go

@@ -21,7 +21,7 @@ import (
 	"log"
 	"os"
 
-	"github.com/syncthing/syncthing/internal/files"
+	"github.com/syncthing/syncthing/internal/db"
 	"github.com/syncthing/syncthing/internal/protocol"
 	"github.com/syndtr/goleveldb/leveldb"
 )
@@ -34,17 +34,17 @@ func main() {
 	device := flag.String("device", "", "Device ID (blank for global)")
 	flag.Parse()
 
-	db, err := leveldb.OpenFile(flag.Arg(0), nil)
+	ldb, err := leveldb.OpenFile(flag.Arg(0), nil)
 	if err != nil {
 		log.Fatal(err)
 	}
 
-	fs := files.NewSet(*folder, db)
+	fs := db.NewFileSet(*folder, ldb)
 
 	if *device == "" {
 		log.Printf("*** Global index for folder %q", *folder)
-		fs.WithGlobalTruncated(func(fi files.FileIntf) bool {
-			f := fi.(files.FileInfoTruncated)
+		fs.WithGlobalTruncated(func(fi db.FileIntf) bool {
+			f := fi.(db.FileInfoTruncated)
 			fmt.Println(f)
 			fmt.Println("\t", fs.Availability(f.Name))
 			return true
@@ -55,8 +55,8 @@ func main() {
 			log.Fatal(err)
 		}
 		log.Printf("*** Have index for folder %q device %q", *folder, n)
-		fs.WithHaveTruncated(n, func(fi files.FileIntf) bool {
-			f := fi.(files.FileInfoTruncated)
+		fs.WithHaveTruncated(n, func(fi db.FileIntf) bool {
+			f := fi.(db.FileInfoTruncated)
 			fmt.Println(f)
 			return true
 		})

+ 3 - 3
cmd/syncthing/gui.go

@@ -34,9 +34,9 @@ import (
 	"github.com/calmh/logger"
 	"github.com/syncthing/syncthing/internal/auto"
 	"github.com/syncthing/syncthing/internal/config"
+	"github.com/syncthing/syncthing/internal/db"
 	"github.com/syncthing/syncthing/internal/discover"
 	"github.com/syncthing/syncthing/internal/events"
-	"github.com/syncthing/syncthing/internal/files"
 	"github.com/syncthing/syncthing/internal/model"
 	"github.com/syncthing/syncthing/internal/osutil"
 	"github.com/syncthing/syncthing/internal/protocol"
@@ -784,7 +784,7 @@ func mimeTypeForFile(file string) string {
 	}
 }
 
-func toNeedSlice(fs []files.FileInfoTruncated) []map[string]interface{} {
+func toNeedSlice(fs []db.FileInfoTruncated) []map[string]interface{} {
 	output := make([]map[string]interface{}, len(fs))
 	for i, file := range fs {
 		output[i] = map[string]interface{}{
@@ -794,7 +794,7 @@ func toNeedSlice(fs []files.FileInfoTruncated) []map[string]interface{} {
 			"Version":      file.Version,
 			"LocalVersion": file.LocalVersion,
 			"NumBlocks":    file.NumBlocks,
-			"Size":         files.BlocksToSize(file.NumBlocks),
+			"Size":         db.BlocksToSize(file.NumBlocks),
 		}
 	}
 	return output

+ 5 - 5
cmd/syncthing/main.go

@@ -38,9 +38,9 @@ import (
 	"github.com/calmh/logger"
 	"github.com/juju/ratelimit"
 	"github.com/syncthing/syncthing/internal/config"
+	"github.com/syncthing/syncthing/internal/db"
 	"github.com/syncthing/syncthing/internal/discover"
 	"github.com/syncthing/syncthing/internal/events"
-	"github.com/syncthing/syncthing/internal/files"
 	"github.com/syncthing/syncthing/internal/model"
 	"github.com/syncthing/syncthing/internal/osutil"
 	"github.com/syncthing/syncthing/internal/protocol"
@@ -489,21 +489,21 @@ func syncthingMain() {
 		readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps))
 	}
 
-	db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{OpenFilesCacheCapacity: 100})
+	ldb, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{OpenFilesCacheCapacity: 100})
 	if err != nil {
 		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
 	}
 
 	// Remove database entries for folders that no longer exist in the config
 	folders := cfg.Folders()
-	for _, folder := range files.ListFolders(db) {
+	for _, folder := range db.ListFolders(ldb) {
 		if _, ok := folders[folder]; !ok {
 			l.Infof("Cleaning data for dropped folder %q", folder)
-			files.DropFolder(db, folder)
+			db.DropFolder(ldb, folder)
 		}
 	}
 
-	m := model.NewModel(cfg, myName, "syncthing", Version, db)
+	m := model.NewModel(cfg, myName, "syncthing", Version, ldb)
 
 	sanityCheckFolders(cfg, m)
 

+ 7 - 7
cmd/syncthing/main_test.go

@@ -20,7 +20,7 @@ import (
 	"testing"
 
 	"github.com/syncthing/syncthing/internal/config"
-	"github.com/syncthing/syncthing/internal/files"
+	"github.com/syncthing/syncthing/internal/db"
 	"github.com/syncthing/syncthing/internal/model"
 	"github.com/syncthing/syncthing/internal/protocol"
 
@@ -44,11 +44,11 @@ func TestSanityCheck(t *testing.T) {
 		}
 	}
 
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)
 
 	// Case 1 - new folder, directory and marker created
 
-	m := model.NewModel(cfg, "device", "syncthing", "dev", db)
+	m := model.NewModel(cfg, "device", "syncthing", "dev", ldb)
 	sanityCheckFolders(cfg, m)
 
 	if cfg.Folders()["folder"].Invalid != "" {
@@ -75,7 +75,7 @@ func TestSanityCheck(t *testing.T) {
 		Folders: []config.FolderConfiguration{fcfg},
 	})
 
-	m = model.NewModel(cfg, "device", "syncthing", "dev", db)
+	m = model.NewModel(cfg, "device", "syncthing", "dev", ldb)
 	sanityCheckFolders(cfg, m)
 
 	if cfg.Folders()["folder"].Invalid != "" {
@@ -91,12 +91,12 @@ func TestSanityCheck(t *testing.T) {
 
 	// Case 3 - marker missing
 
-	set := files.NewSet("folder", db)
+	set := db.NewFileSet("folder", ldb)
 	set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
 		{Name: "dummyfile"},
 	})
 
-	m = model.NewModel(cfg, "device", "syncthing", "dev", db)
+	m = model.NewModel(cfg, "device", "syncthing", "dev", ldb)
 	sanityCheckFolders(cfg, m)
 
 	if cfg.Folders()["folder"].Invalid != "folder marker missing" {
@@ -110,7 +110,7 @@ func TestSanityCheck(t *testing.T) {
 		Folders: []config.FolderConfiguration{fcfg},
 	})
 
-	m = model.NewModel(cfg, "device", "syncthing", "dev", db)
+	m = model.NewModel(cfg, "device", "syncthing", "dev", ldb)
 	sanityCheckFolders(cfg, m)
 
 	if cfg.Folders()["folder"].Invalid != "folder path missing" {

+ 0 - 0
internal/files/.gitignore → internal/db/.gitignore


+ 2 - 2
internal/files/blockmap.go → internal/db/blockmap.go

@@ -13,14 +13,14 @@
 // You should have received a copy of the GNU General Public License along
 // with this program. If not, see <http://www.gnu.org/licenses/>.
 
-// Package files provides a set type to track local/remote files with newness
+// Package db provides a set type to track local/remote files with newness
 // checks. We must do a certain amount of normalization in here. We will get
 // fed paths with either native or wire-format separators and encodings
 // depending on who calls us. We transform paths to wire-format (NFC and
 // slashes) on the way to the database, and transform to native format
 // (varying separator and encoding) on the way back out.
 
-package files
+package db
 
 import (
 	"bytes"

+ 1 - 1
internal/files/blockmap_test.go → internal/db/blockmap_test.go

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License along
 // with this program. If not, see <http://www.gnu.org/licenses/>.
 
-package files
+package db
 
 import (
 	"testing"

+ 1 - 1
internal/files/concurrency_test.go → internal/db/concurrency_test.go

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License along
 // with this program. If not, see <http://www.gnu.org/licenses/>.
 
-package files_test
+package db_test
 
 import (
 	"crypto/rand"

+ 1 - 1
internal/files/debug.go → internal/db/debug.go

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License along
 // with this program. If not, see <http://www.gnu.org/licenses/>.
 
-package files
+package db
 
 import (
 	"os"

+ 1 - 1
internal/files/leveldb.go → internal/db/leveldb.go

@@ -16,7 +16,7 @@
 //go:generate -command genxdr go run ../../Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
 //go:generate genxdr -o leveldb_xdr.go leveldb.go
 
-package files
+package db
 
 import (
 	"bytes"

+ 1 - 1
internal/files/leveldb_test.go → internal/db/leveldb_test.go

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License along
 // with this program. If not, see <http://www.gnu.org/licenses/>.
 
-package files
+package db
 
 import (
 	"bytes"

+ 1 - 1
internal/files/leveldb_xdr.go → internal/db/leveldb_xdr.go

@@ -2,7 +2,7 @@
 // This file is automatically generated by genxdr. Do not edit.
 // ************************************************************
 
-package files
+package db
 
 import (
 	"bytes"

+ 19 - 19
internal/files/set.go → internal/db/set.go

@@ -13,13 +13,13 @@
 // You should have received a copy of the GNU General Public License along
 // with this program. If not, see <http://www.gnu.org/licenses/>.
 
-// Package files provides a set type to track local/remote files with newness
+// Package db provides a set type to track local/remote files with newness
 // checks. We must do a certain amount of normalization in here. We will get
 // fed paths with either native or wire-format separators and encodings
 // depending on who calls us. We transform paths to wire-format (NFC and
 // slashes) on the way to the database, and transform to native format
 // (varying separator and encoding) on the way back out.
-package files
+package db
 
 import (
 	"sync"
@@ -30,7 +30,7 @@ import (
 	"github.com/syndtr/goleveldb/leveldb"
 )
 
-type Set struct {
+type FileSet struct {
 	localVersion map[protocol.DeviceID]uint64
 	mutex        sync.Mutex
 	folder       string
@@ -54,8 +54,8 @@ type FileIntf interface {
 // continue iteration, false to stop.
 type Iterator func(f FileIntf) bool
 
-func NewSet(folder string, db *leveldb.DB) *Set {
-	var s = Set{
+func NewFileSet(folder string, db *leveldb.DB) *FileSet {
+	var s = FileSet{
 		localVersion: make(map[protocol.DeviceID]uint64),
 		folder:       folder,
 		db:           db,
@@ -81,7 +81,7 @@ func NewSet(folder string, db *leveldb.DB) *Set {
 	return &s
 }
 
-func (s *Set) Replace(device protocol.DeviceID, fs []protocol.FileInfo) {
+func (s *FileSet) Replace(device protocol.DeviceID, fs []protocol.FileInfo) {
 	if debug {
 		l.Debugf("%s Replace(%v, [%d])", s.folder, device, len(fs))
 	}
@@ -99,7 +99,7 @@ func (s *Set) Replace(device protocol.DeviceID, fs []protocol.FileInfo) {
 	}
 }
 
-func (s *Set) ReplaceWithDelete(device protocol.DeviceID, fs []protocol.FileInfo) {
+func (s *FileSet) ReplaceWithDelete(device protocol.DeviceID, fs []protocol.FileInfo) {
 	if debug {
 		l.Debugf("%s ReplaceWithDelete(%v, [%d])", s.folder, device, len(fs))
 	}
@@ -115,7 +115,7 @@ func (s *Set) ReplaceWithDelete(device protocol.DeviceID, fs []protocol.FileInfo
 	}
 }
 
-func (s *Set) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
+func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
 	if debug {
 		l.Debugf("%s Update(%v, [%d])", s.folder, device, len(fs))
 	}
@@ -140,55 +140,55 @@ func (s *Set) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
 	}
 }
 
-func (s *Set) WithNeed(device protocol.DeviceID, fn Iterator) {
+func (s *FileSet) WithNeed(device protocol.DeviceID, fn Iterator) {
 	if debug {
 		l.Debugf("%s WithNeed(%v)", s.folder, device)
 	}
 	ldbWithNeed(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn))
 }
 
-func (s *Set) WithNeedTruncated(device protocol.DeviceID, fn Iterator) {
+func (s *FileSet) WithNeedTruncated(device protocol.DeviceID, fn Iterator) {
 	if debug {
 		l.Debugf("%s WithNeedTruncated(%v)", s.folder, device)
 	}
 	ldbWithNeed(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn))
 }
 
-func (s *Set) WithHave(device protocol.DeviceID, fn Iterator) {
+func (s *FileSet) WithHave(device protocol.DeviceID, fn Iterator) {
 	if debug {
 		l.Debugf("%s WithHave(%v)", s.folder, device)
 	}
 	ldbWithHave(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn))
 }
 
-func (s *Set) WithHaveTruncated(device protocol.DeviceID, fn Iterator) {
+func (s *FileSet) WithHaveTruncated(device protocol.DeviceID, fn Iterator) {
 	if debug {
 		l.Debugf("%s WithHaveTruncated(%v)", s.folder, device)
 	}
 	ldbWithHave(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn))
 }
 
-func (s *Set) WithGlobal(fn Iterator) {
+func (s *FileSet) WithGlobal(fn Iterator) {
 	if debug {
 		l.Debugf("%s WithGlobal()", s.folder)
 	}
 	ldbWithGlobal(s.db, []byte(s.folder), false, nativeFileIterator(fn))
 }
 
-func (s *Set) WithGlobalTruncated(fn Iterator) {
+func (s *FileSet) WithGlobalTruncated(fn Iterator) {
 	if debug {
 		l.Debugf("%s WithGlobalTruncated()", s.folder)
 	}
 	ldbWithGlobal(s.db, []byte(s.folder), true, nativeFileIterator(fn))
 }
 
-func (s *Set) Get(device protocol.DeviceID, file string) (protocol.FileInfo, bool) {
+func (s *FileSet) Get(device protocol.DeviceID, file string) (protocol.FileInfo, bool) {
 	f, ok := ldbGet(s.db, []byte(s.folder), device[:], []byte(osutil.NormalizedFilename(file)))
 	f.Name = osutil.NativeFilename(f.Name)
 	return f, ok
 }
 
-func (s *Set) GetGlobal(file string) (protocol.FileInfo, bool) {
+func (s *FileSet) GetGlobal(file string) (protocol.FileInfo, bool) {
 	fi, ok := ldbGetGlobal(s.db, []byte(s.folder), []byte(osutil.NormalizedFilename(file)), false)
 	if !ok {
 		return protocol.FileInfo{}, false
@@ -198,7 +198,7 @@ func (s *Set) GetGlobal(file string) (protocol.FileInfo, bool) {
 	return f, true
 }
 
-func (s *Set) GetGlobalTruncated(file string) (FileInfoTruncated, bool) {
+func (s *FileSet) GetGlobalTruncated(file string) (FileInfoTruncated, bool) {
 	fi, ok := ldbGetGlobal(s.db, []byte(s.folder), []byte(osutil.NormalizedFilename(file)), true)
 	if !ok {
 		return FileInfoTruncated{}, false
@@ -208,11 +208,11 @@ func (s *Set) GetGlobalTruncated(file string) (FileInfoTruncated, bool) {
 	return f, true
 }
 
-func (s *Set) Availability(file string) []protocol.DeviceID {
+func (s *FileSet) Availability(file string) []protocol.DeviceID {
 	return ldbAvailability(s.db, []byte(s.folder), []byte(osutil.NormalizedFilename(file)))
 }
 
-func (s *Set) LocalVersion(device protocol.DeviceID) uint64 {
+func (s *FileSet) LocalVersion(device protocol.DeviceID) uint64 {
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
 	return s.localVersion[device]
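
The package comment above (unchanged apart from the package name) spells out the normalization contract, and the Get/GetGlobal/Availability methods show where it is enforced: incoming names are converted to wire format with osutil.NormalizedFilename before they touch the database, and converted back with osutil.NativeFilename on the way out. A rough sketch of that round trip, assuming only the osutil helpers used in the hunks above; the path is illustrative:

package main

import (
	"fmt"

	"github.com/syncthing/syncthing/internal/osutil"
)

func main() {
	// On the way in: native path -> wire format (NFC, forward slashes),
	// which is the form the FileSet keys the database on.
	wire := osutil.NormalizedFilename("some/dir/file.txt")

	// On the way out: wire format -> the platform's native separators and
	// encoding, as done in FileSet.Get and FileSet.GetGlobal.
	native := osutil.NativeFilename(wire)

	fmt.Println(wire, native)
}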

+ 46 - 46
internal/files/set_test.go → internal/db/set_test.go

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License along
 // with this program. If not, see <http://www.gnu.org/licenses/>.
 
-package files_test
+package db_test
 
 import (
 	"bytes"
@@ -22,7 +22,7 @@ import (
 	"sort"
 	"testing"
 
-	"github.com/syncthing/syncthing/internal/files"
+	"github.com/syncthing/syncthing/internal/db"
 	"github.com/syncthing/syncthing/internal/lamport"
 	"github.com/syncthing/syncthing/internal/protocol"
 	"github.com/syndtr/goleveldb/leveldb"
@@ -49,9 +49,9 @@ func genBlocks(n int) []protocol.BlockInfo {
 	return b
 }
 
-func globalList(s *files.Set) []protocol.FileInfo {
+func globalList(s *db.FileSet) []protocol.FileInfo {
 	var fs []protocol.FileInfo
-	s.WithGlobal(func(fi files.FileIntf) bool {
+	s.WithGlobal(func(fi db.FileIntf) bool {
 		f := fi.(protocol.FileInfo)
 		fs = append(fs, f)
 		return true
@@ -59,9 +59,9 @@ func globalList(s *files.Set) []protocol.FileInfo {
 	return fs
 }
 
-func haveList(s *files.Set, n protocol.DeviceID) []protocol.FileInfo {
+func haveList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
 	var fs []protocol.FileInfo
-	s.WithHave(n, func(fi files.FileIntf) bool {
+	s.WithHave(n, func(fi db.FileIntf) bool {
 		f := fi.(protocol.FileInfo)
 		fs = append(fs, f)
 		return true
@@ -69,9 +69,9 @@ func haveList(s *files.Set, n protocol.DeviceID) []protocol.FileInfo {
 	return fs
 }
 
-func needList(s *files.Set, n protocol.DeviceID) []protocol.FileInfo {
+func needList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
 	var fs []protocol.FileInfo
-	s.WithNeed(n, func(fi files.FileIntf) bool {
+	s.WithNeed(n, func(fi db.FileIntf) bool {
 		f := fi.(protocol.FileInfo)
 		fs = append(fs, f)
 		return true
@@ -106,12 +106,12 @@ func (l fileList) String() string {
 func TestGlobalSet(t *testing.T) {
 	lamport.Default = lamport.Clock{}
 
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	m := files.NewSet("test", db)
+	m := db.NewFileSet("test", ldb)
 
 	local0 := fileList{
 		protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
@@ -267,12 +267,12 @@ func TestGlobalSet(t *testing.T) {
 func TestNeedWithInvalid(t *testing.T) {
 	lamport.Default = lamport.Clock{}
 
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	s := files.NewSet("test", db)
+	s := db.NewFileSet("test", ldb)
 
 	localHave := fileList{
 		protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
@@ -309,12 +309,12 @@ func TestNeedWithInvalid(t *testing.T) {
 func TestUpdateToInvalid(t *testing.T) {
 	lamport.Default = lamport.Clock{}
 
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	s := files.NewSet("test", db)
+	s := db.NewFileSet("test", ldb)
 
 	localHave := fileList{
 		protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
@@ -346,12 +346,12 @@ func TestUpdateToInvalid(t *testing.T) {
 func TestInvalidAvailability(t *testing.T) {
 	lamport.Default = lamport.Clock{}
 
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	s := files.NewSet("test", db)
+	s := db.NewFileSet("test", ldb)
 
 	remote0Have := fileList{
 		protocol.FileInfo{Name: "both", Version: 1001, Blocks: genBlocks(2)},
@@ -387,11 +387,11 @@ func TestInvalidAvailability(t *testing.T) {
 }
 
 func TestLocalDeleted(t *testing.T) {
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
-	m := files.NewSet("test", db)
+	m := db.NewFileSet("test", ldb)
 	lamport.Default = lamport.Clock{}
 
 	local1 := []protocol.FileInfo{
@@ -462,7 +462,7 @@ func TestLocalDeleted(t *testing.T) {
 }
 
 func Benchmark10kReplace(b *testing.B) {
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -474,7 +474,7 @@ func Benchmark10kReplace(b *testing.B) {
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		m := files.NewSet("test", db)
+		m := db.NewFileSet("test", ldb)
 		m.ReplaceWithDelete(protocol.LocalDeviceID, local)
 	}
 }
@@ -485,12 +485,12 @@ func Benchmark10kUpdateChg(b *testing.B) {
 		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
 	}
 
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		b.Fatal(err)
 	}
 
-	m := files.NewSet("test", db)
+	m := db.NewFileSet("test", ldb)
 	m.Replace(remoteDevice0, remote)
 
 	var local []protocol.FileInfo
@@ -517,11 +517,11 @@ func Benchmark10kUpdateSme(b *testing.B) {
 		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
 	}
 
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		b.Fatal(err)
 	}
-	m := files.NewSet("test", db)
+	m := db.NewFileSet("test", ldb)
 	m.Replace(remoteDevice0, remote)
 
 	var local []protocol.FileInfo
@@ -543,12 +543,12 @@ func Benchmark10kNeed2k(b *testing.B) {
 		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
 	}
 
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		b.Fatal(err)
 	}
 
-	m := files.NewSet("test", db)
+	m := db.NewFileSet("test", ldb)
 	m.Replace(remoteDevice0, remote)
 
 	var local []protocol.FileInfo
@@ -576,12 +576,12 @@ func Benchmark10kHaveFullList(b *testing.B) {
 		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
 	}
 
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		b.Fatal(err)
 	}
 
-	m := files.NewSet("test", db)
+	m := db.NewFileSet("test", ldb)
 	m.Replace(remoteDevice0, remote)
 
 	var local []protocol.FileInfo
@@ -609,12 +609,12 @@ func Benchmark10kGlobal(b *testing.B) {
 		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
 	}
 
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		b.Fatal(err)
 	}
 
-	m := files.NewSet("test", db)
+	m := db.NewFileSet("test", ldb)
 	m.Replace(remoteDevice0, remote)
 
 	var local []protocol.FileInfo
@@ -637,12 +637,12 @@ func Benchmark10kGlobal(b *testing.B) {
 }
 
 func TestGlobalReset(t *testing.T) {
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	m := files.NewSet("test", db)
+	m := db.NewFileSet("test", ldb)
 
 	local := []protocol.FileInfo{
 		{Name: "a", Version: 1000},
@@ -678,12 +678,12 @@ func TestGlobalReset(t *testing.T) {
 }
 
 func TestNeed(t *testing.T) {
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	m := files.NewSet("test", db)
+	m := db.NewFileSet("test", ldb)
 
 	local := []protocol.FileInfo{
 		{Name: "a", Version: 1000},
@@ -719,12 +719,12 @@ func TestNeed(t *testing.T) {
 }
 
 func TestLocalVersion(t *testing.T) {
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	m := files.NewSet("test", db)
+	m := db.NewFileSet("test", ldb)
 
 	local1 := []protocol.FileInfo{
 		{Name: "a", Version: 1000},
@@ -758,12 +758,12 @@ func TestLocalVersion(t *testing.T) {
 }
 
 func TestListDropFolder(t *testing.T) {
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	s0 := files.NewSet("test0", db)
+	s0 := db.NewFileSet("test0", ldb)
 	local1 := []protocol.FileInfo{
 		{Name: "a", Version: 1000},
 		{Name: "b", Version: 1000},
@@ -771,7 +771,7 @@ func TestListDropFolder(t *testing.T) {
 	}
 	s0.Replace(protocol.LocalDeviceID, local1)
 
-	s1 := files.NewSet("test1", db)
+	s1 := db.NewFileSet("test1", ldb)
 	local2 := []protocol.FileInfo{
 		{Name: "d", Version: 1002},
 		{Name: "e", Version: 1002},
@@ -782,7 +782,7 @@ func TestListDropFolder(t *testing.T) {
 	// Check that we have both folders and their data is in the global list
 
 	expectedFolderList := []string{"test0", "test1"}
-	if actualFolderList := files.ListFolders(db); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
+	if actualFolderList := db.ListFolders(ldb); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
 		t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
 	}
 	if l := len(globalList(s0)); l != 3 {
@@ -794,10 +794,10 @@ func TestListDropFolder(t *testing.T) {
 
 	// Drop one of them and check that it's gone.
 
-	files.DropFolder(db, "test1")
+	db.DropFolder(ldb, "test1")
 
 	expectedFolderList = []string{"test0"}
-	if actualFolderList := files.ListFolders(db); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
+	if actualFolderList := db.ListFolders(ldb); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
 		t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
 	}
 	if l := len(globalList(s0)); l != 3 {
@@ -809,12 +809,12 @@ func TestListDropFolder(t *testing.T) {
 }
 
 func TestGlobalNeedWithInvalid(t *testing.T) {
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	s := files.NewSet("test1", db)
+	s := db.NewFileSet("test1", ldb)
 
 	rem0 := fileList{
 		protocol.FileInfo{Name: "a", Version: 1002, Blocks: genBlocks(4)},
@@ -849,12 +849,12 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
 }
 
 func TestLongPath(t *testing.T) {
-	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	s := files.NewSet("test", db)
+	s := db.NewFileSet("test", ldb)
 
 	var b bytes.Buffer
 	for i := 0; i < 100; i++ {

+ 0 - 0
internal/files/testdata/.gitignore → internal/db/testdata/.gitignore


+ 1 - 1
internal/files/truncated.go → internal/db/truncated.go

@@ -16,7 +16,7 @@
 //go:generate -command genxdr go run ../../Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
 //go:generate genxdr -o truncated_xdr.go truncated.go
 
-package files
+package db
 
 import (
 	"fmt"

+ 1 - 1
internal/files/truncated_xdr.go → internal/db/truncated_xdr.go

@@ -2,7 +2,7 @@
 // This file is automatically generated by genxdr. Do not edit.
 // ************************************************************
 
-package files
+package db
 
 import (
 	"bytes"

+ 26 - 26
internal/model/model.go

@@ -31,8 +31,8 @@ import (
 	"time"
 
 	"github.com/syncthing/syncthing/internal/config"
+	"github.com/syncthing/syncthing/internal/db"
 	"github.com/syncthing/syncthing/internal/events"
-	"github.com/syncthing/syncthing/internal/files"
 	"github.com/syncthing/syncthing/internal/ignore"
 	"github.com/syncthing/syncthing/internal/lamport"
 	"github.com/syncthing/syncthing/internal/osutil"
@@ -86,7 +86,7 @@ type service interface {
 type Model struct {
 	cfg             *config.Wrapper
 	db              *leveldb.DB
-	finder          *files.BlockFinder
+	finder          *db.BlockFinder
 	progressEmitter *ProgressEmitter
 
 	deviceName    string
@@ -94,7 +94,7 @@ type Model struct {
 	clientVersion string
 
 	folderCfgs     map[string]config.FolderConfiguration                  // folder -> cfg
-	folderFiles    map[string]*files.Set                                  // folder -> files
+	folderFiles    map[string]*db.FileSet                                 // folder -> files
 	folderDevices  map[string][]protocol.DeviceID                         // folder -> deviceIDs
 	deviceFolders  map[protocol.DeviceID][]string                         // deviceID -> folders
 	deviceStatRefs map[protocol.DeviceID]*stats.DeviceStatisticsReference // deviceID -> statsRef
@@ -126,15 +126,15 @@ var (
 // NewModel creates and starts a new model. The model starts in read-only mode,
 // where it sends index information to connected peers and responds to requests
 // for file data without altering the local folder in any way.
-func NewModel(cfg *config.Wrapper, deviceName, clientName, clientVersion string, db *leveldb.DB) *Model {
+func NewModel(cfg *config.Wrapper, deviceName, clientName, clientVersion string, ldb *leveldb.DB) *Model {
 	m := &Model{
 		cfg:                cfg,
-		db:                 db,
+		db:                 ldb,
 		deviceName:         deviceName,
 		clientName:         clientName,
 		clientVersion:      clientVersion,
 		folderCfgs:         make(map[string]config.FolderConfiguration),
-		folderFiles:        make(map[string]*files.Set),
+		folderFiles:        make(map[string]*db.FileSet),
 		folderDevices:      make(map[string][]protocol.DeviceID),
 		deviceFolders:      make(map[protocol.DeviceID][]string),
 		deviceStatRefs:     make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
@@ -146,7 +146,7 @@ func NewModel(cfg *config.Wrapper, deviceName, clientName, clientVersion string,
 		protoConn:          make(map[protocol.DeviceID]protocol.Connection),
 		rawConn:            make(map[protocol.DeviceID]io.Closer),
 		deviceVer:          make(map[protocol.DeviceID]string),
-		finder:             files.NewBlockFinder(db, cfg),
+		finder:             db.NewBlockFinder(ldb, cfg),
 		progressEmitter:    NewProgressEmitter(cfg),
 	}
 	if cfg.Options().ProgressUpdateIntervalS > -1 {
@@ -309,7 +309,7 @@ func (m *Model) Completion(device protocol.DeviceID, folder string) float64 {
 		return 0 // Folder doesn't exist, so we hardly have any of it
 	}
 
-	rf.WithGlobalTruncated(func(f files.FileIntf) bool {
+	rf.WithGlobalTruncated(func(f db.FileIntf) bool {
 		if !f.IsDeleted() {
 			tot += f.Size()
 		}
@@ -321,7 +321,7 @@ func (m *Model) Completion(device protocol.DeviceID, folder string) float64 {
 	}
 
 	var need int64
-	rf.WithNeedTruncated(device, func(f files.FileIntf) bool {
+	rf.WithNeedTruncated(device, func(f db.FileIntf) bool {
 		if !f.IsDeleted() {
 			need += f.Size()
 		}
@@ -346,7 +346,7 @@ func sizeOf(fs []protocol.FileInfo) (files, deleted int, bytes int64) {
 	return
 }
 
-func sizeOfFile(f files.FileIntf) (files, deleted int, bytes int64) {
+func sizeOfFile(f db.FileIntf) (files, deleted int, bytes int64) {
 	if !f.IsDeleted() {
 		files++
 	} else {
@@ -364,7 +364,7 @@ func (m *Model) GlobalSize(folder string) (nfiles, deleted int, bytes int64) {
 	m.fmut.RLock()
 	defer m.fmut.RUnlock()
 	if rf, ok := m.folderFiles[folder]; ok {
-		rf.WithGlobalTruncated(func(f files.FileIntf) bool {
+		rf.WithGlobalTruncated(func(f db.FileIntf) bool {
 			fs, de, by := sizeOfFile(f)
 			nfiles += fs
 			deleted += de
@@ -383,7 +383,7 @@ func (m *Model) LocalSize(folder string) (nfiles, deleted int, bytes int64) {
 	m.fmut.RLock()
 	defer m.fmut.RUnlock()
 	if rf, ok := m.folderFiles[folder]; ok {
-		rf.WithHaveTruncated(protocol.LocalDeviceID, func(f files.FileIntf) bool {
+		rf.WithHaveTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
 			if f.IsInvalid() {
 				return true
 			}
@@ -404,7 +404,7 @@ func (m *Model) NeedSize(folder string) (nfiles int, bytes int64) {
 	m.fmut.RLock()
 	defer m.fmut.RUnlock()
 	if rf, ok := m.folderFiles[folder]; ok {
-		rf.WithNeedTruncated(protocol.LocalDeviceID, func(f files.FileIntf) bool {
+		rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
 			fs, de, by := sizeOfFile(f)
 			nfiles += fs + de
 			bytes += by
@@ -421,21 +421,21 @@ func (m *Model) NeedSize(folder string) (nfiles int, bytes int64) {
 // NeedFiles returns the list of currently needed files in progress, queued,
 // and to be queued on next puller iteration. Also takes a soft cap which is
 // only respected when adding files from the model rather than the runner queue.
-func (m *Model) NeedFolderFiles(folder string, max int) ([]files.FileInfoTruncated, []files.FileInfoTruncated, []files.FileInfoTruncated) {
+func (m *Model) NeedFolderFiles(folder string, max int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated) {
 	defer m.leveldbPanicWorkaround()
 
 	m.fmut.RLock()
 	defer m.fmut.RUnlock()
 	if rf, ok := m.folderFiles[folder]; ok {
-		var progress, queued, rest []files.FileInfoTruncated
+		var progress, queued, rest []db.FileInfoTruncated
 		var seen map[string]bool
 
 		runner, ok := m.folderRunners[folder]
 		if ok {
 			progressNames, queuedNames := runner.Jobs()
 
-			progress = make([]files.FileInfoTruncated, len(progressNames))
-			queued = make([]files.FileInfoTruncated, len(queuedNames))
+			progress = make([]db.FileInfoTruncated, len(progressNames))
+			queued = make([]db.FileInfoTruncated, len(queuedNames))
 			seen = make(map[string]bool, len(progressNames)+len(queuedNames))
 
 			for i, name := range progressNames {
@@ -454,9 +454,9 @@ func (m *Model) NeedFolderFiles(folder string, max int) ([]files.FileInfoTruncat
 		}
 		left := max - len(progress) - len(queued)
 		if max < 1 || left > 0 {
-			rf.WithNeedTruncated(protocol.LocalDeviceID, func(f files.FileIntf) bool {
+			rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
 				left--
-				ft := f.(files.FileInfoTruncated)
+				ft := f.(db.FileInfoTruncated)
 				if !seen[ft.Name] {
 					rest = append(rest, ft)
 				}
@@ -949,7 +949,7 @@ func (m *Model) receivedFile(folder, filename string) {
 	m.folderStatRef(folder).ReceivedFile(filename)
 }
 
-func sendIndexes(conn protocol.Connection, folder string, fs *files.Set, ignores *ignore.Matcher) {
+func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) {
 	deviceID := conn.ID()
 	name := conn.Name()
 	var err error
@@ -974,7 +974,7 @@ func sendIndexes(conn protocol.Connection, folder string, fs *files.Set, ignores
 	}
 }
 
-func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, folder string, fs *files.Set, ignores *ignore.Matcher) (uint64, error) {
+func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) (uint64, error) {
 	deviceID := conn.ID()
 	name := conn.Name()
 	batch := make([]protocol.FileInfo, 0, indexBatchSize)
@@ -982,7 +982,7 @@ func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, fol
 	maxLocalVer := uint64(0)
 	var err error
 
-	fs.WithHave(protocol.LocalDeviceID, func(fi files.FileIntf) bool {
+	fs.WithHave(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
 		f := fi.(protocol.FileInfo)
 		if f.LocalVersion <= minLocalVer {
 			return true
@@ -1081,7 +1081,7 @@ func (m *Model) AddFolder(cfg config.FolderConfiguration) {
 
 	m.fmut.Lock()
 	m.folderCfgs[cfg.ID] = cfg
-	m.folderFiles[cfg.ID] = files.NewSet(cfg.ID, m.db)
+	m.folderFiles[cfg.ID] = db.NewFileSet(cfg.ID, m.db)
 
 	m.folderDevices[cfg.ID] = make([]protocol.DeviceID, len(cfg.Devices))
 	for i, device := range cfg.Devices {
@@ -1182,8 +1182,8 @@ func (m *Model) ScanFolderSub(folder, sub string) error {
 	batch = batch[:0]
 	// TODO: We should limit the Have scanning to start at sub
 	seenPrefix := false
-	fs.WithHaveTruncated(protocol.LocalDeviceID, func(fi files.FileIntf) bool {
-		f := fi.(files.FileInfoTruncated)
+	fs.WithHaveTruncated(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
+		f := fi.(db.FileInfoTruncated)
 		if !strings.HasPrefix(f.Name, sub) {
 			// Return true so that we keep iterating, until we get to the part
 			// of the tree we are interested in. Then return false so we stop
@@ -1323,7 +1323,7 @@ func (m *Model) Override(folder string) {
 
 	m.setState(folder, FolderScanning)
 	batch := make([]protocol.FileInfo, 0, indexBatchSize)
-	fs.WithNeed(protocol.LocalDeviceID, func(fi files.FileIntf) bool {
+	fs.WithNeed(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
 		need := fi.(protocol.FileInfo)
 		if len(batch) == indexBatchSize {
 			fs.Update(protocol.LocalDeviceID, batch)

+ 2 - 2
internal/model/puller.go

@@ -28,8 +28,8 @@ import (
 	"github.com/AudriusButkevicius/lfu-go"
 
 	"github.com/syncthing/syncthing/internal/config"
+	"github.com/syncthing/syncthing/internal/db"
 	"github.com/syncthing/syncthing/internal/events"
-	"github.com/syncthing/syncthing/internal/files"
 	"github.com/syncthing/syncthing/internal/ignore"
 	"github.com/syncthing/syncthing/internal/osutil"
 	"github.com/syncthing/syncthing/internal/protocol"
@@ -299,7 +299,7 @@ func (p *Puller) pullerIteration(ignores *ignore.Matcher) int {
 
 	var deletions []protocol.FileInfo
 
-	folderFiles.WithNeed(protocol.LocalDeviceID, func(intf files.FileIntf) bool {
+	folderFiles.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
 
 		// Needed items are delivered sorted lexicographically. This isn't
 		// really optimal from a performance point of view - it would be

+ 3 - 3
internal/model/sharedpullerstate.go

@@ -21,7 +21,7 @@ import (
 	"path/filepath"
 	"sync"
 
-	"github.com/syncthing/syncthing/internal/files"
+	"github.com/syncthing/syncthing/internal/db"
 	"github.com/syncthing/syncthing/internal/protocol"
 )
 
@@ -250,7 +250,7 @@ func (s *sharedPullerState) Progress() *pullerProgress {
 		CopiedFromElsewhere: s.copyTotal - s.copyNeeded - s.copyOrigin,
 		Pulled:              s.pullTotal - s.pullNeeded,
 		Pulling:             s.pullNeeded,
-		BytesTotal:          files.BlocksToSize(total),
-		BytesDone:           files.BlocksToSize(done),
+		BytesTotal:          db.BlocksToSize(total),
+		BytesDone:           db.BlocksToSize(done),
 	}
 }