
Clean out index for nonexistent repositories (fixes #549)

Jakob Borg 11 years ago
parent
commit
6c09a77a97
4 changed files with 144 additions and 0 deletions
  1. cmd/syncthing/main.go (11 additions, 0 deletions)
  2. files/leveldb.go (70 additions, 0 deletions)
  3. files/set.go (11 additions, 0 deletions)
  4. files/set_test.go (52 additions, 0 deletions)

cmd/syncthing/main.go (+11 -0)

@@ -31,6 +31,7 @@ import (
 	"github.com/syncthing/syncthing/config"
 	"github.com/syncthing/syncthing/discover"
 	"github.com/syncthing/syncthing/events"
+	"github.com/syncthing/syncthing/files"
 	"github.com/syncthing/syncthing/logger"
 	"github.com/syncthing/syncthing/model"
 	"github.com/syncthing/syncthing/osutil"
@@ -392,6 +393,16 @@ func main() {
 	if err != nil {
 		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
 	}
+
+	// Remove database entries for repos that no longer exist in the config
+	repoMap := cfg.RepoMap()
+	for _, repo := range files.ListRepos(db) {
+		if _, ok := repoMap[repo]; !ok {
+			l.Infof("Cleaning data for dropped repo %q", repo)
+			files.DropRepo(db, repo)
+		}
+	}
+
 	m := model.NewModel(confDir, &cfg, myName, "syncthing", Version, db)
 
 nextRepo:
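
For context, here is a minimal, self-contained sketch of the pattern this hunk adds: list the repo IDs present in the index database and drop those the configuration no longer mentions. The cleanStaleRepos helper, the map[string]bool form of the configured set, and the in-memory database are illustrative assumptions; in the real main() the set comes from cfg.RepoMap() and the database is the one opened just above.

	package main

	import (
		"github.com/syncthing/syncthing/files"
		"github.com/syndtr/goleveldb/leveldb"
		"github.com/syndtr/goleveldb/leveldb/storage"
	)

	// cleanStaleRepos is a hypothetical helper mirroring the new startup
	// logic: drop index data for every repo that is no longer configured.
	func cleanStaleRepos(db *leveldb.DB, configured map[string]bool) {
		for _, repo := range files.ListRepos(db) {
			if !configured[repo] {
				files.DropRepo(db, repo)
			}
		}
	}

	func main() {
		// In-memory database, as the tests in files/set_test.go use.
		db, err := leveldb.Open(storage.NewMemStorage(), nil)
		if err != nil {
			panic(err)
		}
		defer db.Close()
		cleanStaleRepos(db, map[string]bool{"default": true})
	}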

files/leveldb.go (+70 -0)

@@ -117,6 +117,12 @@ func globalKeyName(key []byte) []byte {
 	return key[1+64:]
 }
 
+func globalKeyRepo(key []byte) []byte {
+	repo := key[1 : 1+64]
+	izero := bytes.IndexByte(repo, 0)
+	return repo[:izero]
+}
+
 type deletionHandler func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64
 
 type fileIterator func(f protocol.FileIntf) bool
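
The new globalKeyRepo helper relies on the existing key layout: a global key is one key-type byte, followed by the repo name zero-padded to 64 bytes, followed by the file name (which is why globalKeyName above slices past 1+64 bytes). Below is a standalone sketch of that decoding; the keyTypeGlobal value and the added bounds check are illustrative assumptions, not taken from the commit.

	package main

	import (
		"bytes"
		"fmt"
	)

	const keyTypeGlobal = 1 // illustrative value; the real constant lives in files/leveldb.go

	// repoFromGlobalKey mirrors globalKeyRepo: the repo name occupies the
	// 64 bytes after the type byte and is zero-padded to full width.
	func repoFromGlobalKey(key []byte) []byte {
		repo := key[1 : 1+64]
		if izero := bytes.IndexByte(repo, 0); izero >= 0 {
			return repo[:izero]
		}
		return repo
	}

	func main() {
		key := make([]byte, 1+64)
		key[0] = keyTypeGlobal
		copy(key[1:], "default")
		key = append(key, "docs/readme.txt"...)
		fmt.Printf("%s\n", repoFromGlobalKey(key)) // prints "default"
	}
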
@@ -637,6 +643,70 @@ func ldbWithNeed(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterat
 	}
 }
 
+func ldbListRepos(db *leveldb.DB) []string {
+	defer runtime.GC()
+
+	start := []byte{keyTypeGlobal}
+	limit := []byte{keyTypeGlobal + 1}
+	snap, err := db.GetSnapshot()
+	if err != nil {
+		panic(err)
+	}
+	defer snap.Release()
+	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
+	defer dbi.Release()
+
+	repoExists := make(map[string]bool)
+	for dbi.Next() {
+		repo := string(globalKeyRepo(dbi.Key()))
+		if !repoExists[repo] {
+			repoExists[repo] = true
+		}
+	}
+
+	repos := make([]string, 0, len(repoExists))
+	for k := range repoExists {
+		repos = append(repos, k)
+	}
+
+	sort.Strings(repos)
+	return repos
+}
+
+func ldbDropRepo(db *leveldb.DB, repo []byte) {
+	defer runtime.GC()
+
+	snap, err := db.GetSnapshot()
+	if err != nil {
+		panic(err)
+	}
+	defer snap.Release()
+
+	// Remove all items related to the given repo from the node->file bucket
+	start := []byte{keyTypeNode}
+	limit := []byte{keyTypeNode + 1}
+	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
+	for dbi.Next() {
+		itemRepo := nodeKeyRepo(dbi.Key())
+		if bytes.Compare(repo, itemRepo) == 0 {
+			db.Delete(dbi.Key(), nil)
+		}
+	}
+	dbi.Release()
+
+	// Remove all items related to the given repo from the global bucket
+	start = []byte{keyTypeGlobal}
+	limit = []byte{keyTypeGlobal + 1}
+	dbi = snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
+	for dbi.Next() {
+		itemRepo := globalKeyRepo(dbi.Key())
+		if bytes.Compare(repo, itemRepo) == 0 {
+			db.Delete(dbi.Key(), nil)
+		}
+	}
+	dbi.Release()
+}
+
 func unmarshalTrunc(bs []byte, truncate bool) (protocol.FileIntf, error) {
 	if truncate {
 		var tf protocol.FileInfoTruncated
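
Both new functions follow the same goleveldb pattern: take a snapshot, iterate every key whose leading type byte falls inside a half-open range, and act on each matching key. A self-contained sketch of that range scan, using a made-up key-type value and in-memory storage, purely illustrative:

	package main

	import (
		"fmt"

		"github.com/syndtr/goleveldb/leveldb"
		"github.com/syndtr/goleveldb/leveldb/storage"
		"github.com/syndtr/goleveldb/leveldb/util"
	)

	const keyTypeGlobal = 1 // illustrative value only

	func main() {
		db, err := leveldb.Open(storage.NewMemStorage(), nil)
		if err != nil {
			panic(err)
		}
		defer db.Close()

		// Two keys in the "global" range and one outside it.
		db.Put([]byte{keyTypeGlobal, 'a'}, []byte("x"), nil)
		db.Put([]byte{keyTypeGlobal, 'b'}, []byte("y"), nil)
		db.Put([]byte{keyTypeGlobal + 1, 'c'}, []byte("z"), nil)

		// Range scan over exactly the keyTypeGlobal prefix, as
		// ldbListRepos and ldbDropRepo do, deleting what we visit.
		snap, err := db.GetSnapshot()
		if err != nil {
			panic(err)
		}
		defer snap.Release()

		dbi := snap.NewIterator(&util.Range{Start: []byte{keyTypeGlobal}, Limit: []byte{keyTypeGlobal + 1}}, nil)
		defer dbi.Release()
		for dbi.Next() {
			fmt.Printf("deleting % x\n", dbi.Key())
			db.Delete(dbi.Key(), nil)
		}
	}

Deleting through db while the iterator is live is safe here because the iterator reads from the immutable snapshot taken beforehand, which is also why ldbDropRepo can delete as it scans.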

files/set.go (+11 -0)

@@ -155,6 +155,17 @@ func (s *Set) LocalVersion(node protocol.NodeID) uint64 {
 	return s.localVersion[node]
 }
 
+// ListRepos returns the repository IDs seen in the database.
+func ListRepos(db *leveldb.DB) []string {
+	return ldbListRepos(db)
+}
+
+// DropRepo clears out all information related to the given repo from the
+// database.
+func DropRepo(db *leveldb.DB, repo string) {
+	ldbDropRepo(db, []byte(repo))
+}
+
 func normalizeFilenames(fs []protocol.FileInfo) {
 	for i := range fs {
 		fs[i].Name = normalizedFilename(fs[i].Name)

files/set_test.go (+52 -0)

@@ -7,6 +7,7 @@ package files_test
 import (
 	"bytes"
 	"fmt"
+	"reflect"
 	"sort"
 	"testing"
 
@@ -597,6 +598,57 @@ func TestLocalVersion(t *testing.T) {
 	}
 }
 
+func TestListDropRepo(t *testing.T) {
+	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	s0 := files.NewSet("test0", db)
+	local1 := []protocol.FileInfo{
+		protocol.FileInfo{Name: "a", Version: 1000},
+		protocol.FileInfo{Name: "b", Version: 1000},
+		protocol.FileInfo{Name: "c", Version: 1000},
+	}
+	s0.Replace(protocol.LocalNodeID, local1)
+
+	s1 := files.NewSet("test1", db)
+	local2 := []protocol.FileInfo{
+		protocol.FileInfo{Name: "d", Version: 1002},
+		protocol.FileInfo{Name: "e", Version: 1002},
+		protocol.FileInfo{Name: "f", Version: 1002},
+	}
+	s1.Replace(remoteNode, local2)
+
+	// Check that we have both repos and their data is in the global list
+
+	expectedRepoList := []string{"test0", "test1"}
+	if actualRepoList := files.ListRepos(db); !reflect.DeepEqual(actualRepoList, expectedRepoList) {
+		t.Fatalf("RepoList mismatch\nE: %v\nA: %v", expectedRepoList, actualRepoList)
+	}
+	if l := len(globalList(s0)); l != 3 {
+		t.Errorf("Incorrect global length %d != 3 for s0", l)
+	}
+	if l := len(globalList(s1)); l != 3 {
+		t.Errorf("Incorrect global length %d != 3 for s1", l)
+	}
+
+	// Drop one of them and check that it's gone.
+
+	files.DropRepo(db, "test1")
+
+	expectedRepoList = []string{"test0"}
+	if actualRepoList := files.ListRepos(db); !reflect.DeepEqual(actualRepoList, expectedRepoList) {
+		t.Fatalf("RepoList mismatch\nE: %v\nA: %v", expectedRepoList, actualRepoList)
+	}
+	if l := len(globalList(s0)); l != 3 {
+		t.Errorf("Incorrect global length %d != 3 for s0", l)
+	}
+	if l := len(globalList(s1)); l != 0 {
+		t.Errorf("Incorrect global length %d != 0 for s1", l)
+	}
+}
+
 func TestLongPath(t *testing.T) {
 	db, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {