Przeglądaj źródła

lib/model: Handle cluster-config before folder start (fixes #7122) (#7123)

Simon Frei 5 lat temu
rodzic
commit
2d3a535ced
2 zmienionych plików z 77 dodań i 22 usunięć
  1. 28 22
      lib/model/model.go
  2. 49 0
      lib/model/requests_test.go

+ 28 - 22
lib/model/model.go

@@ -551,23 +551,28 @@ func (m *model) newFolder(cfg config.FolderConfiguration, cacheIgnoredFiles bool
 	m.fmut.Lock()
 	defer m.fmut.Unlock()
 
-	// In case this folder is new and was shared with us we already got a
-	// cluster config and wont necessarily get another soon - start sending
-	// indexes if connected.
-	if fset.Sequence(protocol.LocalDeviceID) == 0 {
-		m.pmut.RLock()
-		for _, id := range cfg.DeviceIDs() {
-			if is, ok := m.indexSenders[id]; ok {
-				if fset.Sequence(id) == 0 {
-					is.addNew(cfg, fset)
-				}
+	// Cluster configs might be received and processed before reaching this
+	// point, i.e. before the folder is started. If that's the case, start
+	// index senders here.
+	localSequenceZero := fset.Sequence(protocol.LocalDeviceID) == 0
+	m.pmut.RLock()
+	for _, id := range cfg.DeviceIDs() {
+		if is, ok := m.indexSenders[id]; ok {
+			if localSequenceZero && fset.Sequence(id) == 0 {
+				// In case this folder was shared with us and
+				// newly added, add a new index sender.
+				is.addNew(cfg, fset)
+			} else {
+				// For existing folders we stored the index data from
+				// the cluster config, so resume based on that - if
+				// we didn't get a cluster config yet, it's a noop.
+				is.resume(cfg, fset)
 			}
 		}
-		m.pmut.RUnlock()
 	}
+	m.pmut.RUnlock()
 
 	m.addAndStartFolderLocked(cfg, fset, cacheIgnoredFiles)
-
 }
 
 func (m *model) UsageReportingStats(report *contract.Report, version int, preview bool) {
@@ -1191,16 +1196,6 @@ func (m *model) ccHandleFolders(folders []protocol.Folder, deviceCfg config.Devi
 			continue
 		}
 
-		m.fmut.RLock()
-		fs, ok := m.folderFiles[folder.ID]
-		m.fmut.RUnlock()
-		if !ok {
-			// Shouldn't happen because !cfg.Paused, but might happen
-			// if the folder is about to be unpaused, but not yet.
-			l.Debugln("ccH: no fset", folder.ID)
-			continue
-		}
-
 		if err := m.ccCheckEncryption(cfg, folderDevice, ccDeviceInfos[folder.ID], deviceCfg.Untrusted); err != nil {
 			sameError := false
 			if devs, ok := m.folderEncryptionFailures[folder.ID]; ok {
@@ -1232,6 +1227,17 @@ func (m *model) ccHandleFolders(folders []protocol.Folder, deviceCfg config.Devi
 			tempIndexFolders = append(tempIndexFolders, folder.ID)
 		}
 
+		m.fmut.RLock()
+		fs, ok := m.folderFiles[folder.ID]
+		m.fmut.RUnlock()
+		if !ok {
+			// Shouldn't happen because !cfg.Paused, but might happen
+			// if the folder is about to be unpaused, but not yet.
+			l.Debugln("ccH: no fset", folder.ID)
+			indexSenders.addPaused(cfg, ccDeviceInfos[folder.ID])
+			continue
+		}
+
 		indexSenders.add(cfg, fs, ccDeviceInfos[folder.ID])
 
 		// We might already have files that we need to pull so let the

+ 49 - 0
lib/model/requests_test.go

@@ -20,6 +20,8 @@ import (
 	"time"
 
 	"github.com/syncthing/syncthing/lib/config"
+	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
@@ -1271,3 +1273,50 @@ func TestRequestIndexSenderPause(t *testing.T) {
 		t.Error("Received index despite remote not having the folder")
 	}
 }
+
+// TestRequestIndexSenderClusterConfigBeforeStart covers the fixed scenario
+// (#7122): a connection delivers its cluster config before the folder is
+// started on our side, and indexes must still be sent once the folder runs.
+func TestRequestIndexSenderClusterConfigBeforeStart(t *testing.T) {
+	// done signals test teardown to any still-blocked callback below.
+	done := make(chan struct{})
+	defer close(done)
+
+	ldb := db.NewLowlevel(backend.OpenMemory())
+	w, fcfg := tmpDefaultWrapper()
+	tfs := fcfg.Filesystem()
+	dir1 := "foo"
+	dir2 := "bar"
+
+	// Initialise db with an entry and then stop everything again
+	must(t, tfs.Mkdir(dir1, 0777))
+	m := newModel(w, myID, "syncthing", "dev", ldb, nil)
+	defer cleanupModelAndRemoveDir(m, tfs.URI())
+	m.ServeBackground()
+	m.ScanFolders()
+	// Tear the model down by hand (cancel, then wait for it to stop) so the
+	// same db can be reused by a fresh model instance below.
+	m.cancel()
+	m.evCancel()
+	<-m.stopped
+
+	// Add connection (sends cluster config) before starting the new model
+	m = newModel(w, myID, "syncthing", "dev", ldb, nil)
+	defer cleanupModel(m)
+	fc := addFakeConn(m, device1)
+	indexChan := make(chan []protocol.FileInfo)
+	fc.mut.Lock()
+	fc.indexFn = func(_ context.Context, folder string, fs []protocol.FileInfo) {
+		// Forward received indexes to the test, or bail out on teardown so
+		// this callback never blocks after the test has returned.
+		select {
+		case indexChan <- fs:
+		case <-done:
+		}
+	}
+	fc.mut.Unlock()
+
+	// Only now start the model, i.e. after the connection (and thus the
+	// cluster config) is already in place.
+	m.ServeBackground()
+	<-m.started
+
+	// Check that an index is sent for the newly added item
+	must(t, tfs.Mkdir(dir2, 0777))
+	m.ScanFolders()
+	select {
+	case <-time.After(5 * time.Second):
+		t.Fatal("timed out before receiving index")
+	case <-indexChan:
+	}
+}