@@ -150,6 +150,7 @@ type model struct {
 	helloMessages       map[protocol.DeviceID]protocol.Hello
 	deviceDownloads     map[protocol.DeviceID]*deviceDownloadState
 	remotePausedFolders map[protocol.DeviceID][]string // deviceID -> folders
+	indexSenderTokens   map[protocol.DeviceID][]suture.ServiceToken
 
 	foldersRunning int32 // for testing only
 }
@@ -222,6 +223,7 @@ func NewModel(cfg config.Wrapper, id protocol.DeviceID, clientName, clientVersio
 		helloMessages:       make(map[protocol.DeviceID]protocol.Hello),
 		deviceDownloads:     make(map[protocol.DeviceID]*deviceDownloadState),
 		remotePausedFolders: make(map[protocol.DeviceID][]string),
+		indexSenderTokens:   make(map[protocol.DeviceID][]suture.ServiceToken),
 	}
 	for devID := range cfg.Devices() {
 		m.deviceStatRefs[devID] = stats.NewDeviceStatisticsReference(m.db, devID.String())
@@ -257,13 +259,16 @@ func (m *model) onServe() {
 func (m *model) Stop() {
 	m.cfg.Unsubscribe(m)
 	m.Supervisor.Stop()
-	devs := m.cfg.Devices()
-	ids := make([]protocol.DeviceID, 0, len(devs))
-	for id := range devs {
-		ids = append(ids, id)
+	m.pmut.RLock()
+	closed := make([]chan struct{}, 0, len(m.conn))
+	for id, conn := range m.conn {
+		closed = append(closed, m.closed[id])
+		go conn.Close(errStopped)
+	}
+	m.pmut.RUnlock()
+	for _, c := range closed {
+		<-c
 	}
-	w := m.closeConns(ids, errStopped)
-	w.Wait()
 }
 
 // StartDeadlockDetector starts a deadlock detector on the models locks which
@@ -393,7 +398,12 @@ func (m *model) warnAboutOverwritingProtectedFiles(cfg config.FolderConfiguratio
 }
 
 func (m *model) removeFolder(cfg config.FolderConfiguration) {
-	m.stopFolder(cfg, fmt.Errorf("removing folder %v", cfg.Description()))
+	m.fmut.RLock()
+	token, ok := m.folderRunnerToken[cfg.ID]
+	m.fmut.RUnlock()
+	if ok {
+		m.RemoveAndWait(token, 0)
+	}
 
 	m.fmut.Lock()
@@ -417,22 +427,6 @@ func (m *model) removeFolder(cfg config.FolderConfiguration) {
 	db.DropFolder(m.db, cfg.ID)
 }
 
-func (m *model) stopFolder(cfg config.FolderConfiguration, err error) {
-	// Stop the services running for this folder and wait for them to finish
-	// stopping to prevent races on restart.
-	m.fmut.RLock()
-	token, ok := m.folderRunnerToken[cfg.ID]
-	m.fmut.RUnlock()
-
-	if ok {
-		m.RemoveAndWait(token, 0)
-	}
-
-	// Wait for connections to stop to ensure that no more calls to methods
-	// expecting this folder to exist happen (e.g. .IndexUpdate).
-	m.closeConns(cfg.DeviceIDs(), err).Wait()
-}
-
 // Need to hold lock on m.fmut when calling this.
 func (m *model) cleanupFolderLocked(cfg config.FolderConfiguration) {
 	// clear up our config maps
@@ -464,25 +458,13 @@ func (m *model) restartFolder(from, to config.FolderConfiguration, cacheIgnoredF
 	restartMut.Lock()
 	defer restartMut.Unlock()
 
-	var infoMsg string
-	var errMsg string
-	switch {
-	case to.Paused:
-		infoMsg = "Paused"
-		errMsg = "pausing"
-	case from.Paused:
-		infoMsg = "Unpaused"
-		errMsg = "unpausing"
-	default:
-		infoMsg = "Restarted"
-		errMsg = "restarting"
+	m.fmut.RLock()
+	token, ok := m.folderRunnerToken[from.ID]
+	m.fmut.RUnlock()
+	if ok {
+		m.RemoveAndWait(token, 0)
 	}
 
-	err := fmt.Errorf("%v folder %v", errMsg, to.Description())
-	m.stopFolder(from, err)
-	// Need to send CC change to both from and to devices.
-	m.closeConns(to.DeviceIDs(), err)
-
 	m.fmut.Lock()
 	defer m.fmut.Unlock()
@@ -499,6 +481,16 @@ func (m *model) restartFolder(from, to config.FolderConfiguration, cacheIgnoredF
 	}
 	m.addAndStartFolderLocked(to, fset, cacheIgnoredFiles)
+
+	var infoMsg string
+	switch {
+	case to.Paused:
+		infoMsg = "Paused"
+	case from.Paused:
+		infoMsg = "Unpaused"
+	default:
+		infoMsg = "Restarted"
+	}
 	l.Infof("%v folder %v (%v)", infoMsg, to.Description(), to.Type)
 }
@@ -507,9 +499,6 @@ func (m *model) newFolder(cfg config.FolderConfiguration, cacheIgnoredFiles bool
 	// we do it outside of the lock.
 	fset := db.NewFileSet(cfg.ID, cfg.Filesystem(), m.db)
 
-	// Close connections to affected devices
-	m.closeConns(cfg.DeviceIDs(), fmt.Errorf("started folder %v", cfg.Description()))
-
 	m.fmut.Lock()
 	defer m.fmut.Unlock()
 	m.addAndStartFolderLocked(cfg, fset, cacheIgnoredFiles)
@@ -992,6 +981,9 @@ func (m *model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon
 	m.pmut.RLock()
 	conn, ok := m.conn[deviceID]
 	closed := m.closed[deviceID]
+	for _, token := range m.indexSenderTokens[deviceID] {
+		m.RemoveAndWait(token, 0)
+	}
 	m.pmut.RUnlock()
 	if !ok {
 		panic("bug: ClusterConfig called on closed or nonexistent connection")
@@ -1024,6 +1016,7 @@ func (m *model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon
 	}
 
 	var paused []string
+	indexSenderTokens := make([]suture.ServiceToken, 0, len(cm.Folders))
 	for _, folder := range cm.Folders {
 		cfg, ok := m.cfg.Folder(folder.ID)
 		if !ok || !cfg.SharedWith(deviceID) {
@@ -1142,14 +1135,12 @@ func (m *model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon
 			evLogger: m.evLogger,
 		}
 		is.Service = util.AsService(is.serve, is.String())
-		// The token isn't tracked as the service stops when the connection
-		// terminates and is automatically removed from supervisor (by
-		// implementing suture.IsCompletable).
-		m.Add(is)
+		indexSenderTokens = append(indexSenderTokens, m.Add(is))
 	}
 
 	m.pmut.Lock()
 	m.remotePausedFolders[deviceID] = paused
+	m.indexSenderTokens[deviceID] = indexSenderTokens
 	m.pmut.Unlock()
 
 	// This breaks if we send multiple CM messages during the same connection.
@@ -1397,41 +1388,6 @@ func (m *model) Closed(conn protocol.Connection, err error) {
 	close(closed)
 }
 
-// closeConns will close the underlying connection for given devices and return
-// a waiter that will return once all the connections are finished closing.
-func (m *model) closeConns(devs []protocol.DeviceID, err error) config.Waiter {
-	conns := make([]connections.Connection, 0, len(devs))
-	closed := make([]chan struct{}, 0, len(devs))
-	m.pmut.RLock()
-	for _, dev := range devs {
-		if conn, ok := m.conn[dev]; ok {
-			conns = append(conns, conn)
-			closed = append(closed, m.closed[dev])
-		}
-	}
-	m.pmut.RUnlock()
-	for _, conn := range conns {
-		conn.Close(err)
-	}
-	return &channelWaiter{chans: closed}
-}
-
-// closeConn closes the underlying connection for the given device and returns
-// a waiter that will return once the connection is finished closing.
-func (m *model) closeConn(dev protocol.DeviceID, err error) config.Waiter {
-	return m.closeConns([]protocol.DeviceID{dev}, err)
-}
-
-type channelWaiter struct {
-	chans []chan struct{}
-}
-
-func (w *channelWaiter) Wait() {
-	for _, c := range w.chans {
-		<-c
-	}
-}
-
 // Implements protocol.RequestResponse
 type requestResponse struct {
 	data []byte
@@ -2467,6 +2423,9 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
 
 	// Go through the folder configs and figure out if we need to restart or not.
 
+	// Tracks devices affected by any configuration change to resend ClusterConfig.
+	clusterConfigDevices := make(map[protocol.DeviceID]struct{}, len(from.Devices)+len(to.Devices))
+
 	fromFolders := mapFolders(from.Folders)
 	toFolders := mapFolders(to.Folders)
 	for folderID, cfg := range toFolders {
@@ -2478,6 +2437,7 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
 				l.Infoln("Adding folder", cfg.Description())
 				m.newFolder(cfg, to.Options.CacheIgnoredFiles)
 			}
+			clusterConfigDevices = addDeviceIDsToMap(clusterConfigDevices, cfg.DeviceIDs())
 		}
 	}
 
@@ -2486,6 +2446,7 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
 		if !ok {
 			// The folder was removed.
 			m.removeFolder(fromCfg)
+			clusterConfigDevices = addDeviceIDsToMap(clusterConfigDevices, fromCfg.DeviceIDs())
 			continue
 		}
 
@@ -2497,6 +2458,8 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
 		// Check if anything differs that requires a restart.
 		if !reflect.DeepEqual(fromCfg.RequiresRestartOnly(), toCfg.RequiresRestartOnly()) || from.Options.CacheIgnoredFiles != to.Options.CacheIgnoredFiles {
 			m.restartFolder(fromCfg, toCfg, to.Options.CacheIgnoredFiles)
+			clusterConfigDevices = addDeviceIDsToMap(clusterConfigDevices, fromCfg.DeviceIDs())
+			clusterConfigDevices = addDeviceIDsToMap(clusterConfigDevices, toCfg.DeviceIDs())
 		}
 
 		// Emit the folder pause/resume event
@@ -2519,6 +2482,7 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
 	// Pausing a device, unpausing is handled by the connection service.
 	fromDevices := from.DeviceMap()
 	toDevices := to.DeviceMap()
+	closeDevices := make([]protocol.DeviceID, 0, len(to.Devices))
 	for deviceID, toCfg := range toDevices {
 		fromCfg, ok := fromDevices[deviceID]
 		if !ok {
@@ -2534,13 +2498,14 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
 		}
 
 		// Ignored folder was removed, reconnect to retrigger the prompt.
-		if len(fromCfg.IgnoredFolders) > len(toCfg.IgnoredFolders) {
-			m.closeConn(deviceID, errIgnoredFolderRemoved)
+		if !toCfg.Paused && len(fromCfg.IgnoredFolders) > len(toCfg.IgnoredFolders) {
+			closeDevices = append(closeDevices, deviceID)
 		}
 
 		if toCfg.Paused {
 			l.Infoln("Pausing", deviceID)
-			m.closeConn(deviceID, errDevicePaused)
+			closeDevices = append(closeDevices, deviceID)
+			delete(clusterConfigDevices, deviceID)
 			m.evLogger.Log(events.DevicePaused, map[string]string{"device": deviceID.String()})
 		} else {
 			m.evLogger.Log(events.DeviceResumed, map[string]string{"device": deviceID.String()})
@@ -2551,9 +2516,28 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
 	for deviceID := range fromDevices {
 		delete(m.deviceStatRefs, deviceID)
 		removedDevices = append(removedDevices, deviceID)
+		delete(clusterConfigDevices, deviceID)
 	}
 	m.fmut.Unlock()
-	m.closeConns(removedDevices, errDeviceRemoved)
+
+	m.pmut.RLock()
+	for _, id := range closeDevices {
+		if conn, ok := m.conn[id]; ok {
+			go conn.Close(errDevicePaused)
+		}
+	}
+	for _, id := range removedDevices {
+		if conn, ok := m.conn[id]; ok {
+			go conn.Close(errDeviceRemoved)
+		}
+	}
+	for id := range clusterConfigDevices {
+		if conn, ok := m.conn[id]; ok {
+			cm := m.generateClusterConfig(conn.ID())
+			go conn.ClusterConfig(cm)
+		}
+	}
+	m.pmut.RUnlock()
 
 	m.globalRequestLimiter.setCapacity(1024 * to.Options.MaxConcurrentIncomingRequestKiB())
 	m.folderIOLimiter.setCapacity(to.Options.MaxFolderConcurrency())
@@ -2758,3 +2742,12 @@ func sanitizePath(path string) string {
 
 	return strings.TrimSpace(b.String())
 }
+
+func addDeviceIDsToMap(m map[protocol.DeviceID]struct{}, s []protocol.DeviceID) map[protocol.DeviceID]struct{} {
+	for _, id := range s {
+		if _, ok := m[id]; !ok {
+			m[id] = struct{}{}
+		}
+	}
+	return m
+}