indexhandler.go

// Copyright (C) 2020 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package model

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/syncthing/syncthing/internal/db"
	"github.com/syncthing/syncthing/internal/itererr"
	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/svcutil"
	"github.com/syncthing/syncthing/lib/ur"
)

type indexHandler struct {
	conn                     protocol.Connection
	downloads                *deviceDownloadState
	folder                   string
	folderIsReceiveEncrypted bool
	evLogger                 events.Logger

	// We track the latest / highest sequence number in two ways for two
	// different reasons. Initially they are the same -- the highest seen
	// sequence number reported by the other side (or zero).
	//
	// One is the highest number we've seen when iterating the database,
	// which we track for database iteration purposes. When we loop, we
	// start looking at that number plus one in the next loop. Our index
	// numbering may have holes which this will skip over.
	//
	// The other is the highest sequence we previously sent to the other
	// side, used by them for correctness checks. This one must not skip
	// holes. That is, if we iterate and find a hole, this is not
	// incremented because nothing was sent to the other side.
	localPrevSequence int64 // the highest sequence number we've seen in our FileInfos
	sentPrevSequence  int64 // the highest sequence number we've sent to the peer

	cond   *sync.Cond
	paused bool
	sdb    db.DB
	runner service
}

func newIndexHandler(conn protocol.Connection, downloads *deviceDownloadState, folder config.FolderConfiguration, sdb db.DB, runner service, startInfo *clusterConfigDeviceInfo, evLogger events.Logger) (*indexHandler, error) {
	myIndexID, err := sdb.GetIndexID(folder.ID, protocol.LocalDeviceID)
	if err != nil {
		return nil, err
	}
	mySequence, err := sdb.GetDeviceSequence(folder.ID, protocol.LocalDeviceID)
	if err != nil {
		return nil, err
	}
	var startSequence int64

	// This is the other side's description of what it knows
	// about us. Let's check to see if we can start sending index
	// updates directly or need to send the index from start...

	if startInfo.local.IndexID == myIndexID {
		// They say they've seen our index ID before, so we can
		// send a delta update only.
		if startInfo.local.MaxSequence > mySequence {
			// Safety check. They claim to have more or newer
			// index data than we have - either we have lost
			// index data, or reset the index without resetting
			// the IndexID, or something else weird has
			// happened. We send a full index to reset the
			// situation.
			l.Infof("Device %v folder %s is delta index compatible, but seems out of sync with reality", conn.DeviceID().Short(), folder.Description())
			startSequence = 0
		} else {
			l.Debugf("Device %v folder %s is delta index compatible (mlv=%d)", conn.DeviceID().Short(), folder.Description(), startInfo.local.MaxSequence)
			startSequence = startInfo.local.MaxSequence
		}
	} else if startInfo.local.IndexID != 0 {
		// They say they've seen an index ID from us, but it's
		// not the right one. Either they are confused or we
		// must have reset our database since last talking to
		// them. We'll start with a full index transfer.
		l.Infof("Device %v folder %s has mismatching index ID for us (%v != %v)", conn.DeviceID().Short(), folder.Description(), startInfo.local.IndexID, myIndexID)
		startSequence = 0
	} else {
		l.Debugf("Device %v folder %s has no index ID for us", conn.DeviceID().Short(), folder.Description())
	}

	// This is the other side's description of themselves. We
	// check to see that it matches the IndexID we have on file,
	// otherwise we drop our old index data and expect to get a
	// completely new set.

	theirIndexID, _ := sdb.GetIndexID(folder.ID, conn.DeviceID())
	if startInfo.remote.IndexID == 0 {
		// They're not announcing an index ID. This means they
		// do not support delta indexes and we should clear any
		// information we have from them before accepting their
		// index, which will presumably be a full index.
		l.Debugf("Device %v folder %s does not announce an index ID", conn.DeviceID().Short(), folder.Description())
		sdb.DropAllFiles(folder.ID, conn.DeviceID())
	} else if startInfo.remote.IndexID != theirIndexID {
		// The index ID we have on file is not what they're
		// announcing. They must have reset their database and
		// will probably send us a full index. We drop any
		// information we have and remember this new index ID
		// instead.
		l.Infof("Device %v folder %s has a new index ID (%v)", conn.DeviceID().Short(), folder.Description(), startInfo.remote.IndexID)
		sdb.DropAllFiles(folder.ID, conn.DeviceID())
		sdb.SetIndexID(folder.ID, conn.DeviceID(), startInfo.remote.IndexID)
	}

	return &indexHandler{
		conn:                     conn,
		downloads:                downloads,
		folder:                   folder.ID,
		folderIsReceiveEncrypted: folder.Type == config.FolderTypeReceiveEncrypted,
		localPrevSequence:        startSequence,
		sentPrevSequence:         startSequence,
		evLogger:                 evLogger,
		sdb:                      sdb,
		runner:                   runner,
		cond:                     sync.NewCond(new(sync.Mutex)),
	}, nil
}
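
// Pausing, resuming and context cancellation are all coordinated through
// s.cond: pause(), resume() and the watcher goroutine in Serve() each call
// Broadcast(), so a goroutine blocked in waitWhilePaused wakes up and
// re-checks both the paused flag and the context.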

// waitWhilePaused waits for the handler to resume.
func (s *indexHandler) waitWhilePaused(ctx context.Context) error {
	s.cond.L.Lock()
	defer s.cond.L.Unlock()
	for s.paused {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			s.cond.Wait()
		}
	}
	return nil
}
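
// Serve sends indexes to the peer: first one unconditional send (a full
// index or a delta from the negotiated start sequence), then a new batch
// whenever the local sequence for this folder advances beyond what has
// already been sent.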
func (s *indexHandler) Serve(ctx context.Context) (err error) {
	l.Debugf("Starting index handler for %s to %s at %s (localPrevSequence=%d)", s.folder, s.conn.DeviceID().Short(), s.conn, s.localPrevSequence)
	stop := make(chan struct{})
	defer func() {
		err = svcutil.NoRestartErr(err)
		l.Debugf("Exiting index handler for %s to %s at %s: %v", s.folder, s.conn.DeviceID().Short(), s.conn, err)
		close(stop)
	}()

	// Broadcast the pause cond when the context quits
	go func() {
		select {
		case <-ctx.Done():
			s.cond.Broadcast()
		case <-stop:
		}
	}()

	// We need to send one index, regardless of whether there is something to send or not
	if err := s.waitWhilePaused(ctx); err != nil {
		return err
	}
	err = s.sendIndexTo(ctx)

	// Subscribe to LocalIndexUpdated (we have new information to send) and
	// DeviceDisconnected (it might be us who disconnected, so we should
	// exit).
	sub := s.evLogger.Subscribe(events.LocalIndexUpdated | events.DeviceDisconnected)
	defer sub.Unsubscribe()

	evChan := sub.C()
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()

	for err == nil {
		if err := s.waitWhilePaused(ctx); err != nil {
			return err
		}

		// While we have sent a sequence at least equal to the one
		// currently in the database, wait for the local index to update. The
		// local index may update for other folders than the one we are
		// sending for.
		seq, err := s.sdb.GetDeviceSequence(s.folder, protocol.LocalDeviceID)
		if err != nil {
			return err
		}
		if seq <= s.localPrevSequence {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-evChan:
			case <-ticker.C:
			}
			continue
		}

		err = s.sendIndexTo(ctx)

		// Wait a short amount of time before entering the next loop. If there
		// are continuous changes happening to the local index, this gives us
		// time to batch them up a little.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(250 * time.Millisecond):
		}
	}

	return err
}

// resume might be called because the folder was actually resumed, or just
// because the folder config changed (and thus the runner).
func (s *indexHandler) resume(runner service) {
	s.cond.L.Lock()
	s.paused = false
	s.runner = runner
	s.cond.Broadcast()
	s.cond.L.Unlock()
}
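
// pause stops index sending until resume is called. Pausing a handler that
// is already paused is unexpected and is reported as a failure event.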
func (s *indexHandler) pause() {
	s.cond.L.Lock()
	if s.paused {
		s.evLogger.Log(events.Failure, "index handler got paused while already paused")
	}
	s.paused = true
	s.runner = nil
	s.cond.Broadcast()
	s.cond.L.Unlock()
}
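
// On the wire, the first batch sent after connection start goes out as an
// Index message; every following batch is an IndexUpdate whose PrevSequence
// is the last sequence previously sent, which lets the other side detect
// missed updates.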

// sendIndexTo sends file infos with a sequence number higher than
// localPrevSequence, in batches, and updates localPrevSequence and
// sentPrevSequence as it goes.
func (s *indexHandler) sendIndexTo(ctx context.Context) error {
	initial := s.localPrevSequence == 0
	batch := NewFileInfoBatch(nil)
	var batchError error
	batch.SetFlushFunc(func(fs []protocol.FileInfo) error {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		if len(fs) == 0 {
			// can't happen, flush is not called with an empty batch
			panic("bug: flush called with empty batch (race condition?)")
		}
		if batchError != nil {
			// can't happen, once an error is returned the index sender exits
			panic(fmt.Sprintf("bug: once failed it should stay failed (%v)", batchError))
		}
		l.Debugf("%v: Sending %d files (<%d bytes)", s, len(fs), batch.Size())
		lastSequence := fs[len(fs)-1].Sequence
		var err error
		if initial {
			initial = false
			err = s.conn.Index(ctx, &protocol.Index{
				Folder:       s.folder,
				Files:        fs,
				LastSequence: lastSequence,
			})
		} else {
			err = s.conn.IndexUpdate(ctx, &protocol.IndexUpdate{
				Folder:       s.folder,
				Files:        fs,
				PrevSequence: s.sentPrevSequence,
				LastSequence: lastSequence,
			})
		}
		if err != nil {
			batchError = err
			return err
		}
		s.sentPrevSequence = lastSequence
		return nil
	})

	var f protocol.FileInfo
	previousWasDelete := false
	for fi, err := range itererr.Zip(s.sdb.AllLocalFilesBySequence(s.folder, protocol.LocalDeviceID, s.localPrevSequence+1, 5000)) {
		if err != nil {
			return err
		}
		// This is to make sure that renames (which are an add followed by a delete) land in the same batch.
		// Even if the batch is full, we allow a last delete to slip in; we do this by making sure that
		// the batch ends with a non-delete, or that the last item in the batch is already a delete.
		if batch.Full() && (!fi.IsDeleted() || previousWasDelete) {
			break
		}
		if fi.SequenceNo() < s.localPrevSequence+1 {
			s.logSequenceAnomaly("database returned sequence lower than requested", map[string]any{
				"sequence": fi.SequenceNo(),
				"start":    s.localPrevSequence + 1,
			})
			return errors.New("database misbehaved")
		}
		if f.Sequence > 0 && fi.SequenceNo() <= f.Sequence {
			s.logSequenceAnomaly("database returned non-increasing sequence", map[string]any{
				"sequence": fi.SequenceNo(),
				"start":    s.localPrevSequence + 1,
				"previous": f.Sequence,
			})
			return errors.New("database misbehaved")
		}
		f = fi
		s.localPrevSequence = f.Sequence

		// If this is a folder receiving encrypted files only, we
		// mustn't ever send locally changed file infos. Those aren't
		// encrypted and thus would be a protocol error at the remote.
		if s.folderIsReceiveEncrypted && fi.IsReceiveOnlyChanged() {
			continue
		}

		f = prepareFileInfoForIndex(f)
		previousWasDelete = f.IsDeleted()
		batch.Append(f)
	}

	return batch.Flush()
}
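
// receive handles an incoming Index or IndexUpdate batch for this folder.
// For a full Index (update == false) any previously known files from the
// device are dropped first. The claimed sequence boundaries are verified,
// with anomalies logged as failure events rather than rejecting the batch;
// local-only attributes are cleared, the files are stored in the database
// and the result is announced as a RemoteIndexUpdated event.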
func (s *indexHandler) receive(fs []protocol.FileInfo, update bool, op string, prevSequence, lastSequence int64) error {
	deviceID := s.conn.DeviceID()

	s.cond.L.Lock()
	paused := s.paused
	runner := s.runner
	s.cond.L.Unlock()

	if paused {
		l.Infof("%v for paused folder %q", op, s.folder)
		return fmt.Errorf("%v: %w", s.folder, ErrFolderPaused)
	}

	defer runner.SchedulePull()

	s.downloads.Update(s.folder, makeForgetUpdate(fs))

	if !update {
		if err := s.sdb.DropAllFiles(s.folder, deviceID); err != nil {
			return err
		}
	}

	l.Debugf("Received %d files for %s from %s, prevSeq=%d, lastSeq=%d", len(fs), s.folder, deviceID.Short(), prevSequence, lastSequence)

	// Verify that the previous sequence number matches what we expected
	exp, err := s.sdb.GetDeviceSequence(s.folder, deviceID)
	if err != nil {
		return err
	}
	if prevSequence > 0 && prevSequence != exp {
		s.logSequenceAnomaly("index update with unexpected sequence", map[string]any{
			"prevSeq":      prevSequence,
			"lastSeq":      lastSequence,
			"batch":        len(fs),
			"expectedPrev": exp,
		})
	}

	for i := range fs {
		// Verify index in relation to the claimed sequence boundaries
		if fs[i].Sequence < prevSequence {
			s.logSequenceAnomaly("file with sequence before prevSequence", map[string]any{
				"prevSeq": prevSequence,
				"lastSeq": lastSequence,
				"batch":   len(fs),
				"seenSeq": fs[i].Sequence,
				"atIndex": i,
			})
		}
		if lastSequence > 0 && fs[i].Sequence > lastSequence {
			s.logSequenceAnomaly("file with sequence after lastSequence", map[string]any{
				"prevSeq": prevSequence,
				"lastSeq": lastSequence,
				"batch":   len(fs),
				"seenSeq": fs[i].Sequence,
				"atIndex": i,
			})
		}
		if i > 0 && fs[i].Sequence <= fs[i-1].Sequence {
			s.logSequenceAnomaly("index update with non-increasing sequence", map[string]any{
				"prevSeq":      prevSequence,
				"lastSeq":      lastSequence,
				"batch":        len(fs),
				"seenSeq":      fs[i].Sequence,
				"atIndex":      i,
				"precedingSeq": fs[i-1].Sequence,
			})
		}

		// The local attributes should never be transmitted over the wire.
		// Make sure they look like they weren't.
		fs[i].LocalFlags = 0
		fs[i].VersionHash = nil
	}

	// Verify the claimed last sequence number
	if lastSequence > 0 && len(fs) > 0 && lastSequence != fs[len(fs)-1].Sequence {
		s.logSequenceAnomaly("index update with unexpected last sequence", map[string]any{
			"prevSeq": prevSequence,
			"lastSeq": lastSequence,
			"batch":   len(fs),
			"seenSeq": fs[len(fs)-1].Sequence,
		})
	}

	if err := s.sdb.Update(s.folder, deviceID, fs); err != nil {
		return err
	}

	seq, err := s.sdb.GetDeviceSequence(s.folder, deviceID)
	if err != nil {
		return err
	}

	// Check that the sequence we get back is what we put in...
	if lastSequence > 0 && len(fs) > 0 && seq != lastSequence {
		s.logSequenceAnomaly("unexpected sequence after update", map[string]any{
			"prevSeq":     prevSequence,
			"lastSeq":     lastSequence,
			"batch":       len(fs),
			"seenSeq":     fs[len(fs)-1].Sequence,
			"returnedSeq": seq,
		})
	}

	s.evLogger.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"device":   deviceID.String(),
		"folder":   s.folder,
		"items":    len(fs),
		"sequence": seq,
		"version":  seq, // legacy for sequence
	})

	return nil
}
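
// logSequenceAnomaly reports a suspected sequence inconsistency as a failure
// event for usage reporting, with the extra values stringified.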
func (s *indexHandler) logSequenceAnomaly(msg string, extra map[string]any) {
	extraStrs := make(map[string]string, len(extra))
	for k, v := range extra {
		extraStrs[k] = fmt.Sprint(v)
	}
	s.evLogger.Log(events.Failure, ur.FailureData{
		Description: msg,
		Extra:       extraStrs,
	})
}
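
// prepareFileInfoForIndex returns a copy of f adjusted for sending to a
// remote device, stripping attributes that are local-only.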
func prepareFileInfoForIndex(f protocol.FileInfo) protocol.FileInfo {
	// Mark the file as invalid if any of the local bad stuff flags are set.
	f.RawInvalid = f.IsInvalid()
	// If the file is marked LocalReceive (i.e., changed locally on a
	// receive only folder) we do not want it to ever become the
	// globally best version, invalid or not.
	if f.IsReceiveOnlyChanged() {
		f.Version = protocol.Vector{}
	}
	// The trailer with the encrypted fileinfo is device local, don't send info
	// about that to remotes
	f.Size -= int64(f.EncryptionTrailerSize)
	f.EncryptionTrailerSize = 0
	// never sent externally
	f.LocalFlags = 0
	f.VersionHash = nil
	f.InodeChangeNs = 0
	return f
}

func (s *indexHandler) String() string {
	return fmt.Sprintf("indexHandler@%p for %s to %s at %s", s, s.folder, s.conn.DeviceID().Short(), s.conn)
}
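
// indexHandlerRegistry manages the index handlers for a single connection,
// one per shared folder. Start infos received while a folder has no
// registered state yet are kept until the folder is registered or resumed.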
type indexHandlerRegistry struct {
	evLogger      events.Logger
	conn          protocol.Connection
	sdb           db.DB
	downloads     *deviceDownloadState
	indexHandlers *serviceMap[string, *indexHandler]
	startInfos    map[string]*clusterConfigDeviceInfo
	folderStates  map[string]*indexHandlerFolderState
	mut           sync.Mutex
}

type indexHandlerFolderState struct {
	cfg    config.FolderConfiguration
	runner service
}

func newIndexHandlerRegistry(conn protocol.Connection, sdb db.DB, downloads *deviceDownloadState, evLogger events.Logger) *indexHandlerRegistry {
	r := &indexHandlerRegistry{
		evLogger:      evLogger,
		conn:          conn,
		sdb:           sdb,
		downloads:     downloads,
		indexHandlers: newServiceMap[string, *indexHandler](evLogger),
		startInfos:    make(map[string]*clusterConfigDeviceInfo),
		folderStates:  make(map[string]*indexHandlerFolderState),
		mut:           sync.Mutex{},
	}
	return r
}

func (r *indexHandlerRegistry) String() string {
	return fmt.Sprintf("indexHandlerRegistry/%v", r.conn.DeviceID().Short())
}

func (r *indexHandlerRegistry) Serve(ctx context.Context) error {
	// Running the index handler registry means running the individual index
	// handler children.
	return r.indexHandlers.Serve(ctx)
}
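
// startLocked (re)creates and registers the index handler for the folder and
// schedules a pull; the caller must hold r.mut.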
func (r *indexHandlerRegistry) startLocked(folder config.FolderConfiguration, runner service, startInfo *clusterConfigDeviceInfo) error {
	r.indexHandlers.RemoveAndWait(folder.ID, 0)
	delete(r.startInfos, folder.ID)

	is, err := newIndexHandler(r.conn, r.downloads, folder, r.sdb, runner, startInfo, r.evLogger)
	if err != nil {
		return err
	}
	r.indexHandlers.Add(folder.ID, is)

	// This new connection might help us get in sync.
	runner.SchedulePull()

	return nil
}

// AddIndexInfo starts an index handler for the given folder, unless it is
// paused. If it is paused, the given startInfo is stored to start the sender
// once the folder is resumed.
// If an index handler is already running, it will be stopped first.
func (r *indexHandlerRegistry) AddIndexInfo(folder string, startInfo *clusterConfigDeviceInfo) {
	r.mut.Lock()
	defer r.mut.Unlock()

	if r.indexHandlers.RemoveAndWait(folder, 0) == nil {
		l.Debugf("Removed index sender for device %v and folder %v due to added pending", r.conn.DeviceID().Short(), folder)
	}
	folderState, ok := r.folderStates[folder]
	if !ok {
		l.Debugf("Pending index handler for device %v and folder %v", r.conn.DeviceID().Short(), folder)
		r.startInfos[folder] = startInfo
		return
	}
	_ = r.startLocked(folderState.cfg, folderState.runner, startInfo) // XXX error handling...
}

// Remove stops a running index handler or removes one pending to be started.
// It is a noop if the folder isn't known.
func (r *indexHandlerRegistry) Remove(folder string) {
	r.mut.Lock()
	defer r.mut.Unlock()

	l.Debugf("Removing index handler for device %v and folder %v", r.conn.DeviceID().Short(), folder)
	r.indexHandlers.RemoveAndWait(folder, 0)
	delete(r.startInfos, folder)
	l.Debugf("Removed index handler for device %v and folder %v", r.conn.DeviceID().Short(), folder)
}

// RemoveAllExcept stops all running index handlers and removes those pending
// to be started, except the mentioned ones.
// It is a noop if the folder isn't known.
func (r *indexHandlerRegistry) RemoveAllExcept(except map[string]remoteFolderState) {
	r.mut.Lock()
	defer r.mut.Unlock()

	r.indexHandlers.Each(func(folder string, is *indexHandler) error {
		if _, ok := except[folder]; !ok {
			r.indexHandlers.RemoveAndWait(folder, 0)
			l.Debugf("Removed index handler for device %v and folder %v (removeAllExcept)", r.conn.DeviceID().Short(), folder)
		}
		return nil
	})
	for folder := range r.startInfos {
		if _, ok := except[folder]; !ok {
			delete(r.startInfos, folder)
			l.Debugf("Removed pending index handler for device %v and folder %v (removeAllExcept)", r.conn.DeviceID().Short(), folder)
		}
	}
}

// RegisterFolderState must be called whenever something about the folder
// changes. The exception is when the folder is removed entirely; then call
// Remove. The runner argument may be nil if the given folder is paused.
func (r *indexHandlerRegistry) RegisterFolderState(folder config.FolderConfiguration, runner service) {
	if !folder.SharedWith(r.conn.DeviceID()) {
		r.Remove(folder.ID)
		return
	}

	r.mut.Lock()
	if folder.Paused {
		r.folderPausedLocked(folder.ID)
	} else {
		r.folderRunningLocked(folder, runner)
	}
	r.mut.Unlock()
}

// folderPausedLocked stops a running index handler.
// It is a noop if the folder isn't known or has not been started yet.
func (r *indexHandlerRegistry) folderPausedLocked(folder string) {
	l.Debugf("Pausing index handler for device %v and folder %v", r.conn.DeviceID().Short(), folder)
	delete(r.folderStates, folder)
	if is, ok := r.indexHandlers.Get(folder); ok {
		is.pause()
		l.Debugf("Paused index handler for device %v and folder %v", r.conn.DeviceID().Short(), folder)
	} else {
		l.Debugf("No index handler for device %v and folder %v to pause", r.conn.DeviceID().Short(), folder)
	}
}

// folderRunningLocked resumes an already running index handler or starts it,
// if it was added while paused.
// It is a noop if the folder isn't known.
func (r *indexHandlerRegistry) folderRunningLocked(folder config.FolderConfiguration, runner service) {
	r.folderStates[folder.ID] = &indexHandlerFolderState{
		cfg:    folder,
		runner: runner,
	}

	is, isOk := r.indexHandlers.Get(folder.ID)
	if info, ok := r.startInfos[folder.ID]; ok {
		if isOk {
			r.indexHandlers.RemoveAndWait(folder.ID, 0)
			l.Debugf("Removed index handler for device %v and folder %v in resume", r.conn.DeviceID().Short(), folder.ID)
		}
		_ = r.startLocked(folder, runner, info) // XXX error handling...
		delete(r.startInfos, folder.ID)
		l.Debugf("Started index handler for device %v and folder %v in resume", r.conn.DeviceID().Short(), folder.ID)
	} else if isOk {
		l.Debugf("Resuming index handler for device %v and folder %v", r.conn.DeviceID().Short(), folder.ID)
		is.resume(runner)
	} else {
		l.Debugf("Not resuming index handler for device %v and folder %v as none is paused and there is no start info", r.conn.DeviceID().Short(), folder.ID)
	}
}

func (r *indexHandlerRegistry) ReceiveIndex(folder string, fs []protocol.FileInfo, update bool, op string, prevSequence, lastSequence int64) error {
	r.mut.Lock()
	defer r.mut.Unlock()
	is, isOk := r.indexHandlers.Get(folder)
	if !isOk {
		l.Infof("%v for nonexistent or paused folder %q", op, folder)
		return fmt.Errorf("%s: %w", folder, ErrFolderMissing)
	}
	return is.receive(fs, update, op, prevSequence, lastSequence)
}

// makeForgetUpdate takes an index update and constructs a download progress
// update causing us to forget any progress for files which we've just been
// sent.
func makeForgetUpdate(files []protocol.FileInfo) []protocol.FileDownloadProgressUpdate {
	updates := make([]protocol.FileDownloadProgressUpdate, 0, len(files))
	for _, file := range files {
		if file.IsSymlink() || file.IsDirectory() || file.IsDeleted() {
			continue
		}
		updates = append(updates, protocol.FileDownloadProgressUpdate{
			Name:       file.Name,
			Version:    file.Version,
			UpdateType: protocol.FileDownloadProgressUpdateTypeForget,
		})
	}
	return updates
}