indexhandler.go

// Copyright (C) 2020 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package model

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/thejerf/suture/v4"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/svcutil"
)

type indexHandler struct {
	conn protocol.Connection
	downloads *deviceDownloadState
	folder string
	folderIsReceiveEncrypted bool
	prevSequence int64
	evLogger events.Logger
	token suture.ServiceToken

	cond *sync.Cond
	paused bool
	fset *db.FileSet
	runner service
}
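
// newIndexHandler returns a handler for sending and receiving index data for
// the given folder over the given connection. startInfo is the remote
// device's description of the folder from its cluster config; it determines
// whether we can resume sending delta index updates or must start with a full
// index, and whether the remote's existing index data in our database needs
// to be dropped.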
func newIndexHandler(conn protocol.Connection, downloads *deviceDownloadState, folder config.FolderConfiguration, fset *db.FileSet, runner service, startInfo *clusterConfigDeviceInfo, evLogger events.Logger) *indexHandler {
	myIndexID := fset.IndexID(protocol.LocalDeviceID)
	mySequence := fset.Sequence(protocol.LocalDeviceID)
	var startSequence int64
	// This is the other side's description of what it knows
	// about us. Let's check to see if we can start sending index
	// updates directly or need to send the index from the start.
	if startInfo.local.IndexID == myIndexID {
		// They say they've seen our index ID before, so we can
		// send a delta update only.
		if startInfo.local.MaxSequence > mySequence {
			// Safety check. They claim to have more or newer
			// index data than we have - either we have lost
			// index data, or reset the index without resetting
			// the IndexID, or something else weird has
			// happened. We send a full index to reset the
			// situation.
			l.Infof("Device %v folder %s is delta index compatible, but seems out of sync with reality", conn.ID().Short(), folder.Description())
			startSequence = 0
		} else {
			l.Debugf("Device %v folder %s is delta index compatible (mlv=%d)", conn.ID().Short(), folder.Description(), startInfo.local.MaxSequence)
			startSequence = startInfo.local.MaxSequence
		}
	} else if startInfo.local.IndexID != 0 {
		// They say they've seen an index ID from us, but it's
		// not the right one. Either they are confused or we
		// must have reset our database since last talking to
		// them. We'll start with a full index transfer.
		l.Infof("Device %v folder %s has mismatching index ID for us (%v != %v)", conn.ID().Short(), folder.Description(), startInfo.local.IndexID, myIndexID)
		startSequence = 0
	} else {
		l.Debugf("Device %v folder %s has no index ID for us", conn.ID().Short(), folder.Description())
	}

	// This is the other side's description of themselves. We
	// check to see that it matches the IndexID we have on file,
	// otherwise we drop our old index data and expect to get a
	// completely new set.
	theirIndexID := fset.IndexID(conn.ID())
	if startInfo.remote.IndexID == 0 {
		// They're not announcing an index ID. This means they
		// do not support delta indexes and we should clear any
		// information we have from them before accepting their
		// index, which will presumably be a full index.
		l.Debugf("Device %v folder %s does not announce an index ID", conn.ID().Short(), folder.Description())
		fset.Drop(conn.ID())
	} else if startInfo.remote.IndexID != theirIndexID {
		// The index ID we have on file is not what they're
		// announcing. They must have reset their database and
		// will probably send us a full index. We drop any
		// information we have and remember this new index ID
		// instead.
		l.Infof("Device %v folder %s has a new index ID (%v)", conn.ID().Short(), folder.Description(), startInfo.remote.IndexID)
		fset.Drop(conn.ID())
		fset.SetIndexID(conn.ID(), startInfo.remote.IndexID)
	}

	return &indexHandler{
		conn: conn,
		downloads: downloads,
		folder: folder.ID,
		folderIsReceiveEncrypted: folder.Type == config.FolderTypeReceiveEncrypted,
		prevSequence: startSequence,
		evLogger: evLogger,
		fset: fset,
		runner: runner,
		cond: sync.NewCond(new(sync.Mutex)),
	}
}

// waitForFileset waits for the handler to resume and fetches the current fileset.
func (s *indexHandler) waitForFileset(ctx context.Context) (*db.FileSet, error) {
	s.cond.L.Lock()
	defer s.cond.L.Unlock()
	for s.paused {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
			s.cond.Wait()
		}
	}
	return s.fset, nil
}
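
// Serve implements suture.Service. It sends an initial index to the remote
// device, then keeps watching for local index updates and sends deltas until
// the context is cancelled. It also broadcasts the pause cond on context
// cancellation, so that a paused handler waiting in waitForFileset can exit.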
func (s *indexHandler) Serve(ctx context.Context) (err error) {
	l.Debugf("Starting index handler for %s to %s at %s (slv=%d)", s.folder, s.conn.ID(), s.conn, s.prevSequence)
	stop := make(chan struct{})
	defer func() {
		err = svcutil.NoRestartErr(err)
		l.Debugf("Exiting index handler for %s to %s at %s: %v", s.folder, s.conn.ID(), s.conn, err)
		close(stop)
	}()

	// Broadcast the pause cond when the context quits
	go func() {
		select {
		case <-ctx.Done():
			s.cond.Broadcast()
		case <-stop:
		}
	}()

	// We need to send one index, regardless of whether there is something to send or not
	fset, err := s.waitForFileset(ctx)
	if err != nil {
		return err
	}
	err = s.sendIndexTo(ctx, fset)

	// Subscribe to LocalIndexUpdated (we have new information to send) and
	// DeviceDisconnected (it might be us who disconnected, so we should
	// exit).
	sub := s.evLogger.Subscribe(events.LocalIndexUpdated | events.DeviceDisconnected)
	defer sub.Unsubscribe()
	evChan := sub.C()
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()

	for err == nil {
		fset, err = s.waitForFileset(ctx)
		if err != nil {
			return err
		}

		// While we have sent a sequence at least equal to the one
		// currently in the database, wait for the local index to update. The
		// local index may update for other folders than the one we are
		// sending for.
		if fset.Sequence(protocol.LocalDeviceID) <= s.prevSequence {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-evChan:
			case <-ticker.C:
			}
			continue
		}

		err = s.sendIndexTo(ctx, fset)

		// Wait a short amount of time before entering the next loop. If there
		// are continuous changes happening to the local index, this gives us
		// time to batch them up a little.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(250 * time.Millisecond):
		}
	}

	return err
}

// resume might be called because the folder was actually resumed, or just
// because the folder config changed (and thus the runner and potentially fset).
func (s *indexHandler) resume(fset *db.FileSet, runner service) {
	s.cond.L.Lock()
	s.paused = false
	s.fset = fset
	s.runner = runner
	s.cond.Broadcast()
	s.cond.L.Unlock()
}
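
// pause stops the handler from sending or receiving index data until resume
// is called. It drops the references to the fileset and runner, and logs a
// failure event if the handler was already paused.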
func (s *indexHandler) pause() {
	s.cond.L.Lock()
	if s.paused {
		s.evLogger.Log(events.Failure, "index handler got paused while already paused")
	}
	s.paused = true
	s.fset = nil
	s.runner = nil
	s.cond.Broadcast()
	s.cond.L.Unlock()
}

// sendIndexTo sends file infos with a sequence number higher than prevSequence
// and updates prevSequence to the highest sequence number it processed.
func (s *indexHandler) sendIndexTo(ctx context.Context, fset *db.FileSet) error {
	initial := s.prevSequence == 0
	batch := db.NewFileInfoBatch(nil)
	batch.SetFlushFunc(func(fs []protocol.FileInfo) error {
		l.Debugf("%v: Sending %d files (<%d bytes)", s, len(fs), batch.Size())
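		// The first batch for a peer we have no previous sequence for
		// (prevSequence == 0) goes out as a full Index message; every batch
		// after that is a delta IndexUpdate.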
		if initial {
			initial = false
			return s.conn.Index(ctx, s.folder, fs)
		}
		return s.conn.IndexUpdate(ctx, s.folder, fs)
	})

	var err error
	var f protocol.FileInfo
	snap, err := fset.Snapshot()
	if err != nil {
		return svcutil.AsFatalErr(err, svcutil.ExitError)
	}
	defer snap.Release()
	previousWasDelete := false
	snap.WithHaveSequence(s.prevSequence+1, func(fi protocol.FileIntf) bool {
		// This is to make sure that renames (which is an add followed by a delete) land in the same batch.
		// Even if the batch is full, we allow a last delete to slip in: we do this by making sure that
		// the batch ends with a non-delete, or that the last item in the batch is already a delete.
		if batch.Full() && (!fi.IsDeleted() || previousWasDelete) {
			if err = batch.Flush(); err != nil {
				return false
			}
		}

		if shouldDebug() {
			if fi.SequenceNo() < s.prevSequence+1 {
				panic(fmt.Sprintln("sequence lower than requested, got:", fi.SequenceNo(), ", asked to start at:", s.prevSequence+1))
			}
		}

		if f.Sequence > 0 && fi.SequenceNo() <= f.Sequence {
			l.Warnln("Non-increasing sequence detected: Checking and repairing the db...")
			// Abort this round of index sending - the next one will pick
			// up from the last successful one with the repaired db.
			defer func() {
				if fixed, dbErr := fset.RepairSequence(); dbErr != nil {
					l.Warnln("Failed repairing sequence entries:", dbErr)
					panic("Failed repairing sequence entries")
				} else {
					s.evLogger.Log(events.Failure, "detected and repaired non-increasing sequence")
					l.Infof("Repaired %v sequence entries in database", fixed)
				}
			}()
			return false
		}

		f = fi.(protocol.FileInfo)

		// If this is a folder receiving encrypted files only, we
		// mustn't ever send locally changed file infos. Those aren't
		// encrypted and thus would be a protocol error at the remote.
		if s.folderIsReceiveEncrypted && fi.IsReceiveOnlyChanged() {
			return true
		}

		f = prepareFileInfoForIndex(f)
		previousWasDelete = f.IsDeleted()
		batch.Append(f)
		return true
	})
	if err != nil {
		return err
	}

	err = batch.Flush()

	// True if there was nothing to be sent
	if f.Sequence == 0 {
		return err
	}

	s.prevSequence = f.Sequence
	return err
}
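
// receive handles an incoming Index or IndexUpdate message (op is the name of
// the operation, used for logging). It updates the remote device's file set
// in the database, schedules a pull and emits a RemoteIndexUpdated event. It
// returns an error if the folder is paused.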
func (s *indexHandler) receive(fs []protocol.FileInfo, update bool, op string) error {
	deviceID := s.conn.ID()

	s.cond.L.Lock()
	paused := s.paused
	fset := s.fset
	runner := s.runner
	s.cond.L.Unlock()

	if paused {
		l.Infof("%v for paused folder %q", op, s.folder)
		return fmt.Errorf("%v: %w", s.folder, ErrFolderPaused)
	}

	defer runner.SchedulePull()

	s.downloads.Update(s.folder, makeForgetUpdate(fs))

	if !update {
		fset.Drop(deviceID)
	}
	for i := range fs {
		// The local attributes should never be transmitted over the wire.
		// Make sure they look like they weren't.
		fs[i].LocalFlags = 0
		fs[i].VersionHash = nil
	}
	fset.Update(deviceID, fs)

	seq := fset.Sequence(deviceID)
	s.evLogger.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"device": deviceID.String(),
		"folder": s.folder,
		"items": len(fs),
		"sequence": seq,
		"version": seq, // legacy for sequence
	})

	return nil
}
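
// prepareFileInfoForIndex returns a copy of the file info suitable for
// sending to a remote device: it marks the file as invalid if any local
// problem flags are set, clears the version vector for receive-only-changed
// files so they never become the global version, and strips fields that are
// local to this device.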
func prepareFileInfoForIndex(f protocol.FileInfo) protocol.FileInfo {
	// Mark the file as invalid if any of the local bad stuff flags are set.
	f.RawInvalid = f.IsInvalid()
	// If the file is marked LocalReceive (i.e., changed locally on a
	// receive only folder) we do not want it to ever become the
	// globally best version, invalid or not.
	if f.IsReceiveOnlyChanged() {
		f.Version = protocol.Vector{}
	}
	// The trailer with the encrypted fileinfo is device local, don't send info
	// about that to remotes
	f.Size -= int64(f.EncryptionTrailerSize)
	f.EncryptionTrailerSize = 0
	// never sent externally
	f.LocalFlags = 0
	f.VersionHash = nil
	f.InodeChangeNs = 0
	return f
}

func (s *indexHandler) String() string {
	return fmt.Sprintf("indexHandler@%p for %s to %s at %s", s, s.folder, s.conn.ID().Short(), s.conn)
}
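
// indexHandlerRegistry tracks the index handlers for a single connection, one
// per shared folder. Handlers are started, paused, resumed and removed as the
// folder and cluster config state changes.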
type indexHandlerRegistry struct {
	sup *suture.Supervisor
	evLogger events.Logger
	conn protocol.Connection
	downloads *deviceDownloadState
	indexHandlers map[string]*indexHandler
	startInfos map[string]*clusterConfigDeviceInfo
	folderStates map[string]*indexHandlerFolderState
	mut sync.Mutex
}

type indexHandlerFolderState struct {
	cfg config.FolderConfiguration
	fset *db.FileSet
	runner service
}
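
// newIndexHandlerRegistry creates a registry for the given connection and
// adds its supervisor to the parent supervisor. A small helper service
// removes the registry's supervisor from the parent again once the
// connection's closed channel is closed.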
func newIndexHandlerRegistry(conn protocol.Connection, downloads *deviceDownloadState, closed chan struct{}, parentSup *suture.Supervisor, evLogger events.Logger) *indexHandlerRegistry {
	r := &indexHandlerRegistry{
		conn: conn,
		downloads: downloads,
		evLogger: evLogger,
		indexHandlers: make(map[string]*indexHandler),
		startInfos: make(map[string]*clusterConfigDeviceInfo),
		folderStates: make(map[string]*indexHandlerFolderState),
		mut: sync.Mutex{},
	}
	r.sup = suture.New(r.String(), svcutil.SpecWithDebugLogger(l))
	ourToken := parentSup.Add(r.sup)
	r.sup.Add(svcutil.AsService(func(ctx context.Context) error {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-closed:
			parentSup.Remove(ourToken)
		}
		return nil
	}, fmt.Sprintf("%v/waitForClosed", r)))
	return r
}

func (r *indexHandlerRegistry) String() string {
	return fmt.Sprintf("indexHandlerRegistry/%v", r.conn.ID().Short())
}

func (r *indexHandlerRegistry) GetSupervisor() *suture.Supervisor {
	return r.sup
}
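
// startLocked creates a new index handler for the folder, stopping and
// replacing any existing one, and adds it to the registry's supervisor. The
// caller must hold r.mut.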
func (r *indexHandlerRegistry) startLocked(folder config.FolderConfiguration, fset *db.FileSet, runner service, startInfo *clusterConfigDeviceInfo) {
	if is, ok := r.indexHandlers[folder.ID]; ok {
		r.sup.RemoveAndWait(is.token, 0)
		delete(r.indexHandlers, folder.ID)
	}
	delete(r.startInfos, folder.ID)

	is := newIndexHandler(r.conn, r.downloads, folder, fset, runner, startInfo, r.evLogger)
	is.token = r.sup.Add(is)
	r.indexHandlers[folder.ID] = is

	// This new connection might help us get in sync.
	runner.SchedulePull()
}

// AddIndexInfo starts an index handler for the given folder, unless the
// folder is paused. If it is paused, the given startInfo is stored and the
// handler is started once the folder is resumed.
// If an index handler is already running, it is stopped first.
func (r *indexHandlerRegistry) AddIndexInfo(folder string, startInfo *clusterConfigDeviceInfo) {
	r.mut.Lock()
	defer r.mut.Unlock()

	if is, ok := r.indexHandlers[folder]; ok {
		r.sup.RemoveAndWait(is.token, 0)
		delete(r.indexHandlers, folder)
		l.Debugf("Removed index sender for device %v and folder %v due to added pending", r.conn.ID().Short(), folder)
	}
	folderState, ok := r.folderStates[folder]
	if !ok {
		l.Debugf("Pending index handler for device %v and folder %v", r.conn.ID().Short(), folder)
		r.startInfos[folder] = startInfo
		return
	}
	r.startLocked(folderState.cfg, folderState.fset, folderState.runner, startInfo)
}

// Remove stops a running index handler or removes one pending to be started.
// It is a noop if the folder isn't known.
func (r *indexHandlerRegistry) Remove(folder string) {
	r.mut.Lock()
	defer r.mut.Unlock()

	l.Debugf("Removing index handler for device %v and folder %v", r.conn.ID().Short(), folder)
	if is, ok := r.indexHandlers[folder]; ok {
		r.sup.RemoveAndWait(is.token, 0)
		delete(r.indexHandlers, folder)
	}
	delete(r.startInfos, folder)
	l.Debugf("Removed index handler for device %v and folder %v", r.conn.ID().Short(), folder)
}

// RemoveAllExcept stops all running index handlers and removes those pending
// to be started, except for the given folders.
func (r *indexHandlerRegistry) RemoveAllExcept(except map[string]remoteFolderState) {
	r.mut.Lock()
	defer r.mut.Unlock()

	for folder, is := range r.indexHandlers {
		if _, ok := except[folder]; !ok {
			r.sup.RemoveAndWait(is.token, 0)
			delete(r.indexHandlers, folder)
			l.Debugf("Removed index handler for device %v and folder %v (removeAllExcept)", r.conn.ID().Short(), folder)
		}
	}
	for folder := range r.startInfos {
		if _, ok := except[folder]; !ok {
			delete(r.startInfos, folder)
			l.Debugf("Removed pending index handler for device %v and folder %v (removeAllExcept)", r.conn.ID().Short(), folder)
		}
	}
}

// RegisterFolderState must be called whenever something about the folder
// changes. The exception is the folder being removed entirely; in that case
// call Remove. The fset and runner arguments may be nil if the given folder
// is paused.
func (r *indexHandlerRegistry) RegisterFolderState(folder config.FolderConfiguration, fset *db.FileSet, runner service) {
	if !folder.SharedWith(r.conn.ID()) {
		r.Remove(folder.ID)
		return
	}

	r.mut.Lock()
	if folder.Paused {
		r.folderPausedLocked(folder.ID)
	} else {
		r.folderRunningLocked(folder, fset, runner)
	}
	r.mut.Unlock()
}

// folderPausedLocked pauses a running index handler.
// It is a noop if the folder isn't known or has not been started yet.
func (r *indexHandlerRegistry) folderPausedLocked(folder string) {
	l.Debugf("Pausing index handler for device %v and folder %v", r.conn.ID().Short(), folder)
	delete(r.folderStates, folder)
	if is, ok := r.indexHandlers[folder]; ok {
		is.pause()
		l.Debugf("Paused index handler for device %v and folder %v", r.conn.ID().Short(), folder)
	} else {
		l.Debugf("No index handler for device %v and folder %v to pause", r.conn.ID().Short(), folder)
	}
}

// folderRunningLocked resumes an already running index handler or starts it,
// if it was added while paused.
// It is a noop if the folder isn't known.
func (r *indexHandlerRegistry) folderRunningLocked(folder config.FolderConfiguration, fset *db.FileSet, runner service) {
	r.folderStates[folder.ID] = &indexHandlerFolderState{
		cfg: folder,
		fset: fset,
		runner: runner,
	}

	is, isOk := r.indexHandlers[folder.ID]
	if info, ok := r.startInfos[folder.ID]; ok {
		if isOk {
			r.sup.RemoveAndWait(is.token, 0)
			delete(r.indexHandlers, folder.ID)
			l.Debugf("Removed index handler for device %v and folder %v in resume", r.conn.ID().Short(), folder.ID)
		}
		r.startLocked(folder, fset, runner, info)
		delete(r.startInfos, folder.ID)
		l.Debugf("Started index handler for device %v and folder %v in resume", r.conn.ID().Short(), folder.ID)
	} else if isOk {
		l.Debugf("Resuming index handler for device %v and folder %v", r.conn.ID().Short(), folder)
		is.resume(fset, runner)
	} else {
		l.Debugf("Not resuming index handler for device %v and folder %v as none is paused and there is no start info", r.conn.ID().Short(), folder.ID)
	}
}
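
// ReceiveIndex dispatches an incoming Index or IndexUpdate message to the
// handler for the given folder, returning ErrFolderMissing if no handler is
// running for it.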
func (r *indexHandlerRegistry) ReceiveIndex(folder string, fs []protocol.FileInfo, update bool, op string) error {
	r.mut.Lock()
	defer r.mut.Unlock()
	is, isOk := r.indexHandlers[folder]
	if !isOk {
		l.Infof("%v for nonexistent or paused folder %q", op, folder)
		return ErrFolderMissing
	}
	return is.receive(fs, update, op)
}

// makeForgetUpdate takes an index update and constructs a download progress
// update that forgets any progress for the files we've just been sent.
func makeForgetUpdate(files []protocol.FileInfo) []protocol.FileDownloadProgressUpdate {
	updates := make([]protocol.FileDownloadProgressUpdate, 0, len(files))
	for _, file := range files {
		if file.IsSymlink() || file.IsDirectory() || file.IsDeleted() {
			continue
		}
		updates = append(updates, protocol.FileDownloadProgressUpdate{
			Name: file.Name,
			Version: file.Version,
			UpdateType: protocol.FileDownloadProgressUpdateTypeForget,
		})
	}
	return updates
}