folder.go 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013
  1. // Copyright (C) 2014 The Syncthing Authors.
  2. //
  3. // This Source Code Form is subject to the terms of the Mozilla Public
  4. // License, v. 2.0. If a copy of the MPL was not distributed with this file,
  5. // You can obtain one at https://mozilla.org/MPL/2.0/.
  6. package model
  7. import (
  8. "context"
  9. "fmt"
  10. "math/rand"
  11. "path/filepath"
  12. "sort"
  13. "sync/atomic"
  14. "time"
  15. "github.com/pkg/errors"
  16. "github.com/syncthing/syncthing/lib/config"
  17. "github.com/syncthing/syncthing/lib/db"
  18. "github.com/syncthing/syncthing/lib/events"
  19. "github.com/syncthing/syncthing/lib/fs"
  20. "github.com/syncthing/syncthing/lib/ignore"
  21. "github.com/syncthing/syncthing/lib/locations"
  22. "github.com/syncthing/syncthing/lib/osutil"
  23. "github.com/syncthing/syncthing/lib/protocol"
  24. "github.com/syncthing/syncthing/lib/scanner"
  25. "github.com/syncthing/syncthing/lib/stats"
  26. "github.com/syncthing/syncthing/lib/sync"
  27. "github.com/syncthing/syncthing/lib/watchaggregator"
  28. "github.com/thejerf/suture"
  29. )
// folder implements the behaviour shared by all folder types: scan
// scheduling, pull scheduling, filesystem watching and error bookkeeping.
// Concrete folder types embed it and supply the puller implementation.
type folder struct {
	suture.Service
	stateTracker
	config.FolderConfiguration
	*stats.FolderStatisticsReference
	ioLimiter *byteSemaphore

	localFlags uint32

	model   *model
	shortID protocol.ShortID
	fset    *db.FileSet
	ignores *ignore.Matcher
	ctx     context.Context // set in serve; cancellation stops the folder

	// Scan scheduling and scan-error tracking.
	scanInterval        time.Duration
	scanTimer           *time.Timer
	scanDelay           chan time.Duration
	initialScanFinished chan struct{} // closed after the first scan completes
	scanErrors          []FileError
	scanErrorsMut       sync.Mutex // guards scanErrors

	pullScheduled chan struct{} // 1-buffered; see SchedulePull

	doInSyncChan chan syncRequest // see doInSync

	// Forced rescans requested via ScheduleForceRescan.
	forcedRescanRequested chan struct{} // 1-buffered
	forcedRescanPaths     map[string]struct{}
	forcedRescanPathsMut  sync.Mutex // guards forcedRescanPaths

	// Filesystem watcher state.
	watchCancel      context.CancelFunc
	watchChan        chan []string
	restartWatchChan chan struct{} // 1-buffered; see scheduleWatchRestart
	watchErr         error
	watchMut         sync.Mutex // guards watchCancel, watchChan and watchErr

	puller puller
}
// syncRequest carries a function to be executed inside the folder's serve
// loop, plus a channel on which the function's result is returned.
type syncRequest struct {
	fn  func() error
	err chan error
}
// puller is the folder-type specific pulling mechanism.
type puller interface {
	pull() bool // true when successful and should not be retried
}
  67. func newFolder(model *model, fset *db.FileSet, ignores *ignore.Matcher, cfg config.FolderConfiguration, evLogger events.Logger, ioLimiter *byteSemaphore) folder {
  68. return folder{
  69. stateTracker: newStateTracker(cfg.ID, evLogger),
  70. FolderConfiguration: cfg,
  71. FolderStatisticsReference: stats.NewFolderStatisticsReference(model.db, cfg.ID),
  72. ioLimiter: ioLimiter,
  73. model: model,
  74. shortID: model.shortID,
  75. fset: fset,
  76. ignores: ignores,
  77. scanInterval: time.Duration(cfg.RescanIntervalS) * time.Second,
  78. scanTimer: time.NewTimer(time.Millisecond), // The first scan should be done immediately.
  79. scanDelay: make(chan time.Duration),
  80. initialScanFinished: make(chan struct{}),
  81. scanErrorsMut: sync.NewMutex(),
  82. pullScheduled: make(chan struct{}, 1), // This needs to be 1-buffered so that we queue a pull if we're busy when it comes.
  83. doInSyncChan: make(chan syncRequest),
  84. forcedRescanRequested: make(chan struct{}, 1),
  85. forcedRescanPaths: make(map[string]struct{}),
  86. forcedRescanPathsMut: sync.NewMutex(),
  87. watchCancel: func() {},
  88. restartWatchChan: make(chan struct{}, 1),
  89. watchMut: sync.NewMutex(),
  90. }
  91. }
// serve is the folder's main loop. It owns the scan timer, schedules and
// retries pulls with exponential backoff, and dispatches watcher events,
// forced rescans and synchronous requests. It runs until ctx is cancelled.
func (f *folder) serve(ctx context.Context) {
	atomic.AddInt32(&f.model.foldersRunning, 1)
	defer atomic.AddInt32(&f.model.foldersRunning, -1)

	f.ctx = ctx

	l.Debugln(f, "starting")
	defer l.Debugln(f, "exiting")

	defer func() {
		f.scanTimer.Stop()
		f.setState(FolderIdle)
	}()

	pause := f.basePause()
	pullFailTimer := time.NewTimer(0)
	// Drain the initial firing so the timer starts out inert.
	<-pullFailTimer.C

	if f.FSWatcherEnabled && f.getHealthErrorAndLoadIgnores() == nil {
		f.startWatch()
	}

	initialCompleted := f.initialScanFinished

	pull := func() {
		startTime := time.Now()
		if f.pull() {
			// We're good. Don't schedule another pull and reset
			// the pause interval.
			pause = f.basePause()
			return
		}
		// Pulling failed, try again later.
		delay := pause + time.Since(startTime)
		l.Infof("Folder %v isn't making sync progress - retrying in %v.", f.Description(), delay)
		pullFailTimer.Reset(delay)
		// Back off exponentially, capped at 60x the base pause.
		if pause < 60*f.basePause() {
			pause *= 2
		}
	}

	for {
		select {
		case <-f.ctx.Done():
			return

		case <-f.pullScheduled:
			// Stop and drain the retry timer so a queued retry doesn't
			// fire immediately after this explicitly requested pull.
			pullFailTimer.Stop()
			select {
			case <-pullFailTimer.C:
			default:
			}
			pull()

		case <-pullFailTimer.C:
			pull()

		case <-initialCompleted:
			// Initial scan has completed, we should do a pull
			initialCompleted = nil // never hit this case again
			if !f.pull() {
				// Pulling failed, try again later.
				pullFailTimer.Reset(pause)
			}

		case <-f.forcedRescanRequested:
			f.handleForcedRescans()

		case <-f.scanTimer.C:
			l.Debugln(f, "Scanning due to timer")
			f.scanTimerFired()

		case req := <-f.doInSyncChan:
			l.Debugln(f, "Running something due to request")
			req.err <- req.fn()

		case next := <-f.scanDelay:
			l.Debugln(f, "Delaying scan")
			f.scanTimer.Reset(next)

		case fsEvents := <-f.watchChan:
			l.Debugln(f, "Scan due to watcher")
			f.scanSubdirs(fsEvents)

		case <-f.restartWatchChan:
			l.Debugln(f, "Restart watcher")
			f.restartWatch()
		}
	}
}
// BringToFront does nothing for this folder type.
func (f *folder) BringToFront(string) {}

// Override does nothing for this folder type.
func (f *folder) Override() {}

// Revert does nothing for this folder type.
func (f *folder) Revert() {}
// DelayScan postpones the next scheduled scan by next.
func (f *folder) DelayScan(next time.Duration) {
	f.Delay(next)
}
  171. func (f *folder) ignoresUpdated() {
  172. if f.FSWatcherEnabled {
  173. f.scheduleWatchRestart()
  174. }
  175. }
// SchedulePull requests a pull from the serve loop without blocking the
// caller.
func (f *folder) SchedulePull() {
	select {
	case f.pullScheduled <- struct{}{}:
	default:
		// We might be busy doing a pull and thus not reading from this
		// channel. The channel is 1-buffered, so one notification will be
		// queued to ensure we recheck after the pull, but beyond that we must
		// make sure to not block index receiving.
	}
}
// Jobs returns no queued or in-progress items; this folder type maintains
// no job queue.
func (f *folder) Jobs(_, _ int) ([]string, []string, int) {
	return nil, nil, 0
}
// Scan blocks until the initial scan has finished, then runs a scan of the
// given subdirectories synchronously inside the serve loop and returns its
// result.
func (f *folder) Scan(subdirs []string) error {
	<-f.initialScanFinished
	return f.doInSync(func() error { return f.scanSubdirs(subdirs) })
}
// doInSync allows to run functions synchronously in folder.serve from exported,
// asynchronously called methods. It returns the function's error, or the
// context error if the folder is stopped before the request is accepted.
func (f *folder) doInSync(fn func() error) error {
	req := syncRequest{
		fn:  fn,
		err: make(chan error, 1), // buffered so serve never blocks on the reply
	}

	select {
	case f.doInSyncChan <- req:
		return <-req.err
	case <-f.ctx.Done():
		return f.ctx.Err()
	}
}
  207. func (f *folder) Reschedule() {
  208. if f.scanInterval == 0 {
  209. return
  210. }
  211. // Sleep a random time between 3/4 and 5/4 of the configured interval.
  212. sleepNanos := (f.scanInterval.Nanoseconds()*3 + rand.Int63n(2*f.scanInterval.Nanoseconds())) / 4
  213. interval := time.Duration(sleepNanos) * time.Nanosecond
  214. l.Debugln(f, "next rescan in", interval)
  215. f.scanTimer.Reset(interval)
  216. }
// Delay hands the serve loop a new delay for the scan timer. It blocks until
// the serve loop picks it up.
func (f *folder) Delay(next time.Duration) {
	f.scanDelay <- next
}
// getHealthErrorAndLoadIgnores performs the folder health checks and, if
// those pass, (re)loads the ignore patterns from .stignore. A missing
// .stignore file is not an error.
func (f *folder) getHealthErrorAndLoadIgnores() error {
	if err := f.getHealthErrorWithoutIgnores(); err != nil {
		return err
	}
	if err := f.ignores.Load(".stignore"); err != nil && !fs.IsNotExist(err) {
		return errors.Wrap(err, "loading ignores")
	}
	return nil
}
// getHealthErrorWithoutIgnores checks that the folder path is usable and that
// there is enough free space for the database, without touching the ignore
// patterns.
func (f *folder) getHealthErrorWithoutIgnores() error {
	// Check for folder errors, with the most serious and specific first and
	// generic ones like out of space on the home disk later.

	if err := f.CheckPath(); err != nil {
		return err
	}

	dbPath := locations.Get(locations.Database)
	// Space check is best-effort: if the database filesystem can't report
	// usage we skip the check rather than fail.
	if usage, err := fs.NewFilesystem(fs.FilesystemTypeBasic, dbPath).Usage("."); err == nil {
		if err = config.CheckFreeSpace(f.model.cfg.Options().MinHomeDiskFree, usage); err != nil {
			return errors.Wrapf(err, "insufficient space on disk for database (%v)", dbPath)
		}
	}

	return nil
}
// pull runs one pull attempt via the folder-type specific puller. It returns
// true when the attempt needs no retry: nothing to do, not yet scanned, pull
// succeeded, or the folder is shutting down.
func (f *folder) pull() bool {
	select {
	case <-f.initialScanFinished:
	default:
		// Once the initial scan finished, a pull will be scheduled
		return true
	}

	// If there is nothing to do, don't even enter sync-waiting state.
	abort := true
	snap := f.fset.Snapshot()
	snap.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
		// At least one needed file exists; stop iterating immediately.
		abort = false
		return false
	})
	snap.Release()
	if abort {
		return true
	}

	f.setState(FolderSyncWaiting)
	defer f.setState(FolderIdle)

	// Serialize I/O-heavy operations across folders.
	if err := f.ioLimiter.takeWithContext(f.ctx, 1); err != nil {
		return true
	}
	defer f.ioLimiter.give(1)

	return f.puller.pull()
}
  269. func (f *folder) scanSubdirs(subDirs []string) error {
  270. oldHash := f.ignores.Hash()
  271. err := f.getHealthErrorAndLoadIgnores()
  272. f.setError(err)
  273. if err != nil {
  274. // If there is a health error we set it as the folder error. We do not
  275. // clear the folder error if there is no health error, as there might be
  276. // an *other* folder error (failed to load ignores, for example). Hence
  277. // we do not use the CheckHealth() convenience function here.
  278. return err
  279. }
  280. // Check on the way out if the ignore patterns changed as part of scanning
  281. // this folder. If they did we should schedule a pull of the folder so that
  282. // we request things we might have suddenly become unignored and so on.
  283. defer func() {
  284. if f.ignores.Hash() != oldHash {
  285. l.Debugln("Folder", f.Description(), "ignore patterns change detected while scanning; triggering puller")
  286. f.ignoresUpdated()
  287. f.SchedulePull()
  288. }
  289. }()
  290. f.setState(FolderScanWaiting)
  291. if err := f.ioLimiter.takeWithContext(f.ctx, 1); err != nil {
  292. return err
  293. }
  294. defer f.ioLimiter.give(1)
  295. for i := range subDirs {
  296. sub := osutil.NativeFilename(subDirs[i])
  297. if sub == "" {
  298. // A blank subdirs means to scan the entire folder. We can trim
  299. // the subDirs list and go on our way.
  300. subDirs = nil
  301. break
  302. }
  303. subDirs[i] = sub
  304. }
  305. snap := f.fset.Snapshot()
  306. // We release explicitly later in this function, however we might exit early
  307. // and it's ok to release twice.
  308. defer snap.Release()
  309. // Clean the list of subitems to ensure that we start at a known
  310. // directory, and don't scan subdirectories of things we've already
  311. // scanned.
  312. subDirs = unifySubs(subDirs, func(file string) bool {
  313. _, ok := snap.Get(protocol.LocalDeviceID, file)
  314. return ok
  315. })
  316. f.setState(FolderScanning)
  317. mtimefs := f.fset.MtimeFS()
  318. fchan := scanner.Walk(f.ctx, scanner.Config{
  319. Folder: f.ID,
  320. Subs: subDirs,
  321. Matcher: f.ignores,
  322. TempLifetime: time.Duration(f.model.cfg.Options().KeepTemporariesH) * time.Hour,
  323. CurrentFiler: cFiler{snap},
  324. Filesystem: mtimefs,
  325. IgnorePerms: f.IgnorePerms,
  326. AutoNormalize: f.AutoNormalize,
  327. Hashers: f.model.numHashers(f.ID),
  328. ShortID: f.shortID,
  329. ProgressTickIntervalS: f.ScanProgressIntervalS,
  330. LocalFlags: f.localFlags,
  331. ModTimeWindow: f.ModTimeWindow(),
  332. EventLogger: f.evLogger,
  333. })
  334. batchFn := func(fs []protocol.FileInfo) error {
  335. if err := f.getHealthErrorWithoutIgnores(); err != nil {
  336. l.Debugf("Stopping scan of folder %s due to: %s", f.Description(), err)
  337. return err
  338. }
  339. f.updateLocalsFromScanning(fs)
  340. return nil
  341. }
  342. // Resolve items which are identical with the global state.
  343. if f.localFlags&protocol.FlagLocalReceiveOnly != 0 {
  344. oldBatchFn := batchFn // can't reference batchFn directly (recursion)
  345. batchFn = func(fs []protocol.FileInfo) error {
  346. for i := range fs {
  347. switch gf, ok := snap.GetGlobal(fs[i].Name); {
  348. case !ok:
  349. continue
  350. case gf.IsEquivalentOptional(fs[i], f.ModTimeWindow(), false, false, protocol.FlagLocalReceiveOnly):
  351. // What we have locally is equivalent to the global file.
  352. fs[i].Version = fs[i].Version.Merge(gf.Version)
  353. fallthrough
  354. case fs[i].IsDeleted() && gf.IsReceiveOnlyChanged():
  355. // Our item is deleted and the global item is our own
  356. // receive only file. We can't delete file infos, so
  357. // we just pretend it is a normal deleted file (nobody
  358. // cares about that).
  359. fs[i].LocalFlags &^= protocol.FlagLocalReceiveOnly
  360. }
  361. }
  362. return oldBatchFn(fs)
  363. }
  364. }
  365. batch := newFileInfoBatch(batchFn)
  366. // Schedule a pull after scanning, but only if we actually detected any
  367. // changes.
  368. changes := 0
  369. defer func() {
  370. if changes > 0 {
  371. f.SchedulePull()
  372. }
  373. }()
  374. f.clearScanErrors(subDirs)
  375. for res := range fchan {
  376. if res.Err != nil {
  377. f.newScanError(res.Path, res.Err)
  378. continue
  379. }
  380. if err := batch.flushIfFull(); err != nil {
  381. return err
  382. }
  383. batch.append(res.File)
  384. changes++
  385. }
  386. if err := batch.flush(); err != nil {
  387. return err
  388. }
  389. if len(subDirs) == 0 {
  390. // If we have no specific subdirectories to traverse, set it to one
  391. // empty prefix so we traverse the entire folder contents once.
  392. subDirs = []string{""}
  393. }
  394. // Do a scan of the database for each prefix, to check for deleted and
  395. // ignored files.
  396. var toIgnore []db.FileInfoTruncated
  397. ignoredParent := ""
  398. snap.Release()
  399. snap = f.fset.Snapshot()
  400. defer snap.Release()
  401. for _, sub := range subDirs {
  402. var iterError error
  403. snap.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi db.FileIntf) bool {
  404. select {
  405. case <-f.ctx.Done():
  406. return false
  407. default:
  408. }
  409. file := fi.(db.FileInfoTruncated)
  410. if err := batch.flushIfFull(); err != nil {
  411. iterError = err
  412. return false
  413. }
  414. if ignoredParent != "" && !fs.IsParent(file.Name, ignoredParent) {
  415. for _, file := range toIgnore {
  416. l.Debugln("marking file as ignored", file)
  417. nf := file.ConvertToIgnoredFileInfo(f.shortID)
  418. batch.append(nf)
  419. changes++
  420. if err := batch.flushIfFull(); err != nil {
  421. iterError = err
  422. return false
  423. }
  424. }
  425. toIgnore = toIgnore[:0]
  426. ignoredParent = ""
  427. }
  428. switch ignored := f.ignores.Match(file.Name).IsIgnored(); {
  429. case !file.IsIgnored() && ignored:
  430. // File was not ignored at last pass but has been ignored.
  431. if file.IsDirectory() {
  432. // Delay ignoring as a child might be unignored.
  433. toIgnore = append(toIgnore, file)
  434. if ignoredParent == "" {
  435. // If the parent wasn't ignored already, set
  436. // this path as the "highest" ignored parent
  437. ignoredParent = file.Name
  438. }
  439. return true
  440. }
  441. l.Debugln("marking file as ignored", file)
  442. nf := file.ConvertToIgnoredFileInfo(f.shortID)
  443. batch.append(nf)
  444. changes++
  445. case file.IsIgnored() && !ignored:
  446. // Successfully scanned items are already un-ignored during
  447. // the scan, so check whether it is deleted.
  448. fallthrough
  449. case !file.IsIgnored() && !file.IsDeleted() && !file.IsUnsupported():
  450. // The file is not ignored, deleted or unsupported. Lets check if
  451. // it's still here. Simply stat:ing it wont do as there are
  452. // tons of corner cases (e.g. parent dir->symlink, missing
  453. // permissions)
  454. if !osutil.IsDeleted(mtimefs, file.Name) {
  455. if ignoredParent != "" {
  456. // Don't ignore parents of this not ignored item
  457. toIgnore = toIgnore[:0]
  458. ignoredParent = ""
  459. }
  460. return true
  461. }
  462. nf := file.ConvertToDeletedFileInfo(f.shortID)
  463. nf.LocalFlags = f.localFlags
  464. if file.ShouldConflict() {
  465. // We do not want to override the global version with
  466. // the deleted file. Setting to an empty version makes
  467. // sure the file gets in sync on the following pull.
  468. nf.Version = protocol.Vector{}
  469. }
  470. batch.append(nf)
  471. changes++
  472. }
  473. // Check for deleted, locally changed items that noone else has.
  474. if f.localFlags&protocol.FlagLocalReceiveOnly == 0 {
  475. return true
  476. }
  477. if !fi.IsDeleted() || !fi.IsReceiveOnlyChanged() || len(snap.Availability(fi.FileName())) > 0 {
  478. return true
  479. }
  480. nf := fi.(db.FileInfoTruncated).ConvertDeletedToFileInfo()
  481. nf.LocalFlags = 0
  482. nf.Version = protocol.Vector{}
  483. batch.append(nf)
  484. changes++
  485. return true
  486. })
  487. select {
  488. case <-f.ctx.Done():
  489. return f.ctx.Err()
  490. default:
  491. }
  492. if iterError == nil && len(toIgnore) > 0 {
  493. for _, file := range toIgnore {
  494. l.Debugln("marking file as ignored", f)
  495. nf := file.ConvertToIgnoredFileInfo(f.shortID)
  496. batch.append(nf)
  497. changes++
  498. if iterError = batch.flushIfFull(); iterError != nil {
  499. break
  500. }
  501. }
  502. toIgnore = toIgnore[:0]
  503. }
  504. if iterError != nil {
  505. return iterError
  506. }
  507. }
  508. if err := batch.flush(); err != nil {
  509. return err
  510. }
  511. f.ScanCompleted()
  512. f.setState(FolderIdle)
  513. return nil
  514. }
// scanTimerFired runs a full scan, closes initialScanFinished after the very
// first scan, and reschedules the timer for the next periodic scan.
func (f *folder) scanTimerFired() {
	err := f.scanSubdirs(nil)

	select {
	case <-f.initialScanFinished:
	default:
		// This is the first scan: report its outcome and unblock everyone
		// waiting on initialScanFinished.
		status := "Completed"
		if err != nil {
			status = "Failed"
		}
		l.Infoln(status, "initial scan of", f.Type.String(), "folder", f.Description())
		close(f.initialScanFinished)
	}

	f.Reschedule()
}
  529. func (f *folder) WatchError() error {
  530. f.watchMut.Lock()
  531. defer f.watchMut.Unlock()
  532. return f.watchErr
  533. }
// stopWatch immediately aborts watching and may be called asynchronously
func (f *folder) stopWatch() {
	f.watchMut.Lock()
	f.watchCancel()
	f.watchMut.Unlock()
	// Clear the watch error state now that nothing is watching.
	f.setWatchError(nil, 0)
}
// scheduleWatchRestart makes sure watching is restarted from the main for loop
// in a folder's Serve and thus may be called asynchronously (e.g. when ignores change).
func (f *folder) scheduleWatchRestart() {
	select {
	case f.restartWatchChan <- struct{}{}:
	default:
		// We might be busy doing a pull and thus not reading from this
		// channel. The channel is 1-buffered, so one notification will be
		// queued to ensure we recheck after the pull.
	}
}
// restartWatch should only ever be called synchronously. If you want to use
// this asynchronously, you should probably use scheduleWatchRestart instead.
func (f *folder) restartWatch() {
	f.stopWatch()
	f.startWatch()
	// Rescan to pick up anything that happened while the watcher was down.
	f.scanSubdirs(nil)
}
// startWatch should only ever be called synchronously. If you want to use
// this asynchronously, you should probably use scheduleWatchRestart instead.
func (f *folder) startWatch() {
	ctx, cancel := context.WithCancel(f.ctx)
	f.watchMut.Lock()
	f.watchChan = make(chan []string)
	f.watchCancel = cancel
	f.watchMut.Unlock()
	go f.monitorWatch(ctx)
}
  569. // monitorWatch starts the filesystem watching and retries every minute on failure.
  570. // It should not be used except in startWatch.
  571. func (f *folder) monitorWatch(ctx context.Context) {
  572. failTimer := time.NewTimer(0)
  573. aggrCtx, aggrCancel := context.WithCancel(ctx)
  574. var err error
  575. var eventChan <-chan fs.Event
  576. var errChan <-chan error
  577. warnedOutside := false
  578. var lastWatch time.Time
  579. pause := time.Minute
  580. for {
  581. select {
  582. case <-failTimer.C:
  583. eventChan, errChan, err = f.Filesystem().Watch(".", f.ignores, ctx, f.IgnorePerms)
  584. // We do this once per minute initially increased to
  585. // max one hour in case of repeat failures.
  586. f.scanOnWatchErr()
  587. f.setWatchError(err, pause)
  588. if err != nil {
  589. failTimer.Reset(pause)
  590. if pause < 60*time.Minute {
  591. pause *= 2
  592. }
  593. continue
  594. }
  595. lastWatch = time.Now()
  596. watchaggregator.Aggregate(aggrCtx, eventChan, f.watchChan, f.FolderConfiguration, f.model.cfg, f.evLogger)
  597. l.Debugln("Started filesystem watcher for folder", f.Description())
  598. case err = <-errChan:
  599. var next time.Duration
  600. if dur := time.Since(lastWatch); dur > pause {
  601. pause = time.Minute
  602. next = 0
  603. } else {
  604. next = pause - dur
  605. if pause < 60*time.Minute {
  606. pause *= 2
  607. }
  608. }
  609. failTimer.Reset(next)
  610. f.setWatchError(err, next)
  611. // This error was previously a panic and should never occur, so generate
  612. // a warning, but don't do it repetitively.
  613. if !warnedOutside {
  614. if _, ok := err.(*fs.ErrWatchEventOutsideRoot); ok {
  615. l.Warnln(err)
  616. warnedOutside = true
  617. }
  618. }
  619. aggrCancel()
  620. errChan = nil
  621. aggrCtx, aggrCancel = context.WithCancel(ctx)
  622. case <-ctx.Done():
  623. return
  624. }
  625. }
  626. }
  627. // setWatchError sets the current error state of the watch and should be called
  628. // regardless of whether err is nil or not.
  629. func (f *folder) setWatchError(err error, nextTryIn time.Duration) {
  630. f.watchMut.Lock()
  631. prevErr := f.watchErr
  632. f.watchErr = err
  633. f.watchMut.Unlock()
  634. if err != prevErr {
  635. data := map[string]interface{}{
  636. "folder": f.ID,
  637. }
  638. if prevErr != nil {
  639. data["from"] = prevErr.Error()
  640. }
  641. if err != nil {
  642. data["to"] = err.Error()
  643. }
  644. f.evLogger.Log(events.FolderWatchStateChanged, data)
  645. }
  646. if err == nil {
  647. return
  648. }
  649. msg := fmt.Sprintf("Error while trying to start filesystem watcher for folder %s, trying again in %v: %v", f.Description(), nextTryIn, err)
  650. if prevErr != err {
  651. l.Infof(msg)
  652. return
  653. }
  654. l.Debugf(msg)
  655. }
  656. // scanOnWatchErr schedules a full scan immediately if an error occurred while watching.
  657. func (f *folder) scanOnWatchErr() {
  658. f.watchMut.Lock()
  659. err := f.watchErr
  660. f.watchMut.Unlock()
  661. if err != nil {
  662. f.Delay(0)
  663. }
  664. }
// setError records err as the folder's error state, logging transitions and
// stopping or restarting the filesystem watcher accordingly. Setting the
// same error (by message) again, or nil when already nil, is a no-op.
func (f *folder) setError(err error) {
	select {
	case <-f.ctx.Done():
		// Folder is shutting down; don't touch state.
		return
	default:
	}

	_, _, oldErr := f.getState()
	// No change in error state (compared by message text) -> nothing to do.
	if (err != nil && oldErr != nil && oldErr.Error() == err.Error()) || (err == nil && oldErr == nil) {
		return
	}

	if err != nil {
		if oldErr == nil {
			l.Warnf("Error on folder %s: %v", f.Description(), err)
		} else {
			l.Infof("Error on folder %s changed: %q -> %q", f.Description(), oldErr, err)
		}
	} else {
		l.Infoln("Cleared error on folder", f.Description())
	}

	if f.FSWatcherEnabled {
		if err != nil {
			f.stopWatch()
		} else {
			f.scheduleWatchRestart()
		}
	}

	f.stateTracker.setError(err)
}
  693. func (f *folder) basePause() time.Duration {
  694. if f.PullerPauseS == 0 {
  695. return defaultPullerPause
  696. }
  697. return time.Duration(f.PullerPauseS) * time.Second
  698. }
// String identifies the folder by type, ID and pointer for log output.
func (f *folder) String() string {
	return fmt.Sprintf("%s/%s@%p", f.Type, f.folderID, f)
}
  702. func (f *folder) newScanError(path string, err error) {
  703. f.scanErrorsMut.Lock()
  704. f.scanErrors = append(f.scanErrors, FileError{
  705. Err: err.Error(),
  706. Path: path,
  707. })
  708. f.scanErrorsMut.Unlock()
  709. }
// clearScanErrors drops recorded scan errors for the given subdirectories
// (including anything beneath them). An empty subDirs clears all errors.
func (f *folder) clearScanErrors(subDirs []string) {
	f.scanErrorsMut.Lock()
	defer f.scanErrorsMut.Unlock()
	if len(subDirs) == 0 {
		f.scanErrors = nil
		return
	}
	// Filter in place, reusing the existing backing array.
	filtered := f.scanErrors[:0]
outer:
	for _, fe := range f.scanErrors {
		for _, sub := range subDirs {
			// Drop the error if it is at or below one of the rescanned subs.
			if fe.Path == sub || fs.IsParent(fe.Path, sub) {
				continue outer
			}
		}
		filtered = append(filtered, fe)
	}
	f.scanErrors = filtered
}
// Errors returns a copy of the scan errors recorded for this folder.
func (f *folder) Errors() []FileError {
	f.scanErrorsMut.Lock()
	defer f.scanErrorsMut.Unlock()
	// Copy so callers can't observe or race with later mutations.
	return append([]FileError{}, f.scanErrors...)
}
// ScheduleForceRescan marks the file such that it gets rehashed on next scan, and schedules a scan.
func (f *folder) ScheduleForceRescan(path string) {
	f.forcedRescanPathsMut.Lock()
	f.forcedRescanPaths[path] = struct{}{}
	f.forcedRescanPathsMut.Unlock()

	// Non-blocking notify; the 1-buffered channel queues at most one request.
	select {
	case f.forcedRescanRequested <- struct{}{}:
	default:
	}
}
// updateLocalsFromScanning commits locally scanned changes to the database
// and emits LocalChangeDetected disk events for them.
func (f *folder) updateLocalsFromScanning(fs []protocol.FileInfo) {
	f.updateLocals(fs)
	f.emitDiskChangeEvents(fs, events.LocalChangeDetected)
}
// updateLocalsFromPulling commits pulled changes to the database and emits
// RemoteChangeDetected disk events for them.
func (f *folder) updateLocalsFromPulling(fs []protocol.FileInfo) {
	f.updateLocals(fs)
	f.emitDiskChangeEvents(fs, events.RemoteChangeDetected)
}
  752. func (f *folder) updateLocals(fs []protocol.FileInfo) {
  753. f.fset.Update(protocol.LocalDeviceID, fs)
  754. filenames := make([]string, len(fs))
  755. for i, file := range fs {
  756. filenames[i] = file.Name
  757. }
  758. f.evLogger.Log(events.LocalIndexUpdated, map[string]interface{}{
  759. "folder": f.ID,
  760. "items": len(fs),
  761. "filenames": filenames,
  762. "version": f.fset.Sequence(protocol.LocalDeviceID),
  763. })
  764. }
// emitDiskChangeEvents emits one disk-change event of the given type per
// valid file, classifying each as added/modified/deleted and as
// file/dir/symlink. Invalid (ignored/failed) files are skipped.
func (f *folder) emitDiskChangeEvents(fs []protocol.FileInfo, typeOfEvent events.EventType) {
	for _, file := range fs {
		if file.IsInvalid() {
			continue
		}

		objType := "file"
		action := "modified"

		switch {
		case file.IsDeleted():
			action = "deleted"

		// If our local vector is version 1 AND it is the only version
		// vector so far seen for this file then it is a new file. Else if
		// it is > 1 it's not new, and if it is 1 but another shortId
		// version vector exists then it is new for us but created elsewhere
		// so the file is still not new but modified by us. Only if it is
		// truly new do we change this to 'added', else we leave it as
		// 'modified'.
		case len(file.Version.Counters) == 1 && file.Version.Counters[0].Value == 1:
			action = "added"
		}

		if file.IsSymlink() {
			objType = "symlink"
		} else if file.IsDirectory() {
			objType = "dir"
		}

		// Two different events can be fired here based on what EventType is passed into function
		f.evLogger.Log(typeOfEvent, map[string]string{
			"folder":     f.ID,
			"folderID":   f.ID, // incorrect, deprecated, kept for historical compliance
			"label":      f.Label,
			"action":     action,
			"type":       objType,
			"path":       filepath.FromSlash(file.Name),
			"modifiedBy": file.ModifiedBy.String(),
		})
	}
}
// handleForcedRescans marks all paths queued via ScheduleForceRescan as
// must-rescan in the database, then scans exactly those paths. Runs inside
// the serve loop.
func (f *folder) handleForcedRescans() {
	// Take ownership of the queued paths and reset the queue.
	f.forcedRescanPathsMut.Lock()
	paths := make([]string, 0, len(f.forcedRescanPaths))
	for path := range f.forcedRescanPaths {
		paths = append(paths, path)
	}
	f.forcedRescanPaths = make(map[string]struct{})
	f.forcedRescanPathsMut.Unlock()

	batch := newFileInfoBatch(func(fs []protocol.FileInfo) error {
		f.fset.Update(protocol.LocalDeviceID, fs)
		return nil
	})

	snap := f.fset.Snapshot()
	for _, path := range paths {
		// Errors are intentionally ignored: this is best-effort marking;
		// the scan below rechecks the paths anyway.
		_ = batch.flushIfFull()

		fi, ok := snap.Get(protocol.LocalDeviceID, path)
		if !ok {
			continue
		}
		fi.SetMustRescan(f.shortID)
		batch.append(fi)
	}
	snap.Release()

	_ = batch.flush()

	_ = f.scanSubdirs(paths)
}
// unifySubs cleans a sorted list of scan subdirectories: it canonicalizes
// each path, drops duplicates and paths contained in an earlier path, and
// walks each path up to its closest known ancestor. A list containing the
// folder root ("", "." or the path separator) collapses to nil, meaning
// "scan everything".
//
// The exists function is expected to return true for all known paths
// (excluding "" and ".")
func unifySubs(dirs []string, exists func(dir string) bool) []string {
	if len(dirs) == 0 {
		return nil
	}
	sort.Strings(dirs)
	// After sorting, a root-ish entry sorts first; it subsumes everything.
	if dirs[0] == "" || dirs[0] == "." || dirs[0] == string(fs.PathSeparator) {
		return nil
	}
	prev := "./" // Anything that can't be parent of a clean path
	for i := 0; i < len(dirs); {
		dir, err := fs.Canonicalize(dirs[i])
		if err != nil {
			l.Debugf("Skipping %v for scan: %s", dirs[i], err)
			dirs = append(dirs[:i], dirs[i+1:]...)
			continue
		}
		// Sorted order guarantees a parent appears before its children, so
		// comparing against the previous kept entry suffices.
		if dir == prev || fs.IsParent(dir, prev) {
			dirs = append(dirs[:i], dirs[i+1:]...)
			continue
		}
		// Climb to the nearest ancestor the database knows about.
		parent := filepath.Dir(dir)
		for parent != "." && parent != string(fs.PathSeparator) && !exists(parent) {
			dir = parent
			parent = filepath.Dir(dir)
		}
		dirs[i] = dir
		prev = dir
		i++
	}
	return dirs
}
// cFiler adapts a database snapshot to the scanner.CurrentFiler interface.
type cFiler struct {
	*db.Snapshot
}

// Implements scanner.CurrentFiler
func (cf cFiler) CurrentFile(file string) (protocol.FileInfo, bool) {
	return cf.Get(protocol.LocalDeviceID, file)
}