folder.go 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148
  1. // Copyright (C) 2014 The Syncthing Authors.
  2. //
  3. // This Source Code Form is subject to the terms of the Mozilla Public
  4. // License, v. 2.0. If a copy of the MPL was not distributed with this file,
  5. // You can obtain one at https://mozilla.org/MPL/2.0/.
  6. package model
  7. import (
  8. "context"
  9. "fmt"
  10. "math/rand"
  11. "path/filepath"
  12. "sort"
  13. "sync/atomic"
  14. "time"
  15. "github.com/pkg/errors"
  16. "github.com/syncthing/syncthing/lib/config"
  17. "github.com/syncthing/syncthing/lib/db"
  18. "github.com/syncthing/syncthing/lib/events"
  19. "github.com/syncthing/syncthing/lib/fs"
  20. "github.com/syncthing/syncthing/lib/ignore"
  21. "github.com/syncthing/syncthing/lib/locations"
  22. "github.com/syncthing/syncthing/lib/osutil"
  23. "github.com/syncthing/syncthing/lib/protocol"
  24. "github.com/syncthing/syncthing/lib/scanner"
  25. "github.com/syncthing/syncthing/lib/stats"
  26. "github.com/syncthing/syncthing/lib/sync"
  27. "github.com/syncthing/syncthing/lib/util"
  28. "github.com/syncthing/syncthing/lib/versioner"
  29. "github.com/syncthing/syncthing/lib/watchaggregator"
  30. "github.com/thejerf/suture"
  31. )
// folder holds the state and plumbing common to all folder types: scan
// scheduling, pull scheduling and backoff, filesystem watching, versioned
// file cleanup and error tracking. Concrete folder types embed it.
type folder struct {
	suture.Service
	stateTracker
	config.FolderConfiguration
	*stats.FolderStatisticsReference
	ioLimiter *byteSemaphore

	localFlags uint32

	model         *model
	shortID       protocol.ShortID
	fset          *db.FileSet
	ignores       *ignore.Matcher
	modTimeWindow time.Duration
	ctx           context.Context // set in serve(); scoped to the service lifetime

	// Scan scheduling and scan-error bookkeeping.
	scanInterval        time.Duration
	scanTimer           *time.Timer
	scanDelay           chan time.Duration
	initialScanFinished chan struct{} // closed once the first scan has run
	scanErrors          []FileError
	scanErrorsMut       sync.Mutex

	// Periodic cleanup of old file versions.
	versionCleanupInterval time.Duration
	versionCleanupTimer    *time.Timer

	// Pull scheduling and failure backoff.
	pullScheduled chan struct{} // 1-buffered; see SchedulePull
	pullPause     time.Duration
	pullFailTimer *time.Timer

	// Requests to run a function synchronously in serve(); see doInSync.
	doInSyncChan chan syncRequest

	forcedRescanRequested chan struct{} // 1-buffered; see ScheduleForceRescan
	forcedRescanPaths     map[string]struct{}
	forcedRescanPathsMut  sync.Mutex

	// Filesystem watcher state, guarded by watchMut.
	watchCancel      context.CancelFunc
	watchChan        chan []string
	restartWatchChan chan struct{} // 1-buffered; see scheduleWatchRestart
	watchErr         error
	watchMut         sync.Mutex

	puller    puller
	versioner versioner.Versioner
}
// syncRequest carries a function to run synchronously in the folder's
// service loop; the function's result is delivered on err.
type syncRequest struct {
	fn  func() error
	err chan error
}
// puller performs a single pull attempt for a folder.
type puller interface {
	pull() bool // true when successful and should not be retried
}
  75. func newFolder(model *model, fset *db.FileSet, ignores *ignore.Matcher, cfg config.FolderConfiguration, evLogger events.Logger, ioLimiter *byteSemaphore, ver versioner.Versioner) folder {
  76. f := folder{
  77. stateTracker: newStateTracker(cfg.ID, evLogger),
  78. FolderConfiguration: cfg,
  79. FolderStatisticsReference: stats.NewFolderStatisticsReference(model.db, cfg.ID),
  80. ioLimiter: ioLimiter,
  81. model: model,
  82. shortID: model.shortID,
  83. fset: fset,
  84. ignores: ignores,
  85. modTimeWindow: cfg.ModTimeWindow(),
  86. scanInterval: time.Duration(cfg.RescanIntervalS) * time.Second,
  87. scanTimer: time.NewTimer(0), // The first scan should be done immediately.
  88. scanDelay: make(chan time.Duration),
  89. initialScanFinished: make(chan struct{}),
  90. scanErrorsMut: sync.NewMutex(),
  91. versionCleanupInterval: time.Duration(cfg.Versioning.CleanupIntervalS) * time.Second,
  92. versionCleanupTimer: time.NewTimer(time.Duration(cfg.Versioning.CleanupIntervalS) * time.Second),
  93. pullScheduled: make(chan struct{}, 1), // This needs to be 1-buffered so that we queue a pull if we're busy when it comes.
  94. doInSyncChan: make(chan syncRequest),
  95. forcedRescanRequested: make(chan struct{}, 1),
  96. forcedRescanPaths: make(map[string]struct{}),
  97. forcedRescanPathsMut: sync.NewMutex(),
  98. watchCancel: func() {},
  99. restartWatchChan: make(chan struct{}, 1),
  100. watchMut: sync.NewMutex(),
  101. versioner: ver,
  102. }
  103. f.pullPause = f.pullBasePause()
  104. f.pullFailTimer = time.NewTimer(0)
  105. <-f.pullFailTimer.C
  106. return f
  107. }
// serve is the folder's main service loop. It owns all timer-driven and
// channel-driven work: scans, pulls with backoff, watcher restarts,
// forced rescans and version cleanup. It runs until ctx is cancelled.
func (f *folder) serve(ctx context.Context) {
	atomic.AddInt32(&f.model.foldersRunning, 1)
	defer atomic.AddInt32(&f.model.foldersRunning, -1)

	f.ctx = ctx

	l.Debugln(f, "starting")
	defer l.Debugln(f, "exiting")

	defer func() {
		f.scanTimer.Stop()
		f.versionCleanupTimer.Stop()
		f.setState(FolderIdle)
	}()

	if f.FSWatcherEnabled && f.getHealthErrorAndLoadIgnores() == nil {
		f.startWatch()
	}

	// If we're configured to not do version cleanup, or we don't have a
	// versioner, cancel and drain that timer now.
	if f.versionCleanupInterval == 0 || f.versioner == nil {
		if !f.versionCleanupTimer.Stop() {
			<-f.versionCleanupTimer.C
		}
	}

	initialCompleted := f.initialScanFinished

	for {
		select {
		case <-f.ctx.Done():
			return

		case <-f.pullScheduled:
			f.pull()

		case <-f.pullFailTimer.C:
			// Retry a previously failed pull, doubling the pause up to
			// 60x the base pause on repeated failure.
			if !f.pull() && f.pullPause < 60*f.pullBasePause() {
				// Back off from retrying to pull
				f.pullPause *= 2
			}

		case <-initialCompleted:
			// Initial scan has completed, we should do a pull
			initialCompleted = nil // never hit this case again
			f.pull()

		case <-f.forcedRescanRequested:
			f.handleForcedRescans()

		case <-f.scanTimer.C:
			l.Debugln(f, "Scanning due to timer")
			f.scanTimerFired()

		case req := <-f.doInSyncChan:
			l.Debugln(f, "Running something due to request")
			req.err <- req.fn()

		case next := <-f.scanDelay:
			l.Debugln(f, "Delaying scan")
			f.scanTimer.Reset(next)

		case fsEvents := <-f.watchChan:
			l.Debugln(f, "Scan due to watcher")
			f.scanSubdirs(fsEvents)

		case <-f.restartWatchChan:
			l.Debugln(f, "Restart watcher")
			f.restartWatch()

		case <-f.versionCleanupTimer.C:
			l.Debugln(f, "Doing version cleanup")
			f.versionCleanupTimerFired()
		}
	}
}
// BringToFront is a no-op; the base folder keeps no job queue to reorder.
func (f *folder) BringToFront(string) {}
// Override is a no-op for the base folder type.
func (f *folder) Override() {}
// Revert is a no-op for the base folder type.
func (f *folder) Revert() {}
// DelayScan postpones the next scheduled scan by the given duration.
func (f *folder) DelayScan(next time.Duration) {
	f.Delay(next)
}
  174. func (f *folder) ignoresUpdated() {
  175. if f.FSWatcherEnabled {
  176. f.scheduleWatchRestart()
  177. }
  178. }
// SchedulePull requests a pull from the service loop without blocking the
// caller.
func (f *folder) SchedulePull() {
	select {
	case f.pullScheduled <- struct{}{}:
	default:
		// We might be busy doing a pull and thus not reading from this
		// channel. The channel is 1-buffered, so one notification will be
		// queued to ensure we recheck after the pull, but beyond that we must
		// make sure to not block index receiving.
	}
}
// Jobs reports an empty job queue; the base folder tracks no per-item
// progress.
func (f *folder) Jobs(_, _ int) ([]string, []string, int) {
	return nil, nil, 0
}
// Scan waits for the initial scan to finish, then runs a scan of the given
// subdirectories synchronously in the folder's service loop.
func (f *folder) Scan(subdirs []string) error {
	<-f.initialScanFinished
	return f.doInSync(func() error { return f.scanSubdirs(subdirs) })
}
  196. // doInSync allows to run functions synchronously in folder.serve from exported,
  197. // asynchronously called methods.
  198. func (f *folder) doInSync(fn func() error) error {
  199. req := syncRequest{
  200. fn: fn,
  201. err: make(chan error, 1),
  202. }
  203. select {
  204. case f.doInSyncChan <- req:
  205. return <-req.err
  206. case <-f.ctx.Done():
  207. return f.ctx.Err()
  208. }
  209. }
  210. func (f *folder) Reschedule() {
  211. if f.scanInterval == 0 {
  212. return
  213. }
  214. // Sleep a random time between 3/4 and 5/4 of the configured interval.
  215. sleepNanos := (f.scanInterval.Nanoseconds()*3 + rand.Int63n(2*f.scanInterval.Nanoseconds())) / 4
  216. interval := time.Duration(sleepNanos) * time.Nanosecond
  217. l.Debugln(f, "next rescan in", interval)
  218. f.scanTimer.Reset(interval)
  219. }
// Delay hands the service loop a new delay for the scan timer. It blocks
// until the service loop accepts the request.
func (f *folder) Delay(next time.Duration) {
	f.scanDelay <- next
}
  223. func (f *folder) getHealthErrorAndLoadIgnores() error {
  224. if err := f.getHealthErrorWithoutIgnores(); err != nil {
  225. return err
  226. }
  227. if err := f.ignores.Load(".stignore"); err != nil && !fs.IsNotExist(err) {
  228. return errors.Wrap(err, "loading ignores")
  229. }
  230. return nil
  231. }
  232. func (f *folder) getHealthErrorWithoutIgnores() error {
  233. // Check for folder errors, with the most serious and specific first and
  234. // generic ones like out of space on the home disk later.
  235. if err := f.CheckPath(); err != nil {
  236. return err
  237. }
  238. dbPath := locations.Get(locations.Database)
  239. if usage, err := fs.NewFilesystem(fs.FilesystemTypeBasic, dbPath).Usage("."); err == nil {
  240. if err = config.CheckFreeSpace(f.model.cfg.Options().MinHomeDiskFree, usage); err != nil {
  241. return errors.Wrapf(err, "insufficient space on disk for database (%v)", dbPath)
  242. }
  243. }
  244. return nil
  245. }
// pull runs a single pull attempt. It returns true when there is nothing
// more to do (success, or nothing needed, or we bailed before starting);
// false schedules a retry via pullFailTimer with the current backoff.
func (f *folder) pull() (success bool) {
	// Stop and drain the fail timer so a pending retry doesn't fire a
	// second pull right after this one.
	f.pullFailTimer.Stop()
	select {
	case <-f.pullFailTimer.C:
	default:
	}

	select {
	case <-f.initialScanFinished:
	default:
		// Once the initial scan finished, a pull will be scheduled
		return true
	}

	defer func() {
		if success {
			// We're good, reset the pause interval.
			f.pullPause = f.pullBasePause()
		}
	}()

	// If there is nothing to do, don't even enter sync-waiting state.
	abort := true
	snap := f.fset.Snapshot()
	snap.WithNeed(protocol.LocalDeviceID, func(intf protocol.FileIntf) bool {
		abort = false
		return false
	})
	snap.Release()
	if abort {
		return true
	}

	// Abort early (before acquiring a token) if there's a folder error
	err := f.getHealthErrorWithoutIgnores()
	f.setError(err)
	if err != nil {
		l.Debugln("Skipping pull of", f.Description(), "due to folder error:", err)
		return false
	}

	f.setState(FolderSyncWaiting)
	if err := f.ioLimiter.takeWithContext(f.ctx, 1); err != nil {
		f.setError(err)
		return true
	}
	defer f.ioLimiter.give(1)

	startTime := time.Now()

	// Check if the ignore patterns changed.
	oldHash := f.ignores.Hash()
	defer func() {
		if f.ignores.Hash() != oldHash {
			f.ignoresUpdated()
		}
	}()
	err = f.getHealthErrorAndLoadIgnores()
	f.setError(err)
	if err != nil {
		l.Debugln("Skipping pull of", f.Description(), "due to folder error:", err)
		return false
	}

	success = f.puller.pull()

	if success {
		return true
	}

	// Pulling failed, try again later.
	delay := f.pullPause + time.Since(startTime)
	l.Infof("Folder %v isn't making sync progress - retrying in %v.", f.Description(), util.NiceDurationString(delay))
	f.pullFailTimer.Reset(delay)
	return false
}
  312. func (f *folder) scanSubdirs(subDirs []string) error {
  313. oldHash := f.ignores.Hash()
  314. err := f.getHealthErrorAndLoadIgnores()
  315. f.setError(err)
  316. if err != nil {
  317. // If there is a health error we set it as the folder error. We do not
  318. // clear the folder error if there is no health error, as there might be
  319. // an *other* folder error (failed to load ignores, for example). Hence
  320. // we do not use the CheckHealth() convenience function here.
  321. return err
  322. }
  323. // Check on the way out if the ignore patterns changed as part of scanning
  324. // this folder. If they did we should schedule a pull of the folder so that
  325. // we request things we might have suddenly become unignored and so on.
  326. defer func() {
  327. if f.ignores.Hash() != oldHash {
  328. l.Debugln("Folder", f.Description(), "ignore patterns change detected while scanning; triggering puller")
  329. f.ignoresUpdated()
  330. f.SchedulePull()
  331. }
  332. }()
  333. f.setState(FolderScanWaiting)
  334. defer f.setState(FolderIdle)
  335. if err := f.ioLimiter.takeWithContext(f.ctx, 1); err != nil {
  336. return err
  337. }
  338. defer f.ioLimiter.give(1)
  339. for i := range subDirs {
  340. sub := osutil.NativeFilename(subDirs[i])
  341. if sub == "" {
  342. // A blank subdirs means to scan the entire folder. We can trim
  343. // the subDirs list and go on our way.
  344. subDirs = nil
  345. break
  346. }
  347. subDirs[i] = sub
  348. }
  349. snap := f.fset.Snapshot()
  350. // We release explicitly later in this function, however we might exit early
  351. // and it's ok to release twice.
  352. defer snap.Release()
  353. // Clean the list of subitems to ensure that we start at a known
  354. // directory, and don't scan subdirectories of things we've already
  355. // scanned.
  356. subDirs = unifySubs(subDirs, func(file string) bool {
  357. _, ok := snap.Get(protocol.LocalDeviceID, file)
  358. return ok
  359. })
  360. f.setState(FolderScanning)
  361. // If we return early e.g. due to a folder health error, the scan needs
  362. // to be cancelled.
  363. scanCtx, scanCancel := context.WithCancel(f.ctx)
  364. defer scanCancel()
  365. mtimefs := f.fset.MtimeFS()
  366. fchan := scanner.Walk(scanCtx, scanner.Config{
  367. Folder: f.ID,
  368. Subs: subDirs,
  369. Matcher: f.ignores,
  370. TempLifetime: time.Duration(f.model.cfg.Options().KeepTemporariesH) * time.Hour,
  371. CurrentFiler: cFiler{snap},
  372. Filesystem: mtimefs,
  373. IgnorePerms: f.IgnorePerms,
  374. AutoNormalize: f.AutoNormalize,
  375. Hashers: f.model.numHashers(f.ID),
  376. ShortID: f.shortID,
  377. ProgressTickIntervalS: f.ScanProgressIntervalS,
  378. LocalFlags: f.localFlags,
  379. ModTimeWindow: f.modTimeWindow,
  380. EventLogger: f.evLogger,
  381. })
  382. batch := newFileInfoBatch(func(fs []protocol.FileInfo) error {
  383. if err := f.getHealthErrorWithoutIgnores(); err != nil {
  384. l.Debugf("Stopping scan of folder %s due to: %s", f.Description(), err)
  385. return err
  386. }
  387. f.updateLocalsFromScanning(fs)
  388. return nil
  389. })
  390. var batchAppend func(protocol.FileInfo, *db.Snapshot)
  391. // Resolve items which are identical with the global state.
  392. if f.localFlags&protocol.FlagLocalReceiveOnly == 0 {
  393. batchAppend = func(fi protocol.FileInfo, _ *db.Snapshot) {
  394. batch.append(fi)
  395. }
  396. } else {
  397. batchAppend = func(fi protocol.FileInfo, snap *db.Snapshot) {
  398. switch gf, ok := snap.GetGlobal(fi.Name); {
  399. case !ok:
  400. case gf.IsEquivalentOptional(fi, f.modTimeWindow, false, false, protocol.FlagLocalReceiveOnly):
  401. // What we have locally is equivalent to the global file.
  402. fi.Version = fi.Version.Merge(gf.Version)
  403. fallthrough
  404. case fi.IsDeleted() && (gf.IsReceiveOnlyChanged() || gf.IsDeleted()):
  405. // Our item is deleted and the global item is our own
  406. // receive only file or deleted too. In the former
  407. // case we can't delete file infos, so we just
  408. // pretend it is a normal deleted file (nobody
  409. // cares about that).
  410. fi.LocalFlags &^= protocol.FlagLocalReceiveOnly
  411. }
  412. batch.append(fi)
  413. }
  414. }
  415. // Schedule a pull after scanning, but only if we actually detected any
  416. // changes.
  417. changes := 0
  418. defer func() {
  419. if changes > 0 {
  420. f.SchedulePull()
  421. }
  422. }()
  423. f.clearScanErrors(subDirs)
  424. alreadyUsed := make(map[string]struct{})
  425. for res := range fchan {
  426. if res.Err != nil {
  427. f.newScanError(res.Path, res.Err)
  428. continue
  429. }
  430. if err := batch.flushIfFull(); err != nil {
  431. return err
  432. }
  433. batchAppend(res.File, snap)
  434. changes++
  435. if f.localFlags&protocol.FlagLocalReceiveOnly == 0 {
  436. if nf, ok := f.findRename(snap, mtimefs, res.File, alreadyUsed); ok {
  437. batchAppend(nf, snap)
  438. changes++
  439. }
  440. }
  441. }
  442. if err := batch.flush(); err != nil {
  443. return err
  444. }
  445. if len(subDirs) == 0 {
  446. // If we have no specific subdirectories to traverse, set it to one
  447. // empty prefix so we traverse the entire folder contents once.
  448. subDirs = []string{""}
  449. }
  450. // Do a scan of the database for each prefix, to check for deleted and
  451. // ignored files.
  452. var toIgnore []db.FileInfoTruncated
  453. ignoredParent := ""
  454. snap.Release()
  455. snap = f.fset.Snapshot()
  456. defer snap.Release()
  457. for _, sub := range subDirs {
  458. var iterError error
  459. snap.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi protocol.FileIntf) bool {
  460. select {
  461. case <-f.ctx.Done():
  462. return false
  463. default:
  464. }
  465. file := fi.(db.FileInfoTruncated)
  466. if err := batch.flushIfFull(); err != nil {
  467. iterError = err
  468. return false
  469. }
  470. if ignoredParent != "" && !fs.IsParent(file.Name, ignoredParent) {
  471. for _, file := range toIgnore {
  472. l.Debugln("marking file as ignored", file)
  473. nf := file.ConvertToIgnoredFileInfo(f.shortID)
  474. batchAppend(nf, snap)
  475. changes++
  476. if err := batch.flushIfFull(); err != nil {
  477. iterError = err
  478. return false
  479. }
  480. }
  481. toIgnore = toIgnore[:0]
  482. ignoredParent = ""
  483. }
  484. switch ignored := f.ignores.Match(file.Name).IsIgnored(); {
  485. case file.IsIgnored() && ignored:
  486. return true
  487. case !file.IsIgnored() && ignored:
  488. // File was not ignored at last pass but has been ignored.
  489. if file.IsDirectory() {
  490. // Delay ignoring as a child might be unignored.
  491. toIgnore = append(toIgnore, file)
  492. if ignoredParent == "" {
  493. // If the parent wasn't ignored already, set
  494. // this path as the "highest" ignored parent
  495. ignoredParent = file.Name
  496. }
  497. return true
  498. }
  499. l.Debugln("marking file as ignored", file)
  500. nf := file.ConvertToIgnoredFileInfo(f.shortID)
  501. batchAppend(nf, snap)
  502. changes++
  503. case file.IsIgnored() && !ignored:
  504. // Successfully scanned items are already un-ignored during
  505. // the scan, so check whether it is deleted.
  506. fallthrough
  507. case !file.IsIgnored() && !file.IsDeleted() && !file.IsUnsupported():
  508. // The file is not ignored, deleted or unsupported. Lets check if
  509. // it's still here. Simply stat:ing it wont do as there are
  510. // tons of corner cases (e.g. parent dir->symlink, missing
  511. // permissions)
  512. if !osutil.IsDeleted(mtimefs, file.Name) {
  513. if ignoredParent != "" {
  514. // Don't ignore parents of this not ignored item
  515. toIgnore = toIgnore[:0]
  516. ignoredParent = ""
  517. }
  518. return true
  519. }
  520. nf := file.ConvertToDeletedFileInfo(f.shortID)
  521. nf.LocalFlags = f.localFlags
  522. if file.ShouldConflict() {
  523. // We do not want to override the global version with
  524. // the deleted file. Setting to an empty version makes
  525. // sure the file gets in sync on the following pull.
  526. nf.Version = protocol.Vector{}
  527. }
  528. l.Debugln("marking file as deleted", nf)
  529. batchAppend(nf, snap)
  530. changes++
  531. case file.IsDeleted() && file.IsReceiveOnlyChanged() && f.localFlags&protocol.FlagLocalReceiveOnly != 0 && len(snap.Availability(file.Name)) == 0:
  532. file.Version = protocol.Vector{}
  533. file.LocalFlags &^= protocol.FlagLocalReceiveOnly
  534. l.Debugln("marking deleted item that doesn't exist anywhere as not receive-only", file)
  535. batchAppend(file.ConvertDeletedToFileInfo(), snap)
  536. changes++
  537. case file.IsDeleted() && file.IsReceiveOnlyChanged() && f.Type != config.FolderTypeReceiveOnly:
  538. // No need to bump the version for a file that was and is
  539. // deleted and just the folder type/local flags changed.
  540. file.LocalFlags &^= protocol.FlagLocalReceiveOnly
  541. l.Debugln("removing receive-only flag on deleted item", file)
  542. batchAppend(file.ConvertDeletedToFileInfo(), snap)
  543. changes++
  544. }
  545. return true
  546. })
  547. select {
  548. case <-f.ctx.Done():
  549. return f.ctx.Err()
  550. default:
  551. }
  552. if iterError == nil && len(toIgnore) > 0 {
  553. for _, file := range toIgnore {
  554. l.Debugln("marking file as ignored", f)
  555. nf := file.ConvertToIgnoredFileInfo(f.shortID)
  556. batchAppend(nf, snap)
  557. changes++
  558. if iterError = batch.flushIfFull(); iterError != nil {
  559. break
  560. }
  561. }
  562. toIgnore = toIgnore[:0]
  563. }
  564. if iterError != nil {
  565. return iterError
  566. }
  567. }
  568. if err := batch.flush(); err != nil {
  569. return err
  570. }
  571. f.ScanCompleted()
  572. return nil
  573. }
// findRename looks for a database entry with the same block hash and size
// as the newly scanned file that no longer exists on disk. If found, it
// returns that entry converted to a deletion, so the pair is recorded as a
// rename rather than an unrelated add and delete.
func (f *folder) findRename(snap *db.Snapshot, mtimefs fs.Filesystem, file protocol.FileInfo, alreadyUsed map[string]struct{}) (protocol.FileInfo, bool) {
	// Empty files can't be meaningfully matched by content.
	if len(file.Blocks) == 0 || file.Size == 0 {
		return protocol.FileInfo{}, false
	}

	found := false
	nf := protocol.FileInfo{}

	snap.WithBlocksHash(file.BlocksHash, func(ifi protocol.FileIntf) bool {
		fi := ifi.(protocol.FileInfo)

		select {
		case <-f.ctx.Done():
			return false
		default:
		}

		// Don't reuse a candidate already claimed by an earlier rename in
		// this scan.
		if _, ok := alreadyUsed[fi.Name]; ok {
			return true
		}

		if fi.ShouldConflict() {
			return true
		}

		if f.ignores.Match(fi.Name).IsIgnored() {
			return true
		}

		// Only check the size.
		// No point checking block equality, as that uses BlocksHash comparison if that is set (which it will be).
		// No point checking BlocksHash comparison as WithBlocksHash already does that.
		if file.Size != fi.Size {
			return true
		}

		// The candidate must actually be gone from disk for this to be a
		// rename.
		if !osutil.IsDeleted(mtimefs, fi.Name) {
			return true
		}

		alreadyUsed[fi.Name] = struct{}{}

		nf = fi
		nf.SetDeleted(f.shortID)
		nf.LocalFlags = f.localFlags
		found = true
		return false
	})

	return nf, found
}
  614. func (f *folder) scanTimerFired() {
  615. err := f.scanSubdirs(nil)
  616. select {
  617. case <-f.initialScanFinished:
  618. default:
  619. status := "Completed"
  620. if err != nil {
  621. status = "Failed"
  622. }
  623. l.Infoln(status, "initial scan of", f.Type.String(), "folder", f.Description())
  624. close(f.initialScanFinished)
  625. }
  626. f.Reschedule()
  627. }
  628. func (f *folder) versionCleanupTimerFired() {
  629. f.setState(FolderCleanWaiting)
  630. defer f.setState(FolderIdle)
  631. if err := f.ioLimiter.takeWithContext(f.ctx, 1); err != nil {
  632. return
  633. }
  634. defer f.ioLimiter.give(1)
  635. f.setState(FolderCleaning)
  636. if err := f.versioner.Clean(f.ctx); err != nil {
  637. l.Infoln("Failed to clean versions in %s: %v", f.Description(), err)
  638. }
  639. f.versionCleanupTimer.Reset(f.versionCleanupInterval)
  640. }
  641. func (f *folder) WatchError() error {
  642. f.watchMut.Lock()
  643. defer f.watchMut.Unlock()
  644. return f.watchErr
  645. }
// stopWatch immediately aborts watching and may be called asynchronously
func (f *folder) stopWatch() {
	f.watchMut.Lock()
	f.watchCancel()
	f.watchMut.Unlock()
	// Watching is now intentionally off; clear the watch error state.
	f.setWatchError(nil, 0)
}
// scheduleWatchRestart makes sure watching is restarted from the main for loop
// in a folder's Serve and thus may be called asynchronously (e.g. when ignores change).
func (f *folder) scheduleWatchRestart() {
	select {
	case f.restartWatchChan <- struct{}{}:
	default:
		// We might be busy doing a pull and thus not reading from this
		// channel. The channel is 1-buffered, so one notification will be
		// queued to ensure we recheck after the pull.
	}
}
// restartWatch should only ever be called synchronously. If you want to use
// this asynchronously, you should probably use scheduleWatchRestart instead.
func (f *folder) restartWatch() {
	f.stopWatch()
	f.startWatch()
	// Scan to catch anything that changed while the watcher was down.
	f.scanSubdirs(nil)
}
// startWatch should only ever be called synchronously. If you want to use
// this asynchronously, you should probably use scheduleWatchRestart instead.
func (f *folder) startWatch() {
	ctx, cancel := context.WithCancel(f.ctx)
	f.watchMut.Lock()
	f.watchChan = make(chan []string)
	f.watchCancel = cancel
	f.watchMut.Unlock()
	// monitorWatch owns the watcher goroutine and handles retries; it is
	// stopped via watchCancel.
	go f.monitorWatch(ctx)
}
  681. // monitorWatch starts the filesystem watching and retries every minute on failure.
  682. // It should not be used except in startWatch.
  683. func (f *folder) monitorWatch(ctx context.Context) {
  684. failTimer := time.NewTimer(0)
  685. aggrCtx, aggrCancel := context.WithCancel(ctx)
  686. var err error
  687. var eventChan <-chan fs.Event
  688. var errChan <-chan error
  689. warnedOutside := false
  690. var lastWatch time.Time
  691. pause := time.Minute
  692. for {
  693. select {
  694. case <-failTimer.C:
  695. eventChan, errChan, err = f.Filesystem().Watch(".", f.ignores, ctx, f.IgnorePerms)
  696. // We do this once per minute initially increased to
  697. // max one hour in case of repeat failures.
  698. f.scanOnWatchErr()
  699. f.setWatchError(err, pause)
  700. if err != nil {
  701. failTimer.Reset(pause)
  702. if pause < 60*time.Minute {
  703. pause *= 2
  704. }
  705. continue
  706. }
  707. lastWatch = time.Now()
  708. watchaggregator.Aggregate(aggrCtx, eventChan, f.watchChan, f.FolderConfiguration, f.model.cfg, f.evLogger)
  709. l.Debugln("Started filesystem watcher for folder", f.Description())
  710. case err = <-errChan:
  711. var next time.Duration
  712. if dur := time.Since(lastWatch); dur > pause {
  713. pause = time.Minute
  714. next = 0
  715. } else {
  716. next = pause - dur
  717. if pause < 60*time.Minute {
  718. pause *= 2
  719. }
  720. }
  721. failTimer.Reset(next)
  722. f.setWatchError(err, next)
  723. // This error was previously a panic and should never occur, so generate
  724. // a warning, but don't do it repetitively.
  725. if !warnedOutside {
  726. if _, ok := err.(*fs.ErrWatchEventOutsideRoot); ok {
  727. l.Warnln(err)
  728. warnedOutside = true
  729. }
  730. }
  731. aggrCancel()
  732. errChan = nil
  733. aggrCtx, aggrCancel = context.WithCancel(ctx)
  734. case <-ctx.Done():
  735. return
  736. }
  737. }
  738. }
  739. // setWatchError sets the current error state of the watch and should be called
  740. // regardless of whether err is nil or not.
  741. func (f *folder) setWatchError(err error, nextTryIn time.Duration) {
  742. f.watchMut.Lock()
  743. prevErr := f.watchErr
  744. f.watchErr = err
  745. f.watchMut.Unlock()
  746. if err != prevErr {
  747. data := map[string]interface{}{
  748. "folder": f.ID,
  749. }
  750. if prevErr != nil {
  751. data["from"] = prevErr.Error()
  752. }
  753. if err != nil {
  754. data["to"] = err.Error()
  755. }
  756. f.evLogger.Log(events.FolderWatchStateChanged, data)
  757. }
  758. if err == nil {
  759. return
  760. }
  761. msg := fmt.Sprintf("Error while trying to start filesystem watcher for folder %s, trying again in %v: %v", f.Description(), nextTryIn, err)
  762. if prevErr != err {
  763. l.Infof(msg)
  764. return
  765. }
  766. l.Debugf(msg)
  767. }
  768. // scanOnWatchErr schedules a full scan immediately if an error occurred while watching.
  769. func (f *folder) scanOnWatchErr() {
  770. f.watchMut.Lock()
  771. err := f.watchErr
  772. f.watchMut.Unlock()
  773. if err != nil {
  774. f.Delay(0)
  775. }
  776. }
// setError records the folder's error state, logging transitions and
// stopping or (re)starting the filesystem watcher accordingly. A nil err
// clears the error.
func (f *folder) setError(err error) {
	select {
	case <-f.ctx.Done():
		// Folder is shutting down; leave state alone.
		return
	default:
	}

	_, _, oldErr := f.getState()
	// No transition: same error message, or both nil.
	if (err != nil && oldErr != nil && oldErr.Error() == err.Error()) || (err == nil && oldErr == nil) {
		return
	}

	if err != nil {
		if oldErr == nil {
			l.Warnf("Error on folder %s: %v", f.Description(), err)
		} else {
			l.Infof("Error on folder %s changed: %q -> %q", f.Description(), oldErr, err)
		}
	} else {
		l.Infoln("Cleared error on folder", f.Description())
	}

	// Watching is pointless while the folder is in error; resume it when
	// the error clears.
	if f.FSWatcherEnabled {
		if err != nil {
			f.stopWatch()
		} else {
			f.scheduleWatchRestart()
		}
	}

	f.stateTracker.setError(err)
}
  805. func (f *folder) pullBasePause() time.Duration {
  806. if f.PullerPauseS == 0 {
  807. return defaultPullerPause
  808. }
  809. return time.Duration(f.PullerPauseS) * time.Second
  810. }
  811. func (f *folder) String() string {
  812. return fmt.Sprintf("%s/%s@%p", f.Type, f.folderID, f)
  813. }
  814. func (f *folder) newScanError(path string, err error) {
  815. f.scanErrorsMut.Lock()
  816. l.Infof("Scanner (folder %s, item %q): %v", f.Description(), path, err)
  817. f.scanErrors = append(f.scanErrors, FileError{
  818. Err: err.Error(),
  819. Path: path,
  820. })
  821. f.scanErrorsMut.Unlock()
  822. }
  823. func (f *folder) clearScanErrors(subDirs []string) {
  824. f.scanErrorsMut.Lock()
  825. defer f.scanErrorsMut.Unlock()
  826. if len(subDirs) == 0 {
  827. f.scanErrors = nil
  828. return
  829. }
  830. filtered := f.scanErrors[:0]
  831. outer:
  832. for _, fe := range f.scanErrors {
  833. for _, sub := range subDirs {
  834. if fe.Path == sub || fs.IsParent(fe.Path, sub) {
  835. continue outer
  836. }
  837. }
  838. filtered = append(filtered, fe)
  839. }
  840. f.scanErrors = filtered
  841. }
  842. func (f *folder) Errors() []FileError {
  843. f.scanErrorsMut.Lock()
  844. defer f.scanErrorsMut.Unlock()
  845. return append([]FileError{}, f.scanErrors...)
  846. }
  847. // ScheduleForceRescan marks the file such that it gets rehashed on next scan, and schedules a scan.
  848. func (f *folder) ScheduleForceRescan(path string) {
  849. f.forcedRescanPathsMut.Lock()
  850. f.forcedRescanPaths[path] = struct{}{}
  851. f.forcedRescanPathsMut.Unlock()
  852. select {
  853. case f.forcedRescanRequested <- struct{}{}:
  854. default:
  855. }
  856. }
  857. func (f *folder) updateLocalsFromScanning(fs []protocol.FileInfo) {
  858. f.updateLocals(fs)
  859. f.emitDiskChangeEvents(fs, events.LocalChangeDetected)
  860. }
  861. func (f *folder) updateLocalsFromPulling(fs []protocol.FileInfo) {
  862. f.updateLocals(fs)
  863. f.emitDiskChangeEvents(fs, events.RemoteChangeDetected)
  864. }
  865. func (f *folder) updateLocals(fs []protocol.FileInfo) {
  866. f.fset.Update(protocol.LocalDeviceID, fs)
  867. filenames := make([]string, len(fs))
  868. f.forcedRescanPathsMut.Lock()
  869. for i, file := range fs {
  870. filenames[i] = file.Name
  871. // No need to rescan a file that was changed since anyway.
  872. delete(f.forcedRescanPaths, file.Name)
  873. }
  874. f.forcedRescanPathsMut.Unlock()
  875. f.evLogger.Log(events.LocalIndexUpdated, map[string]interface{}{
  876. "folder": f.ID,
  877. "items": len(fs),
  878. "filenames": filenames,
  879. "version": f.fset.Sequence(protocol.LocalDeviceID),
  880. })
  881. }
  882. func (f *folder) emitDiskChangeEvents(fs []protocol.FileInfo, typeOfEvent events.EventType) {
  883. for _, file := range fs {
  884. if file.IsInvalid() {
  885. continue
  886. }
  887. objType := "file"
  888. action := "modified"
  889. if file.IsDeleted() {
  890. action = "deleted"
  891. }
  892. if file.IsSymlink() {
  893. objType = "symlink"
  894. } else if file.IsDirectory() {
  895. objType = "dir"
  896. }
  897. // Two different events can be fired here based on what EventType is passed into function
  898. f.evLogger.Log(typeOfEvent, map[string]string{
  899. "folder": f.ID,
  900. "folderID": f.ID, // incorrect, deprecated, kept for historical compliance
  901. "label": f.Label,
  902. "action": action,
  903. "type": objType,
  904. "path": filepath.FromSlash(file.Name),
  905. "modifiedBy": file.ModifiedBy.String(),
  906. })
  907. }
  908. }
  909. func (f *folder) handleForcedRescans() {
  910. f.forcedRescanPathsMut.Lock()
  911. paths := make([]string, 0, len(f.forcedRescanPaths))
  912. for path := range f.forcedRescanPaths {
  913. paths = append(paths, path)
  914. }
  915. f.forcedRescanPaths = make(map[string]struct{})
  916. f.forcedRescanPathsMut.Unlock()
  917. if len(paths) == 0 {
  918. return
  919. }
  920. batch := newFileInfoBatch(func(fs []protocol.FileInfo) error {
  921. f.fset.Update(protocol.LocalDeviceID, fs)
  922. return nil
  923. })
  924. snap := f.fset.Snapshot()
  925. for _, path := range paths {
  926. _ = batch.flushIfFull()
  927. fi, ok := snap.Get(protocol.LocalDeviceID, path)
  928. if !ok {
  929. continue
  930. }
  931. fi.SetMustRescan(f.shortID)
  932. batch.append(fi)
  933. }
  934. snap.Release()
  935. _ = batch.flush()
  936. _ = f.scanSubdirs(paths)
  937. }
// unifySubs cleans up a list of scan targets: it sorts them, removes entries
// that are equal to or contained within an earlier entry, and replaces each
// path by its closest existing ancestor. A nil return means "scan everything".
// Note that the input slice is modified in place.
// The exists function is expected to return true for all known paths
// (excluding "" and ".")
func unifySubs(dirs []string, exists func(dir string) bool) []string {
	if len(dirs) == 0 {
		return nil
	}
	sort.Strings(dirs)
	// The root itself sorts first; requesting it means a full scan (nil).
	if dirs[0] == "" || dirs[0] == "." || dirs[0] == string(fs.PathSeparator) {
		return nil
	}
	prev := "./" // Anything that can't be parent of a clean path
	for i := 0; i < len(dirs); {
		dir, err := fs.Canonicalize(dirs[i])
		if err != nil {
			l.Debugf("Skipping %v for scan: %s", dirs[i], err)
			dirs = append(dirs[:i], dirs[i+1:]...)
			continue
		}
		// Drop the entry when it duplicates, or lies under, the previously
		// kept one (sorted order guarantees parents come first).
		if dir == prev || fs.IsParent(dir, prev) {
			dirs = append(dirs[:i], dirs[i+1:]...)
			continue
		}
		// Walk up until we find an ancestor that actually exists, so the
		// scan covers the deepest missing directory as well.
		parent := filepath.Dir(dir)
		for parent != "." && parent != string(fs.PathSeparator) && !exists(parent) {
			dir = parent
			parent = filepath.Dir(dir)
		}
		dirs[i] = dir
		prev = dir
		i++
	}
	return dirs
}
// cFiler wraps a db.Snapshot so it can serve as a scanner.CurrentFiler.
type cFiler struct {
	*db.Snapshot
}
  974. // Implements scanner.CurrentFiler
  975. func (cf cFiler) CurrentFile(file string) (protocol.FileInfo, bool) {
  976. return cf.Get(protocol.LocalDeviceID, file)
  977. }