aggregator.go

// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package watchaggregator

import (
    "context"
    "fmt"
    "path/filepath"
    "strings"
    "time"

    "github.com/syncthing/syncthing/lib/config"
    "github.com/syncthing/syncthing/lib/events"
    "github.com/syncthing/syncthing/lib/fs"
)
// Not meant to be changed, but must be changeable for tests
var (
    maxFiles       = 512
    maxFilesPerDir = 128
)
// aggregatedEvent represents potentially multiple events at and/or recursively
// below one path until it times out and a scan is scheduled.
// If it represents multiple events and there are events of both Remove and
// NonRemove types, the evType attribute is Mixed (as returned by fs.Event.Merge).
type aggregatedEvent struct {
    firstModTime time.Time
    lastModTime  time.Time
    evType       fs.EventType
}
// Stores pointers to both aggregated events directly within this directory and
// child directories recursively containing aggregated events themselves.
type eventDir struct {
    events map[string]*aggregatedEvent
    dirs   map[string]*eventDir
}
func newEventDir() *eventDir {
    return &eventDir{
        events: make(map[string]*aggregatedEvent),
        dirs:   make(map[string]*eventDir),
    }
}
func (dir *eventDir) childCount() int {
    return len(dir.events) + len(dir.dirs)
}
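// firstModTime returns the time of the earliest modification tracked in this
// directory or in any directory below it.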
func (dir *eventDir) firstModTime() time.Time {
    if dir.childCount() == 0 {
        panic("bug: firstModTime must not be used on empty eventDir")
    }
    firstModTime := time.Now()
    for _, childDir := range dir.dirs {
        dirTime := childDir.firstModTime()
        if dirTime.Before(firstModTime) {
            firstModTime = dirTime
        }
    }
    for _, event := range dir.events {
        if event.firstModTime.Before(firstModTime) {
            firstModTime = event.firstModTime
        }
    }
    return firstModTime
}
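// eventType returns the merged type of all events tracked in this directory
// and below, short-circuiting as soon as the result is Mixed.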
func (dir *eventDir) eventType() fs.EventType {
    if dir.childCount() == 0 {
        panic("bug: eventType must not be used on empty eventDir")
    }
    var evType fs.EventType
    for _, childDir := range dir.dirs {
        evType |= childDir.eventType()
        if evType == fs.Mixed {
            return fs.Mixed
        }
    }
    for _, event := range dir.events {
        evType |= event.evType
        if evType == fs.Mixed {
            return fs.Mixed
        }
    }
    return evType
}
type aggregator struct {
    // folderID never changes and is accessed in CommitConfiguration, which
    // asynchronously updates folderCfg -> can't use folderCfg.ID (racy)
    folderID        string
    folderCfg       config.FolderConfiguration
    folderCfgUpdate chan config.FolderConfiguration
    // Time after which an event is scheduled for scanning when no modifications occur.
    notifyDelay time.Duration
    // Time after which an event is scheduled for scanning even though modifications occur.
    notifyTimeout         time.Duration
    notifyTimer           *time.Timer
    notifyTimerNeedsReset bool
    notifyTimerResetChan  chan time.Duration
    counts                map[fs.EventType]int
    root                  *eventDir
    ctx                   context.Context
}
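// newAggregator creates an aggregator for the given folder. The passed context
// is used to stop the aggregator's main loop and abort pending notifications.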
func newAggregator(folderCfg config.FolderConfiguration, ctx context.Context) *aggregator {
    a := &aggregator{
        folderID:              folderCfg.ID,
        folderCfgUpdate:       make(chan config.FolderConfiguration),
        notifyTimerNeedsReset: false,
        notifyTimerResetChan:  make(chan time.Duration),
        counts:                make(map[fs.EventType]int),
        root:                  newEventDir(),
        ctx:                   ctx,
    }
    a.updateConfig(folderCfg)
    return a
}
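// Aggregate starts aggregating the filesystem events received on in for the
// given folder, sending batches of paths to be scanned on out.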
func Aggregate(in <-chan fs.Event, out chan<- []string, folderCfg config.FolderConfiguration, cfg config.Wrapper, evLogger events.Logger, ctx context.Context) {
    a := newAggregator(folderCfg, ctx)
    // Necessary for unit tests where the backend is mocked
    go a.mainLoop(in, out, cfg, evLogger)
}
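// mainLoop is the single goroutine that owns all aggregator state: it handles
// incoming filesystem events, in-progress item updates, timer expiry,
// configuration updates and shutdown.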
func (a *aggregator) mainLoop(in <-chan fs.Event, out chan<- []string, cfg config.Wrapper, evLogger events.Logger) {
    a.notifyTimer = time.NewTimer(a.notifyDelay)
    defer a.notifyTimer.Stop()
    inProgressItemSubscription := evLogger.Subscribe(events.ItemStarted | events.ItemFinished)
    defer inProgressItemSubscription.Unsubscribe()
    cfg.Subscribe(a)
    defer cfg.Unsubscribe(a)
    inProgress := make(map[string]struct{})
    for {
        select {
        case event := <-in:
            a.newEvent(event, inProgress)
        case event := <-inProgressItemSubscription.C():
            updateInProgressSet(event, inProgress)
        case <-a.notifyTimer.C:
            a.actOnTimer(out)
        case interval := <-a.notifyTimerResetChan:
            a.resetNotifyTimer(interval)
        case folderCfg := <-a.folderCfgUpdate:
            a.updateConfig(folderCfg)
        case <-a.ctx.Done():
            l.Debugln(a, "Stopped")
            return
        }
    }
}
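// newEvent drops events for paths that Syncthing is currently modifying itself
// and events that are redundant because a scan of the entire folder is already
// pending; everything else is aggregated.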
func (a *aggregator) newEvent(event fs.Event, inProgress map[string]struct{}) {
    if _, ok := a.root.events["."]; ok {
        l.Debugln(a, "Will scan entire folder anyway; dropping:", event.Name)
        return
    }
    if _, ok := inProgress[event.Name]; ok {
        l.Debugln(a, "Skipping path we modified:", event.Name)
        return
    }
    a.aggregateEvent(event, time.Now())
}
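// aggregateEvent adds the event to the tree of tracked events. When the total
// number of events reaches maxFiles, the entire folder is scheduled for
// scanning; when a directory would exceed its per-directory limit, the
// directory itself is tracked instead of its children.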
func (a *aggregator) aggregateEvent(event fs.Event, evTime time.Time) {
    if event.Name == "." || a.eventCount() == maxFiles {
        l.Debugln(a, "Scan entire folder")
        firstModTime := evTime
        if a.root.childCount() != 0 {
            event.Type = event.Type.Merge(a.root.eventType())
            firstModTime = a.root.firstModTime()
        }
        a.root.dirs = make(map[string]*eventDir)
        a.root.events = make(map[string]*aggregatedEvent)
        a.root.events["."] = &aggregatedEvent{
            firstModTime: firstModTime,
            lastModTime:  evTime,
            evType:       event.Type,
        }
        a.counts = make(map[fs.EventType]int)
        a.counts[event.Type]++
        a.resetNotifyTimerIfNeeded()
        return
    }
    parentDir := a.root
    // Check if any parent directory is already tracked or will exceed
    // events per directory limit bottom up
    pathSegments := strings.Split(filepath.ToSlash(event.Name), "/")
    // As root dir cannot be further aggregated, allow up to maxFiles
    // children.
    localMaxFilesPerDir := maxFiles
    var currPath string
    for i, name := range pathSegments[:len(pathSegments)-1] {
        currPath = filepath.Join(currPath, name)
        if ev, ok := parentDir.events[name]; ok {
            ev.lastModTime = evTime
            if merged := event.Type.Merge(ev.evType); ev.evType != merged {
                a.counts[ev.evType]--
                ev.evType = merged
                a.counts[ev.evType]++
            }
            l.Debugf("%v Parent %s (type %s) already tracked: %s", a, currPath, ev.evType, event.Name)
            return
        }
        if parentDir.childCount() == localMaxFilesPerDir {
            l.Debugf("%v Parent dir %s already has %d children, tracking it instead: %s", a, currPath, localMaxFilesPerDir, event.Name)
            event.Name = filepath.Dir(currPath)
            a.aggregateEvent(event, evTime)
            return
        }
        // If there are no events below path, but we need to recurse
        // into that path, create eventDir at path.
        if newParent, ok := parentDir.dirs[name]; ok {
            parentDir = newParent
        } else {
            l.Debugln(a, "Creating eventDir at:", currPath)
            newParent = newEventDir()
            parentDir.dirs[name] = newParent
            parentDir = newParent
        }
        // Reset allowed children count to maxFilesPerDir for non-root
        if i == 0 {
            localMaxFilesPerDir = maxFilesPerDir
        }
    }
    name := pathSegments[len(pathSegments)-1]
    if ev, ok := parentDir.events[name]; ok {
        ev.lastModTime = evTime
        if merged := event.Type.Merge(ev.evType); ev.evType != merged {
            a.counts[ev.evType]--
            ev.evType = merged
            a.counts[ev.evType]++
        }
        l.Debugf("%v Already tracked (type %v): %s", a, ev.evType, event.Name)
        return
    }
    childDir, ok := parentDir.dirs[name]
    // If a dir existed at path, it would be removed from dirs, thus
    // childCount would not increase.
    if !ok && parentDir.childCount() == localMaxFilesPerDir {
        l.Debugf("%v Parent dir already has %d children, tracking it instead: %s", a, localMaxFilesPerDir, event.Name)
        event.Name = filepath.Dir(event.Name)
        a.aggregateEvent(event, evTime)
        return
    }
    firstModTime := evTime
    if ok {
        firstModTime = childDir.firstModTime()
        if merged := event.Type.Merge(childDir.eventType()); event.Type != merged {
            a.counts[event.Type]--
            event.Type = merged
        }
        delete(parentDir.dirs, name)
    }
    l.Debugf("%v Tracking (type %v): %s", a, event.Type, event.Name)
    parentDir.events[name] = &aggregatedEvent{
        firstModTime: firstModTime,
        lastModTime:  evTime,
        evType:       event.Type,
    }
    a.counts[event.Type]++
    a.resetNotifyTimerIfNeeded()
}
func (a *aggregator) resetNotifyTimerIfNeeded() {
    if a.notifyTimerNeedsReset {
        a.resetNotifyTimer(a.notifyDelay)
    }
}
// resetNotifyTimer should only ever be called when notifyTimer has stopped
// and notifyTimer.C been read from. Otherwise, call resetNotifyTimerIfNeeded.
func (a *aggregator) resetNotifyTimer(duration time.Duration) {
    l.Debugln(a, "Resetting notifyTimer to", duration.String())
    a.notifyTimerNeedsReset = false
    a.notifyTimer.Reset(duration)
}
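// actOnTimer collects all events that are due for scanning and hands them off
// to notify in a separate goroutine; if no events are tracked at all, the
// timer is only reset once a new event arrives.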
func (a *aggregator) actOnTimer(out chan<- []string) {
    c := a.eventCount()
    if c == 0 {
        l.Debugln(a, "No tracked events, waiting for new event.")
        a.notifyTimerNeedsReset = true
        return
    }
    oldEvents := make(map[string]*aggregatedEvent, c)
    a.popOldEventsTo(oldEvents, a.root, ".", time.Now(), true)
    if a.notifyDelay != a.notifyTimeout && a.counts[fs.NonRemove] == 0 && a.counts[fs.Remove]+a.counts[fs.Mixed] != 0 {
        // Only delayed events remaining, no need to delay them additionally
        a.popOldEventsTo(oldEvents, a.root, ".", time.Now(), false)
    }
    if len(oldEvents) == 0 {
        l.Debugln(a, "No old fs events")
        a.resetNotifyTimer(a.notifyDelay)
        return
    }
    // Sending to channel might block for a long time, but we need to keep
    // reading from notify backend channel to avoid overflow
    go a.notify(oldEvents, out)
}
// notify schedules a scan for the given events, dispatching removals last, and
// resets the notification timer afterwards to set up the next scan scheduling.
func (a *aggregator) notify(oldEvents map[string]*aggregatedEvent, out chan<- []string) {
    timeBeforeSending := time.Now()
    l.Debugf("%v Notifying about %d fs events", a, len(oldEvents))
    separatedBatches := make(map[fs.EventType][]string)
    for path, event := range oldEvents {
        separatedBatches[event.evType] = append(separatedBatches[event.evType], path)
    }
    for _, evType := range [3]fs.EventType{fs.NonRemove, fs.Mixed, fs.Remove} {
        currBatch := separatedBatches[evType]
        if len(currBatch) != 0 {
            select {
            case out <- currBatch:
            case <-a.ctx.Done():
                return
            }
        }
    }
    // If sending to channel blocked for a long time,
    // shorten next notifyDelay accordingly.
    duration := time.Since(timeBeforeSending)
    buffer := time.Millisecond
    var nextDelay time.Duration
    switch {
    case duration < a.notifyDelay/10:
        nextDelay = a.notifyDelay
    case duration+buffer > a.notifyDelay:
        nextDelay = buffer
    default:
        nextDelay = a.notifyDelay - duration
    }
    select {
    case a.notifyTimerResetChan <- nextDelay:
    case <-a.ctx.Done():
    }
}
// popOldEventsTo finds events that should be scheduled for scanning recursively
// in dir, removes those events and empty eventDirs, and collects the removed
// events in to, referenced by their filesystem path.
func (a *aggregator) popOldEventsTo(to map[string]*aggregatedEvent, dir *eventDir, dirPath string, currTime time.Time, delayRem bool) {
    for childName, childDir := range dir.dirs {
        a.popOldEventsTo(to, childDir, filepath.Join(dirPath, childName), currTime, delayRem)
        if childDir.childCount() == 0 {
            delete(dir.dirs, childName)
        }
    }
    for name, event := range dir.events {
        if a.isOld(event, currTime, delayRem) {
            to[filepath.Join(dirPath, name)] = event
            delete(dir.events, name)
            a.counts[event.evType]--
        }
    }
}
func (a *aggregator) isOld(ev *aggregatedEvent, currTime time.Time, delayRem bool) bool {
    // Deletes should in general be scanned last, therefore they are delayed by
    // letting them time out. This behaviour is overridden by delayRem == false.
    // Refer to the following comments as to why.
    // An event that has not registered any new modifications recently is scanned.
    // a.notifyDelay is the user facing value signifying the normal delay between
    // picking up a modification and scanning it. As scheduling scans happens at
    // regular intervals of a.notifyDelay the delay of a single event is not exactly
    // a.notifyDelay, but lies in the range of 0.5 to 1.5 times a.notifyDelay.
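    // For example, with a 10s delay an event becomes old once it has been
    // quiet for at least 5s, so depending on timer phase it is picked up
    // between 5s and 15s after its last modification.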
    if (!delayRem || ev.evType == fs.NonRemove) && 2*currTime.Sub(ev.lastModTime) > a.notifyDelay {
        return true
    }
    // When an event registers repeat modifications or involves removals it
    // is delayed to reduce resource usage, but after a certain time (notifyTimeout)
    // has passed it is scanned anyway.
    // If only removals remain to be scanned, there is no point in delaying
    // them further, so this behaviour is overridden by delayRem == false.
    return currTime.Sub(ev.firstModTime) > a.notifyTimeout
}
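// eventCount returns the total number of tracked aggregated events across all
// event types.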
func (a *aggregator) eventCount() int {
    c := 0
    for _, v := range a.counts {
        c += v
    }
    return c
}
func (a *aggregator) String() string {
    return fmt.Sprintf("aggregator/%s:", a.folderCfg.Description())
}
func (a *aggregator) VerifyConfiguration(from, to config.Configuration) error {
    return nil
}
func (a *aggregator) CommitConfiguration(from, to config.Configuration) bool {
    for _, folderCfg := range to.Folders {
        if folderCfg.ID == a.folderID {
            select {
            case a.folderCfgUpdate <- folderCfg:
            case <-a.ctx.Done():
            }
            return true
        }
    }
    // Nothing to do, model will soon stop this
    return true
}
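// updateConfig applies the folder's watcher delay (FSWatcherDelayS) as the
// notification delay and derives the matching timeout from it.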
func (a *aggregator) updateConfig(folderCfg config.FolderConfiguration) {
    a.notifyDelay = time.Duration(folderCfg.FSWatcherDelayS) * time.Second
    a.notifyTimeout = notifyTimeout(folderCfg.FSWatcherDelayS)
    a.folderCfg = folderCfg
}
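// updateInProgressSet tracks items that Syncthing is currently syncing itself,
// so that the filesystem events caused by those operations can be skipped.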
func updateInProgressSet(event events.Event, inProgress map[string]struct{}) {
    if event.Type == events.ItemStarted {
        path := event.Data.(map[string]string)["item"]
        inProgress[path] = struct{}{}
    } else if event.Type == events.ItemFinished {
        path := event.Data.(map[string]interface{})["item"].(string)
        delete(inProgress, path)
    }
}
// Events that involve removals or continuously receive new modifications are
// delayed but must time out at some point. The following numbers come out of thin
// air; they were just considered a sensible compromise between fast updates and
// saving resources. For short delays the timeout is 6 times the delay, capped at 1
// minute. For delays longer than 1 minute, the delay and timeout are equal.
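// For example, a configured delay of 1s yields a 6s timeout, 10s yields 1m,
// and 2m yields a 2m timeout.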
func notifyTimeout(eventDelayS int) time.Duration {
    shortDelayS := 10
    shortDelayMultiplicator := 6
    longDelayS := 60
    longDelayTimeout := time.Duration(1) * time.Minute
    if eventDelayS < shortDelayS {
        return time.Duration(eventDelayS*shortDelayMultiplicator) * time.Second
    }
    if eventDelayS < longDelayS {
        return longDelayTimeout
    }
    return time.Duration(eventDelayS) * time.Second
}