indexsender.go

// Copyright (C) 2020 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package model

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/thejerf/suture/v4"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/util"
)
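
// indexSender serves the local index of one folder, and any subsequent
// updates to it, to one remote device over a single connection.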
type indexSender struct {
	conn                     protocol.Connection
	folder                   string
	folderIsReceiveEncrypted bool
	dev                      string
	fset                     *db.FileSet
	prevSequence             int64
	evLogger                 events.Logger
	connClosed               chan struct{}
	token                    suture.ServiceToken
	pauseChan                chan struct{}
	resumeChan               chan *db.FileSet
}

func (s *indexSender) Serve(ctx context.Context) (err error) {
	l.Debugf("Starting indexSender for %s to %s at %s (slv=%d)", s.folder, s.conn.ID(), s.conn, s.prevSequence)
	defer func() {
		err = util.NoRestartErr(err)
		l.Debugf("Exiting indexSender for %s to %s at %s: %v", s.folder, s.conn.ID(), s.conn, err)
	}()

	// We need to send one index, regardless of whether there is something to send or not
	err = s.sendIndexTo(ctx)

	// Subscribe to LocalIndexUpdated (we have new information to send) and
	// DeviceDisconnected (it might be us who disconnected, so we should
	// exit).
	sub := s.evLogger.Subscribe(events.LocalIndexUpdated | events.DeviceDisconnected)
	defer sub.Unsubscribe()

	paused := false
	evChan := sub.C()
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()

	for err == nil {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-s.connClosed:
			return nil
		default:
		}

		// While we have sent a sequence at least equal to the one
		// currently in the database, wait for the local index to update. The
		// local index may update for other folders than the one we are
		// sending for.
		if s.fset.Sequence(protocol.LocalDeviceID) <= s.prevSequence {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-s.connClosed:
				return nil
			case <-evChan:
			case <-ticker.C:
			case <-s.pauseChan:
				paused = true
			case s.fset = <-s.resumeChan:
				paused = false
			}
			continue
		}

		if !paused {
			err = s.sendIndexTo(ctx)
		}

		// Wait a short amount of time before entering the next loop. If there
		// are continuous changes happening to the local index, this gives us
		// time to batch them up a little.
		time.Sleep(250 * time.Millisecond)
	}

	return err
}
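
// resume hands a (possibly new) FileSet to the running index sender, unless
// the connection has been closed in the meantime.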
func (s *indexSender) resume(fset *db.FileSet) {
	select {
	case <-s.connClosed:
	case s.resumeChan <- fset:
	}
}
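
// pause stops the index sender from sending further updates until resume is
// called, unless the connection has been closed in the meantime.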
func (s *indexSender) pause() {
	select {
	case <-s.connClosed:
	case s.pauseChan <- struct{}{}:
	}
}

// sendIndexTo sends file infos with a sequence number higher than prevSequence
// and updates prevSequence to the highest sent sequence number.
func (s *indexSender) sendIndexTo(ctx context.Context) error {
	initial := s.prevSequence == 0
	batch := newFileInfoBatch(nil)
	batch.flushFn = func(fs []protocol.FileInfo) error {
		l.Debugf("%v: Sending %d files (<%d bytes)", s, len(batch.infos), batch.size)
		if initial {
			initial = false
			return s.conn.Index(ctx, s.folder, fs)
		}
		return s.conn.IndexUpdate(ctx, s.folder, fs)
	}

	var err error
	var f protocol.FileInfo
	snap := s.fset.Snapshot()
	defer snap.Release()
	previousWasDelete := false
	snap.WithHaveSequence(s.prevSequence+1, func(fi protocol.FileIntf) bool {
		// This is to make sure that renames (which are an add followed by a delete) land in the same batch.
		// Even if the batch is full, we allow a last delete to slip in: we only flush a full batch if the
		// next item is not a delete, or if the last item in the batch is already a delete.
		if batch.full() && (!fi.IsDeleted() || previousWasDelete) {
			if err = batch.flush(); err != nil {
				return false
			}
		}

		if shouldDebug() {
			if fi.SequenceNo() < s.prevSequence+1 {
				panic(fmt.Sprintln("sequence lower than requested, got:", fi.SequenceNo(), ", asked to start at:", s.prevSequence+1))
			}
		}

		if f.Sequence > 0 && fi.SequenceNo() <= f.Sequence {
			l.Warnln("Non-increasing sequence detected: Checking and repairing the db...")
			// Abort this round of index sending - the next one will pick
			// up from the last successful one with the repaired db.
			defer func() {
				if fixed, dbErr := s.fset.RepairSequence(); dbErr != nil {
					l.Warnln("Failed repairing sequence entries:", dbErr)
					panic("Failed repairing sequence entries")
				} else {
					s.evLogger.Log(events.Failure, "detected and repaired non-increasing sequence")
					l.Infof("Repaired %v sequence entries in database", fixed)
				}
			}()
			return false
		}

		f = fi.(protocol.FileInfo)

		// If this is a folder receiving encrypted files only, we
		// mustn't ever send locally changed file infos. Those aren't
		// encrypted and thus would be a protocol error at the remote.
		if s.folderIsReceiveEncrypted && fi.IsReceiveOnlyChanged() {
			return true
		}

		// Mark the file as invalid if any of the local bad stuff flags are set.
		f.RawInvalid = f.IsInvalid()

		// If the file is marked LocalReceive (i.e., changed locally on a
		// receive only folder) we do not want it to ever become the
		// globally best version, invalid or not.
		if f.IsReceiveOnlyChanged() {
			f.Version = protocol.Vector{}
		}

		// The local flags and version hash are never sent externally.
		f.LocalFlags = 0
		f.VersionHash = nil

		previousWasDelete = f.IsDeleted()

		batch.append(f)
		return true
	})
	if err != nil {
		return err
	}

	err = batch.flush()

	// True if there was nothing to be sent
	if f.Sequence == 0 {
		return err
	}

	s.prevSequence = f.Sequence
	return err
}

func (s *indexSender) String() string {
	return fmt.Sprintf("indexSender@%p for %s to %s at %s", s, s.folder, s.conn.ID(), s.conn)
}
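
// indexSenderRegistry tracks the index senders, running and pending, for all
// folders shared with a single remote device over one connection.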
type indexSenderRegistry struct {
	deviceID     protocol.DeviceID
	sup          *suture.Supervisor
	evLogger     events.Logger
	conn         protocol.Connection
	closed       chan struct{}
	indexSenders map[string]*indexSender
	startInfos   map[string]*indexSenderStartInfo
	mut          sync.Mutex
}
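
// newIndexSenderRegistry returns a registry for index senders on the given
// connection. The closed channel must be closed when the connection closes;
// this stops the senders and unblocks pending pause and resume calls.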
func newIndexSenderRegistry(conn protocol.Connection, closed chan struct{}, sup *suture.Supervisor, evLogger events.Logger) *indexSenderRegistry {
	return &indexSenderRegistry{
		deviceID:     conn.ID(),
		conn:         conn,
		closed:       closed,
		sup:          sup,
		evLogger:     evLogger,
		indexSenders: make(map[string]*indexSender),
		startInfos:   make(map[string]*indexSenderStartInfo),
		mut:          sync.Mutex{},
	}
}

// add starts an index sender for the given folder.
// If an index sender is already running, it will be stopped first.
func (r *indexSenderRegistry) add(folder config.FolderConfiguration, fset *db.FileSet, startInfo *indexSenderStartInfo) {
	r.mut.Lock()
	r.addLocked(folder, fset, startInfo)
	r.mut.Unlock()
}
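
// addLocked does the work of add; the caller must hold r.mut.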
func (r *indexSenderRegistry) addLocked(folder config.FolderConfiguration, fset *db.FileSet, startInfo *indexSenderStartInfo) {
	myIndexID := fset.IndexID(protocol.LocalDeviceID)
	mySequence := fset.Sequence(protocol.LocalDeviceID)
	var startSequence int64

	// This is the other side's description of what it knows
	// about us. Let's check to see if we can start sending index
	// updates directly or need to send the index from start...
	if startInfo.local.IndexID == myIndexID {
		// They say they've seen our index ID before, so we can
		// send a delta update only.
		if startInfo.local.MaxSequence > mySequence {
			// Safety check. They claim to have more or newer
			// index data than we have - either we have lost
			// index data, or reset the index without resetting
			// the IndexID, or something else weird has
			// happened. We send a full index to reset the
			// situation.
			l.Infof("Device %v folder %s is delta index compatible, but seems out of sync with reality", r.deviceID, folder.Description())
			startSequence = 0
		} else {
			l.Debugf("Device %v folder %s is delta index compatible (mlv=%d)", r.deviceID, folder.Description(), startInfo.local.MaxSequence)
			startSequence = startInfo.local.MaxSequence
		}
	} else if startInfo.local.IndexID != 0 {
		// They say they've seen an index ID from us, but it's
		// not the right one. Either they are confused or we
		// must have reset our database since last talking to
		// them. We'll start with a full index transfer.
		l.Infof("Device %v folder %s has mismatching index ID for us (%v != %v)", r.deviceID, folder.Description(), startInfo.local.IndexID, myIndexID)
		startSequence = 0
	} else {
		l.Debugf("Device %v folder %s has no index ID for us", r.deviceID, folder.Description())
	}

	// This is the other side's description of themselves. We
	// check to see that it matches the IndexID we have on file,
	// otherwise we drop our old index data and expect to get a
	// completely new set.
	theirIndexID := fset.IndexID(r.deviceID)
	if startInfo.remote.IndexID == 0 {
		// They're not announcing an index ID. This means they
		// do not support delta indexes and we should clear any
		// information we have from them before accepting their
		// index, which will presumably be a full index.
		l.Debugf("Device %v folder %s does not announce an index ID", r.deviceID, folder.Description())
		fset.Drop(r.deviceID)
	} else if startInfo.remote.IndexID != theirIndexID {
		// The index ID we have on file is not what they're
		// announcing. They must have reset their database and
		// will probably send us a full index. We drop any
		// information we have and remember this new index ID
		// instead.
		l.Infof("Device %v folder %s has a new index ID (%v)", r.deviceID, folder.Description(), startInfo.remote.IndexID)
		fset.Drop(r.deviceID)
		fset.SetIndexID(r.deviceID, startInfo.remote.IndexID)
	}

	if is, ok := r.indexSenders[folder.ID]; ok {
		r.sup.RemoveAndWait(is.token, 0)
		delete(r.indexSenders, folder.ID)
	}
	if _, ok := r.startInfos[folder.ID]; ok {
		delete(r.startInfos, folder.ID)
	}

	is := &indexSender{
		conn:                     r.conn,
		connClosed:               r.closed,
		folder:                   folder.ID,
		folderIsReceiveEncrypted: folder.Type == config.FolderTypeReceiveEncrypted,
		fset:                     fset,
		prevSequence:             startSequence,
		evLogger:                 r.evLogger,
		pauseChan:                make(chan struct{}),
		resumeChan:               make(chan *db.FileSet),
	}
	is.token = r.sup.Add(is)
	r.indexSenders[folder.ID] = is
}

// addPending stores the given info to start an index sender once resume is called
// for this folder.
// If an index sender is already running, it will be stopped.
func (r *indexSenderRegistry) addPending(folder config.FolderConfiguration, startInfo *indexSenderStartInfo) {
	r.mut.Lock()
	defer r.mut.Unlock()

	if is, ok := r.indexSenders[folder.ID]; ok {
		r.sup.RemoveAndWait(is.token, 0)
		delete(r.indexSenders, folder.ID)
	}
	r.startInfos[folder.ID] = startInfo
}

// remove stops a running index sender or removes one pending to be started.
// It is a noop if the folder isn't known.
func (r *indexSenderRegistry) remove(folder string) {
	r.mut.Lock()
	defer r.mut.Unlock()

	if is, ok := r.indexSenders[folder]; ok {
		r.sup.RemoveAndWait(is.token, 0)
		delete(r.indexSenders, folder)
	}
	delete(r.startInfos, folder)
}

// removeAllExcept stops all running index senders and removes those pending
// to be started, except the given ones.
func (r *indexSenderRegistry) removeAllExcept(except map[string]struct{}) {
	r.mut.Lock()
	defer r.mut.Unlock()

	for folder, is := range r.indexSenders {
		if _, ok := except[folder]; !ok {
			r.sup.RemoveAndWait(is.token, 0)
			delete(r.indexSenders, folder)
		}
	}
	for folder := range r.startInfos {
		if _, ok := except[folder]; !ok {
			delete(r.startInfos, folder)
		}
	}
}

// pause stops a running index sender.
// It is a noop if the folder isn't known or has not been started yet.
func (r *indexSenderRegistry) pause(folder string) {
	r.mut.Lock()
	defer r.mut.Unlock()

	if is, ok := r.indexSenders[folder]; ok {
		is.pause()
	}
}

// resume unpauses an already running index sender or starts it, if it was added
// while paused.
// It is a noop if the folder isn't known.
func (r *indexSenderRegistry) resume(folder config.FolderConfiguration, fset *db.FileSet) {
	r.mut.Lock()
	defer r.mut.Unlock()

	is, isOk := r.indexSenders[folder.ID]
	if info, ok := r.startInfos[folder.ID]; ok {
		if isOk {
			r.sup.RemoveAndWait(is.token, 0)
			delete(r.indexSenders, folder.ID)
		}
		r.addLocked(folder, fset, info)
		delete(r.startInfos, folder.ID)
	} else if isOk {
		is.resume(fset)
	}
}
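
// indexSenderStartInfo holds the remote device's description of our local
// device (local) and of itself (remote), used to decide whether we can send
// delta index updates or need to send a full index.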
type indexSenderStartInfo struct {
	local, remote protocol.Device
}