// Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.

package model

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/AudriusButkevicius/lfu-go"

	"github.com/syncthing/syncthing/internal/config"
	"github.com/syncthing/syncthing/internal/events"
	"github.com/syncthing/syncthing/internal/ignore"
	"github.com/syncthing/syncthing/internal/osutil"
	"github.com/syncthing/syncthing/internal/protocol"
	"github.com/syncthing/syncthing/internal/scanner"
	"github.com/syncthing/syncthing/internal/symlinks"
	"github.com/syncthing/syncthing/internal/versioner"
)

// TODO: Stop on errors
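
// The puller checks for something to do every checkPullIntv while idle, waits
// nextPullIntv after an iteration that brought the folder in sync, and backs
// off for pauseIntv when repeated iterations fail to make progress.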
const (
	pauseIntv     = 60 * time.Second
	nextPullIntv  = 10 * time.Second
	checkPullIntv = 1 * time.Second
)

// A pullBlockState is passed to the puller routine for each block that needs
// to be fetched.
type pullBlockState struct {
	*sharedPullerState
	block protocol.BlockInfo
}

// A copyBlocksState is passed to the copy routine if the file has blocks to be
// copied.
type copyBlocksState struct {
	*sharedPullerState
	blocks []protocol.BlockInfo
}

var (
	activity    = newDeviceActivity()
	errNoDevice = errors.New("no available source device")
)
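
// A Puller maintains a folder in read-write mode: it periodically rescans the
// folder and pulls files flagged as needed from the other devices in the
// cluster.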
type Puller struct {
	folder          string
	dir             string
	scanIntv        time.Duration
	model           *Model
	stop            chan struct{}
	versioner       versioner.Versioner
	ignorePerms     bool
	lenientMtimes   bool
	progressEmitter *ProgressEmitter
	copiers         int
	pullers         int
	finishers       int
	queue           *JobQueue
}

// Serve will run scans and pulls. It will return when Stop()ed or on a
// critical error.
func (p *Puller) Serve() {
	if debug {
		l.Debugln(p, "starting")
		defer l.Debugln(p, "exiting")
	}

	p.stop = make(chan struct{})
	p.queue = NewJobQueue()

	pullTimer := time.NewTimer(checkPullIntv)
	scanTimer := time.NewTimer(time.Millisecond) // The first scan should be done immediately.

	defer func() {
		pullTimer.Stop()
		scanTimer.Stop()
		// TODO: Should there be an actual FolderStopped state?
		p.model.setState(p.folder, FolderIdle)
	}()

	var prevVer uint64
	var prevIgnoreHash string

	// We don't start pulling files until a scan has been completed.
	initialScanCompleted := false

loop:
	for {
		select {
		case <-p.stop:
			return

		// TODO: We could easily add a channel here for notifications from
		// Index(), so that we immediately start a pull when new index
		// information is available. Before that though, I'd like to build a
		// repeatable benchmark of how long it takes to sync a change from
		// device A to device B, so we have something to work against.
		case <-pullTimer.C:
			if !initialScanCompleted {
				// How did we even get here?
				if debug {
					l.Debugln(p, "skip (initial)")
				}
				pullTimer.Reset(nextPullIntv)
				continue
			}

			p.model.fmut.RLock()
			curIgnores := p.model.folderIgnores[p.folder]
			p.model.fmut.RUnlock()

			if newHash := curIgnores.Hash(); newHash != prevIgnoreHash {
				// The ignore patterns have changed. We need to re-evaluate if
				// there are files we need now that were ignored before.
				if debug {
					l.Debugln(p, "ignore patterns have changed, resetting prevVer")
				}
				prevVer = 0
				prevIgnoreHash = newHash
			}

			// RemoteLocalVersion() is a fast call, doesn't touch the database.
			curVer := p.model.RemoteLocalVersion(p.folder)
			if curVer == prevVer {
				if debug {
					l.Debugln(p, "skip (curVer == prevVer)", prevVer)
				}
				pullTimer.Reset(checkPullIntv)
				continue
			}

			if debug {
				l.Debugln(p, "pulling", prevVer, curVer)
			}
			p.model.setState(p.folder, FolderSyncing)
			tries := 0
			checksum := false
			for {
				tries++
				// Last resort mode, to get around corrupt/invalid block maps.
				if tries == 10 {
					l.Infoln("Desperation mode ON")
					checksum = true
				}

				changed := p.pullerIteration(checksum, curIgnores)
				if debug {
					l.Debugln(p, "changed", changed)
				}

				if changed == 0 {
					// No files were changed by the puller, so we are in
					// sync. Remember the local version number and
					// schedule a resync a little bit into the future.

					if lv := p.model.RemoteLocalVersion(p.folder); lv < curVer {
						// There's a corner case where the device we needed
						// files from disconnected during the puller
						// iteration. The files will have been removed from
						// the index, so we've concluded that we don't need
						// them, but at the same time we have the local
						// version that includes those files in curVer. So we
						// catch the case that localVersion might have
						// decreased here.
						l.Debugln(p, "adjusting curVer", lv)
						curVer = lv
					}
					prevVer = curVer
					if debug {
						l.Debugln(p, "next pull in", nextPullIntv)
					}
					pullTimer.Reset(nextPullIntv)
					break
				}

				if tries > 10 {
					// We've tried a bunch of times to get in sync, but
					// we're not making it. Probably there are write
					// errors preventing us. Flag this with a warning and
					// wait a bit longer before retrying.
					l.Warnf("Folder %q isn't making progress - check logs for possible root cause. Pausing puller for %v.", p.folder, pauseIntv)
					if debug {
						l.Debugln(p, "next pull in", pauseIntv)
					}
					pullTimer.Reset(pauseIntv)
					break
				}
			}
			p.model.setState(p.folder, FolderIdle)

		// The reason for running the scanner from within the puller is that
		// this is the easiest way to make sure we are not doing both at the
		// same time.
		case <-scanTimer.C:
			if debug {
				l.Debugln(p, "rescan")
			}
			p.model.setState(p.folder, FolderScanning)
			if err := p.model.ScanFolder(p.folder); err != nil {
				p.model.cfg.InvalidateFolder(p.folder, err.Error())
				break loop
			}
			p.model.setState(p.folder, FolderIdle)
			if p.scanIntv > 0 {
				if debug {
					l.Debugln(p, "next rescan in", p.scanIntv)
				}
				scanTimer.Reset(p.scanIntv)
			}
			if !initialScanCompleted {
				l.Infoln("Completed initial scan (rw) of folder", p.folder)
				initialScanCompleted = true
			}
		}
	}
}
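
// Stop signals the Serve loop to exit.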
func (p *Puller) Stop() {
	close(p.stop)
}

func (p *Puller) String() string {
	return fmt.Sprintf("puller/%s@%p", p.folder, p)
}

// pullerIteration runs a single puller iteration for the given folder and
// returns the number of items that should have been synced (even those that
// might have failed). One puller iteration handles all files currently
// flagged as needed in the folder.
func (p *Puller) pullerIteration(checksum bool, ignores *ignore.Matcher) int {
	pullChan := make(chan pullBlockState)
	copyChan := make(chan copyBlocksState)
	finisherChan := make(chan *sharedPullerState)

	var copyWg sync.WaitGroup
	var pullWg sync.WaitGroup
	var doneWg sync.WaitGroup

	if debug {
		l.Debugln(p, "c", p.copiers, "p", p.pullers, "f", p.finishers)
	}

	for i := 0; i < p.copiers; i++ {
		copyWg.Add(1)
		go func() {
			// copierRoutine finishes when copyChan is closed
			p.copierRoutine(copyChan, pullChan, finisherChan, checksum)
			copyWg.Done()
		}()
	}

	for i := 0; i < p.pullers; i++ {
		pullWg.Add(1)
		go func() {
			// pullerRoutine finishes when pullChan is closed
			p.pullerRoutine(pullChan, finisherChan)
			pullWg.Done()
		}()
	}

	for i := 0; i < p.finishers; i++ {
		doneWg.Add(1)
		// finisherRoutine finishes when finisherChan is closed
		go func() {
			p.finisherRoutine(finisherChan)
			doneWg.Done()
		}()
	}

	p.model.fmut.RLock()
	files := p.model.folderFiles[p.folder]
	p.model.fmut.RUnlock()

	// !!!
	// WithNeed takes a database snapshot (by necessity). By the time we've
	// handled a bunch of files it might have become out of date and we might
	// be attempting to sync with an old version of a file...
	// !!!

	changed := 0

	var deletions []protocol.FileInfo

	files.WithNeed(protocol.LocalDeviceID, func(intf protocol.FileIntf) bool {
		// Needed items are delivered sorted lexicographically. This isn't
		// really optimal from a performance point of view - it would be
		// better if files were handled in random order, to spread the load
		// over the cluster. But it means that we can be sure that we fully
		// handle directories before the files that go inside them, which is
		// nice.

		file := intf.(protocol.FileInfo)

		if ignores.Match(file.Name) {
			// This is an ignored file. Skip it, continue iteration.
			return true
		}

		events.Default.Log(events.ItemStarted, map[string]string{
			"folder": p.folder,
			"item":   file.Name,
		})

		if debug {
			l.Debugln(p, "handling", file.Name)
		}

		switch {
		case file.IsDeleted():
			// A deleted file, directory or symlink
			deletions = append(deletions, file)
		case file.IsDirectory() && !file.IsSymlink():
			// A new or changed directory
			p.handleDir(file)
		default:
			// A new or changed file or symlink. This is the only case where we
			// do stuff concurrently in the background
			p.queue.Push(&file)
		}

		changed++
		return true
	})

	for {
		f := p.queue.Pop()
		if f == nil {
			break
		}
		p.handleFile(*f, copyChan, finisherChan)
	}

	// Signal copy and puller routines that we are done with the in data for
	// this iteration. Wait for them to finish.
	close(copyChan)
	copyWg.Wait()
	close(pullChan)
	pullWg.Wait()

	// Signal the finisher chan that there will be no more input.
	close(finisherChan)

	// Wait for the finisherChan to finish.
	doneWg.Wait()

	for i := range deletions {
		deletion := deletions[len(deletions)-i-1]
		if deletion.IsDirectory() {
			p.deleteDir(deletion)
		} else {
			p.deleteFile(deletion)
		}
	}

	return changed
}

// handleDir creates or updates the given directory
func (p *Puller) handleDir(file protocol.FileInfo) {
	realName := filepath.Join(p.dir, file.Name)
	mode := os.FileMode(file.Flags & 0777)
	if p.ignorePerms {
		mode = 0755
	}

	if debug {
		curFile := p.model.CurrentFolderFile(p.folder, file.Name)
		l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
	}

	info, err := os.Lstat(realName)
	switch {
	// There is already something under that name, but it's a file/link.
	// Most likely a file/link is getting replaced with a directory.
	// Remove the file/link and fall through to directory creation.
	case err == nil && (!info.IsDir() || info.Mode()&os.ModeSymlink != 0):
		err = osutil.InWritableDir(os.Remove, realName)
		if err != nil {
			l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
			return
		}
		fallthrough
	// The directory doesn't exist, so we create it with the right
	// mode bits from the start.
	case err != nil && os.IsNotExist(err):
		// We declare a function that acts on only the path name, so
		// we can pass it to InWritableDir. We use a regular Mkdir and
		// not MkdirAll because the parent should already exist.
		mkdir := func(path string) error {
			return os.Mkdir(path, mode)
		}

		if err = osutil.InWritableDir(mkdir, realName); err == nil {
			p.model.updateLocal(p.folder, file)
		} else {
			l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
		}
		return
	// Weird error when stat()'ing the dir. Probably won't work to do
	// anything else with it if we can't even stat() it.
	case err != nil:
		l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
		return
	}

	// The directory already exists, so we just correct the mode bits. (We
	// don't handle modification times on directories, because that sucks...)
	// It's OK to change mode bits on stuff within non-writable directories.
	if p.ignorePerms {
		p.model.updateLocal(p.folder, file)
	} else if err := os.Chmod(realName, mode); err == nil {
		p.model.updateLocal(p.folder, file)
	} else {
		l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
	}
}

// deleteDir attempts to delete the given directory
func (p *Puller) deleteDir(file protocol.FileInfo) {
	realName := filepath.Join(p.dir, file.Name)
	// Delete any temporary files lying around in the directory
	dir, _ := os.Open(realName)
	if dir != nil {
		files, _ := dir.Readdirnames(-1)
		for _, file := range files {
			if defTempNamer.IsTemporary(file) {
				osutil.InWritableDir(os.Remove, filepath.Join(realName, file))
			}
		}
		// Close the handle so it doesn't leak and doesn't block removal of
		// the directory below.
		dir.Close()
	}
	err := osutil.InWritableDir(os.Remove, realName)
	if err == nil || os.IsNotExist(err) {
		p.model.updateLocal(p.folder, file)
	} else {
		l.Infof("Puller (folder %q, dir %q): delete: %v", p.folder, file.Name, err)
	}
}

// deleteFile attempts to delete the given file
func (p *Puller) deleteFile(file protocol.FileInfo) {
	realName := filepath.Join(p.dir, file.Name)

	var err error
	if p.versioner != nil {
		err = osutil.InWritableDir(p.versioner.Archive, realName)
	} else {
		err = osutil.InWritableDir(os.Remove, realName)
	}

	if err != nil && !os.IsNotExist(err) {
		l.Infof("Puller (folder %q, file %q): delete: %v", p.folder, file.Name, err)
	} else {
		p.model.updateLocal(p.folder, file)
	}
}

// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
	curFile := p.model.CurrentFolderFile(p.folder, file.Name)

	if len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
		// We are supposed to copy the entire file, and then fetch nothing. We
		// are only updating metadata, so we don't actually *need* to make the
		// copy.
		if debug {
			l.Debugln(p, "taking shortcut on", file.Name)
		}
		p.queue.Done(&file)
		if file.IsSymlink() {
			p.shortcutSymlink(curFile, file)
		} else {
			p.shortcutFile(file)
		}
		return
	}

	scanner.PopulateOffsets(file.Blocks)

	// Figure out the absolute filenames we need once and for all
	tempName := filepath.Join(p.dir, defTempNamer.TempName(file.Name))
	realName := filepath.Join(p.dir, file.Name)

	reused := 0
	var blocks []protocol.BlockInfo

	// Check for an old temporary file which might have some blocks we could
	// reuse.
	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize)
	if err == nil {
		// Check for any reusable blocks in the temp file
		tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)

		// block.String() returns a string unique to the block
		existingBlocks := make(map[string]bool, len(tempCopyBlocks))
		for _, block := range tempCopyBlocks {
			existingBlocks[block.String()] = true
		}

		// Since the blocks are already there, we don't need to get them.
		for _, block := range file.Blocks {
			_, ok := existingBlocks[block.String()]
			if !ok {
				blocks = append(blocks, block)
			}
		}

		// The sharedpullerstate will know which flags to use when opening the
		// temp file, depending on whether we are reusing any blocks or not.
		reused = len(file.Blocks) - len(blocks)
		if reused == 0 {
			// Otherwise, discard the file ourselves in order for the
			// sharedpuller not to panic when it fails to exclusively create a
			// file which already exists
			os.Remove(tempName)
		}
	} else {
		blocks = file.Blocks
	}

	s := sharedPullerState{
		file:       file,
		folder:     p.folder,
		tempName:   tempName,
		realName:   realName,
		copyTotal:  uint32(len(blocks)),
		copyNeeded: uint32(len(blocks)),
		reused:     uint32(reused),
	}

	if debug {
		l.Debugf("%v need file %s; copy %d, reused %v", p, file.Name, len(blocks), reused)
	}

	cs := copyBlocksState{
		sharedPullerState: &s,
		blocks:            blocks,
	}
	copyChan <- cs
}

// shortcutFile sets file mode and modification time, when that's the only
// thing that has changed.
func (p *Puller) shortcutFile(file protocol.FileInfo) {
	realName := filepath.Join(p.dir, file.Name)
	if !p.ignorePerms {
		err := os.Chmod(realName, os.FileMode(file.Flags&0777))
		if err != nil {
			l.Infof("Puller (folder %q, file %q): shortcut: %v", p.folder, file.Name, err)
			return
		}
	}

	t := time.Unix(file.Modified, 0)
	err := os.Chtimes(realName, t, t)
	if err != nil {
		if p.lenientMtimes {
			// We accept the failure with a warning here and allow the sync to
			// continue. We'll sync the new mtime back to the other devices later.
			// If they have the same problem & setting, we might never get in
			// sync.
			l.Infof("Puller (folder %q, file %q): shortcut: %v (continuing anyway as requested)", p.folder, file.Name, err)
		} else {
			l.Infof("Puller (folder %q, file %q): shortcut: %v", p.folder, file.Name, err)
			return
		}
	}

	p.model.updateLocal(p.folder, file)
}

// shortcutSymlink changes the symlink's type if necessary.
func (p *Puller) shortcutSymlink(curFile, file protocol.FileInfo) {
	err := symlinks.ChangeType(filepath.Join(p.dir, file.Name), file.Flags)
	if err != nil {
		l.Infof("Puller (folder %q, file %q): symlink shortcut: %v", p.folder, file.Name, err)
		return
	}

	p.model.updateLocal(p.folder, file)
}

// copierRoutine reads copyBlocksStates until the in channel closes and
// performs the relevant copies when possible, or passes the blocks on to the
// puller routine.
func (p *Puller) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState, checksum bool) {
	buf := make([]byte, protocol.BlockSize)

nextFile:
	for state := range in {
		dstFd, err := state.tempFile()
		if err != nil {
			// Nothing more to do for this failed file (the error was logged
			// when it happened)
			continue nextFile
		}

		if p.progressEmitter != nil {
			p.progressEmitter.Register(state.sharedPullerState)
		}

		evictionChan := make(chan lfu.Eviction)

		fdCache := lfu.New()
		fdCache.UpperBound = 50
		fdCache.LowerBound = 20
		fdCache.EvictionChannel = evictionChan

		go func() {
			for item := range evictionChan {
				item.Value.(*os.File).Close()
			}
		}()

		folderRoots := make(map[string]string)
		p.model.fmut.RLock()
		for folder, cfg := range p.model.folderCfgs {
			folderRoots[folder] = cfg.Path
		}
		p.model.fmut.RUnlock()

		hasher := sha256.New()

		for _, block := range state.blocks {
			buf = buf[:int(block.Size)]
			found := p.model.finder.Iterate(block.Hash, func(folder, file string, index uint32) bool {
				path := filepath.Join(folderRoots[folder], file)

				var fd *os.File

				fdi := fdCache.Get(path)
				if fdi != nil {
					fd = fdi.(*os.File)
				} else {
					fd, err = os.Open(path)
					if err != nil {
						return false
					}
					fdCache.Set(path, fd)
				}

				_, err = fd.ReadAt(buf, protocol.BlockSize*int64(index))
				if err != nil {
					return false
				}

				// Only done on second to last puller attempt
				if checksum {
					hasher.Write(buf)
					hash := hasher.Sum(nil)
					hasher.Reset()

					if !bytes.Equal(hash, block.Hash) {
						if debug {
							l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
						}
						err = p.model.finder.Fix(folder, file, index, block.Hash, hash)
						if err != nil {
							l.Warnln("finder fix:", err)
						}
						return false
					}
				}

				_, err = dstFd.WriteAt(buf, block.Offset)
				if err != nil {
					state.earlyClose("dst write", err)
				}
				if file == state.file.Name {
					state.copiedFromOrigin()
				}
				return true
			})

			if state.failed() != nil {
				break
			}

			if !found {
				state.pullStarted()
				ps := pullBlockState{
					sharedPullerState: state.sharedPullerState,
					block:             block,
				}
				pullChan <- ps
			} else {
				state.copyDone()
			}
		}

		fdCache.Evict(fdCache.Len())
		close(evictionChan)
		out <- state.sharedPullerState
	}
}
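
// pullerRoutine reads pullBlockStates until the in channel closes, fetches
// each block from the least busy device that has the file available, and
// writes the block data to the temporary file.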
func (p *Puller) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPullerState) {
nextBlock:
	for state := range in {
		if state.failed() != nil {
			continue nextBlock
		}

		// Select the least busy device to pull the block from. If we found no
		// feasible device at all, fail the block (and in the long run, the
		// file).
		potentialDevices := p.model.availability(p.folder, state.file.Name)
		selected := activity.leastBusy(potentialDevices)
		if selected == (protocol.DeviceID{}) {
			state.earlyClose("pull", errNoDevice)
			continue nextBlock
		}

		// Get an fd to the temporary file. Technically we don't need it until
		// after fetching the block, but if we run into an error here there is
		// no point in issuing the request to the network.
		fd, err := state.tempFile()
		if err != nil {
			continue nextBlock
		}

		// Fetch the block, while marking the selected device as in use so that
		// leastBusy can select another device when someone else asks.
		activity.using(selected)
		buf, err := p.model.requestGlobal(selected, p.folder, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
		activity.done(selected)
		if err != nil {
			state.earlyClose("pull", err)
			continue nextBlock
		}

		// Save the block data we got from the cluster
		_, err = fd.WriteAt(buf, state.block.Offset)
		if err != nil {
			state.earlyClose("save", err)
			continue nextBlock
		}

		state.pullDone()
		out <- state.sharedPullerState
	}
}
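
// performFinish verifies the assembled temporary file against the expected
// block hashes, sets permissions and modification time, lets the versioner
// archive the old file if one is configured, and finally moves the temporary
// file into place.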
func (p *Puller) performFinish(state *sharedPullerState) {
	// Verify the file against expected hashes
	fd, err := os.Open(state.tempName)
	if err != nil {
		l.Warnln("puller: final:", err)
		return
	}
	err = scanner.Verify(fd, protocol.BlockSize, state.file.Blocks)
	fd.Close()
	if err != nil {
		l.Infoln("puller:", state.file.Name, err, "(file changed during pull?)")
		return
	}

	// Set the correct permission bits on the new file
	if !p.ignorePerms {
		err = os.Chmod(state.tempName, os.FileMode(state.file.Flags&0777))
		if err != nil {
			l.Warnln("puller: final:", err)
			return
		}
	}

	// Set the correct timestamp on the new file
	t := time.Unix(state.file.Modified, 0)
	err = os.Chtimes(state.tempName, t, t)
	if err != nil {
		if p.lenientMtimes {
			// We accept the failure with a warning here and allow the sync to
			// continue. We'll sync the new mtime back to the other devices later.
			// If they have the same problem & setting, we might never get in
			// sync.
			l.Infof("Puller (folder %q, file %q): final: %v (continuing anyway as requested)", p.folder, state.file.Name, err)
		} else {
			l.Warnln("puller: final:", err)
			return
		}
	}

	// If we should use versioning, let the versioner archive the old
	// file before we replace it. Archiving a non-existent file is not
	// an error.
	if p.versioner != nil {
		err = p.versioner.Archive(state.realName)
		if err != nil {
			l.Warnln("puller: final:", err)
			return
		}
	}

	// If the target path is a symlink or a directory, we cannot copy
	// over it, hence remove it before proceeding.
	stat, err := os.Lstat(state.realName)
	if err == nil && (stat.IsDir() || stat.Mode()&os.ModeSymlink != 0) {
		osutil.InWritableDir(os.Remove, state.realName)
	}

	// Replace the original content with the new one
	err = osutil.Rename(state.tempName, state.realName)
	if err != nil {
		l.Warnln("puller: final:", err)
		return
	}

	// If it's a symlink, the target of the symlink is inside the file.
	if state.file.IsSymlink() {
		content, err := ioutil.ReadFile(state.realName)
		if err != nil {
			l.Warnln("puller: final: reading symlink:", err)
			return
		}

		// Remove the file, and replace it with a symlink.
		err = osutil.InWritableDir(func(path string) error {
			os.Remove(path)
			return symlinks.Create(path, string(content), state.file.Flags)
		}, state.realName)
		if err != nil {
			l.Warnln("puller: final: creating symlink:", err)
			return
		}
	}

	// Record the updated file in the index
	p.model.updateLocal(p.folder, state.file)
}
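
// finisherRoutine reads sharedPullerStates until the in channel closes and
// runs performFinish for each state that reports a successful final close.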
func (p *Puller) finisherRoutine(in <-chan *sharedPullerState) {
	for state := range in {
		if closed, err := state.finalClose(); closed {
			if debug {
				l.Debugln(p, "closing", state.file.Name)
			}
			if err != nil {
				l.Warnln("puller: final:", err)
				continue
			}

			p.queue.Done(&state.file)
			p.performFinish(state)
			p.model.receivedFile(p.folder, state.file.Name)
			if p.progressEmitter != nil {
				p.progressEmitter.Deregister(state)
			}
		}
	}
}

// Bump moves the given filename to the front of the job queue.
func (p *Puller) Bump(filename string) {
	p.queue.Bump(filename)
}
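
// Jobs exposes the current contents of the job queue, as returned by
// JobQueue.Jobs.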
func (p *Puller) Jobs() ([]protocol.FileInfoTruncated, []protocol.FileInfoTruncated) {
	return p.queue.Jobs()
}

// clean deletes orphaned temporary files
func (p *Puller) clean() {
	keep := time.Duration(p.model.cfg.Options().KeepTemporariesH) * time.Hour
	now := time.Now()

	filepath.Walk(p.dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.Mode().IsRegular() && defTempNamer.IsTemporary(path) && info.ModTime().Add(keep).Before(now) {
			os.Remove(path)
		}

		return nil
	})
}
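
// invalidateFolder marks the folder with the given ID as invalid in the
// configuration, using the error text as the reason.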
func invalidateFolder(cfg *config.Configuration, folderID string, err error) {
	for i := range cfg.Folders {
		folder := &cfg.Folders[i]
		if folder.ID == folderID {
			folder.Invalid = err.Error()
			return
		}
	}
}