@@ -2,34 +2,14 @@
 // All rights reserved. Use of this source code is governed by an MIT-style
 // license that can be found in the LICENSE file.
 
-/*
-__        __               _             _
-\ \      / /_ _ _ __ _ __ (_)_ __   __ _| |
- \ \ /\ / / _` | '__| '_ \| | '_ \ / _` | |
-  \ V  V / (_| | |  | | | | | | | | (_| |_|
-   \_/\_/ \__,_|_|  |_| |_|_|_| |_|\__, (_)
-                                   |___/
-
-The code in this file is a piece of crap. Don't base anything on it.
-Refactorin ongoing in new-puller branch.
-
-__        __               _             _
-\ \      / /_ _ _ __ _ __ (_)_ __   __ _| |
- \ \ /\ / / _` | '__| '_ \| | '_ \ / _` | |
-  \ V  V / (_| | |  | | | | | | | | (_| |_|
-   \_/\_/ \__,_|_|  |_| |_|_|_| |_|\__, (_)
-                                   |___/
-
-*/
-
 package model
 
 import (
-    "bytes"
     "errors"
     "fmt"
-    "math/rand"
     "os"
     "path/filepath"
+    "sync"
    "time"
 
     "github.com/syncthing/syncthing/internal/config"
@@ -40,780 +20,561 @@ import (
     "github.com/syncthing/syncthing/internal/versioner"
 )
 
-type requestResult struct {
-    node     protocol.NodeID
-    file     protocol.FileInfo
-    filepath string // full filepath name
-    offset   int64
-    data     []byte
-    err      error
-}
+// TODO: Stop on errors
 
-type openFile struct {
-    filepath     string // full filepath name
-    temp         string // temporary filename
-    availability []protocol.NodeID
-    file         *os.File
-    err          error // error when opening or writing to file, all following operations are cancelled
-    outstanding  int   // number of requests we still have outstanding
-    done         bool  // we have sent all requests for this file
-}
+const (
+    copiersPerRepo   = 1
+    pullersPerRepo   = 16
+    finishersPerRepo = 2
+    pauseIntv        = 60 * time.Second
+    nextPullIntv     = 10 * time.Second
+    checkPullIntv    = 1 * time.Second
+)
 
-type activityMap map[protocol.NodeID]int
-
-// Queue about this many blocks each puller iteration. More blocks means
-// longer iterations and better efficiency; fewer blocks reduce memory
-// consumption. 1000 blocks ~= 1000 * 128 KiB ~= 125 MiB of data.
-const pullIterationBlocks = 1000
-
-func (m activityMap) leastBusyNode(availability []protocol.NodeID, isValid func(protocol.NodeID) bool) protocol.NodeID {
-    var low int = 2<<30 - 1
-    var selected protocol.NodeID
-    for _, node := range availability {
-        usage := m[node]
-        if usage < low && isValid(node) {
-            low = usage
-            selected = node
-        }
-    }
-    m[selected]++
-    return selected
+// A pullBlockState is passed to the puller routine for each block that needs
+// to be fetched.
+type pullBlockState struct {
+    *sharedPullerState
+    block protocol.BlockInfo
 }
 
-func (m activityMap) decrease(node protocol.NodeID) {
-    m[node]--
+// A copyBlocksState is passed to the copy routine if the file has blocks to
+// be copied from the original.
+type copyBlocksState struct {
+    *sharedPullerState
+    blocks []protocol.BlockInfo
 }
 
-var errNoNode = errors.New("no available source node")
-
-type puller struct {
-    cfg               *config.Configuration
-    repoCfg           config.RepositoryConfiguration
-    bq                blockQueue
-    slots             int
-    model             *Model
-    oustandingPerNode activityMap
-    openFiles         map[string]openFile
-    requestSlots      chan bool
-    blocks            chan bqBlock
-    requestResults    chan requestResult
-    versioner         versioner.Versioner
-    errors            int
+var (
+    activity  = newNodeActivity()
+    errNoNode = errors.New("no available source node")
+)
+
+type Puller struct {
+    repo      string
+    dir       string
+    scanIntv  time.Duration
+    model     *Model
+    stop      chan struct{}
+    versioner versioner.Versioner
 }
 
-func newPuller(repoCfg config.RepositoryConfiguration, model *Model, slots int, cfg *config.Configuration) *puller {
-    p := &puller{
-        cfg:               cfg,
-        repoCfg:           repoCfg,
-        slots:             slots,
-        model:             model,
-        oustandingPerNode: make(activityMap),
-        openFiles:         make(map[string]openFile),
-        requestSlots:      make(chan bool, slots),
-        blocks:            make(chan bqBlock),
-        requestResults:    make(chan requestResult),
+// Serve will run scans and pulls. It will return when Stop()ed or on a
+// critical error.
+func (p *Puller) Serve() {
+    if debug {
+        l.Debugln(p, "starting")
+        defer l.Debugln(p, "exiting")
     }
 
-    if len(repoCfg.Versioning.Type) > 0 {
-        factory, ok := versioner.Factories[repoCfg.Versioning.Type]
-        if !ok {
-            l.Fatalf("Requested versioning type %q that does not exist", repoCfg.Versioning.Type)
-        }
-        p.versioner = factory(repoCfg.ID, repoCfg.Directory, repoCfg.Versioning.Params)
-    }
+    p.stop = make(chan struct{})
 
-    if slots > 0 {
-        // Read/write
-        if debug {
-            l.Debugf("starting puller; repo %q dir %q slots %d", repoCfg.ID, repoCfg.Directory, slots)
-        }
-        go p.run()
-    } else {
-        // Read only
-        if debug {
-            l.Debugf("starting puller; repo %q dir %q (read only)", repoCfg.ID, repoCfg.Directory)
-        }
-        go p.runRO()
-    }
-    return p
-}
+    pullTimer := time.NewTimer(checkPullIntv)
+    scanTimer := time.NewTimer(p.scanIntv)
+
+    defer func() {
+        pullTimer.Stop()
+        scanTimer.Stop()
+        // TODO: Should there be an actual RepoStopped state?
+        p.model.setState(p.repo, RepoIdle)
+    }()
 
-func (p *puller) run() {
-    changed := true
-    scanintv := time.Duration(p.repoCfg.RescanIntervalS) * time.Second
-    lastscan := time.Now()
     var prevVer uint64
-    var queued int
 
-    // Load up the request slots
-    for i := 0; i < cap(p.requestSlots); i++ {
-        p.requestSlots <- true
-    }
+    // Clean out old temporaries before we start pulling
+    p.clean()
 
+loop:
     for {
-        if sc, sl := cap(p.requestSlots), len(p.requestSlots); sl != sc {
-            panic(fmt.Sprintf("Incorrect number of slots; %d != %d", sl, sc))
-        }
-
-        // Run the pulling loop as long as there are blocks to fetch
-        prevVer, queued = p.queueNeededBlocks(prevVer)
-        if queued > 0 {
-            p.errors = 0
+        select {
+        case <-p.stop:
+            return
 
-    pull:
-        for {
-            select {
-            case res := <-p.requestResults:
-                p.model.setState(p.repoCfg.ID, RepoSyncing)
-                changed = true
-                p.requestSlots <- true
-                p.handleRequestResult(res)
-
-            case <-p.requestSlots:
-                b, ok := p.bq.get()
-
-                if !ok {
-                    if debug {
-                        l.Debugf("%q: pulling loop needs more blocks", p.repoCfg.ID)
-                    }
-
-                    if p.errors > 0 && p.errors >= queued {
-                        p.requestSlots <- true
-                        break pull
-                    }
-
-                    prevVer, _ = p.queueNeededBlocks(prevVer)
-                    b, ok = p.bq.get()
-                }
-
-                if !ok && len(p.openFiles) == 0 {
-                    // Nothing queued, nothing outstanding
-                    if debug {
-                        l.Debugf("%q: pulling loop done", p.repoCfg.ID)
-                    }
-                    p.requestSlots <- true
-                    break pull
-                }
-
-                if !ok {
-                    // Nothing queued, but there are still open files.
-                    // Give the situation a moment to change.
-                    if debug {
-                        l.Debugf("%q: pulling loop paused", p.repoCfg.ID)
-                    }
-                    p.requestSlots <- true
-                    time.Sleep(100 * time.Millisecond)
-                    continue pull
-                }
-
-                if debug {
-                    l.Debugf("queueing %q / %q offset %d copy %d", p.repoCfg.ID, b.file.Name, b.block.Offset, len(b.copy))
-                }
-                p.model.setState(p.repoCfg.ID, RepoSyncing)
-                changed = true
-                if p.handleBlock(b) {
-                    // Block was fully handled, free up the slot
-                    p.requestSlots <- true
-                }
-            }
+        // TODO: We could easily add a channel here for notifications from
+        // Index(), so that we immediately start a pull when new index
+        // information is available. Before that though, I'd like to build a
+        // repeatable benchmark of how long it takes to sync a change from
+        // node A to node B, so we have something to work against.
+        case <-pullTimer.C:
+            // RemoteLocalVersion() is a fast call, doesn't touch the database.
+            curVer := p.model.RemoteLocalVersion(p.repo)
+            if curVer == prevVer {
+                pullTimer.Reset(checkPullIntv)
+                continue
             }
 
-        if p.errors > 0 && p.errors >= queued {
-            l.Warnf("All remaining files failed to sync. Stopping repo %q.", p.repoCfg.ID)
-            invalidateRepo(p.cfg, p.repoCfg.ID, errors.New("too many errors, check logs"))
-            return
+            if debug {
+                l.Debugln(p, "pulling", prevVer, curVer)
             }
-        }
+            p.model.setState(p.repo, RepoSyncing)
+            tries := 0
+            for {
+                tries++
+                changed := p.pullerIteration(copiersPerRepo, pullersPerRepo, finishersPerRepo)
+                if debug {
+                    l.Debugln(p, "changed", changed)
+                }
 
-        if changed {
-            p.model.setState(p.repoCfg.ID, RepoCleaning)
-            p.clean()
-            changed = false
-        }
+                if changed == 0 {
+                    // No files were changed by the puller, so we are in
+                    // sync. Remember the local version number and
+                    // schedule a resync a little bit into the future.
+                    prevVer = curVer
+                    pullTimer.Reset(nextPullIntv)
+                    break
+                }
 
-        p.model.setState(p.repoCfg.ID, RepoIdle)
+                if tries > 10 {
+                    // We've tried a bunch of times to get in sync, but
+                    // we're not making it. Probably there are write
+                    // errors preventing us. Flag this with a warning and
+                    // wait a bit longer before retrying.
+                    l.Warnf("Repo %q isn't making progress - check logs for possible root cause. Pausing puller for %v.", p.repo, pauseIntv)
+                    pullTimer.Reset(pauseIntv)
+                    break
+                }
+            }
+            p.model.setState(p.repo, RepoIdle)
 
-        // Do a rescan if it's time for it
-        if time.Since(lastscan) > scanintv {
+        // The reason for running the scanner from within the puller is that
+        // this is the easiest way to make sure we are not doing both at the
+        // same time.
+        case <-scanTimer.C:
             if debug {
-                l.Debugf("%q: time for rescan", p.repoCfg.ID)
+                l.Debugln(p, "rescan")
             }
-
-            err := p.model.ScanRepo(p.repoCfg.ID)
-            if err != nil {
-                invalidateRepo(p.cfg, p.repoCfg.ID, err)
-                return
+            p.model.setState(p.repo, RepoScanning)
+            if err := p.model.ScanRepo(p.repo); err != nil {
+                invalidateRepo(p.model.cfg, p.repo, err)
+                break loop
             }
-            lastscan = time.Now()
+            p.model.setState(p.repo, RepoIdle)
+            scanTimer.Reset(p.scanIntv)
         }
-
-        time.Sleep(5 * time.Second)
     }
 }
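
The Serve/Stop pair added above is the usual Go "service loop" shape: timers drive the periodic pull and scan work, and closing the stop channel makes the pending select fall through and return. A minimal, self-contained sketch of the same pattern (names are illustrative, not from this patch, and it assumes Stop is only called after Serve has started):

```go
package example

import "time"

type service struct {
	stop chan struct{}
}

// Serve loops until the stop channel is closed, doing periodic work from a
// timer, the same shape Puller.Serve uses with pullTimer and scanTimer.
func (s *service) Serve() {
	s.stop = make(chan struct{})
	timer := time.NewTimer(time.Second)
	defer timer.Stop()
	for {
		select {
		case <-s.stop:
			return
		case <-timer.C:
			// ... periodic work goes here ...
			timer.Reset(time.Second)
		}
	}
}

// Stop makes Serve return by closing the channel it selects on.
func (s *service) Stop() {
	close(s.stop)
}
```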
 
-func (p *puller) runRO() {
-    walkTicker := time.Tick(time.Duration(p.repoCfg.RescanIntervalS) * time.Second)
-
-    for _ = range walkTicker {
-        if debug {
-            l.Debugf("%q: time for rescan", p.repoCfg.ID)
-        }
-        err := p.model.ScanRepo(p.repoCfg.ID)
-        if err != nil {
-            invalidateRepo(p.cfg, p.repoCfg.ID, err)
-            return
-        }
-    }
+func (p *Puller) Stop() {
+    close(p.stop)
 }
 
-// clean deletes orphaned temporary files and directories that should no
-// longer exist.
-func (p *puller) clean() {
-    var deleteDirs []string
-    var changed = 0
-
-    var walkFn = func(path string, info os.FileInfo, err error) error {
-        if err != nil {
-            return err
-        }
-
-        if info.Mode().IsRegular() && defTempNamer.IsTemporary(path) {
-            os.Remove(path)
-        }
+func (p *Puller) String() string {
+    return fmt.Sprintf("puller/%s@%p", p.repo, p)
+}
 
-        if !info.IsDir() {
-            return nil
-        }
+// pullerIteration runs a single puller iteration for the given repo and
+// returns the number of items that should have been synced (even those that
+// might have failed). One puller iteration handles all files currently
+// flagged as needed in the repo. The specified number of copier, puller and
+// finisher routines are used. It's seldom efficient to use more than one
+// copier routine, while multiple pullers are essential and multiple finishers
+// may be useful (they are primarily CPU bound due to hashing).
+func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
+    pullChan := make(chan pullBlockState)
+    copyChan := make(chan copyBlocksState)
+    finisherChan := make(chan *sharedPullerState)
+
+    var wg sync.WaitGroup
+    var doneWg sync.WaitGroup
+
+    for i := 0; i < ncopiers; i++ {
+        wg.Add(1)
+        go func() {
+            // copierRoutine finishes when copyChan is closed
+            p.copierRoutine(copyChan, finisherChan)
+            wg.Done()
+        }()
+    }
+
+    for i := 0; i < npullers; i++ {
+        wg.Add(1)
+        go func() {
+            // pullerRoutine finishes when pullChan is closed
+            p.pullerRoutine(pullChan, finisherChan)
+            wg.Done()
+        }()
+    }
+
+    for i := 0; i < nfinishers; i++ {
+        doneWg.Add(1)
+        // finisherRoutine finishes when finisherChan is closed
+        go func() {
+            p.finisherRoutine(finisherChan)
+            doneWg.Done()
+        }()
+    }
+
+    p.model.rmut.RLock()
+    files := p.model.repoFiles[p.repo]
+    p.model.rmut.RUnlock()
+
+    // !!!
+    // WithNeed takes a database snapshot (by necessity). By the time we've
+    // handled a bunch of files it might have become out of date and we might
+    // be attempting to sync with an old version of a file...
+    // !!!
+
+    changed := 0
+    files.WithNeed(protocol.LocalNodeID, func(intf protocol.FileIntf) bool {
+        file := intf.(protocol.FileInfo)
 
-        rn, err := filepath.Rel(p.repoCfg.Directory, path)
-        if err != nil {
-            return nil
-        }
-
-        if rn == "." {
-            return nil
-        }
+        events.Default.Log(events.ItemStarted, map[string]string{
+            "repo": p.repo,
+            "item": file.Name,
+        })
 
-        if filepath.Base(rn) == ".stversions" {
-            return filepath.SkipDir
+        if debug {
+            l.Debugln(p, "handling", file.Name)
         }
 
-        cur := p.model.CurrentRepoFile(p.repoCfg.ID, rn)
-        if cur.Name != rn {
-            // No matching dir in current list; weird
-            if debug {
-                l.Debugf("missing dir: %s; %v", rn, cur)
-            }
-            return nil
+        switch {
+        case protocol.IsDirectory(file.Flags) && protocol.IsDeleted(file.Flags):
+            // A deleted directory
+            p.deleteDir(file)
+        case protocol.IsDirectory(file.Flags):
+            // A new or changed directory
+            p.handleDir(file)
+        case protocol.IsDeleted(file.Flags):
+            // A deleted file
+            p.deleteFile(file)
+        default:
+            // A new or changed file
+            p.handleFile(file, copyChan, pullChan)
        }
 
-        if protocol.IsDeleted(cur.Flags) {
-            if debug {
-                l.Debugf("queue delete dir: %v", cur)
-            }
-
-            // We queue the directories to delete since we walk the
-            // tree in depth first order and need to remove the
-            // directories in the opposite order.
+        changed++
+        return true
+    })
 
-            deleteDirs = append(deleteDirs, path)
-            return nil
-        }
+    // Signal copy and puller routines that we are done with the in data for
+    // this iteration
+    close(copyChan)
+    close(pullChan)
 
-        if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(cur.Flags) && !scanner.PermsEqual(cur.Flags, uint32(info.Mode())) {
-            err := os.Chmod(path, os.FileMode(cur.Flags)&os.ModePerm)
-            if err != nil {
-                l.Warnf("Restoring folder flags: %q: %v", path, err)
-            } else {
-                changed++
-                if debug {
-                    l.Debugf("restored dir flags: %o -> %v", info.Mode()&os.ModePerm, cur)
-                }
-            }
-        }
+    // Wait for them to finish, then signal the finisher chan that there will
+    // be no more input.
+    wg.Wait()
+    close(finisherChan)
 
-        return nil
-    }
+    // Wait for the finisherChan to finish.
+    doneWg.Wait()
 
-    for {
-        deleteDirs = nil
-        changed = 0
-        filepath.Walk(p.repoCfg.Directory, walkFn)
-
-        var deleted = 0
-        // Delete any queued directories
-        for i := len(deleteDirs) - 1; i >= 0; i-- {
-            dir := deleteDirs[i]
-            if debug {
-                l.Debugln("delete dir:", dir)
-            }
-            err := os.Remove(dir)
-            if err == nil {
-                deleted++
-            } else {
-                l.Warnln("Delete dir:", err)
-            }
-        }
+    return changed
+}
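
pullerIteration wires up a three-stage pipeline: copiers and pullers feed finishers, each stage is shut down by closing its input channel, and a second WaitGroup gates closing the downstream channel until every upstream producer has exited. The same fan-out/fan-in skeleton in isolation (a runnable sketch; the names are illustrative, not from this patch):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	work := make(chan int)
	results := make(chan int)

	var wg, doneWg sync.WaitGroup

	// Fan out: several workers drain the work channel, like the copier and
	// puller routines draining copyChan and pullChan.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for n := range work { // exits when work is closed
				results <- n * n
			}
		}()
	}

	// Fan in: a finisher drains the results channel.
	doneWg.Add(1)
	go func() {
		defer doneWg.Done()
		for r := range results { // exits when results is closed
			fmt.Println(r)
		}
	}()

	for n := 1; n <= 10; n++ {
		work <- n
	}
	close(work)    // no more input for the workers
	wg.Wait()      // wait for all producers of results to exit...
	close(results) // ...only then is closing the downstream channel safe
	doneWg.Wait()
}
```

The ordering of the last four lines is the important part, and it mirrors the close(copyChan)/close(pullChan), wg.Wait(), close(finisherChan), doneWg.Wait() sequence above: closing finisherChan while a copier or puller could still send on it would panic.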
 
-        if debug {
-            l.Debugf("changed %d, deleted %d dirs", changed, deleted)
-        }
+// handleDir creates or updates the given directory
+func (p *Puller) handleDir(file protocol.FileInfo) {
+    realName := filepath.Join(p.dir, file.Name)
+    mode := os.FileMode(file.Flags & 0777)
 
-        if changed+deleted == 0 {
-            return
-        }
+    if debug {
+        curFile := p.model.CurrentRepoFile(p.repo, file.Name)
+        l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
    }
-}
 
-func (p *puller) handleRequestResult(res requestResult) {
-    p.oustandingPerNode.decrease(res.node)
-    f := res.file
-
-    of, ok := p.openFiles[f.Name]
-    if !ok {
-        // no entry in openFiles means there was an error and we've cancelled the operation
+    var err error
+    if info, statErr := os.Stat(realName); statErr != nil && os.IsNotExist(statErr) {
+        err = os.MkdirAll(realName, mode)
+    } else if !info.IsDir() {
+        l.Infof("Puller (repo %q, file %q): should be dir, but is not", p.repo, file.Name)
        return
+    } else {
+        err = os.Chmod(realName, mode)
    }
 
-    if res.err != nil {
-        // This request resulted in an error
-        of.err = res.err
-        if debug {
-            l.Debugf("pull: not writing %q / %q offset %d: %v; (done=%v, outstanding=%d)", p.repoCfg.ID, f.Name, res.offset, res.err, of.done, of.outstanding)
-        }
-    } else if of.err == nil {
-        // This request was sucessfull and nothing has failed previously either
-        _, of.err = of.file.WriteAt(res.data, res.offset)
-        if debug {
-            l.Debugf("pull: wrote %q / %q offset %d len %d outstanding %d done %v", p.repoCfg.ID, f.Name, res.offset, len(res.data), of.outstanding, of.done)
-        }
+    if err == nil {
+        p.model.updateLocal(p.repo, file)
    }
+}
 
-    of.outstanding--
-    p.openFiles[f.Name] = of
-
-    if of.done && of.outstanding == 0 {
-        p.closeFile(f)
+// deleteDir attempts to delete the given directory
+func (p *Puller) deleteDir(file protocol.FileInfo) {
+    realName := filepath.Join(p.dir, file.Name)
+    err := os.Remove(realName)
+    if err == nil || os.IsNotExist(err) {
+        p.model.updateLocal(p.repo, file)
    }
 }
 
-// handleBlock fulfills the block request by copying, ignoring or fetching
-// from the network. Returns true if the block was fully handled
-// synchronously, i.e. if the slot can be reused.
-func (p *puller) handleBlock(b bqBlock) bool {
-    f := b.file
-
-    // For directories, making sure they exist is enough.
-    // Deleted directories we mark as handled and delete later.
-    if protocol.IsDirectory(f.Flags) {
-        if !protocol.IsDeleted(f.Flags) {
-            path := filepath.Join(p.repoCfg.Directory, f.Name)
-            _, err := os.Stat(path)
-            if err != nil && os.IsNotExist(err) {
-                if debug {
-                    l.Debugf("create dir: %v", f)
-                }
-                err = os.MkdirAll(path, os.FileMode(f.Flags&0777))
+// deleteFile attempts to delete the given file
+func (p *Puller) deleteFile(file protocol.FileInfo) {
+    realName := filepath.Join(p.dir, file.Name)
+    realDir := filepath.Dir(realName)
+    if info, err := os.Stat(realDir); err == nil && info.IsDir() && info.Mode()&0200 == 0 {
+        // A non-writeable directory (for this user; we assume that's the
+        // relevant part). Temporarily change the mode so we can delete the
+        // file inside it.
+        err = os.Chmod(realDir, 0755)
+        if err == nil {
+            defer func() {
+                err = os.Chmod(realDir, info.Mode())
                if err != nil {
-                    p.errors++
-                    l.Infof("mkdir: error: %q: %v", path, err)
+                    panic(err)
                }
-            }
-        } else if debug {
-            l.Debugf("ignore delete dir: %v", f)
+            }()
        }
-        p.model.updateLocal(p.repoCfg.ID, f)
-        return true
    }
 
-    if len(b.copy) > 0 && len(b.copy) == len(b.file.Blocks) && b.last {
-        // We are supposed to copy the entire file, and then fetch nothing.
-        // We don't actually need to make the copy.
-        if debug {
-            l.Debugln("taking shortcut:", f)
-        }
-        fp := filepath.Join(p.repoCfg.Directory, f.Name)
-        t := time.Unix(f.Modified, 0)
-        err := os.Chtimes(fp, t, t)
-        if err != nil {
-            l.Infof("chtimes: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
-        }
-        if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(f.Flags) {
-            err = os.Chmod(fp, os.FileMode(f.Flags&0777))
-            if err != nil {
-                l.Infof("chmod: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
-            }
-        }
-
-        events.Default.Log(events.ItemStarted, map[string]string{
-            "repo": p.repoCfg.ID,
-            "item": f.Name,
-        })
+    var err error
+    if p.versioner != nil {
+        err = p.versioner.Archive(realName)
+    } else {
+        err = os.Remove(realName)
+    }
 
-        p.model.updateLocal(p.repoCfg.ID, f)
-        return true
+    if err != nil {
+        l.Infof("Puller (repo %q, file %q): delete: %v", p.repo, file.Name, err)
+    } else {
+        p.model.updateLocal(p.repo, file)
    }
+}
 
-    of, ok := p.openFiles[f.Name]
-    of.done = b.last
+// handleFile queues the copies and pulls as necessary for a single new or
+// changed file.
+func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, pullChan chan<- pullBlockState) {
+    curFile := p.model.CurrentRepoFile(p.repo, file.Name)
+    copyBlocks, pullBlocks := scanner.BlockDiff(curFile.Blocks, file.Blocks)
 
-    if !ok {
+    if len(copyBlocks) == len(curFile.Blocks) && len(pullBlocks) == 0 {
+        // We are supposed to copy the entire file, and then fetch nothing. We
+        // are only updating metadata, so we don't actually *need* to make the
+        // copy.
        if debug {
-            l.Debugf("pull: %q: opening file %q", p.repoCfg.ID, f.Name)
-        }
-
-        events.Default.Log(events.ItemStarted, map[string]string{
-            "repo": p.repoCfg.ID,
-            "item": f.Name,
-        })
-
-        of.availability = p.model.repoFiles[p.repoCfg.ID].Availability(f.Name)
-        of.filepath = filepath.Join(p.repoCfg.Directory, f.Name)
-        of.temp = filepath.Join(p.repoCfg.Directory, defTempNamer.TempName(f.Name))
-
-        dirName := filepath.Dir(of.filepath)
-        info, err := os.Stat(dirName)
-        if err != nil {
-            err = os.MkdirAll(dirName, 0777)
-            if debug && err != nil {
-                l.Debugf("mkdir: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
-            }
-        } else {
-            // We need to make sure the directory is writeable so we can create files in it
-            if dirName != p.repoCfg.Directory {
-                err = os.Chmod(dirName, 0777)
-                if debug && err != nil {
-                    l.Debugf("make writeable: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
-                }
-            }
-            // Change it back after creating the file, to minimize the time window with incorrect permissions
-            defer os.Chmod(dirName, info.Mode())
+            l.Debugln(p, "taking shortcut on", file.Name)
        }
-
-        of.file, of.err = os.Create(of.temp)
-        if of.err != nil {
-            p.errors++
-            l.Infof("create: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
-            if !b.last {
-                p.openFiles[f.Name] = of
-            }
-            return true
-        }
-        osutil.HideFile(of.temp)
+        p.shortcutFile(file)
+        return
    }
 
-    if of.err != nil {
-        // We have already failed this file.
-        if debug {
-            l.Debugf("pull: error: %q / %q has already failed: %v", p.repoCfg.ID, f.Name, of.err)
-        }
-        if b.last {
-            delete(p.openFiles, f.Name)
-        }
+    // Figure out the absolute filenames we need once and for all
+    tempName := filepath.Join(p.dir, defTempNamer.TempName(file.Name))
+    realName := filepath.Join(p.dir, file.Name)
 
-        return true
+    s := sharedPullerState{
+        file:       file,
+        repo:       p.repo,
+        tempName:   tempName,
+        realName:   realName,
+        pullNeeded: len(pullBlocks),
    }
-
-    p.openFiles[f.Name] = of
-
-    switch {
-    case len(b.copy) > 0:
-        p.handleCopyBlock(b)
-        return true
-
-    case b.block.Size > 0:
-        return p.handleRequestBlock(b)
-
-    default:
-        p.handleEmptyBlock(b)
-        return true
+    if len(copyBlocks) > 0 {
+        s.copyNeeded = 1
    }
-}
-
-func (p *puller) handleCopyBlock(b bqBlock) {
-    // We have blocks to copy from the existing file
-    f := b.file
-    of := p.openFiles[f.Name]
 
    if debug {
-        l.Debugf("pull: copying %d blocks for %q / %q", len(b.copy), p.repoCfg.ID, f.Name)
+        l.Debugf("%v need file %s; copy %d, pull %d", p, file.Name, len(copyBlocks), len(pullBlocks))
    }
 
-    var exfd *os.File
-    exfd, of.err = os.Open(of.filepath)
-    if of.err != nil {
-        p.errors++
-        l.Infof("open: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
-        of.file.Close()
-        of.file = nil
-
-        p.openFiles[f.Name] = of
-        return
+    if len(copyBlocks) > 0 {
+        cs := copyBlocksState{
+            sharedPullerState: &s,
+            blocks:            copyBlocks,
+        }
+        copyChan <- cs
    }
-    defer exfd.Close()
 
-    for _, b := range b.copy {
-        bs := make([]byte, b.Size)
-        _, of.err = exfd.ReadAt(bs, b.Offset)
-        if of.err == nil {
-            _, of.err = of.file.WriteAt(bs, b.Offset)
-        }
-        if of.err != nil {
-            p.errors++
-            l.Infof("write: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
-            exfd.Close()
-            of.file.Close()
-            of.file = nil
-
-            p.openFiles[f.Name] = of
-            return
+    if len(pullBlocks) > 0 {
+        for _, block := range pullBlocks {
+            ps := pullBlockState{
+                sharedPullerState: &s,
+                block:             block,
+            }
+            pullChan <- ps
        }
    }
 }
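
handleFile leans on scanner.BlockDiff to split the wanted file's block list into blocks already present in the current local file (to copy) and blocks that must be fetched (to pull). BlockDiff itself is not part of this diff; the stand-in below shows the semantics assumed here, namely that a block matching by hash at the same position can be copied locally:

```go
package example

import "bytes"

type blockInfo struct {
	Offset int64
	Size   uint32
	Hash   []byte
}

// blockDiff is an illustrative stand-in for scanner.BlockDiff: blocks that
// are identical at the same index in the old and new lists can be copied
// from the existing file, everything else has to be pulled from the network.
func blockDiff(have, want []blockInfo) (copyBlocks, pullBlocks []blockInfo) {
	for i, b := range want {
		if i < len(have) && bytes.Equal(have[i].Hash, b.Hash) {
			copyBlocks = append(copyBlocks, b)
		} else {
			pullBlocks = append(pullBlocks, b)
		}
	}
	return copyBlocks, pullBlocks
}
```

Under these semantics the shortcut condition above, len(copyBlocks) == len(curFile.Blocks) && len(pullBlocks) == 0, means all the data is already on disk and only metadata changed.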
 
-// handleRequestBlock tries to pull a block from the network. Returns true if
-// the block could _not_ be fetched (i.e. it was fully handled, matching the
-// return criteria of handleBlock)
-func (p *puller) handleRequestBlock(b bqBlock) bool {
-    f := b.file
-    of, ok := p.openFiles[f.Name]
-    if !ok {
-        panic("bug: request for non-open file")
+// shortcutFile sets file mode and modification time, when that's the only
+// thing that has changed.
+func (p *Puller) shortcutFile(file protocol.FileInfo) {
+    realName := filepath.Join(p.dir, file.Name)
+    err := os.Chmod(realName, os.FileMode(file.Flags&0777))
+    if err != nil {
+        l.Infof("Puller (repo %q, file %q): shortcut: %v", p.repo, file.Name, err)
+        return
    }
 
-    node := p.oustandingPerNode.leastBusyNode(of.availability, p.model.ConnectedTo)
-    if node == (protocol.NodeID{}) {
-        of.err = errNoNode
-        if of.file != nil {
-            of.file.Close()
-            of.file = nil
-            os.Remove(of.temp)
-            if debug {
-                l.Debugf("pull: no source for %q / %q; closed", p.repoCfg.ID, f.Name)
-            }
-        }
-        if b.last {
-            if debug {
-                l.Debugf("pull: no source for %q / %q; deleting", p.repoCfg.ID, f.Name)
-            }
-            delete(p.openFiles, f.Name)
-        } else {
-            if debug {
-                l.Debugf("pull: no source for %q / %q; await more blocks", p.repoCfg.ID, f.Name)
-            }
-            p.openFiles[f.Name] = of
-        }
-        return true
+    t := time.Unix(file.Modified, 0)
+    err = os.Chtimes(realName, t, t)
+    if err != nil {
+        l.Infof("Puller (repo %q, file %q): shortcut: %v", p.repo, file.Name, err)
+        return
    }
 
-    of.outstanding++
-    p.openFiles[f.Name] = of
-
-    go func(node protocol.NodeID, b bqBlock) {
-        if debug {
-            l.Debugf("pull: requesting %q / %q offset %d size %d from %q outstanding %d", p.repoCfg.ID, f.Name, b.block.Offset, b.block.Size, node, of.outstanding)
-        }
-
-        bs, err := p.model.requestGlobal(node, p.repoCfg.ID, f.Name, b.block.Offset, int(b.block.Size), nil)
-        p.requestResults <- requestResult{
-            node:     node,
-            file:     f,
-            filepath: of.filepath,
-            offset:   b.block.Offset,
-            data:     bs,
-            err:      err,
-        }
-    }(node, b)
-
-    return false
+    p.model.updateLocal(p.repo, file)
 }
 
-func (p *puller) handleEmptyBlock(b bqBlock) {
-    f := b.file
-    of := p.openFiles[f.Name]
+// copierRoutine reads copyBlocksStates until the in channel closes and
+// performs the relevant copies.
+func (p *Puller) copierRoutine(in <-chan copyBlocksState, out chan<- *sharedPullerState) {
+    buf := make([]byte, scanner.StandardBlockSize)
 
-    if b.last {
-        if of.err == nil {
-            of.file.Close()
+nextFile:
+    for state := range in {
+        dstFd, err := state.tempFile()
+        if err != nil {
+            // Nothing more to do for this failed file (the error was logged
+            // when it happened)
+            continue nextFile
        }
-    }
 
-    if protocol.IsDeleted(f.Flags) {
-        if debug {
-            l.Debugf("pull: delete %q", f.Name)
+        srcFd, err := state.sourceFile()
+        if err != nil {
+            // As above
+            continue nextFile
        }
-        os.Remove(of.temp)
 
-        // Ensure the file and the directory it is in is writeable so we can remove the file
-        dirName := filepath.Dir(of.filepath)
-        err := os.Chmod(of.filepath, 0666)
-        if debug && err != nil {
-            l.Debugf("make writeable: error: %q: %v", of.filepath, err)
-        }
-        if dirName != p.repoCfg.Directory {
-            info, err := os.Stat(dirName)
+        for _, block := range state.blocks {
+            buf = buf[:int(block.Size)]
+
+            _, err = srcFd.ReadAt(buf, block.Offset)
            if err != nil {
-                l.Debugln("weird! can't happen?", err)
-            }
-            err = os.Chmod(dirName, 0777)
-            if debug && err != nil {
-                l.Debugf("make writeable: error: %q: %v", dirName, err)
+                state.earlyClose("src read", err)
+                srcFd.Close()
+                continue nextFile
            }
-            // Change it back after deleting the file, to minimize the time window with incorrect permissions
-            defer os.Chmod(dirName, info.Mode())
-        }
-        if p.versioner != nil {
-            if debug {
-                l.Debugln("pull: deleting with versioner")
-            }
-            if err := p.versioner.Archive(of.filepath); err == nil {
-                p.model.updateLocal(p.repoCfg.ID, f)
-            } else if debug {
-                l.Debugln("pull: error:", err)
+
+            _, err = dstFd.WriteAt(buf, block.Offset)
+            if err != nil {
+                state.earlyClose("dst write", err)
+                srcFd.Close()
+                continue nextFile
            }
-        } else if err := os.Remove(of.filepath); err == nil || os.IsNotExist(err) {
-            p.model.updateLocal(p.repoCfg.ID, f)
-        }
-    } else {
-        if debug {
-            l.Debugf("pull: no blocks to fetch and nothing to copy for %q / %q", p.repoCfg.ID, f.Name)
-        }
-        t := time.Unix(f.Modified, 0)
-        if os.Chtimes(of.temp, t, t) != nil {
-            delete(p.openFiles, f.Name)
-            return
-        }
-        if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(f.Flags) && os.Chmod(of.temp, os.FileMode(f.Flags&0777)) != nil {
-            delete(p.openFiles, f.Name)
-            return
-        }
-        osutil.ShowFile(of.temp)
-        if osutil.Rename(of.temp, of.filepath) == nil {
-            p.model.updateLocal(p.repoCfg.ID, f)
        }
+
+        srcFd.Close()
+        state.copyDone()
+        out <- state.sharedPullerState
    }
-    delete(p.openFiles, f.Name)
 }
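
The copier and puller routines coordinate through sharedPullerState, which is defined elsewhere in this change; only its call sites (tempFile, earlyClose, failed, copyDone, pullDone, finalClose) are visible here. A guess at the minimal bookkeeping those calls imply, clearly an assumption rather than the actual implementation:

```go
package example

import (
	"fmt"
	"sync"
)

// sharedState sketches the bookkeeping implied by the call sites above:
// counters for outstanding copy and pull work, an error latch set by
// earlyClose, and a finalClose that reports true exactly once.
type sharedState struct {
	mut        sync.Mutex
	copyNeeded int
	pullNeeded int
	err        error
	closed     bool
}

func (s *sharedState) earlyClose(context string, err error) {
	s.mut.Lock()
	defer s.mut.Unlock()
	if s.err == nil {
		s.err = fmt.Errorf("%s: %v", context, err)
	}
}

func (s *sharedState) failed() error {
	s.mut.Lock()
	defer s.mut.Unlock()
	return s.err
}

func (s *sharedState) copyDone() {
	s.mut.Lock()
	s.copyNeeded--
	s.mut.Unlock()
}

func (s *sharedState) pullDone() {
	s.mut.Lock()
	s.pullNeeded--
	s.mut.Unlock()
}

// finalClose returns true the first time the file is complete or has failed.
func (s *sharedState) finalClose() (bool, error) {
	s.mut.Lock()
	defer s.mut.Unlock()
	if s.closed {
		return false, s.err
	}
	if s.err != nil || (s.copyNeeded == 0 && s.pullNeeded == 0) {
		s.closed = true
		return true, s.err
	}
	return false, nil
}
```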
 
-func (p *puller) queueNeededBlocks(prevVer uint64) (uint64, int) {
-    curVer := p.model.LocalVersion(p.repoCfg.ID)
-    if curVer == prevVer {
-        return curVer, 0
-    }
+func (p *Puller) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPullerState) {
+nextBlock:
+    for state := range in {
+        if state.failed() != nil {
+            continue nextBlock
+        }
 
-    if debug {
-        l.Debugf("%q: checking for more needed blocks", p.repoCfg.ID)
-    }
+        // Select the least busy node to pull the block from. If we found no
+        // feasible node at all, fail the block (and in the long run, the
+        // file).
+        potentialNodes := p.model.availability(p.repo, state.file.Name)
+        selected := activity.leastBusy(potentialNodes)
+        if selected == (protocol.NodeID{}) {
+            state.earlyClose("pull", errNoNode)
+            continue nextBlock
+        }
 
-    queued := 0
-    files := make([]protocol.FileInfo, 0, indexBatchSize)
-    for _, f := range p.model.NeedFilesRepoLimited(p.repoCfg.ID, indexBatchSize, pullIterationBlocks) {
-        if _, ok := p.openFiles[f.Name]; ok {
-            continue
+        // Get an fd to the temporary file. Technically we don't need it until
+        // after fetching the block, but if we run into an error here there is
+        // no point in issuing the request to the network.
+        fd, err := state.tempFile()
+        if err != nil {
+            continue nextBlock
        }
-        files = append(files, f)
-    }
 
-    perm := rand.Perm(len(files))
-    for _, idx := range perm {
-        f := files[idx]
-        lf := p.model.CurrentRepoFile(p.repoCfg.ID, f.Name)
-        have, need := scanner.BlockDiff(lf.Blocks, f.Blocks)
-        if debug {
-            l.Debugf("need:\n local: %v\n global: %v\n haveBlocks: %v\n needBlocks: %v", lf, f, have, need)
+        // Fetch the block, while marking the selected node as in use so that
+        // leastBusy can select another node when someone else asks.
+        activity.using(selected)
+        buf, err := p.model.requestGlobal(selected, p.repo, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
+        activity.done(selected)
+        if err != nil {
+            state.earlyClose("pull", err)
+            continue nextBlock
        }
-        queued++
-        p.bq.put(bqAdd{
-            file: f,
-            have: have,
-            need: need,
-        })
-    }
 
-    if debug && queued > 0 {
-        l.Debugf("%q: queued %d items", p.repoCfg.ID, queued)
-    }
+        // Save the block data we got from the cluster
+        _, err = fd.WriteAt(buf, state.block.Offset)
+        if err != nil {
+            state.earlyClose("save", err)
+            continue nextBlock
+        }
 
-    if queued > 0 {
-        return prevVer, queued
-    } else {
-        return curVer, 0
+        state.pullDone()
+        out <- state.sharedPullerState
    }
 }
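
The least-busy selection replaces the old activityMap (deleted above) with a package-level nodeActivity that must now be safe for concurrent use, since several puller routines share it. The type itself is not in this diff; a plausible minimal version, consistent with the leastBusy/using/done calls here and with the deleted activityMap logic:

```go
package example

import "sync"

type nodeID [32]byte

type nodeActivity struct {
	mut sync.Mutex
	act map[nodeID]int
}

func newNodeActivity() *nodeActivity {
	return &nodeActivity{act: make(map[nodeID]int)}
}

// leastBusy returns the available node with the fewest outstanding requests,
// or the zero nodeID if availability is empty (the caller treats that as
// "no source node").
func (m *nodeActivity) leastBusy(availability []nodeID) nodeID {
	m.mut.Lock()
	defer m.mut.Unlock()
	low := 2<<30 - 1
	var selected nodeID
	for _, node := range availability {
		if usage := m.act[node]; usage < low {
			low = usage
			selected = node
		}
	}
	return selected
}

func (m *nodeActivity) using(node nodeID) {
	m.mut.Lock()
	m.act[node]++
	m.mut.Unlock()
}

func (m *nodeActivity) done(node nodeID) {
	m.mut.Lock()
	m.act[node]--
	m.mut.Unlock()
}
```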
 
-func (p *puller) closeFile(f protocol.FileInfo) {
-    if debug {
-        l.Debugf("pull: closing %q / %q", p.repoCfg.ID, f.Name)
-    }
+func (p *Puller) finisherRoutine(in <-chan *sharedPullerState) {
+    for state := range in {
+        if closed, err := state.finalClose(); closed {
+            if debug {
+                l.Debugln(p, "closing", state.file.Name)
+            }
+            if err != nil {
+                l.Warnln("puller: final:", err)
+                continue
+            }
 
-    of := p.openFiles[f.Name]
-    err := of.file.Close()
-    if err != nil {
-        p.errors++
-        l.Infof("close: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
-    }
-    defer os.Remove(of.temp)
+            // Verify the file against expected hashes
+            fd, err := os.Open(state.tempName)
+            if err != nil {
+                l.Warnln("puller: final:", err)
+                continue
+            }
+            err = scanner.Verify(fd, scanner.StandardBlockSize, state.file.Blocks)
+            fd.Close()
+            if err != nil {
+                l.Warnln("puller: final:", state.file.Name, err)
+                continue
+            }
 
-    delete(p.openFiles, f.Name)
+            // Set the correct permission bits on the new file
+            err = os.Chmod(state.tempName, os.FileMode(state.file.Flags&0777))
+            if err != nil {
+                os.Remove(state.tempName)
+                l.Warnln("puller: final:", err)
+                continue
+            }
 
-    fd, err := os.Open(of.temp)
-    if err != nil {
-        p.errors++
-        l.Infof("open: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
-        return
-    }
-    hb, _ := scanner.Blocks(fd, scanner.StandardBlockSize, f.Size())
-    fd.Close()
+            // Set the correct timestamp on the new file
+            t := time.Unix(state.file.Modified, 0)
+            err = os.Chtimes(state.tempName, t, t)
+            if err != nil {
+                os.Remove(state.tempName)
+                l.Warnln("puller: final:", err)
+                continue
+            }
 
-    if l0, l1 := len(hb), len(f.Blocks); l0 != l1 {
-        if debug {
-            l.Debugf("pull: %q / %q: nblocks %d != %d", p.repoCfg.ID, f.Name, l0, l1)
-        }
-        return
-    }
+            // If we should use versioning, let the versioner archive the old
+            // file before we replace it. Archiving a non-existent file is not
+            // an error.
+            if p.versioner != nil {
+                err = p.versioner.Archive(state.realName)
+                if err != nil {
+                    os.Remove(state.tempName)
+                    l.Warnln("puller: final:", err)
+                    continue
+                }
+            }
 
-    for i := range hb {
-        if bytes.Compare(hb[i].Hash, f.Blocks[i].Hash) != 0 {
-            if debug {
-                l.Debugf("pull: %q / %q: block %d hash mismatch\n have: %x\n want: %x", p.repoCfg.ID, f.Name, i, hb[i].Hash, f.Blocks[i].Hash)
+            // Replace the original file with the new one
+            err = osutil.Rename(state.tempName, state.realName)
+            if err != nil {
+                os.Remove(state.tempName)
+                l.Warnln("puller: final:", err)
+                continue
            }
-            return
+
+            // Record the updated file in the index
+            p.model.updateLocal(p.repo, state.file)
        }
    }
+}
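
Before renaming the temp file into place, the finisher re-verifies the whole file against the expected block hashes with scanner.Verify. That function is outside this diff; the sketch below shows the kind of check it performs, assuming SHA-256 block hashes and blocks no larger than blockSize:

```go
package example

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
)

type blockInfo struct {
	Size uint32
	Hash []byte
}

// verify re-reads the file block by block and compares each block's SHA-256
// digest with the expected hash, failing on the first mismatch.
func verify(r io.Reader, blockSize int, blocks []blockInfo) error {
	buf := make([]byte, blockSize)
	for i, b := range blocks {
		if _, err := io.ReadFull(r, buf[:int(b.Size)]); err != nil {
			return fmt.Errorf("block %d: %v", i, err)
		}
		hash := sha256.Sum256(buf[:int(b.Size)])
		if !bytes.Equal(hash[:], b.Hash) {
			return fmt.Errorf("block %d: hash mismatch", i)
		}
	}
	return nil
}
```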
 
-    t := time.Unix(f.Modified, 0)
-    err = os.Chtimes(of.temp, t, t)
-    if err != nil {
-        l.Infof("chtimes: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
-    }
-    if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(f.Flags) {
-        err = os.Chmod(of.temp, os.FileMode(f.Flags&0777))
+// clean deletes orphaned temporary files
+func (p *Puller) clean() {
+    filepath.Walk(p.dir, func(path string, info os.FileInfo, err error) error {
        if err != nil {
-            l.Infof("chmod: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
+            return err
        }
-    }
-
-    osutil.ShowFile(of.temp)
 
-    if p.versioner != nil {
-        err := p.versioner.Archive(of.filepath)
-        if err != nil {
-            if debug {
-                l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
-            }
-            return
+        if info.Mode().IsRegular() && defTempNamer.IsTemporary(path) {
+            os.Remove(path)
        }
-    }
 
-    if debug {
-        l.Debugf("pull: rename %q / %q: %q", p.repoCfg.ID, f.Name, of.filepath)
-    }
-    if err := osutil.Rename(of.temp, of.filepath); err == nil {
-        p.model.updateLocal(p.repoCfg.ID, f)
-    } else {
-        p.errors++
-        l.Infof("rename: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
-    }
+        return nil
+    })
 }
 
 func invalidateRepo(cfg *config.Configuration, repoID string, err error) {