lib/model: Consistently show folder description in startup messages

Since we need the folderConfig for this anyway, I'm skipping the copying
of all its attributes that rwfolder did and just keeping the original
around instead.

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3825
Jakob Borg, 9 years ago
Commit 5c067661f4
3 files changed, 46 additions and 62 deletions
  1. lib/model/rofolder.go (+4, -2)
  2. lib/model/rwfolder.go (+40, -59)
  3. lib/model/rwfolder_test.go (+2, -1)
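
The core of this change is Go struct embedding: config.FolderConfiguration is embedded in the folder types, so its exported fields and methods (Copiers, Pullers, MaxConflicts, Description() and so on) are promoted onto the folder struct, which makes the per-field copies that rwfolder used to make redundant. Below is a minimal sketch of that mechanism, using simplified stand-in types rather than the real Syncthing ones; the Description format shown is only a guess for illustration.

package main

import "fmt"

// Simplified stand-in for config.FolderConfiguration; the real type
// carries many more fields (Order, MaxConflicts, Fsync, ...).
type FolderConfiguration struct {
	ID      string
	Label   string
	Copiers int
	Pullers int
}

// Hypothetical simplification of the real Description method, which
// renders a human-readable folder name for log messages.
func (c FolderConfiguration) Description() string {
	return fmt.Sprintf("%q (%s)", c.Label, c.ID)
}

// Embedding promotes the exported fields and methods, so f.Copiers and
// f.Description() work without copying values into lowercase fields.
type sendReceiveFolder struct {
	FolderConfiguration
}

func main() {
	f := sendReceiveFolder{FolderConfiguration: FolderConfiguration{
		ID: "abcd-1234", Label: "Documents", Copiers: 2,
	}}
	fmt.Println(f.Copiers, f.Description())
}

Because the embedded configuration is exported, tests can also tweak it directly, which is why rwfolder_test.go below can set fo.DisableWeakHash instead of the old fo.useWeakHash field.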

lib/model/rofolder.go (+4, -2)

@@ -20,6 +20,7 @@ func init() {
 
 type sendOnlyFolder struct {
 	folder
+	config.FolderConfiguration
 }
 
 func newSendOnlyFolder(model *Model, cfg config.FolderConfiguration, _ versioner.Versioner, _ *fs.MtimeFS) service {
@@ -30,6 +31,7 @@ func newSendOnlyFolder(model *Model, cfg config.FolderConfiguration, _ versioner
 			stop:         make(chan struct{}),
 			model:        model,
 		},
+		FolderConfiguration: cfg,
 	}
 }
 
@@ -49,7 +51,7 @@ func (f *sendOnlyFolder) Serve() {
 
 		case <-f.scan.timer.C:
 			if err := f.model.CheckFolderHealth(f.folderID); err != nil {
-				l.Infoln("Skipping folder", f.folderID, "scan due to folder error:", err)
+				l.Infoln("Skipping scan of", f.Description(), "due to folder error:", err)
 				f.scan.Reschedule()
 				continue
 			}
@@ -67,7 +69,7 @@ func (f *sendOnlyFolder) Serve() {
 			}
 
 			if !initialScanCompleted {
-				l.Infoln("Completed initial scan (ro) of folder", f.folderID)
+				l.Infoln("Completed initial scan (ro) of", f.Description())
 				initialScanCompleted = true
 			}
 

lib/model/rwfolder.go (+40, -59)

@@ -80,23 +80,13 @@ type dbUpdateJob struct {
 
 type sendReceiveFolder struct {
 	folder
+	config.FolderConfiguration
 
-	mtimeFS        *fs.MtimeFS
-	dir            string
-	versioner      versioner.Versioner
-	ignorePerms    bool
-	order          config.PullOrder
-	maxConflicts   int
-	sleep          time.Duration
-	pause          time.Duration
-	allowSparse    bool
-	checkFreeSpace bool
-	ignoreDelete   bool
-	fsync          bool
-	useWeakHash    bool
-
-	copiers int
-	pullers int
+	mtimeFS   *fs.MtimeFS
+	dir       string
+	versioner versioner.Versioner
+	sleep     time.Duration
+	pause     time.Duration
 
 	queue       *jobQueue
 	dbUpdates   chan dbUpdateJob
@@ -117,20 +107,11 @@ func newSendReceiveFolder(model *Model, cfg config.FolderConfiguration, ver vers
 			stop:         make(chan struct{}),
 			model:        model,
 		},
+		FolderConfiguration: cfg,
 
-		mtimeFS:        mtimeFS,
-		dir:            cfg.Path(),
-		versioner:      ver,
-		ignorePerms:    cfg.IgnorePerms,
-		copiers:        cfg.Copiers,
-		pullers:        cfg.Pullers,
-		order:          cfg.Order,
-		maxConflicts:   cfg.MaxConflicts,
-		allowSparse:    !cfg.DisableSparseFiles,
-		checkFreeSpace: cfg.MinDiskFreePct != 0,
-		ignoreDelete:   cfg.IgnoreDelete,
-		fsync:          cfg.Fsync,
-		useWeakHash:    !cfg.DisableWeakHash,
+		mtimeFS:   mtimeFS,
+		dir:       cfg.Path(),
+		versioner: ver,
 
 		queue:       newJobQueue(),
 		pullTimer:   time.NewTimer(time.Second),
@@ -141,29 +122,29 @@ func newSendReceiveFolder(model *Model, cfg config.FolderConfiguration, ver vers
 		initialScanCompleted: make(chan struct{}),
 	}
 
-	f.configureCopiersAndPullers(cfg)
+	f.configureCopiersAndPullers()
 
 	return f
 }
 
-func (f *sendReceiveFolder) configureCopiersAndPullers(cfg config.FolderConfiguration) {
-	if f.copiers == 0 {
-		f.copiers = defaultCopiers
+func (f *sendReceiveFolder) configureCopiersAndPullers() {
+	if f.Copiers == 0 {
+		f.Copiers = defaultCopiers
 	}
-	if f.pullers == 0 {
-		f.pullers = defaultPullers
+	if f.Pullers == 0 {
+		f.Pullers = defaultPullers
 	}
 
-	if cfg.PullerPauseS == 0 {
+	if f.PullerPauseS == 0 {
 		f.pause = defaultPullerPause
 	} else {
-		f.pause = time.Duration(cfg.PullerPauseS) * time.Second
+		f.pause = time.Duration(f.PullerPauseS) * time.Second
 	}
 
-	if cfg.PullerSleepS == 0 {
+	if f.PullerSleepS == 0 {
 		f.sleep = defaultPullerSleep
 	} else {
-		f.sleep = time.Duration(cfg.PullerSleepS) * time.Second
+		f.sleep = time.Duration(f.PullerSleepS) * time.Second
 	}
 }
 
@@ -171,7 +152,7 @@ func (f *sendReceiveFolder) configureCopiersAndPullers(cfg config.FolderConfigur
 // set on the local host or the FlagNoPermBits has been set on the file/dir
 // which is being pulled.
 func (f *sendReceiveFolder) ignorePermissions(file protocol.FileInfo) bool {
-	return f.ignorePerms || file.NoPermissions
+	return f.IgnorePerms || file.NoPermissions
 }
 
 // Serve will run scans and pulls. It will return when Stop()ed or on a
@@ -231,7 +212,7 @@ func (f *sendReceiveFolder) Serve() {
 			}
 
 			if err := f.model.CheckFolderHealth(f.folderID); err != nil {
-				l.Infoln("Skipping folder", f.folderID, "pull due to folder error:", err)
+				l.Infoln("Skipping pull of", f.Description(), "due to folder error:", err)
 				f.pullTimer.Reset(f.sleep)
 				continue
 			}
@@ -304,7 +285,7 @@ func (f *sendReceiveFolder) Serve() {
 			select {
 			case <-f.initialScanCompleted:
 			default:
-				l.Infoln("Completed initial scan (rw) of folder", f.folderID)
+				l.Infoln("Completed initial scan (rw) of", f.Description())
 				close(f.initialScanCompleted)
 			}
 
@@ -346,7 +327,7 @@ func (f *sendReceiveFolder) pullerIteration(ignores *ignore.Matcher) int {
 	pullWg := sync.NewWaitGroup()
 	doneWg := sync.NewWaitGroup()
 
-	l.Debugln(f, "c", f.copiers, "p", f.pullers)
+	l.Debugln(f, "c", f.Copiers, "p", f.Pullers)
 
 	f.dbUpdates = make(chan dbUpdateJob)
 	updateWg.Add(1)
@@ -356,7 +337,7 @@ func (f *sendReceiveFolder) pullerIteration(ignores *ignore.Matcher) int {
 		updateWg.Done()
 	}()
 
-	for i := 0; i < f.copiers; i++ {
+	for i := 0; i < f.Copiers; i++ {
 		copyWg.Add(1)
 		go func() {
 			// copierRoutine finishes when copyChan is closed
@@ -365,7 +346,7 @@ func (f *sendReceiveFolder) pullerIteration(ignores *ignore.Matcher) int {
 		}()
 	}
 
-	for i := 0; i < f.pullers; i++ {
+	for i := 0; i < f.Pullers; i++ {
 		pullWg.Add(1)
 		go func() {
 			// pullerRoutine finishes when pullChan is closed
@@ -394,7 +375,7 @@ func (f *sendReceiveFolder) pullerIteration(ignores *ignore.Matcher) int {
 	// pile.
 
 	folderFiles.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
-		if shouldIgnore(intf, ignores, f.ignoreDelete, defTempNamer) {
+		if shouldIgnore(intf, ignores, f.IgnoreDelete, defTempNamer) {
 			return true
 		}
 
@@ -491,7 +472,7 @@ func (f *sendReceiveFolder) pullerIteration(ignores *ignore.Matcher) int {
 
 	// Now do the file queue. Reorder it according to configuration.
 
-	switch f.order {
+	switch f.Order {
 	case config.OrderRandom:
 		f.queue.Shuffle()
 	case config.OrderAlphabetic:
@@ -1133,7 +1114,7 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
 		blocksSize = file.Size
 	}
 
-	if f.checkFreeSpace {
+	if f.MinDiskFreePct > 0 {
 		if free, err := osutil.DiskFreeBytes(f.dir); err == nil && free < blocksSize {
 			l.Warnf(`Folder "%s": insufficient disk space in %s for %s: have %.2f MiB, need %.2f MiB`, f.folderID, f.dir, file.Name, float64(free)/1024/1024, float64(blocksSize)/1024/1024)
 			f.newError(file.Name, errors.New("insufficient space"))
@@ -1168,7 +1149,7 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
 		ignorePerms:      f.ignorePermissions(file),
 		version:          curFile.Version,
 		mut:              sync.NewRWMutex(),
-		sparse:           f.allowSparse,
+		sparse:           !f.DisableSparseFiles,
 		created:          time.Now(),
 	}
 
@@ -1235,7 +1216,7 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
 		f.model.fmut.RUnlock()
 
 		var weakHashFinder *weakhash.Finder
-		if f.useWeakHash {
+		if !f.DisableWeakHash {
 			hashesToFind := make([]uint32, 0, len(state.blocks))
 			for _, block := range state.blocks {
 				if block.WeakHash != 0 {
@@ -1250,7 +1231,7 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
 		}
 
 		for _, block := range state.blocks {
-			if f.allowSparse && state.reused == 0 && block.IsEmpty() {
+			if !f.DisableSparseFiles && state.reused == 0 && block.IsEmpty() {
 				// The block is a block of all zeroes, and we are not reusing
 				// a temp file, so there is no need to do anything with it.
 				// If we were reusing a temp file and had this block to copy,
@@ -1364,7 +1345,7 @@ func (f *sendReceiveFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *
 			continue
 		}
 
-		if f.allowSparse && state.reused == 0 && state.block.IsEmpty() {
+		if !f.DisableSparseFiles && state.reused == 0 && state.block.IsEmpty() {
 			// There is no need to request a block of all zeroes. Pretend we
 			// requested it and handled it correctly.
 			state.pullDone(state.block)
@@ -1537,7 +1518,7 @@ func (f *sendReceiveFolder) dbUpdaterRoutine() {
 
 	var changedFiles []string
 	var changedDirs []string
-	if f.fsync {
+	if f.Fsync {
 		changedFiles = make([]string, 0, maxBatchSize)
 		changedDirs = make([]string, 0, maxBatchSize)
 	}
@@ -1562,7 +1543,7 @@ func (f *sendReceiveFolder) dbUpdaterRoutine() {
 
 		for _, job := range batch {
 			files = append(files, job.file)
-			if f.fsync {
+			if f.Fsync {
 				// collect changed files and dirs
 				switch job.jobType {
 				case dbUpdateHandleFile, dbUpdateShortcutFile:
@@ -1588,7 +1569,7 @@ func (f *sendReceiveFolder) dbUpdaterRoutine() {
 			lastFile = job.file
 		}
 
-		if f.fsync {
+		if f.Fsync {
 			// sync files and dirs to disk
 			syncFilesOnce(changedFiles, osutil.SyncFile)
 			changedFiles = changedFiles[:0]
@@ -1670,7 +1651,7 @@ func (f *sendReceiveFolder) moveForConflict(name string) error {
 		return nil
 	}
 
-	if f.maxConflicts == 0 {
+	if f.MaxConflicts == 0 {
 		if err := os.Remove(name); err != nil && !os.IsNotExist(err) {
 			return err
 		}
@@ -1688,11 +1669,11 @@ func (f *sendReceiveFolder) moveForConflict(name string) error {
 		// matter, go ahead as if the move succeeded.
 		err = nil
 	}
-	if f.maxConflicts > -1 {
+	if f.MaxConflicts > -1 {
 		matches, gerr := osutil.Glob(withoutExt + ".sync-conflict-????????-??????" + ext)
-		if gerr == nil && len(matches) > f.maxConflicts {
+		if gerr == nil && len(matches) > f.MaxConflicts {
 			sort.Sort(sort.Reverse(sort.StringSlice(matches)))
-			for _, match := range matches[f.maxConflicts:] {
+			for _, match := range matches[f.MaxConflicts:] {
 				gerr = os.Remove(match)
 				if gerr != nil {
 					l.Debugln(f, "removing extra conflict", gerr)

lib/model/rwfolder_test.go (+2, -1)

@@ -322,6 +322,7 @@ func TestWeakHash(t *testing.T) {
 	go fo.copierRoutine(copyChan, pullChan, finisherChan)
 
 	// Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
+	fo.DisableWeakHash = true
 	fo.handleFile(desiredFile, copyChan, finisherChan)
 
 	var pulls []pullBlockState
@@ -349,7 +350,7 @@ func TestWeakHash(t *testing.T) {
 	}
 
 	// Test 2 - using weak hash, expectPulls blocks pulled.
-	fo.useWeakHash = true
+	fo.DisableWeakHash = false
 	fo.handleFile(desiredFile, copyChan, finisherChan)
 
 	pulls = pulls[:0]