
chore(model): the easier linter complaints

Jakob Borg 4 months ago
commit 700bb75016

+ 2 - 2
lib/model/blockpullreorderer.go

@@ -51,7 +51,7 @@ type standardBlockPullReorderer struct {
 }
 
 func newStandardBlockPullReorderer(id protocol.DeviceID, otherDevices []protocol.DeviceID) *standardBlockPullReorderer {
-	allDevices := append(otherDevices, id)
+	allDevices := append(otherDevices, id) //nolint:gocritic
 	slices.SortFunc(allDevices, func(a, b protocol.DeviceID) int {
 		return a.Compare(b)
 	})
@@ -92,7 +92,7 @@ func (p *standardBlockPullReorderer) Reorder(blocks []protocol.BlockInfo) []prot
 	// The rest of the chunks we fetch in a random order in whole chunks.
 	// Generate chunk index slice and shuffle it
 	indexes := make([]int, 0, len(chunks)-1)
-	for i := range len(chunks) {
+	for i := range chunks {
 		if i != p.myIndex {
 			indexes = append(indexes, i)
 		}
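
The //nolint:gocritic above most likely silences gocritic's appendAssign rule, which warns when append's result is assigned to a variable other than its first argument: if the incoming slice has spare capacity, the appended element (and the slices.SortFunc that follows) can end up writing into the caller's backing array. A minimal sketch of that aliasing hazard, with made-up names rather than Syncthing code:

    package main

    import "fmt"

    func main() {
    	// A slice with spare capacity: len 2, cap 4.
    	base := make([]string, 2, 4)
    	base[0], base[1] = "b", "a"

    	// The append fits within the existing capacity, so "extended"
    	// shares base's backing array instead of allocating a new one.
    	extended := append(base, "c")

    	// Mutating through "extended" is then visible through "base".
    	extended[0] = "changed"
    	fmt.Println(base[0]) // prints "changed", not "b"
    }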

+ 4 - 3
lib/model/devicedownloadstate.go

@@ -56,17 +56,18 @@ func (p *deviceFolderDownloadState) Update(updates []protocol.FileDownloadProgre
 		if update.UpdateType == protocol.FileDownloadProgressUpdateTypeForget && ok && local.version.Equal(update.Version) {
 			delete(p.files, update.Name)
 		} else if update.UpdateType == protocol.FileDownloadProgressUpdateTypeAppend {
-			if !ok {
+			switch {
+			case !ok:
 				local = deviceFolderFileDownloadState{
 					blockIndexes: update.BlockIndexes,
 					version:      update.Version,
 					blockSize:    update.BlockSize,
 				}
-			} else if !local.version.Equal(update.Version) {
+			case !local.version.Equal(update.Version):
 				local.blockIndexes = append(local.blockIndexes[:0], update.BlockIndexes...)
 				local.version = update.Version
 				local.blockSize = update.BlockSize
-			} else {
+			default:
 				local.blockIndexes = append(local.blockIndexes, update.BlockIndexes...)
 			}
 			p.files[update.Name] = local

+ 4 - 4
lib/model/folder.go

@@ -52,7 +52,7 @@ type folder struct {
 	ignores       *ignore.Matcher
 	mtimefs       fs.Filesystem
 	modTimeWindow time.Duration
-	ctx           context.Context // used internally, only accessible on serve lifetime
+	ctx           context.Context //nolint:containedctx // used internally, only accessible on serve lifetime
 	done          chan struct{}   // used externally, accessible regardless of serve
 
 	scanInterval           time.Duration
@@ -322,7 +322,7 @@ func (f *folder) Reschedule() {
 		return
 	}
 	// Sleep a random time between 3/4 and 5/4 of the configured interval.
-	sleepNanos := (f.scanInterval.Nanoseconds()*3 + rand.Int63n(2*f.scanInterval.Nanoseconds())) / 4
+	sleepNanos := (f.scanInterval.Nanoseconds()*3 + rand.Int63n(2*f.scanInterval.Nanoseconds())) / 4 //nolint:gosec
 	interval := time.Duration(sleepNanos) * time.Nanosecond
 	l.Debugln(f, "next rescan in", interval)
 	f.scanTimer.Reset(interval)
@@ -1111,7 +1111,7 @@ func (f *folder) setWatchError(err error, nextTryIn time.Duration) {
 	prevErr := f.watchErr
 	f.watchErr = err
 	f.watchMut.Unlock()
-	if err != prevErr {
+	if err != prevErr { //nolint:errorlint
 		data := map[string]interface{}{
 			"folder": f.ID,
 		}
@@ -1127,7 +1127,7 @@ func (f *folder) setWatchError(err error, nextTryIn time.Duration) {
 		return
 	}
 	msg := fmt.Sprintf("Error while trying to start filesystem watcher for folder %s, trying again in %v: %v", f.Description(), nextTryIn, err)
-	if prevErr != err {
+	if prevErr != err { //nolint:errorlint
 		l.Infof(msg)
 		return
 	}
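
The //nolint:errorlint comments keep go-errorlint from demanding errors.Is here: the intent is a plain identity comparison between the previous and current error values (including nil) to detect that the watch error changed, not a match against a possibly wrapped sentinel. For reference, the difference the linter is guarding against, shown with an invented sentinel:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errMarker = errors.New("marker") // hypothetical sentinel error

    func main() {
    	wrapped := fmt.Errorf("while watching: %w", errMarker)

    	// Identity comparison only matches the exact same error value.
    	fmt.Println(wrapped == errMarker) // false

    	// errors.Is unwraps, so it still finds the sentinel.
    	fmt.Println(errors.Is(wrapped, errMarker)) // true
    }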

+ 1 - 1
lib/model/folder_sendonly.go

@@ -28,7 +28,7 @@ func newSendOnlyFolder(model *model, ignores *ignore.Matcher, cfg config.FolderC
 	f := &sendOnlyFolder{
 		folder: newFolder(model, ignores, cfg, evLogger, ioLimiter, nil),
 	}
-	f.folder.puller = f
+	f.puller = f
 	return f
 }
 

+ 12 - 12
lib/model/folder_sendrecv.go

@@ -136,7 +136,7 @@ func newSendReceiveFolder(model *model, ignores *ignore.Matcher, cfg config.Fold
 		blockPullReorderer: newBlockPullReorderer(cfg.BlockPullOrder, model.id, cfg.DeviceIDs()),
 		writeLimiter:       semaphore.New(cfg.MaxConcurrentWrites),
 	}
-	f.folder.puller = f
+	f.puller = f
 
 	if f.Copiers == 0 {
 		f.Copiers = defaultCopiers
@@ -359,13 +359,14 @@ loop:
 			}
 
 		case file.IsDeleted():
-			if file.IsDirectory() {
+			switch {
+			case file.IsDirectory():
 				// Perform directory deletions at the end, as we may have
 				// files to delete inside them before we get to that point.
 				dirDeletions = append(dirDeletions, file)
-			} else if file.IsSymlink() {
+			case file.IsSymlink():
 				f.deleteFile(file, dbUpdateChan, scanChan)
-			} else {
+			default:
 				df, ok, err := f.model.sdb.GetDeviceFile(f.folderID, protocol.LocalDeviceID, file.Name)
 				if err != nil {
 					return changed, nil, nil, err
@@ -1023,7 +1024,7 @@ func (f *sendReceiveFolder) renameFile(cur, source, target protocol.FileInfo, db
 	tempName := fs.TempName(target.Name)
 
 	if f.versioner != nil {
-		err = f.CheckAvailableSpace(uint64(source.Size))
+		err = f.CheckAvailableSpace(uint64(source.Size)) //nolint:gosec
 		if err == nil {
 			err = osutil.Copy(f.CopyRangeMethod.ToFS(), f.mtimefs, f.mtimefs, source.Name, tempName)
 			if err == nil {
@@ -1295,7 +1296,7 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
 	}
 
 	for state := range in {
-		if err := f.CheckAvailableSpace(uint64(state.file.Size)); err != nil {
+		if err := f.CheckAvailableSpace(uint64(state.file.Size)); err != nil { //nolint:gosec
 			state.fail(err)
 			// Nothing more to do for this failed file, since it would use too much disk space
 			out <- state.sharedPullerState
@@ -1461,7 +1462,7 @@ func (f *sendReceiveFolder) copyBlockFromFile(srcName string, srcOffset int64, s
 }
 
 func (*sendReceiveFolder) verifyBuffer(buf []byte, block protocol.BlockInfo) error {
-	if len(buf) != int(block.Size) {
+	if len(buf) != block.Size {
 		return fmt.Errorf("length mismatch %d != %d", len(buf), block.Size)
 	}
 
@@ -1489,8 +1490,7 @@ func (f *sendReceiveFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *
 		// ongoing at any given time, based on the size of the blocks
 		// themselves.
 
-		state := state
-		bytes := int(state.block.Size)
+		bytes := state.block.Size
 
 		if err := requestLimiter.TakeWithContext(f.ctx, bytes); err != nil {
 			state.fail(err)
@@ -1713,7 +1713,7 @@ func (f *sendReceiveFolder) dbUpdaterRoutine(dbUpdateChan <-chan dbUpdateJob) {
 		// sync directories
 		for dir := range changedDirs {
 			delete(changedDirs, dir)
-			if !f.FolderConfiguration.DisableFsync {
+			if !f.DisableFsync {
 				fd, err := f.mtimefs.Open(dir)
 				if err != nil {
 					l.Debugf("fsync %q failed: %v", dir, err)
@@ -1996,7 +1996,7 @@ func (f *sendReceiveFolder) deleteDirOnDiskHandleChildren(dir string, scanChan c
 			// Let's just assume the file has changed.
 			scanChan <- path
 			hasToBeScanned = true
-			return nil
+			return nil //nolint:nilerr
 		}
 		if !cf.IsEquivalentOptional(diskFile, protocol.FileInfoComparison{
 			ModTimeWindow:   f.modTimeWindow,
@@ -2055,7 +2055,7 @@ func (f *sendReceiveFolder) deleteDirOnDiskHandleChildren(dir string, scanChan c
 // not changed.
 func (f *sendReceiveFolder) scanIfItemChanged(name string, stat fs.FileInfo, item protocol.FileInfo, hasItem bool, fromDelete bool, scanChan chan<- string) (err error) {
 	defer func() {
-		if err == errModified {
+		if errors.Is(err, errModified) {
 			scanChan <- name
 		}
 	}()
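
Dropping state := state here (and the folder := folder copy in model.go further down) assumes Go 1.22 or newer, where every loop iteration gets its own variable; the defensive copy that used to be required before capturing the value in a goroutine or closure is now redundant, which is what copyloopvar-style linters flag. A small illustration of the pattern, not taken from Syncthing:

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	var wg sync.WaitGroup
    	for _, name := range []string{"a", "b", "c"} {
    		// Before Go 1.22 this loop needed "name := name" so each
    		// goroutine captured its own copy; since Go 1.22 every
    		// iteration already has a distinct variable.
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			fmt.Println(name)
    		}()
    	}
    	wg.Wait()
    }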

+ 5 - 4
lib/model/folder_summary.go

@@ -11,6 +11,7 @@ package model
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"strings"
 	"time"
@@ -125,7 +126,7 @@ func (c *folderSummaryService) Summary(folder string) (*FolderSummary, error) {
 	var local, global, need, ro db.Counts
 	var ourSeq int64
 	var remoteSeq map[protocol.DeviceID]int64
-	errors, err := c.model.FolderErrors(folder)
+	errs, err := c.model.FolderErrors(folder)
 	if err == nil {
 		global, _ = c.model.GlobalSize(folder)
 		local, _ = c.model.LocalSize(folder, protocol.LocalDeviceID)
@@ -137,12 +138,12 @@ func (c *folderSummaryService) Summary(folder string) (*FolderSummary, error) {
 	// For API backwards compatibility (SyncTrayzor needs it) an empty folder
 	// summary is returned for not running folders, an error might actually be
 	// more appropriate
-	if err != nil && err != ErrFolderPaused && err != ErrFolderNotRunning {
+	if err != nil && !errors.Is(err, ErrFolderPaused) && !errors.Is(err, ErrFolderNotRunning) {
 		return nil, err
 	}
 
-	res.Errors = len(errors)
-	res.PullErrors = len(errors) // deprecated
+	res.Errors = len(errs)
+	res.PullErrors = len(errs) // deprecated
 
 	res.Invalid = "" // Deprecated, retains external API for now
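
Renaming the local errors slice to errs stops it from shadowing the standard errors package inside this function, which is what makes the newly added errors import and the errors.Is calls below usable. A contrived example of the shadowing problem, with invented names:

    package main

    import "fmt"

    func main() {
    	errors := []error{fmt.Errorf("boom")} // shadows the errors package

    	// In this scope errors.Is can no longer be called: "errors" now
    	// names a []error, and a slice has no Is method, so such a call
    	// would not compile.

    	fmt.Println(len(errors))
    }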
 

+ 7 - 4
lib/model/indexhandler.go

@@ -66,7 +66,8 @@ func newIndexHandler(conn protocol.Connection, downloads *deviceDownloadState, f
 	// about us. Let's check to see if we can start sending index
 	// updates directly or need to send the index from start...
 
-	if startInfo.local.IndexID == myIndexID {
+	switch startInfo.local.IndexID {
+	case myIndexID:
 		// They say they've seen our index ID before, so we can
 		// send a delta update only.
 
@@ -83,15 +84,17 @@ func newIndexHandler(conn protocol.Connection, downloads *deviceDownloadState, f
 			l.Debugf("Device %v folder %s is delta index compatible (mlv=%d)", conn.DeviceID().Short(), folder.Description(), startInfo.local.MaxSequence)
 			startSequence = startInfo.local.MaxSequence
 		}
-	} else if startInfo.local.IndexID != 0 {
+
+	case 0:
+		l.Debugf("Device %v folder %s has no index ID for us", conn.DeviceID().Short(), folder.Description())
+
+	default:
 		// They say they've seen an index ID from us, but it's
 		// not the right one. Either they are confused or we
 		// must have reset our database since last talking to
 		// them. We'll start with a full index transfer.
 		l.Infof("Device %v folder %s has mismatching index ID for us (%v != %v)", conn.DeviceID().Short(), folder.Description(), startInfo.local.IndexID, myIndexID)
 		startSequence = 0
-	} else {
-		l.Debugf("Device %v folder %s has no index ID for us", conn.DeviceID().Short(), folder.Description())
 	}
 
 	// This is the other side's description of themselves. We

+ 1 - 1
lib/model/metrics.go

@@ -18,7 +18,7 @@ var (
 		Name:      "folder_state",
 		Help:      "Current folder state",
 	}, []string{"folder"})
-	metricFolderSummary = promauto.NewGaugeVec(prometheus.GaugeOpts{
+	metricFolderSummary = promauto.NewGaugeVec(prometheus.GaugeOpts{ //nolint:promlinter
 		Namespace: "syncthing",
 		Subsystem: "model",
 		Name:      "folder_summary",

+ 25 - 21
lib/model/model.go

@@ -195,7 +195,7 @@ var (
 	ErrFolderMissing    = errors.New("no such folder")
 	errNoVersioner      = errors.New("folder has no versioner")
 	// errors about why a connection is closed
-	errStopped                            = errors.New("Syncthing is being stopped")
+	errStopped                            = errors.New("Syncthing is being stopped") //nolint:staticcheck
 	errEncryptionInvConfigLocal           = errors.New("can't encrypt outgoing data because local data is encrypted (folder-type receive-encrypted)")
 	errEncryptionInvConfigRemote          = errors.New("remote has encrypted data and encrypts that data for us - this is impossible")
 	errEncryptionNotEncryptedLocal        = errors.New("remote expects to exchange encrypted data, but is configured for plain data")
@@ -622,24 +622,26 @@ func (m *model) UsageReportingStats(report *contract.Report, version int, previe
 
 			for _, line := range lines {
 				// Allow prefixes to be specified in any order, but only once.
+			loop:
 				for {
-					if strings.HasPrefix(line, "!") && !seenPrefix[0] {
+					switch {
+					case strings.HasPrefix(line, "!") && !seenPrefix[0]:
 						seenPrefix[0] = true
 						line = line[1:]
 						report.IgnoreStats.Inverts++
-					} else if strings.HasPrefix(line, "(?i)") && !seenPrefix[1] {
+					case strings.HasPrefix(line, "(?i)") && !seenPrefix[1]:
 						seenPrefix[1] = true
 						line = line[4:]
 						report.IgnoreStats.Folded++
-					} else if strings.HasPrefix(line, "(?d)") && !seenPrefix[2] {
+					case strings.HasPrefix(line, "(?d)") && !seenPrefix[2]:
 						seenPrefix[2] = true
 						line = line[4:]
 						report.IgnoreStats.Deletable++
-					} else {
+					default:
 						seenPrefix[0] = false
 						seenPrefix[1] = false
 						seenPrefix[2] = false
-						break
+						break loop
 					}
 				}
 
@@ -1227,9 +1229,10 @@ func (m *model) ClusterConfig(conn protocol.Connection, cm *protocol.ClusterConf
 	for _, folder := range cm.Folders {
 		info := &clusterConfigDeviceInfo{}
 		for _, dev := range folder.Devices {
-			if dev.ID == m.id {
+			switch dev.ID {
+			case m.id:
 				info.local = dev
-			} else if dev.ID == deviceID {
+			case deviceID:
 				info.remote = dev
 			}
 			if info.local.ID != protocol.EmptyDeviceID && info.remote.ID != protocol.EmptyDeviceID {
@@ -1451,7 +1454,7 @@ func (m *model) ccHandleFolders(folders []protocol.Folder, deviceCfg config.Devi
 			sameError := false
 			m.mut.Lock()
 			if devs, ok := m.folderEncryptionFailures[folder.ID]; ok {
-				sameError = devs[deviceID] == err
+				sameError = devs[deviceID] == err //nolint:errorlint
 			} else {
 				m.folderEncryptionFailures[folder.ID] = make(map[protocol.DeviceID]error)
 			}
@@ -1461,7 +1464,8 @@ func (m *model) ccHandleFolders(folders []protocol.Folder, deviceCfg config.Devi
 			if sameError {
 				l.Debugln(msg)
 			} else {
-				if rerr, ok := err.(*redactedError); ok {
+				var rerr *redactedError
+				if errors.As(err, &rerr) {
 					err = rerr.redacted
 				}
 				m.evLogger.Log(events.Failure, err.Error())
@@ -2015,7 +2019,7 @@ func (m *model) Request(conn protocol.Connection, req *protocol.Request) (out pr
 
 	// The requestResponse releases the bytes to the buffer pool and the
 	// limiters when its Close method is called.
-	res := newLimitedRequestResponse(int(req.Size), limiter, m.globalRequestLimiter)
+	res := newLimitedRequestResponse(req.Size, limiter, m.globalRequestLimiter)
 
 	defer func() {
 		// Close it ourselves if it isn't returned due to an error
@@ -2061,16 +2065,17 @@ func (m *model) Request(conn protocol.Connection, req *protocol.Request) (out pr
 	}
 
 	n, err := readOffsetIntoBuf(folderFs, req.Name, req.Offset, res.data)
-	if fs.IsNotExist(err) {
+	switch {
+	case fs.IsNotExist(err):
 		l.Debugf("%v REQ(in) file doesn't exist: %s: %q / %q o=%d s=%d", m, deviceID.Short(), req.Folder, req.Name, req.Offset, req.Size)
 		return nil, protocol.ErrNoSuchFile
-	} else if err == io.EOF {
+	case errors.Is(err, io.EOF):
 		// Read beyond end of file. This might indicate a problem, or it
 		// might be a short block that gets padded when read for encrypted
 		// folders. We ignore the error and let the hash validation in the
 		// next step take care of it, by only hashing the part we actually
 		// managed to read.
-	} else if err != nil {
+	case err != nil:
 		l.Debugf("%v REQ(in) failed reading file (%v): %s: %q / %q o=%d s=%d", m, err, deviceID.Short(), req.Folder, req.Name, req.Offset, req.Size)
 		return nil, protocol.ErrGeneric
 	}
@@ -2230,13 +2235,13 @@ func (m *model) SetIgnores(folder string, content []string) error {
 
 func (m *model) setIgnores(cfg config.FolderConfiguration, content []string) error {
 	err := cfg.CheckPath()
-	if err == config.ErrPathMissing {
+	if errors.Is(err, config.ErrPathMissing) {
 		if err = cfg.CreateRoot(); err != nil {
 			return fmt.Errorf("failed to create folder root: %w", err)
 		}
 		err = cfg.CheckPath()
 	}
-	if err != nil && err != config.ErrMarkerMissing {
+	if err != nil && !errors.Is(err, config.ErrMarkerMissing) {
 		return err
 	}
 
@@ -2498,7 +2503,6 @@ func (m *model) ScanFolders() map[string]error {
 	wg := sync.NewWaitGroup()
 	wg.Add(len(folders))
 	for _, folder := range folders {
-		folder := folder
 		go func() {
 			err := m.ScanFolder(folder)
 			if err != nil {
@@ -2677,7 +2681,7 @@ func (m *model) WatchError(folder string) error {
 	runner, _ := m.folderRunners.Get(folder)
 	m.mut.RUnlock()
 	if err != nil {
-		return nil // If the folder isn't running, there's no error to report.
+		return nil //nolint:nilerr // If the folder isn't running, there's no error to report.
 	}
 	return runner.WatchError()
 }
@@ -2744,7 +2748,7 @@ func (m *model) GlobalDirectoryTree(folder, prefix string, levels int, dirsOnly
 	prefix = osutil.NativeFilename(prefix)
 
 	if prefix != "" && !strings.HasSuffix(prefix, sep) {
-		prefix = prefix + sep
+		prefix += sep
 	}
 
 	for f, err := range itererr.Zip(m.sdb.AllGlobalFilesPrefix(folder, prefix)) {
@@ -3456,8 +3460,8 @@ type updatedPendingFolder struct {
 // redactPathError checks if the error is actually an os.PathError, and if yes
 // returns a redactedError with the path removed.
 func redactPathError(err error) (error, bool) {
-	perr, ok := err.(*os.PathError)
-	if !ok {
+	var perr *os.PathError
+	if !errors.As(err, &perr) {
 		return nil, false
 	}
 	return &redactedError{
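
Several changes in model.go swap direct comparisons and type assertions on errors (err == config.ErrPathMissing, err.(*redactedError), err.(*os.PathError)) for errors.Is and errors.As, which still match when the error has been wrapped with %w somewhere along the way. A minimal, generic comparison that assumes nothing about Syncthing's own error types:

    package main

    import (
    	"errors"
    	"fmt"
    	"os"
    )

    func main() {
    	_, err := os.Open("/definitely/missing")
    	wrapped := fmt.Errorf("opening config: %w", err)

    	// A plain type assertion only sees the outer wrapper and fails.
    	_, ok := wrapped.(*os.PathError)
    	fmt.Println(ok) // false

    	// errors.As unwraps until it finds a *os.PathError.
    	var perr *os.PathError
    	fmt.Println(errors.As(wrapped, &perr)) // true
    }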

+ 1 - 1
lib/model/sharedpullerstate.go

@@ -384,7 +384,7 @@ func writeEncryptionTrailer(file protocol.FileInfo, writer io.WriterAt) (int64,
 	if err != nil {
 		return 0, err
 	}
-	binary.BigEndian.PutUint32(bs[n:], uint32(n))
+	binary.BigEndian.PutUint32(bs[n:], uint32(n)) //nolint:gosec
 	bs = bs[:n+4]
 
 	if _, err := writer.WriteAt(bs, wireFile.Size); err != nil {
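
The //nolint:gosec annotations in this commit (here on the uint32 conversion, and earlier on the uint64 casts and the rand.Int63n call) silence gosec findings in places where the value is known to be in range or where cryptographic randomness is not required. Where the bound is less obvious, one common alternative is an explicit range check before converting; a generic sketch, not code from this repository:

    package main

    import (
    	"fmt"
    	"math"
    )

    // toUint32 converts n to uint32, returning an error instead of
    // silently truncating when n is negative or too large for 32 bits.
    func toUint32(n int) (uint32, error) {
    	if n < 0 || int64(n) > math.MaxUint32 {
    		return 0, fmt.Errorf("value %d out of range for uint32", n)
    	}
    	return uint32(n), nil
    }

    func main() {
    	v, err := toUint32(12)
    	fmt.Println(v, err)
    }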