db_service.go

// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package sqlite

import (
	"context"
	"encoding/binary"
	"fmt"
	"log/slog"
	"math/rand"
	"strings"
	"time"

	"github.com/jmoiron/sqlx"

	"github.com/syncthing/syncthing/internal/db"
	"github.com/syncthing/syncthing/internal/slogutil"
	"github.com/syncthing/syncthing/lib/protocol"
)

const (
	internalMetaPrefix     = "dbsvc"
	lastMaintKey           = "lastMaint"
	lastSuccessfulGCSeqKey = "lastSuccessfulGCSeq"

	gcMinChunks  = 5
	gcChunkSize  = 100_000         // approximate number of rows to process in a single GC query
	gcMaxRuntime = 5 * time.Minute // max time to spend on GC, per table, per run
)

func (s *DB) Service(maintenanceInterval time.Duration) db.DBService {
	return newService(s, maintenanceInterval)
}

type Service struct {
	sdb                 *DB
	maintenanceInterval time.Duration
	internalMeta        *db.Typed
	start               chan struct{}
}

func (s *Service) String() string {
	return fmt.Sprintf("sqlite.service@%p", s)
}

func newService(sdb *DB, maintenanceInterval time.Duration) *Service {
	return &Service{
		sdb:                 sdb,
		maintenanceInterval: maintenanceInterval,
		internalMeta:        db.NewTyped(sdb, internalMetaPrefix),
		start:               make(chan struct{}),
	}
}

func (s *Service) StartMaintenance() {
	select {
	case s.start <- struct{}{}:
	default:
	}
}
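
// A minimal usage sketch, not part of the original file: it assumes sdb is
// an opened *DB and that the db.DBService returned by Service exposes Serve
// and StartMaintenance the way *Service does.
//
//	svc := sdb.Service(4 * time.Hour) // run maintenance every four hours
//	go svc.Serve(ctx)                 // serve until ctx is cancelled
//	svc.StartMaintenance()            // request an immediate run (non-blocking)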

func (s *Service) Serve(ctx context.Context) error {
	// Run periodic maintenance.

	// Figure out when we last ran maintenance and schedule accordingly. If
	// it was never, do it now.
	lastMaint, _, _ := s.internalMeta.Time(lastMaintKey)
	nextMaint := lastMaint.Add(s.maintenanceInterval)
	wait := time.Until(nextMaint)
	if wait < 0 {
		wait = time.Minute
	}
	slog.DebugContext(ctx, "Next periodic run due", "after", wait)

	timer := time.NewTimer(wait)
	if s.maintenanceInterval == 0 {
		timer.Stop()
	}

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-timer.C:
		case <-s.start:
		}

		if err := s.periodic(ctx); err != nil {
			return wrap(err)
		}

		if s.maintenanceInterval != 0 {
			timer.Reset(s.maintenanceInterval)
			slog.DebugContext(ctx, "Next periodic run due", "after", s.maintenanceInterval)
		}
		_ = s.internalMeta.PutTime(lastMaintKey, time.Now())
	}
}

func (s *Service) periodic(ctx context.Context) error {
	t0 := time.Now()
	slog.DebugContext(ctx, "Periodic start")

	t1 := time.Now()
	defer func() { slog.DebugContext(ctx, "Periodic done in", "t1", time.Since(t1), "t0t1", t1.Sub(t0)) }()

	s.sdb.updateLock.Lock()
	err := tidy(ctx, s.sdb.sql)
	s.sdb.updateLock.Unlock()
	if err != nil {
		return err
	}

	return wrap(s.sdb.forEachFolder(func(fdb *folderDB) error {
		// Get the current device sequence, for comparison in the next step.
		seq, err := fdb.GetDeviceSequence(protocol.LocalDeviceID)
		if err != nil {
			return wrap(err)
		}

		// Get the last successful GC sequence. If it's the same as the
		// current sequence, nothing has changed and we can skip the GC
		// entirely.
		meta := db.NewTyped(fdb, internalMetaPrefix)
		if prev, _, err := meta.Int64(lastSuccessfulGCSeqKey); err != nil {
			return wrap(err)
		} else if seq == prev {
			slog.DebugContext(ctx, "Skipping unnecessary GC", "folder", fdb.folderID, "fdb", fdb.baseName)
			return nil
		}

		// Run the GC steps, in a function to be able to use a deferred
		// unlock.
		if err := func() error {
			fdb.updateLock.Lock()
			defer fdb.updateLock.Unlock()
			if err := garbageCollectOldDeletedLocked(ctx, fdb); err != nil {
				return wrap(err)
			}
			if err := garbageCollectNamesAndVersions(ctx, fdb); err != nil {
				return wrap(err)
			}
			if err := garbageCollectBlocklistsAndBlocksLocked(ctx, fdb); err != nil {
				return wrap(err)
			}
			return tidy(ctx, fdb.sql)
		}(); err != nil {
			return wrap(err)
		}

		// Update the successful GC sequence.
		return wrap(meta.PutInt64(lastSuccessfulGCSeqKey, seq))
	}))
}

func tidy(ctx context.Context, db *sqlx.DB) error {
	conn, err := db.Conn(ctx)
	if err != nil {
		return wrap(err)
	}
	defer conn.Close()

	// Best effort: refresh the query planner statistics, reclaim free
	// pages, and keep the journal and WAL from growing without bound.
	// Failures here are not fatal.
	_, _ = conn.ExecContext(ctx, `ANALYZE`)
	_, _ = conn.ExecContext(ctx, `PRAGMA optimize`)
	_, _ = conn.ExecContext(ctx, `PRAGMA incremental_vacuum`)
	_, _ = conn.ExecContext(ctx, `PRAGMA journal_size_limit = 8388608`)
	_, _ = conn.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`)
	return nil
}

func garbageCollectNamesAndVersions(ctx context.Context, fdb *folderDB) error {
	l := slog.With("folder", fdb.folderID, "fdb", fdb.baseName)

	res, err := fdb.stmt(`
		DELETE FROM file_names
		WHERE NOT EXISTS (SELECT 1 FROM files f WHERE f.name_idx = idx)
	`).Exec()
	if err != nil {
		return wrap(err, "delete names")
	}
	if aff, err := res.RowsAffected(); err == nil {
		l.DebugContext(ctx, "Removed old file names", "affected", aff)
	}

	res, err = fdb.stmt(`
		DELETE FROM file_versions
		WHERE NOT EXISTS (SELECT 1 FROM files f WHERE f.version_idx = idx)
	`).Exec()
	if err != nil {
		return wrap(err, "delete versions")
	}
	if aff, err := res.RowsAffected(); err == nil {
		l.DebugContext(ctx, "Removed old file versions", "affected", aff)
	}

	return nil
}

func garbageCollectOldDeletedLocked(ctx context.Context, fdb *folderDB) error {
	l := slog.With("folder", fdb.folderID, "fdb", fdb.baseName)

	if fdb.deleteRetention <= 0 {
		l.DebugContext(ctx, "Delete retention is infinite, skipping cleanup")
		return nil
	}

	// Remove deleted files that are marked as not needed (we have processed
	// them) and were deleted longer ago than the delete retention period.
	l.DebugContext(ctx, "Forgetting deleted files", "retention", fdb.deleteRetention)
	res, err := fdb.stmt(`
		DELETE FROM files
		WHERE deleted AND modified < ? AND local_flags & {{.FlagLocalNeeded}} == 0
	`).Exec(time.Now().Add(-fdb.deleteRetention).UnixNano())
	if err != nil {
		return wrap(err)
	}
	if aff, err := res.RowsAffected(); err == nil {
		l.DebugContext(ctx, "Removed old deleted file records", "affected", aff)
	}
	return nil
}
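
// For illustration: {{.FlagLocalNeeded}} above is filled in by statement
// templating before the query is prepared. Assuming a hypothetical flag
// value of 8, the executed statement would read:
//
//	DELETE FROM files
//	WHERE deleted AND modified < ? AND local_flags & 8 == 0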

func garbageCollectBlocklistsAndBlocksLocked(ctx context.Context, fdb *folderDB) error {
	// Remove all blocklists not referred to by any files and, by extension,
	// any blocks not referred to by a blocklist. This is an expensive
	// operation when run normally, especially if there are a lot of blocks
	// to collect.
	//
	// We make this orders of magnitude faster by disabling foreign keys for
	// the transaction and doing the cleanup manually. This requires using
	// an explicit connection and disabling foreign keys before starting the
	// transaction. We make sure to clean up on the way out.

	conn, err := fdb.sql.Connx(ctx)
	if err != nil {
		return wrap(err)
	}
	defer conn.Close()

	if _, err := conn.ExecContext(ctx, `PRAGMA foreign_keys = 0`); err != nil {
		return wrap(err)
	}
	defer func() { //nolint:contextcheck
		_, _ = conn.ExecContext(context.Background(), `PRAGMA foreign_keys = 1`)
	}()

	tx, err := conn.BeginTxx(ctx, nil)
	if err != nil {
		return wrap(err)
	}
	defer tx.Rollback() //nolint:errcheck

	// Both blocklists and blocks refer to blocklist_hash from the files table.
	for _, table := range []string{"blocklists", "blocks"} {
		// Count the number of rows.
		var rows int64
		if err := tx.GetContext(ctx, &rows, `SELECT count(*) FROM `+table); err != nil {
			return wrap(err)
		}
		chunks := max(gcMinChunks, rows/gcChunkSize)
		l := slog.With("folder", fdb.folderID, "fdb", fdb.baseName, "table", table, "rows", rows, "chunks", chunks)

		// Process rows in chunks up to a given time limit. We always use at
		// least gcMinChunks chunks, then increase the number as the number
		// of rows exceeds gcMinChunks*gcChunkSize.
		t0 := time.Now()
		for i, br := range randomBlobRanges(int(chunks)) {
			if d := time.Since(t0); d > gcMaxRuntime {
				l.InfoContext(ctx, "GC was interrupted due to exceeding time limit", "processed", i, "runtime", d)
				break
			}

			// The limit column must be an indexed column with a mostly
			// random distribution of blobs. That's the blocklist_hash
			// column for blocklists, and the hash column for blocks.
			limitColumn := table + ".blocklist_hash"
			if table == "blocks" {
				limitColumn = "blocks.hash"
			}

			q := fmt.Sprintf(`
				DELETE FROM %s
				WHERE %s AND NOT EXISTS (
					SELECT 1 FROM files WHERE files.blocklist_hash = %s.blocklist_hash
				)`, table, br.SQL(limitColumn), table)
			if res, err := tx.ExecContext(ctx, q); err != nil {
				return wrap(err, "delete from "+table)
			} else {
				l.DebugContext(ctx, "GC query result", "processed", i, "runtime", time.Since(t0), "result", slogutil.Expensive(func() any {
					rows, err := res.RowsAffected()
					if err != nil {
						return slogutil.Error(err)
					}
					return slog.Int64("rows", rows)
				}))
			}
		}
	}

	return wrap(tx.Commit())
}
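
// For illustration: with table "blocks" and a chunk covering the range
// [x'555555', x'aaaaaa'), the loop above generates roughly:
//
//	DELETE FROM blocks
//	WHERE blocks.hash >= x'555555' AND blocks.hash < x'aaaaaa' AND NOT EXISTS (
//		SELECT 1 FROM files WHERE files.blocklist_hash = blocks.blocklist_hash
//	)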

// blobRange defines a range for blob searching. A range is open ended if
// start or end is nil.
type blobRange struct {
	start, end []byte
}

// SQL returns the SQL where clause for the given range, e.g.
// `column >= x'49249248' AND column < x'6db6db6c'`
func (r blobRange) SQL(name string) string {
	var sb strings.Builder
	if r.start != nil {
		fmt.Fprintf(&sb, "%s >= x'%x'", name, r.start)
	}
	if r.start != nil && r.end != nil {
		sb.WriteString(" AND ")
	}
	if r.end != nil {
		fmt.Fprintf(&sb, "%s < x'%x'", name, r.end)
	}
	return sb.String()
}

// randomBlobRanges returns n blobRanges in random order.
func randomBlobRanges(n int) []blobRange {
	ranges := blobRanges(n)
	rand.Shuffle(len(ranges), func(i, j int) { ranges[i], ranges[j] = ranges[j], ranges[i] })
	return ranges
}

// blobRanges returns n consecutive blobRanges.
func blobRanges(n int) []blobRange {
	// We use three byte (24 bit) prefixes to get fairly granular ranges
	// and easy bit conversions.
	rangeSize := (1 << 24) / n
	ranges := make([]blobRange, 0, n)
	var prev []byte
	for i := range n {
		var pref []byte
		if i < n-1 {
			end := (i + 1) * rangeSize
			pref = intToBlob(end)
		}
		ranges = append(ranges, blobRange{prev, pref})
		prev = pref
	}
	return ranges
}
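
// For illustration, blobRanges(3) splits the 24-bit prefix space into three
// contiguous ranges, open ended at the extremes:
//
//	{nil, 0x555555}      // column < x'555555'
//	{0x555555, 0xaaaaaa} // column >= x'555555' AND column < x'aaaaaa'
//	{0xaaaaaa, nil}      // column >= x'aaaaaa'
//
// where 0x555555 stands for intToBlob(5592405) = []byte{0x55, 0x55, 0x55}.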

func intToBlob(n int) []byte {
	var pref [4]byte
	binary.BigEndian.PutUint32(pref[:], uint32(n)) //nolint:gosec
	// The first byte is always zero and not part of the range.
	return pref[1:]
}