db_bench_test.go

// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package sqlite

import (
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/syncthing/syncthing/internal/timeutil"
	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/rand"
)
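
// globalFi is a package-level sink for the file infos read in the benchmark
// loops below; assigning to it keeps those reads from being optimized away,
// and it supplies a known block hash for the AllLocalBlocksWithHash benchmark.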
var globalFi protocol.FileInfo
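
// BenchmarkUpdate grows the local file set in steps of 1000 files up to
// 200_000 and, at each size, runs a set of sub-benchmarks covering inserts,
// block and version updates, global lookups, sequence queries, and need
// calculations.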
func BenchmarkUpdate(b *testing.B) {
	db, err := Open(b.TempDir())
	if err != nil {
		b.Fatal(err)
	}
	b.Cleanup(func() {
		if err := db.Close(); err != nil {
			b.Fatal(err)
		}
	})

	fs := make([]protocol.FileInfo, 100)
	t0 := time.Now()
	seed := 0
	size := 1000
	const numBlocks = 500

	fdb, err := db.getFolderDB(folderID, true)
	if err != nil {
		b.Fatal(err)
	}

	for size < 200_000 {
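		// Grow the database until the local device has at least `size` files.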
		for {
			local, err := db.CountLocal(folderID, protocol.LocalDeviceID)
			if err != nil {
				b.Fatal(err)
			}
			if local.Files >= size {
				break
			}
			fs := make([]protocol.FileInfo, 1000)
			for i := range fs {
				fs[i] = genFile(rand.String(24), numBlocks, 0)
			}
			if err := db.Update(folderID, protocol.LocalDeviceID, fs); err != nil {
				b.Fatal(err)
			}
		}

		var files, blocks int
		if err := fdb.sql.QueryRowx(`SELECT count(*) FROM files`).Scan(&files); err != nil {
			b.Fatal(err)
		}
		if err := fdb.sql.QueryRowx(`SELECT count(*) FROM blocks`).Scan(&blocks); err != nil {
			b.Fatal(err)
		}
		d := time.Since(t0)
		b.Logf("t=%s, files=%d, blocks=%d, files/s=%.01f, blocks/s=%.01f", d, files, blocks, float64(files)/d.Seconds(), float64(blocks)/d.Seconds())
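
		// Insert 100 new local files per iteration.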
		b.Run(fmt.Sprintf("n=Insert100Loc/size=%d", size), func(b *testing.B) {
			for range b.N {
				for i := range fs {
					fs[i] = genFile(rand.String(24), numBlocks, 0)
				}
				if err := db.Update(folderID, protocol.LocalDeviceID, fs); err != nil {
					b.Fatal(err)
				}
			}
			b.ReportMetric(float64(b.N)*100.0/b.Elapsed().Seconds(), "files/s")
		})
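
		// Replace the block lists of 100 existing local files and bump their versions.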
		b.Run(fmt.Sprintf("n=RepBlocks100/size=%d", size), func(b *testing.B) {
			for range b.N {
				for i := range fs {
					fs[i].Blocks = genBlocks(fs[i].Name, seed, 64)
					fs[i].Version = fs[i].Version.Update(42)
				}
				seed++
				if err := db.Update(folderID, protocol.LocalDeviceID, fs); err != nil {
					b.Fatal(err)
				}
			}
			b.ReportMetric(float64(b.N)*100.0/b.Elapsed().Seconds(), "files/s")
		})
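
		// Re-announce the same 100 local files with only a version bump.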
		b.Run(fmt.Sprintf("n=RepSame100/size=%d", size), func(b *testing.B) {
			for range b.N {
				for i := range fs {
					fs[i].Version = fs[i].Version.Update(42)
				}
				if err := db.Update(folderID, protocol.LocalDeviceID, fs); err != nil {
					b.Fatal(err)
				}
			}
			b.ReportMetric(float64(b.N)*100.0/b.Elapsed().Seconds(), "files/s")
		})
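
		// Insert 100 files for a remote device, with fresh blocks, versions and sequences.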
		b.Run(fmt.Sprintf("n=Insert100Rem/size=%d", size), func(b *testing.B) {
			for range b.N {
				for i := range fs {
					fs[i].Blocks = genBlocks(fs[i].Name, seed, 64)
					fs[i].Version = fs[i].Version.Update(42)
					fs[i].Sequence = timeutil.StrictlyMonotonicNanos()
				}
				if err := db.Update(folderID, protocol.DeviceID{42}, fs); err != nil {
					b.Fatal(err)
				}
			}
			b.ReportMetric(float64(b.N)*100.0/b.Elapsed().Seconds(), "files/s")
		})
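
		// Look up 100 files by name in the global table.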
		b.Run(fmt.Sprintf("n=GetGlobal100/size=%d", size), func(b *testing.B) {
			for range b.N {
				for i := range fs {
					_, ok, err := db.GetGlobalFile(folderID, fs[i].Name)
					if err != nil {
						b.Fatal(err)
					}
					if !ok {
						b.Fatal("should exist")
					}
				}
			}
			b.ReportMetric(float64(b.N)*100.0/b.Elapsed().Seconds(), "files/s")
		})
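
		// Iterate roughly the last 100 local files by sequence number.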
		b.Run(fmt.Sprintf("n=LocalSequenced/size=%d", size), func(b *testing.B) {
			count := 0
			for range b.N {
				cur, err := db.GetDeviceSequence(folderID, protocol.LocalDeviceID)
				if err != nil {
					b.Fatal(err)
				}
				it, errFn := db.AllLocalFilesBySequence(folderID, protocol.LocalDeviceID, cur-100, 0)
				for f := range it {
					count++
					globalFi = f
				}
				if err := errFn(); err != nil {
					b.Fatal(err)
				}
			}
			b.ReportMetric(float64(count)/b.Elapsed().Seconds(), "files/s")
		})
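
		// Iterate all local blocks matching a known block hash.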
		b.Run(fmt.Sprintf("n=AllLocalBlocksWithHash/size=%d", size), func(b *testing.B) {
			count := 0
			for range b.N {
				it, errFn := db.AllLocalBlocksWithHash(folderID, globalFi.Blocks[0].Hash)
				for range it {
					count++
				}
				if err := errFn(); err != nil {
					b.Fatal(err)
				}
			}
			b.ReportMetric(float64(count)/b.Elapsed().Seconds(), "blocks/s")
		})
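
		// Fetch the current sequence number for the local device and a remote device.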
		b.Run(fmt.Sprintf("n=GetDeviceSequenceLoc/size=%d", size), func(b *testing.B) {
			for range b.N {
				_, err := db.GetDeviceSequence(folderID, protocol.LocalDeviceID)
				if err != nil {
					b.Fatal(err)
				}
			}
		})
		b.Run(fmt.Sprintf("n=GetDeviceSequenceRem/size=%d", size), func(b *testing.B) {
			for range b.N {
				_, err := db.GetDeviceSequence(folderID, protocol.DeviceID{42})
				if err != nil {
					b.Fatal(err)
				}
			}
		})
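
		// Iterate everything the remote device needs, in alphabetic order.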
		b.Run(fmt.Sprintf("n=RemoteNeed/size=%d", size), func(b *testing.B) {
			count := 0
			for range b.N {
				it, errFn := db.AllNeededGlobalFiles(folderID, protocol.DeviceID{42}, config.PullOrderAlphabetic, 0, 0)
				for f := range it {
					count++
					globalFi = f
				}
				if err := errFn(); err != nil {
					b.Fatal(err)
				}
			}
			b.ReportMetric(float64(count)/b.Elapsed().Seconds(), "files/s")
		})
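
		// Iterate the 100 largest files needed locally.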
		b.Run(fmt.Sprintf("n=LocalNeed100Largest/size=%d", size), func(b *testing.B) {
			count := 0
			for range b.N {
				it, errFn := db.AllNeededGlobalFiles(folderID, protocol.LocalDeviceID, config.PullOrderLargestFirst, 100, 0)
				for f := range it {
					globalFi = f
					count++
				}
				if err := errFn(); err != nil {
					b.Fatal(err)
				}
			}
			b.ReportMetric(float64(count)/b.Elapsed().Seconds(), "files/s")
		})

		size += 1000
	}
}
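
// TestBenchmarkDropAllRemote populates the database with 15_000 files for
// both the local device and a remote device, then times how long it takes to
// drop all files for the remote. It only runs when LONG_TEST is set.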
func TestBenchmarkDropAllRemote(t *testing.T) {
	if testing.Short() || os.Getenv("LONG_TEST") == "" {
		t.Skip("slow test")
	}

	db, err := Open(t.TempDir())
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		if err := db.Close(); err != nil {
			t.Fatal(err)
		}
	})

	fs := make([]protocol.FileInfo, 1000)
	seq := 0
	for {
		local, err := db.CountLocal(folderID, protocol.LocalDeviceID)
		if err != nil {
			t.Fatal(err)
		}
		if local.Files >= 15_000 {
			break
		}
		for i := range fs {
			seq++
			fs[i] = genFile(rand.String(24), 64, seq)
		}
		if err := db.Update(folderID, protocol.DeviceID{42}, fs); err != nil {
			t.Fatal(err)
		}
		if err := db.Update(folderID, protocol.LocalDeviceID, fs); err != nil {
			t.Fatal(err)
		}
	}

	t0 := time.Now()
	if err := db.DropAllFiles(folderID, protocol.DeviceID{42}); err != nil {
		t.Fatal(err)
	}
	d := time.Since(t0)
	t.Log("drop all took", d)
}

func TestBenchmarkSizeManyFilesRemotes(t *testing.T) {
	// Reports the database size for a setup with many files and many remote
	// devices, each announcing every file, with fairly long file names and
	// "worst case" version vectors.
	if testing.Short() || os.Getenv("LONG_TEST") == "" {
		t.Skip("slow test")
	}

	dir := t.TempDir()
	db, err := Open(dir)
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		if err := db.Close(); err != nil {
			t.Fatal(err)
		}
	})

	// This is equivalent to about 800 GiB in 100k files (i.e., 8 MiB per
	// file), shared between 31 devices, each of which has touched every file.
	const numFiles = 1e5
	const numRemotes = 30
	const numBlocks = 64
	const filenameLen = 64

	fs := make([]protocol.FileInfo, 1000)
	n := 0
	seq := 0
	for n < numFiles {
		for i := range fs {
			seq++
			fs[i] = genFile(rand.String(filenameLen), numBlocks, seq)
			for r := range numRemotes {
				fs[i].Version = fs[i].Version.Update(42 + protocol.ShortID(r))
			}
		}
		if err := db.Update(folderID, protocol.LocalDeviceID, fs); err != nil {
			t.Fatal(err)
		}
		for r := range numRemotes {
			if err := db.Update(folderID, protocol.DeviceID{byte(42 + r)}, fs); err != nil {
				t.Fatal(err)
			}
		}
		n += len(fs)
		t.Log(n, (numRemotes+1)*n)
	}

	if err := db.Close(); err != nil {
		t.Fatal(err)
	}

	size := osutil.DirSize(dir)
	t.Logf("Total size: %.02f MiB", float64(size)/1024/1024)
}