rwfolder_test.go

// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package model

import (
	"bytes"
	"context"
	"crypto/rand"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/scanner"
	"github.com/syncthing/syncthing/lib/sync"
)

func TestMain(m *testing.M) {
	// We do this to make sure that the temp file required for the tests
	// does not get removed during the tests. Also set the prefix so it's
	// found correctly regardless of platform.
	if fs.TempPrefix != fs.WindowsTempPrefix {
		originalPrefix := fs.TempPrefix
		fs.TempPrefix = fs.WindowsTempPrefix
		defer func() {
			fs.TempPrefix = originalPrefix
		}()
	}

	future := time.Now().Add(time.Hour)
	err := os.Chtimes(filepath.Join("testdata", fs.TempName("file")), future, future)
	if err != nil {
		panic(err)
	}

	os.Exit(m.Run())
}

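// blocks is the fixture block list used throughout these tests: index 0 is
// the hash of an all-zero block, while indexes 1-8 describe consecutive
// 128 KiB (0x20000 byte) blocks of the test file.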
var blocks = []protocol.BlockInfo{
	{Hash: []uint8{0xfa, 0x43, 0x23, 0x9b, 0xce, 0xe7, 0xb9, 0x7c, 0xa6, 0x2f, 0x0, 0x7c, 0xc6, 0x84, 0x87, 0x56, 0xa, 0x39, 0xe1, 0x9f, 0x74, 0xf3, 0xdd, 0xe7, 0x48, 0x6d, 0xb3, 0xf9, 0x8d, 0xf8, 0xe4, 0x71}}, // Zero'ed out block
	{Offset: 0, Size: 0x20000, Hash: []uint8{0x7e, 0xad, 0xbc, 0x36, 0xae, 0xbb, 0xcf, 0x74, 0x43, 0xe2, 0x7a, 0x5a, 0x4b, 0xb8, 0x5b, 0xce, 0xe6, 0x9e, 0x1e, 0x10, 0xf9, 0x8a, 0xbc, 0x77, 0x95, 0x2, 0x29, 0x60, 0x9e, 0x96, 0xae, 0x6c}},
	{Offset: 131072, Size: 0x20000, Hash: []uint8{0x3c, 0xc4, 0x20, 0xf4, 0xb, 0x2e, 0xcb, 0xb9, 0x5d, 0xce, 0x34, 0xa8, 0xc3, 0x92, 0xea, 0xf3, 0xda, 0x88, 0x33, 0xee, 0x7a, 0xb6, 0xe, 0xf1, 0x82, 0x5e, 0xb0, 0xa9, 0x26, 0xa9, 0xc0, 0xef}},
	{Offset: 262144, Size: 0x20000, Hash: []uint8{0x76, 0xa8, 0xc, 0x69, 0xd7, 0x5c, 0x52, 0xfd, 0xdf, 0x55, 0xef, 0x44, 0xc1, 0xd6, 0x25, 0x48, 0x4d, 0x98, 0x48, 0x4d, 0xaa, 0x50, 0xf6, 0x6b, 0x32, 0x47, 0x55, 0x81, 0x6b, 0xed, 0xee, 0xfb}},
	{Offset: 393216, Size: 0x20000, Hash: []uint8{0x44, 0x1e, 0xa4, 0xf2, 0x8d, 0x1f, 0xc3, 0x1b, 0x9d, 0xa5, 0x18, 0x5e, 0x59, 0x1b, 0xd8, 0x5c, 0xba, 0x7d, 0xb9, 0x8d, 0x70, 0x11, 0x5c, 0xea, 0xa1, 0x57, 0x4d, 0xcb, 0x3c, 0x5b, 0xf8, 0x6c}},
	{Offset: 524288, Size: 0x20000, Hash: []uint8{0x8, 0x40, 0xd0, 0x5e, 0x80, 0x0, 0x0, 0x7c, 0x8b, 0xb3, 0x8b, 0xf7, 0x7b, 0x23, 0x26, 0x28, 0xab, 0xda, 0xcf, 0x86, 0x8f, 0xc2, 0x8a, 0x39, 0xc6, 0xe6, 0x69, 0x59, 0x97, 0xb6, 0x1a, 0x43}},
	{Offset: 655360, Size: 0x20000, Hash: []uint8{0x38, 0x8e, 0x44, 0xcb, 0x30, 0xd8, 0x90, 0xf, 0xce, 0x7, 0x4b, 0x58, 0x86, 0xde, 0xce, 0x59, 0xa2, 0x46, 0xd2, 0xf9, 0xba, 0xaf, 0x35, 0x87, 0x38, 0xdf, 0xd2, 0xd, 0xf9, 0x45, 0xed, 0x91}},
	{Offset: 786432, Size: 0x20000, Hash: []uint8{0x32, 0x28, 0xcd, 0xf, 0x37, 0x21, 0xe5, 0xd4, 0x1e, 0x58, 0x87, 0x73, 0x8e, 0x36, 0xdf, 0xb2, 0x70, 0x78, 0x56, 0xc3, 0x42, 0xff, 0xf7, 0x8f, 0x37, 0x95, 0x0, 0x26, 0xa, 0xac, 0x54, 0x72}},
	{Offset: 917504, Size: 0x20000, Hash: []uint8{0x96, 0x6b, 0x15, 0x6b, 0xc4, 0xf, 0x19, 0x18, 0xca, 0xbb, 0x5f, 0xd6, 0xbb, 0xa2, 0xc6, 0x2a, 0xac, 0xbb, 0x8a, 0xb9, 0xce, 0xec, 0x4c, 0xdb, 0x78, 0xec, 0x57, 0x5d, 0x33, 0xf9, 0x8e, 0xaf}},
}

var folders = []string{"default"}

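// diffTestData drives TestDiff and BenchmarkDiff: a and b are file contents,
// s is the block size, and d is the list of blocks of b that are expected to
// need pulling after diffing against a.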
var diffTestData = []struct {
	a string
	b string
	s int
	d []protocol.BlockInfo
}{
	{"contents", "contents", 1024, []protocol.BlockInfo{}},
	{"", "", 1024, []protocol.BlockInfo{}},
	{"contents", "contents", 3, []protocol.BlockInfo{}},
	{"contents", "cantents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}}},
	{"contents", "contants", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}}},
	{"contents", "cantants", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}}},
	{"contents", "", 3, []protocol.BlockInfo{{Offset: 0, Size: 0}}},
	{"", "contents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"con", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"contents", "con", 3, nil},
	{"contents", "cont", 3, []protocol.BlockInfo{{Offset: 3, Size: 1}}},
	{"cont", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
}

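// setUpFile returns a FileInfo for filename whose block list is assembled
// from the given indexes into the blocks fixture above.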
func setUpFile(filename string, blockNumbers []int) protocol.FileInfo {
	// Create existing file
	existingBlocks := make([]protocol.BlockInfo, len(blockNumbers))
	for i := range blockNumbers {
		existingBlocks[i] = blocks[blockNumbers[i]]
	}

	return protocol.FileInfo{
		Name:   filename,
		Blocks: existingBlocks,
	}
}

func setUpModel(file protocol.FileInfo) *Model {
	db := db.OpenMemory()
	model := NewModel(defaultConfig, protocol.LocalDeviceID, "syncthing", "dev", db, nil)
	model.AddFolder(defaultFolderConfig)
	// Update index
	model.updateLocalsFromScanning("default", []protocol.FileInfo{file})
	return model
}

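// setUpSendReceiveFolder constructs a minimal sendReceiveFolder rooted in
// ./testdata. The folder is never started, so the initial-scan channel is
// closed immediately to unblock the puller machinery under test.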
func setUpSendReceiveFolder(model *Model) *sendReceiveFolder {
	f := &sendReceiveFolder{
		folder: folder{
			stateTracker:        newStateTracker("default"),
			model:               model,
			initialScanFinished: make(chan struct{}),
			ctx:                 context.TODO(),
			FolderConfiguration: config.FolderConfiguration{
				PullerMaxPendingKiB: defaultPullerPendingKiB,
			},
		},

		fs:        fs.NewMtimeFS(fs.NewFilesystem(fs.FilesystemTypeBasic, "testdata"), db.NewNamespacedKV(model.db, "mtime")),
		queue:     newJobQueue(),
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	// Folders are never actually started, so no initial scan will be done
	close(f.initialScanFinished)

	return f
}

// Layout of the files: (indexes from the above array)
// 12345678 - Required file
// 02005008 - Existing file (currently in the index)
// 02340070 - Temp file on the disk

func TestHandleFile(t *testing.T) {
	// After the diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7

	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setUpFile("filex", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m := setUpModel(existingFile)
	f := setUpSendReceiveFolder(m)
	copyChan := make(chan copyBlocksState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.handleFile(requiredFile, copyChan, nil, dbUpdateChan)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 8 {
		t.Errorf("Unexpected count of copy blocks: %d != 8", len(toCopy.blocks))
	}

	for _, block := range blocks[1:] {
		found := false
		for _, toCopyBlock := range toCopy.blocks {
			if string(toCopyBlock.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
	}
}

func TestHandleFileWithTemp(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7

	// After dropping out blocks already on the temp file we should:
	// Copy: 5, 8
	// Pull: 1, 6

	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setUpFile("file", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m := setUpModel(existingFile)
	f := setUpSendReceiveFolder(m)
	copyChan := make(chan copyBlocksState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.handleFile(requiredFile, copyChan, nil, dbUpdateChan)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 4 {
		t.Errorf("Unexpected count of copy blocks: %d != 4", len(toCopy.blocks))
	}

	for _, idx := range []int{1, 5, 6, 8} {
		found := false
		block := blocks[idx]
		for _, toCopyBlock := range toCopy.blocks {
			if string(toCopyBlock.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
	}
}

func TestCopierFinder(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 1, 2, 3, 4, 6, 7, 8
	// Since there is no existing file, nor a temp file

	// After dropping out blocks found locally:
	// Pull: 1, 5, 6, 8

	tempFile := filepath.Join("testdata", fs.TempName("file2"))
	err := os.Remove(tempFile)
	if err != nil && !os.IsNotExist(err) {
		t.Error(err)
	}

	existingBlocks := []int{0, 2, 3, 4, 0, 0, 7, 0}
	existingFile := setUpFile(fs.TempName("file"), existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]
	requiredFile.Name = "file2"

	m := setUpModel(existingFile)
	f := setUpSendReceiveFolder(m)
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 4)
	finisherChan := make(chan *sharedPullerState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	// Run a single fetcher routine
	go f.copierRoutine(copyChan, pullChan, finisherChan)

	f.handleFile(requiredFile, copyChan, finisherChan, dbUpdateChan)

	pulls := []pullBlockState{<-pullChan, <-pullChan, <-pullChan, <-pullChan}
	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	// Verify that the right blocks went into the pull list.
	// They are pulled in random order.
	for _, idx := range []int{1, 5, 6, 8} {
		found := false
		block := blocks[idx]
		for _, pulledBlock := range pulls {
			if string(pulledBlock.block.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
		if string(finish.file.Blocks[idx-1].Hash) != string(blocks[idx].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", idx, finish.file.Blocks[idx-1].String(), blocks[idx].String())
		}
	}

	// Verify that the fetched blocks have actually been written to the temp file
	blks, err := scanner.HashFile(context.TODO(), fs.NewFilesystem(fs.FilesystemTypeBasic, "."), tempFile, protocol.BlockSize, nil, false)
	if err != nil {
		t.Log(err)
	}

	for _, eq := range []int{2, 3, 4, 7} {
		if string(blks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, blks[eq-1].String(), blocks[eq].String())
		}
	}

	finish.fd.Close()
	os.Remove(tempFile)
}

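// TestWeakHash checks that weak-hash assisted copying recognises a file whose
// content has been shifted by a few bytes: without weak hashing every block is
// re-pulled, with it only the blocks covering the shifted prefix are.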
func TestWeakHash(t *testing.T) {
	tempFile := filepath.Join("testdata", fs.TempName("weakhash"))
	var shift int64 = 10
	var size int64 = 1 << 20
	expectBlocks := int(size / protocol.BlockSize)
	expectPulls := int(shift / protocol.BlockSize)
	if shift > 0 {
		expectPulls++
	}

	cleanup := func() {
		for _, path := range []string{tempFile, "testdata/weakhash"} {
			os.Remove(path)
		}
	}

	cleanup()
	defer cleanup()

	f, err := os.Create("testdata/weakhash")
	if err != nil {
		t.Error(err)
	}
	defer f.Close()
	_, err = io.CopyN(f, rand.Reader, size)
	if err != nil {
		t.Error(err)
	}
	info, err := f.Stat()
	if err != nil {
		t.Error(err)
	}

	// Create two files, second file has `shifted` bytes random prefix, yet
	// both are of the same length, for example:
	// File 1: abcdefgh
	// File 2: xyabcdef
	f.Seek(0, os.SEEK_SET)
	existing, err := scanner.Blocks(context.TODO(), f, protocol.BlockSize, size, nil, true)
	if err != nil {
		t.Error(err)
	}

	f.Seek(0, os.SEEK_SET)
	remainder := io.LimitReader(f, size-shift)
	prefix := io.LimitReader(rand.Reader, shift)
	nf := io.MultiReader(prefix, remainder)
	desired, err := scanner.Blocks(context.TODO(), nf, protocol.BlockSize, size, nil, true)
	if err != nil {
		t.Error(err)
	}

	existingFile := protocol.FileInfo{
		Name:       "weakhash",
		Blocks:     existing,
		Size:       size,
		ModifiedS:  info.ModTime().Unix(),
		ModifiedNs: int32(info.ModTime().Nanosecond()),
	}
	desiredFile := protocol.FileInfo{
		Name:      "weakhash",
		Size:      size,
		Blocks:    desired,
		ModifiedS: info.ModTime().Unix() + 1,
	}

	// Setup the model/pull environment
	m := setUpModel(existingFile)
	fo := setUpSendReceiveFolder(m)
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, expectBlocks)
	finisherChan := make(chan *sharedPullerState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	// Run a single fetcher routine
	go fo.copierRoutine(copyChan, pullChan, finisherChan)

	// Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
	fo.WeakHashThresholdPct = 101
	fo.handleFile(desiredFile, copyChan, finisherChan, dbUpdateChan)

	var pulls []pullBlockState
	for len(pulls) < expectBlocks {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-time.After(10 * time.Second):
			t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectBlocks)
		}
	}
	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	finish.fd.Close()
	if err := os.Remove(tempFile); err != nil && !os.IsNotExist(err) {
		t.Error(err)
	}

	// Test 2 - using weak hash, expectPulls blocks pulled.
	fo.WeakHashThresholdPct = -1
	fo.handleFile(desiredFile, copyChan, finisherChan, dbUpdateChan)

	pulls = pulls[:0]
	for len(pulls) < expectPulls {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-time.After(10 * time.Second):
			t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
		}
	}

	finish = <-finisherChan
	finish.fd.Close()

	expectShifted := expectBlocks - expectPulls
	if finish.copyOriginShifted != expectShifted {
		t.Errorf("did not copy %d shifted", expectShifted)
	}
}

// Test that updating a file removes its old blocks from the blockmap
func TestCopierCleanup(t *testing.T) {
	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	// Create a file
	file := setUpFile("test", []int{0})
	m := setUpModel(file)

	file.Blocks = []protocol.BlockInfo{blocks[1]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocalsFromScanning("default", []protocol.FileInfo{file})

	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Unexpected block found")
	}

	if !m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[0]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocalsFromScanning("default", []protocol.FileInfo{file})

	if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	if m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Unexpected block found")
	}
}

// Make sure that the copier routine hashes the content when asked, and pulls
// if it fails to find the block.
func TestLastResortPulling(t *testing.T) {
	// Add a file to index (with the incorrect block representation, as content
	// doesn't actually match the block list)
	file := setUpFile("empty", []int{0})
	m := setUpModel(file)

	// Pretend that we are handling a new file of the same content but
	// with a different name (causing to copy that particular block)
	file.Name = "newfile"

	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	f := setUpSendReceiveFolder(m)

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 1)
	finisherChan := make(chan *sharedPullerState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	// Run a single copier routine
	go f.copierRoutine(copyChan, pullChan, finisherChan)

	f.handleFile(file, copyChan, finisherChan, dbUpdateChan)

	// Copier should hash empty file, realise that the region it has read
	// doesn't match the hash which was advertised by the block map, fix it
	// and ask to pull the block.
	<-pullChan

	// Verify that it did fix the incorrect hash.
	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Found unexpected block")
	}

	if !m.finder.Iterate(folders, scanner.SHA256OfNothing, iterFn) {
		t.Error("Expected block not found")
	}

	(<-finisherChan).fd.Close()
	os.Remove(filepath.Join("testdata", fs.TempName("newfile")))
}

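// TestDeregisterOnFailInCopy checks that a file whose transfer fails during
// the copy phase is removed from both the job queue and the progress emitter
// by the finisher routine, and that finishing it a second time is a no-op.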
func TestDeregisterOnFailInCopy(t *testing.T) {
	file := setUpFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
	defer os.Remove("testdata/" + fs.TempName("filex"))

	db := db.OpenMemory()

	m := NewModel(defaultConfig, protocol.LocalDeviceID, "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	f := setUpSendReceiveFolder(m)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	go f.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go f.finisherRoutine(ignore.New(defaultFs), finisherChan, dbUpdateChan, make(chan string))

	f.handleFile(file, copyChan, finisherChan, dbUpdateChan)

	// Receive a block at puller, to indicate that at least a single copier
	// loop has been performed.
	toPull := <-pullChan

	// Wait until copier is trying to pass something down to the puller again
	time.Sleep(100 * time.Millisecond)

	// Close the file
	toPull.sharedPullerState.fail("test", os.ErrNotExist)

	// Unblock copier
	<-pullChan

	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}

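// TestDeregisterOnFailInPull does the same for a failure in the pull phase:
// the puller has no device to pull from, so the finisher must still clean up
// the job queue and the progress registry.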
func TestDeregisterOnFailInPull(t *testing.T) {
	file := setUpFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
	defer os.Remove("testdata/" + fs.TempName("filex"))

	db := db.OpenMemory()
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	f := setUpSendReceiveFolder(m)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	go f.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go f.pullerRoutine(pullChan, finisherBufferChan)
	go f.finisherRoutine(ignore.New(defaultFs), finisherChan, dbUpdateChan, make(chan string))

	f.handleFile(file, copyChan, finisherChan, dbUpdateChan)

	// Receive at finisher, we should error out as puller has nowhere to pull
	// from.
	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}

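// TestIssue3164 verifies that deleting a directory succeeds even when it still
// contains files matched by a (?d) "deletable" ignore pattern.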
func TestIssue3164(t *testing.T) {
	m := setUpModel(protocol.FileInfo{})
	f := setUpSendReceiveFolder(m)

	defaultFs.RemoveAll("issue3164")
	defer defaultFs.RemoveAll("issue3164")

	if err := defaultFs.MkdirAll("issue3164/oktodelete/foobar", 0777); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile("testdata/issue3164/oktodelete/foobar/file", []byte("Hello"), 0644); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile("testdata/issue3164/oktodelete/file", []byte("Hello"), 0644); err != nil {
		t.Fatal(err)
	}

	file := protocol.FileInfo{
		Name: "issue3164",
	}

	matcher := ignore.New(defaultFs)
	if err := matcher.Parse(bytes.NewBufferString("(?d)oktodelete"), ""); err != nil {
		t.Fatal(err)
	}

	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.handleDeleteDir(file, matcher, dbUpdateChan, make(chan string))

	if _, err := defaultFs.Stat("testdata/issue3164"); !fs.IsNotExist(err) {
		t.Fatal(err)
	}
}

func TestDiff(t *testing.T) {
	for i, test := range diffTestData {
		a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
		b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
		_, d := blockDiff(a, b)
		if len(d) != len(test.d) {
			t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
		} else {
			for j := range test.d {
				if d[j].Offset != test.d[j].Offset {
					t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
				}
				if d[j].Size != test.d[j].Size {
					t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
				}
			}
		}
	}
}

func BenchmarkDiff(b *testing.B) {
	testCases := make([]struct{ a, b []protocol.BlockInfo }, 0, len(diffTestData))
	for _, test := range diffTestData {
		a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
		b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
		testCases = append(testCases, struct{ a, b []protocol.BlockInfo }{a, b})
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for _, tc := range testCases {
			blockDiff(tc.a, tc.b)
		}
	}
}

func TestDiffEmpty(t *testing.T) {
	emptyCases := []struct {
		a    []protocol.BlockInfo
		b    []protocol.BlockInfo
		need int
		have int
	}{
		{nil, nil, 0, 0},
		{[]protocol.BlockInfo{{Offset: 3, Size: 1}}, nil, 0, 0},
		{nil, []protocol.BlockInfo{{Offset: 3, Size: 1}}, 1, 0},
	}
	for _, emptyCase := range emptyCases {
		h, n := blockDiff(emptyCase.a, emptyCase.b)
		if len(h) != emptyCase.have {
			t.Errorf("incorrect have: %d != %d", len(h), emptyCase.have)
		}
		if len(n) != emptyCase.need {
			t.Errorf("incorrect need: %d != %d", len(n), emptyCase.need)
		}
	}
}