// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package model

import (
	"context"
	"crypto/rand"
	"io"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/scanner"
	"github.com/syncthing/syncthing/lib/sync"
)
func TestMain(m *testing.M) {
	// We do this to make sure that the temp file required for the tests
	// does not get removed during the tests. Also set the prefix so it's
	// found correctly regardless of platform.
	if ignore.TempPrefix != ignore.WindowsTempPrefix {
		originalPrefix := ignore.TempPrefix
		ignore.TempPrefix = ignore.WindowsTempPrefix
		defer func() {
			ignore.TempPrefix = originalPrefix
		}()
	}

	future := time.Now().Add(time.Hour)
	err := os.Chtimes(filepath.Join("testdata", ignore.TempName("file")), future, future)
	if err != nil {
		panic(err)
	}

	os.Exit(m.Run())
}
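// blocks is the fixed list of block hashes used throughout these tests.
// Index 0 is the hash of a zeroed-out block; indices 1 through 8 describe
// eight consecutive 128 KiB (0x20000 byte) blocks. The tests below describe
// files as digit strings such as "02005008", where each digit is an index
// into this slice.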
var blocks = []protocol.BlockInfo{
	{Hash: []uint8{0xfa, 0x43, 0x23, 0x9b, 0xce, 0xe7, 0xb9, 0x7c, 0xa6, 0x2f, 0x0, 0x7c, 0xc6, 0x84, 0x87, 0x56, 0xa, 0x39, 0xe1, 0x9f, 0x74, 0xf3, 0xdd, 0xe7, 0x48, 0x6d, 0xb3, 0xf9, 0x8d, 0xf8, 0xe4, 0x71}}, // Zero'ed out block
	{Offset: 0, Size: 0x20000, Hash: []uint8{0x7e, 0xad, 0xbc, 0x36, 0xae, 0xbb, 0xcf, 0x74, 0x43, 0xe2, 0x7a, 0x5a, 0x4b, 0xb8, 0x5b, 0xce, 0xe6, 0x9e, 0x1e, 0x10, 0xf9, 0x8a, 0xbc, 0x77, 0x95, 0x2, 0x29, 0x60, 0x9e, 0x96, 0xae, 0x6c}},
	{Offset: 131072, Size: 0x20000, Hash: []uint8{0x3c, 0xc4, 0x20, 0xf4, 0xb, 0x2e, 0xcb, 0xb9, 0x5d, 0xce, 0x34, 0xa8, 0xc3, 0x92, 0xea, 0xf3, 0xda, 0x88, 0x33, 0xee, 0x7a, 0xb6, 0xe, 0xf1, 0x82, 0x5e, 0xb0, 0xa9, 0x26, 0xa9, 0xc0, 0xef}},
	{Offset: 262144, Size: 0x20000, Hash: []uint8{0x76, 0xa8, 0xc, 0x69, 0xd7, 0x5c, 0x52, 0xfd, 0xdf, 0x55, 0xef, 0x44, 0xc1, 0xd6, 0x25, 0x48, 0x4d, 0x98, 0x48, 0x4d, 0xaa, 0x50, 0xf6, 0x6b, 0x32, 0x47, 0x55, 0x81, 0x6b, 0xed, 0xee, 0xfb}},
	{Offset: 393216, Size: 0x20000, Hash: []uint8{0x44, 0x1e, 0xa4, 0xf2, 0x8d, 0x1f, 0xc3, 0x1b, 0x9d, 0xa5, 0x18, 0x5e, 0x59, 0x1b, 0xd8, 0x5c, 0xba, 0x7d, 0xb9, 0x8d, 0x70, 0x11, 0x5c, 0xea, 0xa1, 0x57, 0x4d, 0xcb, 0x3c, 0x5b, 0xf8, 0x6c}},
	{Offset: 524288, Size: 0x20000, Hash: []uint8{0x8, 0x40, 0xd0, 0x5e, 0x80, 0x0, 0x0, 0x7c, 0x8b, 0xb3, 0x8b, 0xf7, 0x7b, 0x23, 0x26, 0x28, 0xab, 0xda, 0xcf, 0x86, 0x8f, 0xc2, 0x8a, 0x39, 0xc6, 0xe6, 0x69, 0x59, 0x97, 0xb6, 0x1a, 0x43}},
	{Offset: 655360, Size: 0x20000, Hash: []uint8{0x38, 0x8e, 0x44, 0xcb, 0x30, 0xd8, 0x90, 0xf, 0xce, 0x7, 0x4b, 0x58, 0x86, 0xde, 0xce, 0x59, 0xa2, 0x46, 0xd2, 0xf9, 0xba, 0xaf, 0x35, 0x87, 0x38, 0xdf, 0xd2, 0xd, 0xf9, 0x45, 0xed, 0x91}},
	{Offset: 786432, Size: 0x20000, Hash: []uint8{0x32, 0x28, 0xcd, 0xf, 0x37, 0x21, 0xe5, 0xd4, 0x1e, 0x58, 0x87, 0x73, 0x8e, 0x36, 0xdf, 0xb2, 0x70, 0x78, 0x56, 0xc3, 0x42, 0xff, 0xf7, 0x8f, 0x37, 0x95, 0x0, 0x26, 0xa, 0xac, 0x54, 0x72}},
	{Offset: 917504, Size: 0x20000, Hash: []uint8{0x96, 0x6b, 0x15, 0x6b, 0xc4, 0xf, 0x19, 0x18, 0xca, 0xbb, 0x5f, 0xd6, 0xbb, 0xa2, 0xc6, 0x2a, 0xac, 0xbb, 0x8a, 0xb9, 0xce, 0xec, 0x4c, 0xdb, 0x78, 0xec, 0x57, 0x5d, 0x33, 0xf9, 0x8e, 0xaf}},
}

var folders = []string{"default"}
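// setUpFile builds a FileInfo named filename whose block list is assembled by
// indexing into the blocks slice above, e.g. blockNumbers []int{0, 2} yields a
// file consisting of blocks[0] followed by blocks[2].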
func setUpFile(filename string, blockNumbers []int) protocol.FileInfo {
	// Create existing file
	existingBlocks := make([]protocol.BlockInfo, len(blockNumbers))
	for i := range blockNumbers {
		existingBlocks[i] = blocks[blockNumbers[i]]
	}
	return protocol.FileInfo{
		Name:   filename,
		Blocks: existingBlocks,
	}
}
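// setUpModel creates a model backed by an in-memory database, adds the
// default test folder and indexes the given file as if it had just been
// scanned locally.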
func setUpModel(file protocol.FileInfo) *Model {
	db := db.OpenMemory()
	model := NewModel(defaultConfig, protocol.LocalDeviceID, "syncthing", "dev", db, nil)
	model.AddFolder(defaultFolderConfig)
	// Update index
	model.updateLocalsFromScanning("default", []protocol.FileInfo{file})
	return model
}
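// setUpSendReceiveFolder wires up a minimal sendReceiveFolder around the
// given model, rooted in testdata. The folder is never actually started, so
// the initial-scan channel is closed here to unblock anything waiting on it.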
func setUpSendReceiveFolder(model *Model) *sendReceiveFolder {
	f := &sendReceiveFolder{
		folder: folder{
			stateTracker:        newStateTracker("default"),
			model:               model,
			initialScanFinished: make(chan struct{}),
			ctx:                 context.TODO(),
		},
		mtimeFS:   fs.NewMtimeFS(fs.DefaultFilesystem, db.NewNamespacedKV(model.db, "mtime")),
		dir:       "testdata",
		queue:     newJobQueue(),
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	// Folders are never actually started, so no initial scan will be done
	close(f.initialScanFinished)

	return f
}
// Layout of the files: (indexes from the above array)
// 12345678 - Required file
// 02005008 - Existing file (currently in the index)
// 02340070 - Temp file on the disk
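// TestHandleFile verifies that handleFile diffs the required file (12345678)
// against the existing one (02005008) and hands all eight needed blocks to
// the copier.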
func TestHandleFile(t *testing.T) {
	// After the diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7

	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setUpFile("filex", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m := setUpModel(existingFile)
	f := setUpSendReceiveFolder(m)

	copyChan := make(chan copyBlocksState, 1)

	f.handleFile(requiredFile, copyChan, nil)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 8 {
		t.Errorf("Unexpected count of copy blocks: %d != 8", len(toCopy.blocks))
	}

	for _, block := range blocks[1:] {
		found := false
		for _, toCopyBlock := range toCopy.blocks {
			if string(toCopyBlock.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
	}
}
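// TestHandleFileWithTemp repeats the scenario above, but with a temp file
// (02340070) already on disk: blocks present in the temp file are dropped,
// so only blocks 1, 5, 6 and 8 remain to be copied or pulled.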
func TestHandleFileWithTemp(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7
	// After dropping out blocks already on the temp file we should:
	// Copy: 5, 8
	// Pull: 1, 6

	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setUpFile("file", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m := setUpModel(existingFile)
	f := setUpSendReceiveFolder(m)

	copyChan := make(chan copyBlocksState, 1)

	f.handleFile(requiredFile, copyChan, nil)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 4 {
		t.Errorf("Unexpected count of copy blocks: %d != 4", len(toCopy.blocks))
	}

	for _, idx := range []int{1, 5, 6, 8} {
		found := false
		block := blocks[idx]
		for _, toCopyBlock := range toCopy.blocks {
			if string(toCopyBlock.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
	}
}
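// TestCopierFinder runs a copier routine against a fresh target ("file2"):
// blocks that can be found locally are copied into the temp file, while the
// remaining ones (1, 5, 6, 8) are forwarded to the pull channel.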
func TestCopierFinder(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 1, 2, 3, 4, 6, 7, 8
	// Since there is no existing file, nor a temp file

	// After dropping out blocks found locally:
	// Pull: 1, 5, 6, 8

	tempFile := filepath.Join("testdata", ignore.TempName("file2"))
	err := os.Remove(tempFile)
	if err != nil && !os.IsNotExist(err) {
		t.Error(err)
	}

	existingBlocks := []int{0, 2, 3, 4, 0, 0, 7, 0}
	existingFile := setUpFile(ignore.TempName("file"), existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]
	requiredFile.Name = "file2"

	m := setUpModel(existingFile)
	f := setUpSendReceiveFolder(m)

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 4)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single fetcher routine
	go f.copierRoutine(copyChan, pullChan, finisherChan)

	f.handleFile(requiredFile, copyChan, finisherChan)

	pulls := []pullBlockState{<-pullChan, <-pullChan, <-pullChan, <-pullChan}
	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	// Verify that the right blocks went into the pull list.
	// They are pulled in random order.
	for _, idx := range []int{1, 5, 6, 8} {
		found := false
		block := blocks[idx]
		for _, pulledBlock := range pulls {
			if string(pulledBlock.block.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
		if string(finish.file.Blocks[idx-1].Hash) != string(blocks[idx].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", idx, finish.file.Blocks[idx-1].String(), blocks[idx].String())
		}
	}
	// Verify that the fetched blocks have actually been written to the temp file
	blks, err := scanner.HashFile(context.TODO(), fs.DefaultFilesystem, tempFile, protocol.BlockSize, nil, false)
	if err != nil {
		t.Fatal(err)
	}

	for _, eq := range []int{2, 3, 4, 7} {
		if string(blks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, blks[eq-1].String(), blocks[eq].String())
		}
	}

	finish.fd.Close()
	os.Remove(tempFile)
}
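// TestWeakHash creates a 1 MiB random file and a "shifted" variant of it,
// then pulls the shifted version twice: once with weak hashing disabled
// (every block is re-pulled) and once with it enabled (only the blocks
// covering the shifted prefix are pulled; the rest are shifted copies).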
func TestWeakHash(t *testing.T) {
	tempFile := filepath.Join("testdata", ignore.TempName("weakhash"))
	var shift int64 = 10
	var size int64 = 1 << 20
	expectBlocks := int(size / protocol.BlockSize)
	expectPulls := int(shift / protocol.BlockSize)
	if shift > 0 {
		expectPulls++
	}

	cleanup := func() {
		for _, path := range []string{tempFile, "testdata/weakhash"} {
			os.Remove(path)
		}
	}

	cleanup()
	defer cleanup()

	f, err := os.Create("testdata/weakhash")
	if err != nil {
		t.Error(err)
	}
	defer f.Close()
	_, err = io.CopyN(f, rand.Reader, size)
	if err != nil {
		t.Error(err)
	}
	info, err := f.Stat()
	if err != nil {
		t.Error(err)
	}

	// Create two files, second file has `shifted` bytes random prefix, yet
	// both are of the same length, for example:
	// File 1: abcdefgh
	// File 2: xyabcdef
	f.Seek(0, os.SEEK_SET)
	existing, err := scanner.Blocks(context.TODO(), f, protocol.BlockSize, size, nil, true)
	if err != nil {
		t.Error(err)
	}

	f.Seek(0, os.SEEK_SET)
	remainder := io.LimitReader(f, size-shift)
	prefix := io.LimitReader(rand.Reader, shift)
	nf := io.MultiReader(prefix, remainder)
	desired, err := scanner.Blocks(context.TODO(), nf, protocol.BlockSize, size, nil, true)
	if err != nil {
		t.Error(err)
	}

	existingFile := protocol.FileInfo{
		Name:       "weakhash",
		Blocks:     existing,
		Size:       size,
		ModifiedS:  info.ModTime().Unix(),
		ModifiedNs: int32(info.ModTime().Nanosecond()),
	}
	desiredFile := protocol.FileInfo{
		Name:      "weakhash",
		Size:      size,
		Blocks:    desired,
		ModifiedS: info.ModTime().Unix() + 1,
	}

	// Setup the model/pull environment
	m := setUpModel(existingFile)
	fo := setUpSendReceiveFolder(m)

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, expectBlocks)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single fetcher routine
	go fo.copierRoutine(copyChan, pullChan, finisherChan)

	// Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
	fo.WeakHashThresholdPct = 101
	fo.handleFile(desiredFile, copyChan, finisherChan)

	var pulls []pullBlockState
	for len(pulls) < expectBlocks {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-time.After(10 * time.Second):
			t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectBlocks)
		}
	}
	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	finish.fd.Close()
	if err := os.Remove(tempFile); err != nil && !os.IsNotExist(err) {
		t.Error(err)
	}

	// Test 2 - using weak hash, expectPulls blocks pulled.
	fo.WeakHashThresholdPct = -1
	fo.handleFile(desiredFile, copyChan, finisherChan)

	pulls = pulls[:0]
	for len(pulls) < expectPulls {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-time.After(10 * time.Second):
			t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
		}
	}

	finish = <-finisherChan
	finish.fd.Close()

	expectShifted := expectBlocks - expectPulls
	if finish.copyOriginShifted != expectShifted {
		t.Errorf("did not copy %d shifted", expectShifted)
	}
}
// Test that updating a file removes its old blocks from the blockmap
func TestCopierCleanup(t *testing.T) {
	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	// Create a file
	file := setUpFile("test", []int{0})
	m := setUpModel(file)

	file.Blocks = []protocol.BlockInfo{blocks[1]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocalsFromScanning("default", []protocol.FileInfo{file})

	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Unexpected block found")
	}

	if !m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[0]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocalsFromScanning("default", []protocol.FileInfo{file})

	if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	if m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Unexpected block found")
	}
}
// Make sure that the copier routine hashes the content when asked, and pulls
// if it fails to find the block.
func TestLastResortPulling(t *testing.T) {
	// Add a file to index (with the incorrect block representation, as content
	// doesn't actually match the block list)
	file := setUpFile("empty", []int{0})
	m := setUpModel(file)

	// Pretend that we are handling a new file of the same content but
	// with a different name (causing it to copy that particular block)
	file.Name = "newfile"

	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	f := setUpSendReceiveFolder(m)

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 1)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single copier routine
	go f.copierRoutine(copyChan, pullChan, finisherChan)

	f.handleFile(file, copyChan, finisherChan)

	// Copier should hash the empty file, realise that the region it has read
	// doesn't match the hash which was advertised by the block map, fix it
	// and ask to pull the block.
	<-pullChan

	// Verify that it did fix the incorrect hash.
	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Found unexpected block")
	}

	if !m.finder.Iterate(folders, scanner.SHA256OfNothing, iterFn) {
		t.Error("Expected block not found")
	}

	(<-finisherChan).fd.Close()
	os.Remove(filepath.Join("testdata", ignore.TempName("newfile")))
}
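// TestDeregisterOnFailInCopy fails the shared puller state while the copier
// is still running and checks that the finisher closes the temp file and
// removes the file from the job queue and the progress emitter, and that
// finishing the same state twice has no further effect.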
func TestDeregisterOnFailInCopy(t *testing.T) {
	file := setUpFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
	defer os.Remove("testdata/" + ignore.TempName("filex"))

	db := db.OpenMemory()
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	f := setUpSendReceiveFolder(m)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)

	go f.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go f.finisherRoutine(finisherChan)

	f.handleFile(file, copyChan, finisherChan)

	// Receive a block at puller, to indicate that at least a single copier
	// loop has been performed.
	toPull := <-pullChan

	// Wait until copier is trying to pass something down to the puller again
	time.Sleep(100 * time.Millisecond)

	// Close the file
	toPull.sharedPullerState.fail("test", os.ErrNotExist)

	// Unblock copier
	<-pullChan

	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
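// TestDeregisterOnFailInPull is the same scenario as above, except that the
// failure happens in the puller routine (which has nowhere to pull from)
// rather than being injected into the copier.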
func TestDeregisterOnFailInPull(t *testing.T) {
	file := setUpFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
	defer os.Remove("testdata/" + ignore.TempName("filex"))

	db := db.OpenMemory()
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	f := setUpSendReceiveFolder(m)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)

	go f.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go f.pullerRoutine(pullChan, finisherBufferChan)
	go f.finisherRoutine(finisherChan)

	f.handleFile(file, copyChan, finisherChan)

	// Receive at finisher, we should error out as puller has nowhere to pull
	// from.
	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}