rwfolder_test.go

// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package model

import (
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/scanner"
	"github.com/syncthing/syncthing/lib/sync"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func init() {
	// We do this to make sure that the temp file required for the tests does
	// not get removed during the tests.
	future := time.Now().Add(time.Hour)
	err := os.Chtimes(filepath.Join("testdata", defTempNamer.TempName("file")), future, future)
	if err != nil {
		panic(err)
	}
}

var blocks = []protocol.BlockInfo{
	{Hash: []uint8{0xfa, 0x43, 0x23, 0x9b, 0xce, 0xe7, 0xb9, 0x7c, 0xa6, 0x2f, 0x0, 0x7c, 0xc6, 0x84, 0x87, 0x56, 0xa, 0x39, 0xe1, 0x9f, 0x74, 0xf3, 0xdd, 0xe7, 0x48, 0x6d, 0xb3, 0xf9, 0x8d, 0xf8, 0xe4, 0x71}}, // Zero'ed out block
	{Offset: 0, Size: 0x20000, Hash: []uint8{0x7e, 0xad, 0xbc, 0x36, 0xae, 0xbb, 0xcf, 0x74, 0x43, 0xe2, 0x7a, 0x5a, 0x4b, 0xb8, 0x5b, 0xce, 0xe6, 0x9e, 0x1e, 0x10, 0xf9, 0x8a, 0xbc, 0x77, 0x95, 0x2, 0x29, 0x60, 0x9e, 0x96, 0xae, 0x6c}},
	{Offset: 131072, Size: 0x20000, Hash: []uint8{0x3c, 0xc4, 0x20, 0xf4, 0xb, 0x2e, 0xcb, 0xb9, 0x5d, 0xce, 0x34, 0xa8, 0xc3, 0x92, 0xea, 0xf3, 0xda, 0x88, 0x33, 0xee, 0x7a, 0xb6, 0xe, 0xf1, 0x82, 0x5e, 0xb0, 0xa9, 0x26, 0xa9, 0xc0, 0xef}},
	{Offset: 262144, Size: 0x20000, Hash: []uint8{0x76, 0xa8, 0xc, 0x69, 0xd7, 0x5c, 0x52, 0xfd, 0xdf, 0x55, 0xef, 0x44, 0xc1, 0xd6, 0x25, 0x48, 0x4d, 0x98, 0x48, 0x4d, 0xaa, 0x50, 0xf6, 0x6b, 0x32, 0x47, 0x55, 0x81, 0x6b, 0xed, 0xee, 0xfb}},
	{Offset: 393216, Size: 0x20000, Hash: []uint8{0x44, 0x1e, 0xa4, 0xf2, 0x8d, 0x1f, 0xc3, 0x1b, 0x9d, 0xa5, 0x18, 0x5e, 0x59, 0x1b, 0xd8, 0x5c, 0xba, 0x7d, 0xb9, 0x8d, 0x70, 0x11, 0x5c, 0xea, 0xa1, 0x57, 0x4d, 0xcb, 0x3c, 0x5b, 0xf8, 0x6c}},
	{Offset: 524288, Size: 0x20000, Hash: []uint8{0x8, 0x40, 0xd0, 0x5e, 0x80, 0x0, 0x0, 0x7c, 0x8b, 0xb3, 0x8b, 0xf7, 0x7b, 0x23, 0x26, 0x28, 0xab, 0xda, 0xcf, 0x86, 0x8f, 0xc2, 0x8a, 0x39, 0xc6, 0xe6, 0x69, 0x59, 0x97, 0xb6, 0x1a, 0x43}},
	{Offset: 655360, Size: 0x20000, Hash: []uint8{0x38, 0x8e, 0x44, 0xcb, 0x30, 0xd8, 0x90, 0xf, 0xce, 0x7, 0x4b, 0x58, 0x86, 0xde, 0xce, 0x59, 0xa2, 0x46, 0xd2, 0xf9, 0xba, 0xaf, 0x35, 0x87, 0x38, 0xdf, 0xd2, 0xd, 0xf9, 0x45, 0xed, 0x91}},
	{Offset: 786432, Size: 0x20000, Hash: []uint8{0x32, 0x28, 0xcd, 0xf, 0x37, 0x21, 0xe5, 0xd4, 0x1e, 0x58, 0x87, 0x73, 0x8e, 0x36, 0xdf, 0xb2, 0x70, 0x78, 0x56, 0xc3, 0x42, 0xff, 0xf7, 0x8f, 0x37, 0x95, 0x0, 0x26, 0xa, 0xac, 0x54, 0x72}},
	{Offset: 917504, Size: 0x20000, Hash: []uint8{0x96, 0x6b, 0x15, 0x6b, 0xc4, 0xf, 0x19, 0x18, 0xca, 0xbb, 0x5f, 0xd6, 0xbb, 0xa2, 0xc6, 0x2a, 0xac, 0xbb, 0x8a, 0xb9, 0xce, 0xec, 0x4c, 0xdb, 0x78, 0xec, 0x57, 0x5d, 0x33, 0xf9, 0x8e, 0xaf}},
}
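
// blocks[0] stands in for a zeroed-out block, while blocks[1] through
// blocks[8] describe consecutive 0x20000-byte (128 KiB) regions at offsets
// 0, 131072, 262144 and so on.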

var folders = []string{"default"}

// Layout of the files: (indexes from the above array)
// 12345678 - Required file
// 02005008 - Existing file (currently in the index)
// 02340070 - Temp file on the disk
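//
// Each digit is an index into the blocks array above; "02005008", for
// example, stands for blocks[0], blocks[2], blocks[0], blocks[0], blocks[5],
// blocks[0], blocks[0], blocks[8], which is how the existing file is built
// in the tests below.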

func TestHandleFile(t *testing.T) {
	// After the diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7
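	//
	// (The existing file "02005008" already holds the required blocks at
	// positions 2, 5 and 8; the other positions hold the zeroed-out block,
	// so blocks 1, 3, 4, 6 and 7 have to be fetched.)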

	// Create existing file
	existingFile := protocol.FileInfo{
		Name:     "filex",
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[0], blocks[0],
			blocks[5], blocks[0], blocks[0], blocks[8],
		},
	}

	// Create target file
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)
	// Update index
	m.updateLocals("default", []protocol.FileInfo{existingFile})

	p := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     m,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	copyChan := make(chan copyBlocksState, 1)

	p.handleFile(requiredFile, copyChan, nil)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 8 {
		t.Errorf("Unexpected count of copy blocks: %d != 8", len(toCopy.blocks))
	}

	for i, block := range toCopy.blocks {
		if string(block.Hash) != string(blocks[i+1].Hash) {
			t.Errorf("Block mismatch: %s != %s", block.String(), blocks[i+1].String())
		}
	}
}

func TestHandleFileWithTemp(t *testing.T) {
	// After the diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7
	// After dropping out blocks already on the temp file we should:
	// Copy: 5, 8
	// Pull: 1, 6
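	//
	// (The temp file on disk is laid out as "02340070" and thus already
	// holds blocks 2, 3, 4 and 7, leaving blocks 1, 5, 6 and 8 to be
	// handled, as the assertions below verify.)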

	// Create existing file
	existingFile := protocol.FileInfo{
		Name:     "file",
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[0], blocks[0],
			blocks[5], blocks[0], blocks[0], blocks[8],
		},
	}

	// Create target file
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)
	// Update index
	m.updateLocals("default", []protocol.FileInfo{existingFile})

	p := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     m,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	copyChan := make(chan copyBlocksState, 1)

	p.handleFile(requiredFile, copyChan, nil)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 4 {
		t.Errorf("Unexpected count of copy blocks: %d != 4", len(toCopy.blocks))
	}

	for i, eq := range []int{1, 5, 6, 8} {
		if string(toCopy.blocks[i].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block mismatch: %s != %s", toCopy.blocks[i].String(), blocks[eq].String())
		}
	}
}

func TestCopierFinder(t *testing.T) {
	// After the diff between required and existing we should:
	// Copy: 1, 2, 3, 4, 5, 6, 7, 8
	// (since there is neither an existing file nor a temp file)
	//
	// After dropping out blocks found locally:
	// Pull: 1, 5, 6, 8
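	//
	// (Blocks 2, 3, 4 and 7 are available locally via the temp-named file
	// registered in the index below, so only 1, 5, 6 and 8 should end up on
	// the pull channel.)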

	tempFile := filepath.Join("testdata", defTempNamer.TempName("file2"))
	err := os.Remove(tempFile)
	if err != nil && !os.IsNotExist(err) {
		t.Error(err)
	}

	// Create existing file
	existingFile := protocol.FileInfo{
		Name:     defTempNamer.TempName("file"),
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[3], blocks[4],
			blocks[0], blocks[0], blocks[7], blocks[0],
		},
	}

	// Create target file
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]
	requiredFile.Name = "file2"

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)
	// Update index
	m.updateLocals("default", []protocol.FileInfo{existingFile})

	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	// Verify that the blocks we claim exist in the file really exist in the db.
	for _, idx := range []int{2, 3, 4, 7} {
		if !m.finder.Iterate(folders, blocks[idx].Hash, iterFn) {
			t.Error("Didn't find block")
		}
	}

	p := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     m,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 4)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single fetcher routine
	go p.copierRoutine(copyChan, pullChan, finisherChan)

	p.handleFile(requiredFile, copyChan, finisherChan)

	pulls := []pullBlockState{<-pullChan, <-pullChan, <-pullChan, <-pullChan}
	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	// Verify that the right blocks went into the pull list
	for i, eq := range []int{1, 5, 6, 8} {
		if string(pulls[i].block.Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, pulls[i].block.String(), blocks[eq].String())
		}
		if string(finish.file.Blocks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, finish.file.Blocks[eq-1].String(), blocks[eq].String())
		}
	}

	// Verify that the fetched blocks have actually been written to the temp file
	blks, err := scanner.HashFile(tempFile, protocol.BlockSize, 0, nil)
	if err != nil {
		t.Log(err)
	}

	for _, eq := range []int{2, 3, 4, 7} {
		if string(blks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, blks[eq-1].String(), blocks[eq].String())
		}
	}

	finish.fd.Close()
	os.Remove(tempFile)
}

// Test that updating a file removes its old blocks from the blockmap
func TestCopierCleanup(t *testing.T) {
	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)

	// Create a file
	file := protocol.FileInfo{
		Name:     "test",
		Flags:    0,
		Modified: 0,
		Blocks:   []protocol.BlockInfo{blocks[0]},
	}

	// Add file to index
	m.updateLocals("default", []protocol.FileInfo{file})

	if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[1]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocals("default", []protocol.FileInfo{file})

	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Unexpected block found")
	}

	if !m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[0]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocals("default", []protocol.FileInfo{file})

	if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	if m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Unexpected block found")
	}
}

// Make sure that the copier routine hashes the content when asked, and pulls
// if it fails to find the block.
func TestLastResortPulling(t *testing.T) {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)

	// Add a file to the index (with an incorrect block representation, as the
	// content doesn't actually match the block list)
	file := protocol.FileInfo{
		Name:     "empty",
		Flags:    0,
		Modified: 0,
		Blocks:   []protocol.BlockInfo{blocks[0]},
	}
	m.updateLocals("default", []protocol.FileInfo{file})

	// Pretend that we are handling a new file of the same content but
	// with a different name (causing us to copy that particular block)
	file.Name = "newfile"

	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	// Check that the particular block is there
	if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	p := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     m,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 1)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single copier routine
	go p.copierRoutine(copyChan, pullChan, finisherChan)

	p.handleFile(file, copyChan, finisherChan)

	// The copier should hash the empty file, realise that the region it has
	// read doesn't match the hash advertised by the block map, fix it, and
	// ask to pull the block.
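	//
	// (Concretely: the index claims that "empty" consists of blocks[0], but
	// the actual content of testdata/empty hashes to scanner.SHA256OfNothing,
	// so the copier corrects the block map entry and hands the block over to
	// the puller, which is what the checks below verify.)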
	<-pullChan

	// Verify that it did fix the incorrect hash.
	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Found unexpected block")
	}

	if !m.finder.Iterate(folders, scanner.SHA256OfNothing, iterFn) {
		t.Error("Expected block not found")
	}

	(<-finisherChan).fd.Close()
	os.Remove(filepath.Join("testdata", defTempNamer.TempName("newfile")))
}

func TestDeregisterOnFailInCopy(t *testing.T) {
	file := protocol.FileInfo{
		Name:     "filex",
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[0], blocks[0],
			blocks[5], blocks[0], blocks[0], blocks[8],
		},
	}
	defer os.Remove("testdata/" + defTempNamer.TempName("filex"))

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)

	emitter := NewProgressEmitter(defaultConfig)
	go emitter.Serve()

	p := rwFolder{
		folder:          "default",
		dir:             "testdata",
		model:           m,
		queue:           newJobQueue(),
		progressEmitter: emitter,
		errors:          make(map[string]string),
		errorsMut:       sync.NewMutex(),
	}

	// queue.Done should be called by the finisher routine
	p.queue.Push("filex", 0, 0)
	p.queue.Pop()

	if p.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)

	go p.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go p.finisherRoutine(finisherChan)

	p.handleFile(file, copyChan, finisherChan)

	// Receive a block at puller, to indicate that at least a single copier
	// loop has been performed.
	toPull := <-pullChan
	// Wait until copier is trying to pass something down to the puller again
	time.Sleep(100 * time.Millisecond)
	// Close the file
	toPull.sharedPullerState.fail("test", os.ErrNotExist)
	// Unblock copier
	<-pullChan

	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue and the progress emitter. Verify this.
		if p.progressEmitter.lenRegistry() != 1 || p.queue.lenProgress() != 1 || p.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}

func TestDeregisterOnFailInPull(t *testing.T) {
	file := protocol.FileInfo{
		Name:     "filex",
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[0], blocks[0],
			blocks[5], blocks[0], blocks[0], blocks[8],
		},
	}
	defer os.Remove("testdata/" + defTempNamer.TempName("filex"))

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)

	emitter := NewProgressEmitter(defaultConfig)
	go emitter.Serve()

	p := rwFolder{
		folder:          "default",
		dir:             "testdata",
		model:           m,
		queue:           newJobQueue(),
		progressEmitter: emitter,
		errors:          make(map[string]string),
		errorsMut:       sync.NewMutex(),
	}

	// queue.Done should be called by the finisher routine
	p.queue.Push("filex", 0, 0)
	p.queue.Pop()

	if p.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)

	go p.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go p.pullerRoutine(pullChan, finisherBufferChan)
	go p.finisherRoutine(finisherChan)

	p.handleFile(file, copyChan, finisherChan)

	// Receive at the finisher; we should error out as the puller has nowhere
	// to pull from.
	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue and the progress emitter. Verify this.
		if p.progressEmitter.lenRegistry() != 1 || p.queue.lenProgress() != 1 || p.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}