folder_sendrecv_test.go

// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package model

import (
	"bytes"
	"context"
	"crypto/rand"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"testing"
	"time"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/scanner"
	"github.com/syncthing/syncthing/lib/sync"
)

var blocks = []protocol.BlockInfo{
	{Hash: []uint8{0xfa, 0x43, 0x23, 0x9b, 0xce, 0xe7, 0xb9, 0x7c, 0xa6, 0x2f, 0x0, 0x7c, 0xc6, 0x84, 0x87, 0x56, 0xa, 0x39, 0xe1, 0x9f, 0x74, 0xf3, 0xdd, 0xe7, 0x48, 0x6d, 0xb3, 0xf9, 0x8d, 0xf8, 0xe4, 0x71}}, // Zero'ed out block
	{Offset: 0, Size: 0x20000, Hash: []uint8{0x7e, 0xad, 0xbc, 0x36, 0xae, 0xbb, 0xcf, 0x74, 0x43, 0xe2, 0x7a, 0x5a, 0x4b, 0xb8, 0x5b, 0xce, 0xe6, 0x9e, 0x1e, 0x10, 0xf9, 0x8a, 0xbc, 0x77, 0x95, 0x2, 0x29, 0x60, 0x9e, 0x96, 0xae, 0x6c}},
	{Offset: 131072, Size: 0x20000, Hash: []uint8{0x3c, 0xc4, 0x20, 0xf4, 0xb, 0x2e, 0xcb, 0xb9, 0x5d, 0xce, 0x34, 0xa8, 0xc3, 0x92, 0xea, 0xf3, 0xda, 0x88, 0x33, 0xee, 0x7a, 0xb6, 0xe, 0xf1, 0x82, 0x5e, 0xb0, 0xa9, 0x26, 0xa9, 0xc0, 0xef}},
	{Offset: 262144, Size: 0x20000, Hash: []uint8{0x76, 0xa8, 0xc, 0x69, 0xd7, 0x5c, 0x52, 0xfd, 0xdf, 0x55, 0xef, 0x44, 0xc1, 0xd6, 0x25, 0x48, 0x4d, 0x98, 0x48, 0x4d, 0xaa, 0x50, 0xf6, 0x6b, 0x32, 0x47, 0x55, 0x81, 0x6b, 0xed, 0xee, 0xfb}},
	{Offset: 393216, Size: 0x20000, Hash: []uint8{0x44, 0x1e, 0xa4, 0xf2, 0x8d, 0x1f, 0xc3, 0x1b, 0x9d, 0xa5, 0x18, 0x5e, 0x59, 0x1b, 0xd8, 0x5c, 0xba, 0x7d, 0xb9, 0x8d, 0x70, 0x11, 0x5c, 0xea, 0xa1, 0x57, 0x4d, 0xcb, 0x3c, 0x5b, 0xf8, 0x6c}},
	{Offset: 524288, Size: 0x20000, Hash: []uint8{0x8, 0x40, 0xd0, 0x5e, 0x80, 0x0, 0x0, 0x7c, 0x8b, 0xb3, 0x8b, 0xf7, 0x7b, 0x23, 0x26, 0x28, 0xab, 0xda, 0xcf, 0x86, 0x8f, 0xc2, 0x8a, 0x39, 0xc6, 0xe6, 0x69, 0x59, 0x97, 0xb6, 0x1a, 0x43}},
	{Offset: 655360, Size: 0x20000, Hash: []uint8{0x38, 0x8e, 0x44, 0xcb, 0x30, 0xd8, 0x90, 0xf, 0xce, 0x7, 0x4b, 0x58, 0x86, 0xde, 0xce, 0x59, 0xa2, 0x46, 0xd2, 0xf9, 0xba, 0xaf, 0x35, 0x87, 0x38, 0xdf, 0xd2, 0xd, 0xf9, 0x45, 0xed, 0x91}},
	{Offset: 786432, Size: 0x20000, Hash: []uint8{0x32, 0x28, 0xcd, 0xf, 0x37, 0x21, 0xe5, 0xd4, 0x1e, 0x58, 0x87, 0x73, 0x8e, 0x36, 0xdf, 0xb2, 0x70, 0x78, 0x56, 0xc3, 0x42, 0xff, 0xf7, 0x8f, 0x37, 0x95, 0x0, 0x26, 0xa, 0xac, 0x54, 0x72}},
	{Offset: 917504, Size: 0x20000, Hash: []uint8{0x96, 0x6b, 0x15, 0x6b, 0xc4, 0xf, 0x19, 0x18, 0xca, 0xbb, 0x5f, 0xd6, 0xbb, 0xa2, 0xc6, 0x2a, 0xac, 0xbb, 0x8a, 0xb9, 0xce, 0xec, 0x4c, 0xdb, 0x78, 0xec, 0x57, 0x5d, 0x33, 0xf9, 0x8e, 0xaf}},
}

var folders = []string{"default"}
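
// diffTestData drives TestDiff and BenchmarkDiff below: a and b are the old
// and new file contents, s is the block size used when hashing them, and d
// lists the blocks the diff is expected to report as needed to go from a to b.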
var diffTestData = []struct {
	a string
	b string
	s int
	d []protocol.BlockInfo
}{
	{"contents", "contents", 1024, []protocol.BlockInfo{}},
	{"", "", 1024, []protocol.BlockInfo{}},
	{"contents", "contents", 3, []protocol.BlockInfo{}},
	{"contents", "cantents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}}},
	{"contents", "contants", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}}},
	{"contents", "cantants", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}}},
	{"contents", "", 3, []protocol.BlockInfo{{Offset: 0, Size: 0}}},
	{"", "contents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"con", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"contents", "con", 3, nil},
	{"contents", "cont", 3, []protocol.BlockInfo{{Offset: 3, Size: 1}}},
	{"cont", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
}

func setUpFile(filename string, blockNumbers []int) protocol.FileInfo {
	// Create existing file
	existingBlocks := make([]protocol.BlockInfo, len(blockNumbers))
	for i := range blockNumbers {
		existingBlocks[i] = blocks[blockNumbers[i]]
	}
	return protocol.FileInfo{
		Name:   filename,
		Blocks: existingBlocks,
	}
}

func setUpModel(files ...protocol.FileInfo) *Model {
	db := db.OpenMemory()
	model := NewModel(defaultCfgWrapper, protocol.LocalDeviceID, "syncthing", "dev", db, nil)
	model.AddFolder(defaultFolderConfig)
	// Update index
	model.updateLocalsFromScanning("default", files)
	return model
}

func setUpSendReceiveFolder(model *Model) *sendReceiveFolder {
	f := &sendReceiveFolder{
		folder: folder{
			stateTracker:        newStateTracker("default"),
			model:               model,
			initialScanFinished: make(chan struct{}),
			ctx:                 context.TODO(),
			FolderConfiguration: config.FolderConfiguration{
				FilesystemType:      fs.FilesystemTypeBasic,
				Path:                "testdata",
				PullerMaxPendingKiB: defaultPullerPendingKiB,
			},
		},

		queue:         newJobQueue(),
		pullErrors:    make(map[string]string),
		pullErrorsMut: sync.NewMutex(),
	}
	f.fs = fs.NewMtimeFS(f.Filesystem(), db.NewNamespacedKV(model.db, "mtime"))

	// Folders are never actually started, so no initial scan will be done
	close(f.initialScanFinished)

	return f
}

// Layout of the files: (indexes from the above array)
// 12345678 - Required file
// 02005008 - Existing file (currently in the index)
// 02340070 - Temp file on the disk
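// Each digit is an index into the blocks slice above; 0 is the zeroed-out block.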
func TestHandleFile(t *testing.T) {
	// After the diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7

	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setUpFile("filex", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m := setUpModel(existingFile)
	f := setUpSendReceiveFolder(m)

	copyChan := make(chan copyBlocksState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.handleFile(requiredFile, copyChan, nil, dbUpdateChan)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 8 {
		t.Errorf("Unexpected count of copy blocks: %d != 8", len(toCopy.blocks))
	}

	for _, block := range blocks[1:] {
		found := false
		for _, toCopyBlock := range toCopy.blocks {
			if string(toCopyBlock.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
	}
}

func TestHandleFileWithTemp(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7
	// After dropping out blocks already on the temp file we should:
	// Copy: 5, 8
	// Pull: 1, 6

	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setUpFile("file", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m := setUpModel(existingFile)
	f := setUpSendReceiveFolder(m)

	copyChan := make(chan copyBlocksState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.handleFile(requiredFile, copyChan, nil, dbUpdateChan)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 4 {
		t.Errorf("Unexpected count of copy blocks: %d != 4", len(toCopy.blocks))
	}

	for _, idx := range []int{1, 5, 6, 8} {
		found := false
		block := blocks[idx]
		for _, toCopyBlock := range toCopy.blocks {
			if string(toCopyBlock.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
	}
}

func TestCopierFinder(t *testing.T) {
	testOs := &fatalOs{t}

	// After diff between required and existing we should:
	// Copy: 1, 2, 3, 4, 6, 7, 8
	// Since there is no existing file, nor a temp file
	// After dropping out blocks found locally:
	// Pull: 1, 5, 6, 8
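	// Blocks 2, 3, 4 and 7 exist in the indexed (temp-named) file set up below,
	// so the copier should find them locally; blocks 1, 5, 6 and 8 must be pulled.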

	tempFile := filepath.Join("testdata", fs.TempName("file2"))
	testOs.Remove(tempFile)
	defer testOs.Remove(tempFile)

	existingBlocks := []int{0, 2, 3, 4, 0, 0, 7, 0}
	existingFile := setUpFile(fs.TempName("file"), existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]
	requiredFile.Name = "file2"

	m := setUpModel(existingFile)
	f := setUpSendReceiveFolder(m)

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 4)
	finisherChan := make(chan *sharedPullerState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	// Run a single fetcher routine
	go f.copierRoutine(copyChan, pullChan, finisherChan)

	f.handleFile(requiredFile, copyChan, finisherChan, dbUpdateChan)

	pulls := []pullBlockState{<-pullChan, <-pullChan, <-pullChan, <-pullChan}
	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	// Verify that the right blocks went into the pull list.
	// They are pulled in random order.
	for _, idx := range []int{1, 5, 6, 8} {
		found := false
		block := blocks[idx]
		for _, pulledBlock := range pulls {
			if string(pulledBlock.block.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
		if string(finish.file.Blocks[idx-1].Hash) != string(blocks[idx].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", idx, finish.file.Blocks[idx-1].String(), blocks[idx].String())
		}
	}

	// Verify that the fetched blocks have actually been written to the temp file
	blks, err := scanner.HashFile(context.TODO(), fs.NewFilesystem(fs.FilesystemTypeBasic, "."), tempFile, protocol.MinBlockSize, nil, false)
	if err != nil {
		t.Log(err)
	}

	for _, eq := range []int{2, 3, 4, 7} {
		if string(blks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, blks[eq-1].String(), blocks[eq].String())
		}
	}

	finish.fd.Close()
}

func TestWeakHash(t *testing.T) {
	testOs := &fatalOs{t}

	tempFile := filepath.Join("testdata", fs.TempName("weakhash"))
	var shift int64 = 10
	var size int64 = 1 << 20
	expectBlocks := int(size / protocol.MinBlockSize)
	expectPulls := int(shift / protocol.MinBlockSize)
	if shift > 0 {
		expectPulls++
	}
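	// Test 1 below disables weak hashing and expects the file to be fully
	// re-pulled (expectBlocks pulls); Test 2 enables it and expects only the
	// blocks covering the shifted prefix (expectPulls) to be pulled.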

	cleanup := func() {
		for _, path := range []string{tempFile, "testdata/weakhash"} {
			testOs.Remove(path)
		}
	}

	cleanup()
	defer cleanup()

	f, _ := testOs.Create("testdata/weakhash")
	defer f.Close()
	_, err := io.CopyN(f, rand.Reader, size)
	if err != nil {
		t.Error(err)
	}

	info, err := f.Stat()
	if err != nil {
		t.Error(err)
	}

	// Create two files, second file has `shifted` bytes random prefix, yet
	// both are of the same length, for example:
	// File 1: abcdefgh
	// File 2: xyabcdef
	f.Seek(0, os.SEEK_SET)
	existing, err := scanner.Blocks(context.TODO(), f, protocol.MinBlockSize, size, nil, true)
	if err != nil {
		t.Error(err)
	}

	f.Seek(0, os.SEEK_SET)
	remainder := io.LimitReader(f, size-shift)
	prefix := io.LimitReader(rand.Reader, shift)
	nf := io.MultiReader(prefix, remainder)
	desired, err := scanner.Blocks(context.TODO(), nf, protocol.MinBlockSize, size, nil, true)
	if err != nil {
		t.Error(err)
	}

	existingFile := protocol.FileInfo{
		Name:       "weakhash",
		Blocks:     existing,
		Size:       size,
		ModifiedS:  info.ModTime().Unix(),
		ModifiedNs: int32(info.ModTime().Nanosecond()),
	}
	desiredFile := protocol.FileInfo{
		Name:      "weakhash",
		Size:      size,
		Blocks:    desired,
		ModifiedS: info.ModTime().Unix() + 1,
	}

	// Setup the model/pull environment
	m := setUpModel(existingFile)
	fo := setUpSendReceiveFolder(m)

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, expectBlocks)
	finisherChan := make(chan *sharedPullerState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	// Run a single fetcher routine
	go fo.copierRoutine(copyChan, pullChan, finisherChan)

	// Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
	fo.WeakHashThresholdPct = 101
	fo.handleFile(desiredFile, copyChan, finisherChan, dbUpdateChan)

	var pulls []pullBlockState
	for len(pulls) < expectBlocks {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-time.After(10 * time.Second):
			t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectBlocks)
		}
	}

	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	finish.fd.Close()
	testOs.Remove(tempFile)

	// Test 2 - using weak hash, expectPulls blocks pulled.
	fo.WeakHashThresholdPct = -1
	fo.handleFile(desiredFile, copyChan, finisherChan, dbUpdateChan)

	pulls = pulls[:0]
	for len(pulls) < expectPulls {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-time.After(10 * time.Second):
			t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
		}
	}

	finish = <-finisherChan
	finish.fd.Close()
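
	// The remaining blocks should have been found in the existing file at
	// shifted offsets via the weak hash and copied instead of pulled.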
	expectShifted := expectBlocks - expectPulls
	if finish.copyOriginShifted != expectShifted {
		t.Errorf("did not copy %d shifted", expectShifted)
	}
}

// Test that updating a file removes its old blocks from the blockmap
func TestCopierCleanup(t *testing.T) {
	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	// Create a file
	file := setUpFile("test", []int{0})
	m := setUpModel(file)

	file.Blocks = []protocol.BlockInfo{blocks[1]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocalsFromScanning("default", []protocol.FileInfo{file})

	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Unexpected block found")
	}
	if !m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[0]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocalsFromScanning("default", []protocol.FileInfo{file})

	if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}
	if m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Unexpected block found")
	}
}

func TestDeregisterOnFailInCopy(t *testing.T) {
	testOs := &fatalOs{t}

	file := setUpFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
	defer testOs.Remove("testdata/" + fs.TempName("filex"))

	db := db.OpenMemory()
	m := NewModel(defaultCfgWrapper, protocol.LocalDeviceID, "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	// Set up our event subscription early
	s := events.Default.Subscribe(events.ItemFinished)

	f := setUpSendReceiveFolder(m)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	go f.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go f.finisherRoutine(ignore.New(defaultFs), finisherChan, dbUpdateChan, make(chan string))

	f.handleFile(file, copyChan, finisherChan, dbUpdateChan)

	// Receive a block at puller, to indicate that at least a single copier
	// loop has been performed.
	toPull := <-pullChan

	// Close the file, causing errors on further access
	toPull.sharedPullerState.fail("test", os.ErrNotExist)

	// Unblock copier
	go func() {
		for range pullChan {
		}
	}()

	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state

		t0 := time.Now()
		if ev, err := s.Poll(time.Minute); err != nil {
			t.Fatal("Got error waiting for ItemFinished event:", err)
		} else if n := ev.Data.(map[string]interface{})["item"]; n != state.file.Name {
			t.Fatal("Got ItemFinished event for wrong file:", n)
		}
		t.Log("event took", time.Since(t0))

		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state

		if _, err := s.Poll(time.Second); err != events.ErrTimeout {
			t.Fatal("Expected timeout, not another event", err)
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}

func TestDeregisterOnFailInPull(t *testing.T) {
	testOs := &fatalOs{t}

	file := setUpFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
	defer testOs.Remove("testdata/" + fs.TempName("filex"))

	db := db.OpenMemory()
	m := NewModel(defaultCfgWrapper, protocol.LocalDeviceID, "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	// Set up our event subscription early
	s := events.Default.Subscribe(events.ItemFinished)

	f := setUpSendReceiveFolder(m)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	go f.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go f.pullerRoutine(pullChan, finisherBufferChan)
	go f.finisherRoutine(ignore.New(defaultFs), finisherChan, dbUpdateChan, make(chan string))

	f.handleFile(file, copyChan, finisherChan, dbUpdateChan)

	// Receive at finisher, we should error out as puller has nowhere to pull
	// from.
	timeout = time.Second
	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state

		t0 := time.Now()
		if ev, err := s.Poll(time.Minute); err != nil {
			t.Fatal("Got error waiting for ItemFinished event:", err)
		} else if n := ev.Data.(map[string]interface{})["item"]; n != state.file.Name {
			t.Fatal("Got ItemFinished event for wrong file:", n)
		}
		t.Log("event took", time.Since(t0))

		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state

		if _, err := s.Poll(time.Second); err != events.ErrTimeout {
			t.Fatal("Expected timeout, not another event", err)
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}

func TestIssue3164(t *testing.T) {
	m := setUpModel(protocol.FileInfo{})
	f := setUpSendReceiveFolder(m)

	defaultFs.RemoveAll("issue3164")
	defer defaultFs.RemoveAll("issue3164")

	if err := defaultFs.MkdirAll("issue3164/oktodelete/foobar", 0777); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile("testdata/issue3164/oktodelete/foobar/file", []byte("Hello"), 0644); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile("testdata/issue3164/oktodelete/file", []byte("Hello"), 0644); err != nil {
		t.Fatal(err)
	}

	file := protocol.FileInfo{
		Name: "issue3164",
	}
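
	// The (?d) prefix marks the ignored files as deletable, so the leftovers
	// under oktodelete should not prevent handleDeleteDir from removing the
	// whole directory.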
	matcher := ignore.New(defaultFs)
	if err := matcher.Parse(bytes.NewBufferString("(?d)oktodelete"), ""); err != nil {
		t.Fatal(err)
	}

	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.handleDeleteDir(file, matcher, dbUpdateChan, make(chan string))

	if _, err := defaultFs.Stat("testdata/issue3164"); !fs.IsNotExist(err) {
		t.Fatal(err)
	}
}

func TestDiff(t *testing.T) {
	for i, test := range diffTestData {
		a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
		b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
		_, d := blockDiff(a, b)
		if len(d) != len(test.d) {
			t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
		} else {
			for j := range test.d {
				if d[j].Offset != test.d[j].Offset {
					t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
				}
				if d[j].Size != test.d[j].Size {
					t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
				}
			}
		}
	}
}

func BenchmarkDiff(b *testing.B) {
	testCases := make([]struct{ a, b []protocol.BlockInfo }, 0, len(diffTestData))
	for _, test := range diffTestData {
		a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
		b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
		testCases = append(testCases, struct{ a, b []protocol.BlockInfo }{a, b})
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for _, tc := range testCases {
			blockDiff(tc.a, tc.b)
		}
	}
}

func TestDiffEmpty(t *testing.T) {
	emptyCases := []struct {
		a    []protocol.BlockInfo
		b    []protocol.BlockInfo
		need int
		have int
	}{
		{nil, nil, 0, 0},
		{[]protocol.BlockInfo{{Offset: 3, Size: 1}}, nil, 0, 0},
		{nil, []protocol.BlockInfo{{Offset: 3, Size: 1}}, 1, 0},
	}
	for _, emptyCase := range emptyCases {
		h, n := blockDiff(emptyCase.a, emptyCase.b)
		if len(h) != emptyCase.have {
			t.Errorf("incorrect have: %d != %d", len(h), emptyCase.have)
		}
		if len(n) != emptyCase.need {
			t.Errorf("incorrect need: %d != %d", len(n), emptyCase.need)
		}
	}
}

// TestDeleteIgnorePerms checks that a file gets deleted when the IgnorePerms
// option is true and the permissions do not match between the file on disk and
// in the db.
func TestDeleteIgnorePerms(t *testing.T) {
	m := setUpModel()
	f := setUpSendReceiveFolder(m)
	f.IgnorePerms = true

	ffs := f.Filesystem()
	name := "deleteIgnorePerms"
	file, err := ffs.Create(name)
	if err != nil {
		t.Error(err)
	}
	defer ffs.Remove(name)
	defer file.Close()

	stat, err := file.Stat()
	if err != nil {
		t.Fatal(err)
	}
	fi, err := scanner.CreateFileInfo(stat, name, ffs)
	if err != nil {
		t.Fatal(err)
	}
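
	// Change the permissions on disk so they no longer match the scanned
	// FileInfo; with IgnorePerms set the file should still be deletable.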
	ffs.Chmod(name, 0600)

	scanChan := make(chan string)
	finished := make(chan struct{})
	go func() {
		err = f.checkToBeDeleted(fi, scanChan)
		close(finished)
	}()
	select {
	case <-scanChan:
		<-finished
	case <-finished:
	}
	if err != nil {
		t.Fatal(err)
	}
}

func TestCopyOwner(t *testing.T) {
	// Verifies that owner and group are copied from the parent, for both
	// files and directories.

	if runtime.GOOS == "windows" {
		t.Skip("copying owner not supported on Windows")
	}

	const (
		expOwner = 1234
		expGroup = 5678
	)

	// Set up a folder with the CopyParentOwner bit and backed by a fake
	// filesystem.
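	// The fake filesystem keeps ownership metadata in memory, so Lchown and
	// the owner/group assertions below work without elevated privileges.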

	m := setUpModel()
	f := &sendReceiveFolder{
		folder: folder{
			stateTracker:        newStateTracker("default"),
			model:               m,
			initialScanFinished: make(chan struct{}),
			ctx:                 context.TODO(),
			FolderConfiguration: config.FolderConfiguration{
				FilesystemType:          fs.FilesystemTypeFake,
				Path:                    "/TestCopyOwner",
				CopyOwnershipFromParent: true,
			},
		},

		queue:         newJobQueue(),
		pullErrors:    make(map[string]string),
		pullErrorsMut: sync.NewMutex(),
	}
	f.fs = f.Filesystem()

	// Create a parent dir with a certain owner/group.
	f.fs.Mkdir("foo", 0755)
	f.fs.Lchown("foo", expOwner, expGroup)

	dir := protocol.FileInfo{
		Name:        "foo/bar",
		Type:        protocol.FileInfoTypeDirectory,
		Permissions: 0755,
	}

	// Have the folder create a subdirectory, verify that it's the correct
	// owner/group.
	dbUpdateChan := make(chan dbUpdateJob, 1)
	defer close(dbUpdateChan)
	f.handleDir(dir, dbUpdateChan)
	<-dbUpdateChan // empty the channel for later

	info, err := f.fs.Lstat("foo/bar")
	if err != nil {
		t.Fatal("Unexpected error (dir):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected dir owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}

	// Have the folder create a file, verify it's the correct owner/group.
	// File is zero sized to avoid having to handle copies/pulls.
	file := protocol.FileInfo{
		Name:        "foo/bar/baz",
		Type:        protocol.FileInfoTypeFile,
		Permissions: 0644,
	}

	// Wire some stuff. The flow here is handleFile() -[copierChan]->
	// copierRoutine() -[finisherChan]-> finisherRoutine() -[dbUpdateChan]->
	// back to us and we're done. The copier routine doesn't do anything,
	// but it's the way data is passed around. When the database update
	// comes the finisher is done.
	finisherChan := make(chan *sharedPullerState)
	defer close(finisherChan)
	copierChan := make(chan copyBlocksState)
	defer close(copierChan)
	go f.copierRoutine(copierChan, nil, finisherChan)
	go f.finisherRoutine(nil, finisherChan, dbUpdateChan, nil)

	f.handleFile(file, copierChan, nil, nil)
	<-dbUpdateChan

	info, err = f.fs.Lstat("foo/bar/baz")
	if err != nil {
		t.Fatal("Unexpected error (file):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected file owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}

	// Have the folder create a symlink. Verify it accordingly.
	symlink := protocol.FileInfo{
		Name:          "foo/bar/sym",
		Type:          protocol.FileInfoTypeSymlink,
		Permissions:   0644,
		SymlinkTarget: "over the rainbow",
	}

	f.handleSymlink(symlink, dbUpdateChan)
	<-dbUpdateChan

	info, err = f.fs.Lstat("foo/bar/sym")
	if err != nil {
		t.Fatal("Unexpected error (symlink):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected symlink owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}
}