folder_sendrecv_test.go

// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package model

import (
	"bytes"
	"context"
	"crypto/rand"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"testing"
	"time"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/scanner"
	"github.com/syncthing/syncthing/lib/sync"
)
var blocks = []protocol.BlockInfo{
	{Hash: []uint8{0xfa, 0x43, 0x23, 0x9b, 0xce, 0xe7, 0xb9, 0x7c, 0xa6, 0x2f, 0x0, 0x7c, 0xc6, 0x84, 0x87, 0x56, 0xa, 0x39, 0xe1, 0x9f, 0x74, 0xf3, 0xdd, 0xe7, 0x48, 0x6d, 0xb3, 0xf9, 0x8d, 0xf8, 0xe4, 0x71}}, // Zero'ed out block
	{Offset: 0, Size: 0x20000, Hash: []uint8{0x7e, 0xad, 0xbc, 0x36, 0xae, 0xbb, 0xcf, 0x74, 0x43, 0xe2, 0x7a, 0x5a, 0x4b, 0xb8, 0x5b, 0xce, 0xe6, 0x9e, 0x1e, 0x10, 0xf9, 0x8a, 0xbc, 0x77, 0x95, 0x2, 0x29, 0x60, 0x9e, 0x96, 0xae, 0x6c}},
	{Offset: 131072, Size: 0x20000, Hash: []uint8{0x3c, 0xc4, 0x20, 0xf4, 0xb, 0x2e, 0xcb, 0xb9, 0x5d, 0xce, 0x34, 0xa8, 0xc3, 0x92, 0xea, 0xf3, 0xda, 0x88, 0x33, 0xee, 0x7a, 0xb6, 0xe, 0xf1, 0x82, 0x5e, 0xb0, 0xa9, 0x26, 0xa9, 0xc0, 0xef}},
	{Offset: 262144, Size: 0x20000, Hash: []uint8{0x76, 0xa8, 0xc, 0x69, 0xd7, 0x5c, 0x52, 0xfd, 0xdf, 0x55, 0xef, 0x44, 0xc1, 0xd6, 0x25, 0x48, 0x4d, 0x98, 0x48, 0x4d, 0xaa, 0x50, 0xf6, 0x6b, 0x32, 0x47, 0x55, 0x81, 0x6b, 0xed, 0xee, 0xfb}},
	{Offset: 393216, Size: 0x20000, Hash: []uint8{0x44, 0x1e, 0xa4, 0xf2, 0x8d, 0x1f, 0xc3, 0x1b, 0x9d, 0xa5, 0x18, 0x5e, 0x59, 0x1b, 0xd8, 0x5c, 0xba, 0x7d, 0xb9, 0x8d, 0x70, 0x11, 0x5c, 0xea, 0xa1, 0x57, 0x4d, 0xcb, 0x3c, 0x5b, 0xf8, 0x6c}},
	{Offset: 524288, Size: 0x20000, Hash: []uint8{0x8, 0x40, 0xd0, 0x5e, 0x80, 0x0, 0x0, 0x7c, 0x8b, 0xb3, 0x8b, 0xf7, 0x7b, 0x23, 0x26, 0x28, 0xab, 0xda, 0xcf, 0x86, 0x8f, 0xc2, 0x8a, 0x39, 0xc6, 0xe6, 0x69, 0x59, 0x97, 0xb6, 0x1a, 0x43}},
	{Offset: 655360, Size: 0x20000, Hash: []uint8{0x38, 0x8e, 0x44, 0xcb, 0x30, 0xd8, 0x90, 0xf, 0xce, 0x7, 0x4b, 0x58, 0x86, 0xde, 0xce, 0x59, 0xa2, 0x46, 0xd2, 0xf9, 0xba, 0xaf, 0x35, 0x87, 0x38, 0xdf, 0xd2, 0xd, 0xf9, 0x45, 0xed, 0x91}},
	{Offset: 786432, Size: 0x20000, Hash: []uint8{0x32, 0x28, 0xcd, 0xf, 0x37, 0x21, 0xe5, 0xd4, 0x1e, 0x58, 0x87, 0x73, 0x8e, 0x36, 0xdf, 0xb2, 0x70, 0x78, 0x56, 0xc3, 0x42, 0xff, 0xf7, 0x8f, 0x37, 0x95, 0x0, 0x26, 0xa, 0xac, 0x54, 0x72}},
	{Offset: 917504, Size: 0x20000, Hash: []uint8{0x96, 0x6b, 0x15, 0x6b, 0xc4, 0xf, 0x19, 0x18, 0xca, 0xbb, 0x5f, 0xd6, 0xbb, 0xa2, 0xc6, 0x2a, 0xac, 0xbb, 0x8a, 0xb9, 0xce, 0xec, 0x4c, 0xdb, 0x78, 0xec, 0x57, 0x5d, 0x33, 0xf9, 0x8e, 0xaf}},
}

var folders = []string{"default"}
var diffTestData = []struct {
	a string
	b string
	s int
	d []protocol.BlockInfo
}{
	{"contents", "contents", 1024, []protocol.BlockInfo{}},
	{"", "", 1024, []protocol.BlockInfo{}},
	{"contents", "contents", 3, []protocol.BlockInfo{}},
	{"contents", "cantents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}}},
	{"contents", "contants", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}}},
	{"contents", "cantants", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}}},
	{"contents", "", 3, []protocol.BlockInfo{{Offset: 0, Size: 0}}},
	{"", "contents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"con", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"contents", "con", 3, nil},
	{"contents", "cont", 3, []protocol.BlockInfo{{Offset: 3, Size: 1}}},
	{"cont", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
}
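
// setupFile returns a FileInfo for the given name whose block list is built
// from the global blocks table above, picking the block at each index in
// blockNumbers (index 0 is the zeroed-out block).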
func setupFile(filename string, blockNumbers []int) protocol.FileInfo {
	// Create existing file
	existingBlocks := make([]protocol.BlockInfo, len(blockNumbers))
	for i := range blockNumbers {
		existingBlocks[i] = blocks[blockNumbers[i]]
	}

	return protocol.FileInfo{
		Name:   filename,
		Blocks: existingBlocks,
	}
}
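
// createFile creates an empty file with the given name on the filesystem and
// returns the FileInfo produced by scanning it.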
func createFile(t *testing.T, name string, fs fs.Filesystem) protocol.FileInfo {
	t.Helper()

	f, err := fs.Create(name)
	must(t, err)
	f.Close()
	fi, err := fs.Stat(name)
	must(t, err)
	file, err := scanner.CreateFileInfo(fi, name, fs)
	must(t, err)
	return file
}
// Sets up a folder and model, but makes sure the services aren't actually running.
func setupSendReceiveFolder(files ...protocol.FileInfo) (*model, *sendReceiveFolder) {
	w, fcfg := tmpDefaultWrapper()
	model := setupModel(w)
	model.Supervisor.Stop()
	f := model.folderRunners[fcfg.ID].(*sendReceiveFolder)
	f.pullErrors = make(map[string]string)
	f.ctx = context.Background()

	// Update index
	if files != nil {
		f.updateLocalsFromScanning(files)
	}

	return model, f
}
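
// cleanupSRFolder undoes setupSendReceiveFolder: it stops the event logger
// and removes the temporary configuration file and folder directory.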
func cleanupSRFolder(f *sendReceiveFolder, m *model) {
	m.evLogger.Stop()
	os.Remove(m.cfg.ConfigPath())
	os.RemoveAll(f.Filesystem().URI())
}
// Layout of the files: (indexes from the above array)
// 12345678 - Required file
// 02005008 - Existing file (currently in the index)
// 02340070 - Temp file on the disk

func TestHandleFile(t *testing.T) {
	// After the diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7

	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setupFile("filex", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m, f := setupSendReceiveFolder(existingFile)
	defer cleanupSRFolder(f, m)

	copyChan := make(chan copyBlocksState, 1)

	f.handleFile(requiredFile, f.fset.Snapshot(), copyChan)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 8 {
		t.Errorf("Unexpected count of copy blocks: %d != 8", len(toCopy.blocks))
	}

	for _, block := range blocks[1:] {
		found := false
		for _, toCopyBlock := range toCopy.blocks {
			if string(toCopyBlock.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
	}
}
func TestHandleFileWithTemp(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7
	// After dropping out blocks already on the temp file we should:
	// Copy: 5, 8
	// Pull: 1, 6

	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setupFile("file", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m, f := setupSendReceiveFolder(existingFile)
	defer cleanupSRFolder(f, m)

	if _, err := prepareTmpFile(f.Filesystem()); err != nil {
		t.Fatal(err)
	}

	copyChan := make(chan copyBlocksState, 1)

	f.handleFile(requiredFile, f.fset.Snapshot(), copyChan)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 4 {
		t.Errorf("Unexpected count of copy blocks: %d != 4", len(toCopy.blocks))
	}

	for _, idx := range []int{1, 5, 6, 8} {
		found := false
		block := blocks[idx]
		for _, toCopyBlock := range toCopy.blocks {
			if string(toCopyBlock.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
	}
}
func TestCopierFinder(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 1, 2, 3, 4, 6, 7, 8
	// Since there is no existing file, nor a temp file
	// After dropping out blocks found locally:
	// Pull: 1, 5, 6, 8

	tempFile := fs.TempName("file2")

	existingBlocks := []int{0, 2, 3, 4, 0, 0, 7, 0}
	existingFile := setupFile(fs.TempName("file"), existingBlocks)
	existingFile.Size = 1
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]
	requiredFile.Name = "file2"

	m, f := setupSendReceiveFolder(existingFile)
	defer cleanupSRFolder(f, m)

	if _, err := prepareTmpFile(f.Filesystem()); err != nil {
		t.Fatal(err)
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 4)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single fetcher routine
	go f.copierRoutine(copyChan, pullChan, finisherChan)
	defer close(copyChan)

	f.handleFile(requiredFile, f.fset.Snapshot(), copyChan)

	timeout := time.After(10 * time.Second)
	pulls := make([]pullBlockState, 4)
	for i := 0; i < 4; i++ {
		select {
		case pulls[i] = <-pullChan:
		case <-timeout:
			t.Fatalf("Timed out before receiving all 4 states on pullChan (already got %v)", i)
		}
	}

	var finish *sharedPullerState
	select {
	case finish = <-finisherChan:
	case <-timeout:
  208. t.Fatal("Timed out before receiving 4 states on pullChan")
	}
	defer cleanupSharedPullerState(finish)

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	// Verify that the right blocks went into the pull list.
	// They are pulled in random order.
	for _, idx := range []int{1, 5, 6, 8} {
		found := false
		block := blocks[idx]
		for _, pulledBlock := range pulls {
			if string(pulledBlock.block.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
		if string(finish.file.Blocks[idx-1].Hash) != string(blocks[idx].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", idx, finish.file.Blocks[idx-1].String(), blocks[idx].String())
		}
	}

	// Verify that the fetched blocks have actually been written to the temp file
	blks, err := scanner.HashFile(context.TODO(), f.Filesystem(), tempFile, protocol.MinBlockSize, nil, false)
	if err != nil {
		t.Log(err)
	}

	for _, eq := range []int{2, 3, 4, 7} {
		if string(blks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, blks[eq-1].String(), blocks[eq].String())
		}
	}
}
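
// TestWeakHash builds a one megabyte file and a "desired" version of it whose
// content is shifted by a few bytes. Without weak hashing every block must be
// pulled again; with weak hashing only the block covering the shifted prefix
// should be pulled and the rest copied locally (shifted).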
func TestWeakHash(t *testing.T) {
	// Setup the model/pull environment
	model, fo := setupSendReceiveFolder()
	defer cleanupSRFolder(fo, model)
	ffs := fo.Filesystem()

	tempFile := fs.TempName("weakhash")
	var shift int64 = 10
	var size int64 = 1 << 20
	expectBlocks := int(size / protocol.MinBlockSize)
	expectPulls := int(shift / protocol.MinBlockSize)
	if shift > 0 {
		expectPulls++
	}

	f, err := ffs.Create("weakhash")
	must(t, err)
	defer f.Close()
	_, err = io.CopyN(f, rand.Reader, size)
	if err != nil {
		t.Error(err)
	}
	info, err := f.Stat()
	if err != nil {
		t.Error(err)
	}

	// Create two files, second file has `shifted` bytes random prefix, yet
	// both are of the same length, for example:
	// File 1: abcdefgh
	// File 2: xyabcdef
	f.Seek(0, os.SEEK_SET)
	existing, err := scanner.Blocks(context.TODO(), f, protocol.MinBlockSize, size, nil, true)
	if err != nil {
		t.Error(err)
	}

	f.Seek(0, os.SEEK_SET)
	remainder := io.LimitReader(f, size-shift)
	prefix := io.LimitReader(rand.Reader, shift)
	nf := io.MultiReader(prefix, remainder)
	desired, err := scanner.Blocks(context.TODO(), nf, protocol.MinBlockSize, size, nil, true)
	if err != nil {
		t.Error(err)
	}

	existingFile := protocol.FileInfo{
		Name:       "weakhash",
		Blocks:     existing,
		Size:       size,
		ModifiedS:  info.ModTime().Unix(),
		ModifiedNs: int32(info.ModTime().Nanosecond()),
	}
	desiredFile := protocol.FileInfo{
		Name:      "weakhash",
		Size:      size,
		Blocks:    desired,
		ModifiedS: info.ModTime().Unix() + 1,
	}

	fo.updateLocalsFromScanning([]protocol.FileInfo{existingFile})

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, expectBlocks)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single fetcher routine
	go fo.copierRoutine(copyChan, pullChan, finisherChan)
	defer close(copyChan)

	// Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
	fo.WeakHashThresholdPct = 101
	fo.handleFile(desiredFile, fo.fset.Snapshot(), copyChan)

	var pulls []pullBlockState
	timeout := time.After(10 * time.Second)
	for len(pulls) < expectBlocks {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-timeout:
  318. t.Fatalf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
		}
	}
	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	cleanupSharedPullerState(finish)
	if err := ffs.Remove(tempFile); err != nil {
		t.Fatal(err)
	}

	// Test 2 - using weak hash, expectPulls blocks pulled.
	fo.WeakHashThresholdPct = -1
	fo.handleFile(desiredFile, fo.fset.Snapshot(), copyChan)

	pulls = pulls[:0]
	for len(pulls) < expectPulls {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-time.After(10 * time.Second):
			t.Fatalf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
		}
	}

	finish = <-finisherChan
	cleanupSharedPullerState(finish)

	expectShifted := expectBlocks - expectPulls
	if finish.copyOriginShifted != expectShifted {
		t.Errorf("did not copy %d shifted", expectShifted)
	}
}
// Test that updating a file removes its old blocks from the blockmap
func TestCopierCleanup(t *testing.T) {
	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	// Create a file
	file := setupFile("test", []int{0})
	file.Size = 1
	m, f := setupSendReceiveFolder(file)
	defer cleanupSRFolder(f, m)

	file.Blocks = []protocol.BlockInfo{blocks[1]}
	file.Version = file.Version.Update(myID.Short())
	// Update index (removing old blocks)
	f.updateLocalsFromScanning([]protocol.FileInfo{file})

	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Unexpected block found")
	}

	if !m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[0]}
	file.Version = file.Version.Update(myID.Short())
	// Update index (removing old blocks)
	f.updateLocalsFromScanning([]protocol.FileInfo{file})
	if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	if m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Unexpected block found")
	}
}
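
// TestDeregisterOnFailInCopy checks that when a file fails during copying,
// the finisher routine deregisters it from both the progress emitter and the
// job queue, and that exactly one ItemFinished event is emitted for it.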
func TestDeregisterOnFailInCopy(t *testing.T) {
	file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})

	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	// Set up our event subscription early
	s := m.evLogger.Subscribe(events.ItemFinished)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState, 1)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)
	snap := f.fset.Snapshot()

	copyChan, copyWg := startCopier(f, pullChan, finisherBufferChan)
	go f.finisherRoutine(snap, finisherChan, dbUpdateChan, make(chan string))

	defer func() {
		close(copyChan)
		copyWg.Wait()
		close(pullChan)
		close(finisherBufferChan)
		close(finisherChan)
	}()

	f.handleFile(file, snap, copyChan)

	// Receive a block at puller, to indicate that at least a single copier
	// loop has been performed.
	var toPull pullBlockState
	select {
	case toPull = <-pullChan:
	case <-time.After(10 * time.Second):
		t.Fatal("timed out")
	}

	// Unblock copier
	go func() {
		for range pullChan {
		}
	}()

	// Close the file, causing errors on further access
	toPull.sharedPullerState.fail(os.ErrNotExist)

	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state

		t0 := time.Now()
		if ev, err := s.Poll(time.Minute); err != nil {
			t.Fatal("Got error waiting for ItemFinished event:", err)
		} else if n := ev.Data.(map[string]interface{})["item"]; n != state.file.Name {
			t.Fatal("Got ItemFinished event for wrong file:", n)
		}
		t.Log("event took", time.Since(t0))

		state.mut.Lock()
		stateWriter := state.writer
		state.mut.Unlock()
		if stateWriter != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state

		if _, err := s.Poll(time.Second); err != events.ErrTimeout {
			t.Fatal("Expected timeout, not another event", err)
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(5 * time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
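
// TestDeregisterOnFailInPull is the same check as above, but the failure
// happens in the puller instead of the copier: with nowhere to pull from, the
// puller fails the state and the finisher must still clean up the progress
// registry and the job queue.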
func TestDeregisterOnFailInPull(t *testing.T) {
	file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})

	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	// Set up our event subscription early
	s := m.evLogger.Subscribe(events.ItemFinished)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)
	snap := f.fset.Snapshot()

	copyChan, copyWg := startCopier(f, pullChan, finisherBufferChan)
	pullWg := sync.NewWaitGroup()
	pullWg.Add(1)
	go func() {
		f.pullerRoutine(pullChan, finisherBufferChan)
		pullWg.Done()
	}()
	go f.finisherRoutine(snap, finisherChan, dbUpdateChan, make(chan string))
	defer func() {
		// Unblock copier and puller
		go func() {
			for range finisherBufferChan {
			}
		}()
		close(copyChan)
		copyWg.Wait()
		close(pullChan)
		pullWg.Wait()
		close(finisherBufferChan)
		close(finisherChan)
	}()

	f.handleFile(file, snap, copyChan)

	// Receive at finisher, we should error out as puller has nowhere to pull
	// from.
	timeout = time.Second

	// Both the puller and copier may send to the finisherBufferChan.
	var state *sharedPullerState
	after := time.After(5 * time.Second)
	for {
		select {
		case state = <-finisherBufferChan:
		case <-after:
			t.Fatal("Didn't get failed state to the finisher")
		}
		if state.failed() != nil {
			break
		}
	}

	// At this point the file should still be registered with both the job
	// queue, and the progress emitter. Verify this.
	if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
		t.Fatal("Could not find file")
	}

	// Pass the file down the real finisher, and give it time to consume
	finisherChan <- state

	t0 := time.Now()
	if ev, err := s.Poll(time.Minute); err != nil {
		t.Fatal("Got error waiting for ItemFinished event:", err)
	} else if n := ev.Data.(map[string]interface{})["item"]; n != state.file.Name {
		t.Fatal("Got ItemFinished event for wrong file:", n)
	}
	t.Log("event took", time.Since(t0))

	state.mut.Lock()
	stateWriter := state.writer
	state.mut.Unlock()
	if stateWriter != nil {
		t.Fatal("File not closed?")
	}

	if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
		t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
	}

	// Doing it again should have no effect
	finisherChan <- state

	if _, err := s.Poll(time.Second); err != events.ErrTimeout {
		t.Fatal("Expected timeout, not another event", err)
	}

	if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
		t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
	}
}
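
// TestIssue3164 checks that a directory can be deleted even though it still
// contains files, as long as those files are matched by a (?d) "ok to delete"
// ignore pattern (syncthing issue #3164).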
func TestIssue3164(t *testing.T) {
	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	ffs := f.Filesystem()
	tmpDir := ffs.URI()

	ignDir := filepath.Join("issue3164", "oktodelete")
	subDir := filepath.Join(ignDir, "foobar")
	must(t, ffs.MkdirAll(subDir, 0777))
	must(t, ioutil.WriteFile(filepath.Join(tmpDir, subDir, "file"), []byte("Hello"), 0644))
	must(t, ioutil.WriteFile(filepath.Join(tmpDir, ignDir, "file"), []byte("Hello"), 0644))
	file := protocol.FileInfo{
		Name: "issue3164",
	}

	matcher := ignore.New(ffs)
	must(t, matcher.Parse(bytes.NewBufferString("(?d)oktodelete"), ""))
	f.ignores = matcher

	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.deleteDir(file, f.fset.Snapshot(), dbUpdateChan, make(chan string))

	if _, err := ffs.Stat("issue3164"); !fs.IsNotExist(err) {
		t.Fatal(err)
	}
}
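
// TestDiff runs blockDiff over the diffTestData table and verifies the
// offsets and sizes of the blocks reported as needed.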
func TestDiff(t *testing.T) {
	for i, test := range diffTestData {
		a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
		b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
		_, d := blockDiff(a, b)
		if len(d) != len(test.d) {
			t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
		} else {
			for j := range test.d {
				if d[j].Offset != test.d[j].Offset {
					t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
				}
				if d[j].Size != test.d[j].Size {
					t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
				}
			}
		}
	}
}
func BenchmarkDiff(b *testing.B) {
	testCases := make([]struct{ a, b []protocol.BlockInfo }, 0, len(diffTestData))
	for _, test := range diffTestData {
		a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
		b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
		testCases = append(testCases, struct{ a, b []protocol.BlockInfo }{a, b})
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for _, tc := range testCases {
			blockDiff(tc.a, tc.b)
		}
	}
}
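
// TestDiffEmpty covers blockDiff when one or both block lists are empty.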
func TestDiffEmpty(t *testing.T) {
	emptyCases := []struct {
		a    []protocol.BlockInfo
		b    []protocol.BlockInfo
		need int
		have int
	}{
		{nil, nil, 0, 0},
		{[]protocol.BlockInfo{{Offset: 3, Size: 1}}, nil, 0, 0},
		{nil, []protocol.BlockInfo{{Offset: 3, Size: 1}}, 1, 0},
	}
	for _, emptyCase := range emptyCases {
		h, n := blockDiff(emptyCase.a, emptyCase.b)
		if len(h) != emptyCase.have {
			t.Errorf("incorrect have: %d != %d", len(h), emptyCase.have)
		}
		if len(n) != emptyCase.need {
			t.Errorf("incorrect need: %d != %d", len(n), emptyCase.need)
		}
	}
}
// TestDeleteIgnorePerms checks that a file gets deleted when the IgnorePerms
// option is true and the permissions do not match between the file on disk
// and in the db.
func TestDeleteIgnorePerms(t *testing.T) {
	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	ffs := f.Filesystem()
	f.IgnorePerms = true

	name := "deleteIgnorePerms"
	file, err := ffs.Create(name)
	if err != nil {
		t.Error(err)
	}
	defer file.Close()

	stat, err := file.Stat()
	must(t, err)
	fi, err := scanner.CreateFileInfo(stat, name, ffs)
	must(t, err)
	ffs.Chmod(name, 0600)
	scanChan := make(chan string)
	finished := make(chan struct{})
	go func() {
		err = f.checkToBeDeleted(fi, scanChan)
		close(finished)
	}()
	select {
	case <-scanChan:
		<-finished
	case <-finished:
	}
	must(t, err)
}
func TestCopyOwner(t *testing.T) {
	// Verifies that owner and group are copied from the parent, for both
	// files and directories.

	if runtime.GOOS == "windows" {
		t.Skip("copying owner not supported on Windows")
	}

	const (
		expOwner = 1234
		expGroup = 5678
	)

	// Set up a folder with the CopyParentOwner bit and backed by a fake
	// filesystem.

	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	f.folder.FolderConfiguration = config.NewFolderConfiguration(m.id, f.ID, f.Label, fs.FilesystemTypeFake, "/TestCopyOwner")
	f.folder.FolderConfiguration.CopyOwnershipFromParent = true

	f.fs = f.Filesystem()

	// Create a parent dir with a certain owner/group.

	f.fs.Mkdir("foo", 0755)
	f.fs.Lchown("foo", expOwner, expGroup)

	dir := protocol.FileInfo{
		Name:        "foo/bar",
		Type:        protocol.FileInfoTypeDirectory,
		Permissions: 0755,
	}

	// Have the folder create a subdirectory, verify that it's the correct
	// owner/group.

	dbUpdateChan := make(chan dbUpdateJob, 1)
	defer close(dbUpdateChan)
	f.handleDir(dir, f.fset.Snapshot(), dbUpdateChan, nil)
	<-dbUpdateChan // empty the channel for later

	info, err := f.fs.Lstat("foo/bar")
	if err != nil {
		t.Fatal("Unexpected error (dir):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected dir owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}

	// Have the folder create a file, verify it's the correct owner/group.
	// File is zero sized to avoid having to handle copies/pulls.

	file := protocol.FileInfo{
		Name:        "foo/bar/baz",
		Type:        protocol.FileInfoTypeFile,
		Permissions: 0644,
	}

	// Wire some stuff. The flow here is handleFile() -[copierChan]->
	// copierRoutine() -[finisherChan]-> finisherRoutine() -[dbUpdateChan]->
	// back to us and we're done. The copier routine doesn't do anything,
	// but it's the way data is passed around. When the database update
	// comes the finisher is done.

	snap := f.fset.Snapshot()
	finisherChan := make(chan *sharedPullerState)
	copierChan, copyWg := startCopier(f, nil, finisherChan)
	go f.finisherRoutine(snap, finisherChan, dbUpdateChan, nil)
	defer func() {
		close(copierChan)
		copyWg.Wait()
		close(finisherChan)
	}()

	f.handleFile(file, snap, copierChan)
	<-dbUpdateChan

	info, err = f.fs.Lstat("foo/bar/baz")
	if err != nil {
		t.Fatal("Unexpected error (file):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected file owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}

	// Have the folder create a symlink. Verify it accordingly.
	symlink := protocol.FileInfo{
		Name:          "foo/bar/sym",
		Type:          protocol.FileInfoTypeSymlink,
		Permissions:   0644,
		SymlinkTarget: "over the rainbow",
	}

	f.handleSymlink(symlink, snap, dbUpdateChan, nil)
	<-dbUpdateChan

	info, err = f.fs.Lstat("foo/bar/sym")
	if err != nil {
		t.Fatal("Unexpected error (symlink):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected symlink owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}
}
// TestSRConflictReplaceFileByDir checks that a conflict is created when an existing file
// is replaced with a directory and versions are conflicting
func TestSRConflictReplaceFileByDir(t *testing.T) {
	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	ffs := f.Filesystem()

	name := "foo"

	// create local file
	file := createFile(t, name, ffs)
	file.Version = protocol.Vector{}.Update(myID.Short())
	f.updateLocalsFromScanning([]protocol.FileInfo{file})

	// Simulate remote creating a dir with the same name
	file.Type = protocol.FileInfoTypeDirectory
	rem := device1.Short()
	file.Version = protocol.Vector{}.Update(rem)
	file.ModifiedBy = rem

	dbUpdateChan := make(chan dbUpdateJob, 1)
	scanChan := make(chan string, 1)

	f.handleDir(file, f.fset.Snapshot(), dbUpdateChan, scanChan)

	if confls := existingConflicts(name, ffs); len(confls) != 1 {
		t.Fatal("Expected one conflict, got", len(confls))
	} else if scan := <-scanChan; confls[0] != scan {
		t.Fatal("Expected request to scan", confls[0], "got", scan)
	}
}
// TestSRConflictReplaceFileByLink checks that a conflict is created when an existing file
// is replaced with a link and versions are conflicting
func TestSRConflictReplaceFileByLink(t *testing.T) {
	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	ffs := f.Filesystem()

	name := "foo"

	// create local file
	file := createFile(t, name, ffs)
	file.Version = protocol.Vector{}.Update(myID.Short())
	f.updateLocalsFromScanning([]protocol.FileInfo{file})

	// Simulate remote creating a symlink with the same name
	file.Type = protocol.FileInfoTypeSymlink
	file.SymlinkTarget = "bar"
	rem := device1.Short()
	file.Version = protocol.Vector{}.Update(rem)
	file.ModifiedBy = rem

	dbUpdateChan := make(chan dbUpdateJob, 1)
	scanChan := make(chan string, 1)

	f.handleSymlink(file, f.fset.Snapshot(), dbUpdateChan, scanChan)

	if confls := existingConflicts(name, ffs); len(confls) != 1 {
		t.Fatal("Expected one conflict, got", len(confls))
	} else if scan := <-scanChan; confls[0] != scan {
		t.Fatal("Expected request to scan", confls[0], "got", scan)
	}
}
// TestDeleteBehindSymlink checks that we don't delete or schedule a scan
// when trying to delete a file behind a symlink.
func TestDeleteBehindSymlink(t *testing.T) {
	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	ffs := f.Filesystem()

	destDir := createTmpDir()
	defer os.RemoveAll(destDir)
	destFs := fs.NewFilesystem(fs.FilesystemTypeBasic, destDir)

	link := "link"
	file := filepath.Join(link, "file")
	must(t, ffs.MkdirAll(link, 0755))
	fi := createFile(t, file, ffs)
	f.updateLocalsFromScanning([]protocol.FileInfo{fi})
	must(t, osutil.RenameOrCopy(ffs, destFs, file, "file"))
	must(t, ffs.RemoveAll(link))

	if err := osutil.DebugSymlinkForTestsOnly(destFs.URI(), filepath.Join(ffs.URI(), link)); err != nil {
		if runtime.GOOS == "windows" {
			// Probably we require permissions we don't have.
			t.Skip("Need admin permissions or developer mode to run symlink test on Windows: " + err.Error())
		} else {
			t.Fatal(err)
		}
	}

	fi.Deleted = true
	fi.Version = fi.Version.Update(device1.Short())
	scanChan := make(chan string, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)
	f.deleteFile(fi, f.fset.Snapshot(), dbUpdateChan, scanChan)
	select {
	case f := <-scanChan:
		t.Fatalf("Received %v on scanChan", f)
	case u := <-dbUpdateChan:
		if u.jobType != dbUpdateDeleteFile {
			t.Errorf("Expected jobType %v, got %v", dbUpdateDeleteFile, u.jobType)
		}
		if u.file.Name != fi.Name {
			t.Errorf("Expected update for %v, got %v", fi.Name, u.file.Name)
		}
	default:
		t.Fatalf("No db update received")
	}
	if _, err := destFs.Stat("file"); err != nil {
		t.Errorf("Expected no error when stating file behind symlink, got %v", err)
	}
}
// Reproduces https://github.com/syncthing/syncthing/issues/6559
func TestPullCtxCancel(t *testing.T) {
	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)

	pullChan := make(chan pullBlockState)
	finisherChan := make(chan *sharedPullerState)

	var cancel context.CancelFunc
	f.ctx, cancel = context.WithCancel(context.Background())

	go f.pullerRoutine(pullChan, finisherChan)
	defer close(pullChan)

	emptyState := func() pullBlockState {
		return pullBlockState{
			sharedPullerState: newSharedPullerState(protocol.FileInfo{}, nil, f.folderID, "", nil, nil, false, false, protocol.FileInfo{}, false, false),
			block:             protocol.BlockInfo{},
		}
	}

	cancel()

	done := make(chan struct{})
	defer close(done)
	for i := 0; i < 2; i++ {
		go func() {
			select {
			case pullChan <- emptyState():
			case <-done:
			}
		}()
		select {
		case s := <-finisherChan:
			if s.failed() == nil {
				t.Errorf("state %v not failed", i)
			}
		case <-time.After(5 * time.Second):
			t.Fatalf("timed out before receiving state %v on finisherChan", i)
		}
	}
}
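
// cleanupSharedPullerState closes the temp file writer held by the shared
// puller state, if one is still open.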
func cleanupSharedPullerState(s *sharedPullerState) {
	s.mut.Lock()
	defer s.mut.Unlock()
	if s.writer == nil {
		return
	}
	s.writer.mut.Lock()
	s.writer.fd.Close()
	s.writer.mut.Unlock()
}
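
// startCopier runs a copierRoutine in the background and returns its input
// channel together with a WaitGroup that is done once the routine has exited.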
func startCopier(f *sendReceiveFolder, pullChan chan<- pullBlockState, finisherChan chan<- *sharedPullerState) (chan copyBlocksState, sync.WaitGroup) {
	copyChan := make(chan copyBlocksState)
	wg := sync.NewWaitGroup()
	wg.Add(1)
	go func() {
		f.copierRoutine(copyChan, pullChan, finisherChan)
		wg.Done()
	}()
	return copyChan, wg
}