folder_sendrecv_test.go

// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package model

import (
	"bytes"
	"context"
	"crypto/rand"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"testing"
	"time"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/scanner"
	"github.com/syncthing/syncthing/lib/sync"
)

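// blocks holds the block hashes used to build the synthetic test files below;
// index 0 is the hash of a zeroed-out block, the rest are 128 KiB (0x20000)
// blocks at successive offsets.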
var blocks = []protocol.BlockInfo{
	{Hash: []uint8{0xfa, 0x43, 0x23, 0x9b, 0xce, 0xe7, 0xb9, 0x7c, 0xa6, 0x2f, 0x0, 0x7c, 0xc6, 0x84, 0x87, 0x56, 0xa, 0x39, 0xe1, 0x9f, 0x74, 0xf3, 0xdd, 0xe7, 0x48, 0x6d, 0xb3, 0xf9, 0x8d, 0xf8, 0xe4, 0x71}}, // Zero'ed out block
	{Offset: 0, Size: 0x20000, Hash: []uint8{0x7e, 0xad, 0xbc, 0x36, 0xae, 0xbb, 0xcf, 0x74, 0x43, 0xe2, 0x7a, 0x5a, 0x4b, 0xb8, 0x5b, 0xce, 0xe6, 0x9e, 0x1e, 0x10, 0xf9, 0x8a, 0xbc, 0x77, 0x95, 0x2, 0x29, 0x60, 0x9e, 0x96, 0xae, 0x6c}},
	{Offset: 131072, Size: 0x20000, Hash: []uint8{0x3c, 0xc4, 0x20, 0xf4, 0xb, 0x2e, 0xcb, 0xb9, 0x5d, 0xce, 0x34, 0xa8, 0xc3, 0x92, 0xea, 0xf3, 0xda, 0x88, 0x33, 0xee, 0x7a, 0xb6, 0xe, 0xf1, 0x82, 0x5e, 0xb0, 0xa9, 0x26, 0xa9, 0xc0, 0xef}},
	{Offset: 262144, Size: 0x20000, Hash: []uint8{0x76, 0xa8, 0xc, 0x69, 0xd7, 0x5c, 0x52, 0xfd, 0xdf, 0x55, 0xef, 0x44, 0xc1, 0xd6, 0x25, 0x48, 0x4d, 0x98, 0x48, 0x4d, 0xaa, 0x50, 0xf6, 0x6b, 0x32, 0x47, 0x55, 0x81, 0x6b, 0xed, 0xee, 0xfb}},
	{Offset: 393216, Size: 0x20000, Hash: []uint8{0x44, 0x1e, 0xa4, 0xf2, 0x8d, 0x1f, 0xc3, 0x1b, 0x9d, 0xa5, 0x18, 0x5e, 0x59, 0x1b, 0xd8, 0x5c, 0xba, 0x7d, 0xb9, 0x8d, 0x70, 0x11, 0x5c, 0xea, 0xa1, 0x57, 0x4d, 0xcb, 0x3c, 0x5b, 0xf8, 0x6c}},
	{Offset: 524288, Size: 0x20000, Hash: []uint8{0x8, 0x40, 0xd0, 0x5e, 0x80, 0x0, 0x0, 0x7c, 0x8b, 0xb3, 0x8b, 0xf7, 0x7b, 0x23, 0x26, 0x28, 0xab, 0xda, 0xcf, 0x86, 0x8f, 0xc2, 0x8a, 0x39, 0xc6, 0xe6, 0x69, 0x59, 0x97, 0xb6, 0x1a, 0x43}},
	{Offset: 655360, Size: 0x20000, Hash: []uint8{0x38, 0x8e, 0x44, 0xcb, 0x30, 0xd8, 0x90, 0xf, 0xce, 0x7, 0x4b, 0x58, 0x86, 0xde, 0xce, 0x59, 0xa2, 0x46, 0xd2, 0xf9, 0xba, 0xaf, 0x35, 0x87, 0x38, 0xdf, 0xd2, 0xd, 0xf9, 0x45, 0xed, 0x91}},
	{Offset: 786432, Size: 0x20000, Hash: []uint8{0x32, 0x28, 0xcd, 0xf, 0x37, 0x21, 0xe5, 0xd4, 0x1e, 0x58, 0x87, 0x73, 0x8e, 0x36, 0xdf, 0xb2, 0x70, 0x78, 0x56, 0xc3, 0x42, 0xff, 0xf7, 0x8f, 0x37, 0x95, 0x0, 0x26, 0xa, 0xac, 0x54, 0x72}},
	{Offset: 917504, Size: 0x20000, Hash: []uint8{0x96, 0x6b, 0x15, 0x6b, 0xc4, 0xf, 0x19, 0x18, 0xca, 0xbb, 0x5f, 0xd6, 0xbb, 0xa2, 0xc6, 0x2a, 0xac, 0xbb, 0x8a, 0xb9, 0xce, 0xec, 0x4c, 0xdb, 0x78, 0xec, 0x57, 0x5d, 0x33, 0xf9, 0x8e, 0xaf}},
}

var folders = []string{"default"}

var diffTestData = []struct {
	a string
	b string
	s int
	d []protocol.BlockInfo
}{
	{"contents", "contents", 1024, []protocol.BlockInfo{}},
	{"", "", 1024, []protocol.BlockInfo{}},
	{"contents", "contents", 3, []protocol.BlockInfo{}},
	{"contents", "cantents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}}},
	{"contents", "contants", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}}},
	{"contents", "cantants", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}}},
	{"contents", "", 3, []protocol.BlockInfo{{Offset: 0, Size: 0}}},
	{"", "contents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"con", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"contents", "con", 3, nil},
	{"contents", "cont", 3, []protocol.BlockInfo{{Offset: 3, Size: 1}}},
	{"cont", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
}

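// setupFile returns a FileInfo named filename whose block list is picked from
// the global blocks table by index.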
func setupFile(filename string, blockNumbers []int) protocol.FileInfo {
	// Create existing file
	existingBlocks := make([]protocol.BlockInfo, len(blockNumbers))
	for i := range blockNumbers {
		existingBlocks[i] = blocks[blockNumbers[i]]
	}

	return protocol.FileInfo{
		Name:   filename,
		Blocks: existingBlocks,
	}
}

func createFile(t *testing.T, name string, fs fs.Filesystem) protocol.FileInfo {
	t.Helper()

	f, err := fs.Create(name)
	if err != nil {
		t.Fatal(err)
	}
	f.Close()

	fi, err := fs.Stat(name)
	if err != nil {
		t.Fatal(err)
	}
	file, err := scanner.CreateFileInfo(fi, name, fs)
	if err != nil {
		t.Fatal(err)
	}
	return file
}

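// setupSendReceiveFolder creates a model with a single send-receive folder
// backed by a temporary directory, optionally seeds its index with the given
// files, and returns the model, the folder and the temporary directory path.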
func setupSendReceiveFolder(files ...protocol.FileInfo) (*model, *sendReceiveFolder, string) {
	w := createTmpWrapper(defaultCfg)
	model := newModel(w, myID, "syncthing", "dev", db.OpenMemory(), nil)
	fcfg, tmpDir := testFolderConfigTmp()
	model.AddFolder(fcfg)

	// Update index
	if files != nil {
		model.updateLocalsFromScanning("default", files)
	}

	f := &sendReceiveFolder{
		folder: folder{
			stateTracker:        newStateTracker("default"),
			model:               model,
			initialScanFinished: make(chan struct{}),
			ctx:                 context.TODO(),
			FolderConfiguration: fcfg,
		},

		queue:         newJobQueue(),
		pullErrors:    make(map[string]string),
		pullErrorsMut: sync.NewMutex(),
	}
	f.fs = fs.NewMtimeFS(f.Filesystem(), db.NewNamespacedKV(model.db, "mtime"))

	// Folders are never actually started, so no initial scan will be done
	close(f.initialScanFinished)

	return model, f, tmpDir
}

// Layout of the files: (indexes from the above array)
// 12345678 - Required file
// 02005008 - Existing file (currently in the index)
// 02340070 - Temp file on the disk
func TestHandleFile(t *testing.T) {
	// After the diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7

	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setupFile("filex", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m, f, tmpDir := setupSendReceiveFolder(existingFile)
	defer func() {
		os.Remove(m.cfg.ConfigPath())
		os.Remove(tmpDir)
	}()

	copyChan := make(chan copyBlocksState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.handleFile(requiredFile, copyChan, nil, dbUpdateChan)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 8 {
		t.Errorf("Unexpected count of copy blocks: %d != 8", len(toCopy.blocks))
	}

	for _, block := range blocks[1:] {
		found := false
		for _, toCopyBlock := range toCopy.blocks {
			if string(toCopyBlock.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
	}
}

func TestHandleFileWithTemp(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7

	// After dropping out blocks already on the temp file we should:
	// Copy: 5, 8
	// Pull: 1, 6

	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setupFile("file", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m, f, tmpDir := setupSendReceiveFolder(existingFile)
	defer func() {
		os.Remove(m.cfg.ConfigPath())
		os.Remove(tmpDir)
	}()

	if _, err := prepareTmpFile(f.Filesystem()); err != nil {
		t.Fatal(err)
	}

	copyChan := make(chan copyBlocksState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.handleFile(requiredFile, copyChan, nil, dbUpdateChan)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 4 {
		t.Errorf("Unexpected count of copy blocks: %d != 4", len(toCopy.blocks))
	}

	for _, idx := range []int{1, 5, 6, 8} {
		found := false
		block := blocks[idx]
		for _, toCopyBlock := range toCopy.blocks {
			if string(toCopyBlock.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
	}
}

func TestCopierFinder(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 1, 2, 3, 4, 6, 7, 8
	// Since there is no existing file, nor a temp file

	// After dropping out blocks found locally:
	// Pull: 1, 5, 6, 8

	tempFile := fs.TempName("file2")

	existingBlocks := []int{0, 2, 3, 4, 0, 0, 7, 0}
	existingFile := setupFile(fs.TempName("file"), existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]
	requiredFile.Name = "file2"

	m, f, tmpDir := setupSendReceiveFolder(existingFile)
	defer func() {
		os.Remove(m.cfg.ConfigPath())
		os.Remove(tmpDir)
	}()

	if _, err := prepareTmpFile(f.Filesystem()); err != nil {
		t.Fatal(err)
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 4)
	finisherChan := make(chan *sharedPullerState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	// Run a single fetcher routine
	go f.copierRoutine(copyChan, pullChan, finisherChan)

	f.handleFile(requiredFile, copyChan, finisherChan, dbUpdateChan)

	pulls := []pullBlockState{<-pullChan, <-pullChan, <-pullChan, <-pullChan}
	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	// Verify that the right blocks went into the pull list.
	// They are pulled in random order.
	for _, idx := range []int{1, 5, 6, 8} {
		found := false
		block := blocks[idx]
		for _, pulledBlock := range pulls {
			if string(pulledBlock.block.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
		if string(finish.file.Blocks[idx-1].Hash) != string(blocks[idx].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", idx, finish.file.Blocks[idx-1].String(), blocks[idx].String())
		}
	}

	// Verify that the fetched blocks have actually been written to the temp file
	blks, err := scanner.HashFile(context.TODO(), f.Filesystem(), tempFile, protocol.MinBlockSize, nil, false)
	if err != nil {
		t.Log(err)
	}

	for _, eq := range []int{2, 3, 4, 7} {
		if string(blks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, blks[eq-1].String(), blocks[eq].String())
		}
	}
	finish.fd.Close()
}

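// TestWeakHash checks that with weak hashing enabled the copier reuses the
// shifted blocks of the existing file, so only the shifted prefix needs to be
// pulled, while with weak hashing disabled the file is fully repulled.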
func TestWeakHash(t *testing.T) {
	// Setup the model/pull environment
	model, fo, tmpDir := setupSendReceiveFolder()
	defer func() {
		os.Remove(model.cfg.ConfigPath())
		os.Remove(tmpDir)
	}()
	ffs := fo.Filesystem()

	tempFile := fs.TempName("weakhash")
	var shift int64 = 10
	var size int64 = 1 << 20
	expectBlocks := int(size / protocol.MinBlockSize)
	expectPulls := int(shift / protocol.MinBlockSize)
	if shift > 0 {
		expectPulls++
	}

	f, err := ffs.Create("weakhash")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	_, err = io.CopyN(f, rand.Reader, size)
	if err != nil {
		t.Error(err)
	}
	info, err := f.Stat()
	if err != nil {
		t.Error(err)
	}

	// Create two files, second file has `shift` bytes of random prefix, yet
	// both are of the same length, for example:
	// File 1: abcdefgh
	// File 2: xyabcdef
	f.Seek(0, os.SEEK_SET)
	existing, err := scanner.Blocks(context.TODO(), f, protocol.MinBlockSize, size, nil, true)
	if err != nil {
		t.Error(err)
	}

	f.Seek(0, os.SEEK_SET)
	remainder := io.LimitReader(f, size-shift)
	prefix := io.LimitReader(rand.Reader, shift)
	nf := io.MultiReader(prefix, remainder)
	desired, err := scanner.Blocks(context.TODO(), nf, protocol.MinBlockSize, size, nil, true)
	if err != nil {
		t.Error(err)
	}

	existingFile := protocol.FileInfo{
		Name:       "weakhash",
		Blocks:     existing,
		Size:       size,
		ModifiedS:  info.ModTime().Unix(),
		ModifiedNs: int32(info.ModTime().Nanosecond()),
	}
	desiredFile := protocol.FileInfo{
		Name:      "weakhash",
		Size:      size,
		Blocks:    desired,
		ModifiedS: info.ModTime().Unix() + 1,
	}

	model.updateLocalsFromScanning("default", []protocol.FileInfo{existingFile})

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, expectBlocks)
	finisherChan := make(chan *sharedPullerState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	// Run a single fetcher routine
	go fo.copierRoutine(copyChan, pullChan, finisherChan)

	// Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
	fo.WeakHashThresholdPct = 101
	fo.handleFile(desiredFile, copyChan, finisherChan, dbUpdateChan)

	var pulls []pullBlockState
	for len(pulls) < expectBlocks {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-time.After(10 * time.Second):
			t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectBlocks)
		}
	}
	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	finish.fd.Close()
	if err := ffs.Remove(tempFile); err != nil {
		t.Fatal(err)
	}

	// Test 2 - using weak hash, expectPulls blocks pulled.
	fo.WeakHashThresholdPct = -1
	fo.handleFile(desiredFile, copyChan, finisherChan, dbUpdateChan)

	pulls = pulls[:0]
	for len(pulls) < expectPulls {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-time.After(10 * time.Second):
			t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
		}
	}

	finish = <-finisherChan
	finish.fd.Close()

	expectShifted := expectBlocks - expectPulls
	if finish.copyOriginShifted != expectShifted {
		t.Errorf("did not copy %d shifted", expectShifted)
	}
}

// Test that updating a file removes its old blocks from the blockmap
func TestCopierCleanup(t *testing.T) {
	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	// Create a file
	file := setupFile("test", []int{0})
	m, _, tmpDir := setupSendReceiveFolder(file)
	defer func() {
		os.Remove(m.cfg.ConfigPath())
		os.Remove(tmpDir)
	}()

	file.Blocks = []protocol.BlockInfo{blocks[1]}
	file.Version = file.Version.Update(myID.Short())
	// Update index (removing old blocks)
	m.updateLocalsFromScanning("default", []protocol.FileInfo{file})

	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Unexpected block found")
	}

	if !m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[0]}
	file.Version = file.Version.Update(myID.Short())
	// Update index (removing old blocks)
	m.updateLocalsFromScanning("default", []protocol.FileInfo{file})

	if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	if m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Unexpected block found")
	}
}

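// TestDeregisterOnFailInCopy checks that a file that fails during the copier
// phase is deregistered from both the job queue and the progress emitter.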
func TestDeregisterOnFailInCopy(t *testing.T) {
	file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})

	m, f, tmpDir := setupSendReceiveFolder()
	defer func() {
		os.Remove(m.cfg.ConfigPath())
		os.Remove(tmpDir)
	}()

	// Set up our event subscription early
	s := events.Default.Subscribe(events.ItemFinished)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	go f.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go f.finisherRoutine(ignore.New(defaultFs), finisherChan, dbUpdateChan, make(chan string))

	f.handleFile(file, copyChan, finisherChan, dbUpdateChan)

	// Receive a block at puller, to indicate that at least a single copier
	// loop has been performed.
	toPull := <-pullChan

	// Close the file, causing errors on further access
	toPull.sharedPullerState.fail(os.ErrNotExist)

	// Unblock copier
	go func() {
		for range pullChan {
		}
	}()

	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state

		t0 := time.Now()
		if ev, err := s.Poll(time.Minute); err != nil {
			t.Fatal("Got error waiting for ItemFinished event:", err)
		} else if n := ev.Data.(map[string]interface{})["item"]; n != state.file.Name {
			t.Fatal("Got ItemFinished event for wrong file:", n)
		}
		t.Log("event took", time.Since(t0))

		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state

		if _, err := s.Poll(time.Second); err != events.ErrTimeout {
			t.Fatal("Expected timeout, not another event", err)
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}

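// TestDeregisterOnFailInPull checks the same deregistration as above when the
// failure happens in the puller phase instead of the copier phase.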
func TestDeregisterOnFailInPull(t *testing.T) {
	file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})

	m, f, tmpDir := setupSendReceiveFolder()
	defer func() {
		os.Remove(m.cfg.ConfigPath())
		os.Remove(tmpDir)
	}()

	// Set up our event subscription early
	s := events.Default.Subscribe(events.ItemFinished)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	go f.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go f.pullerRoutine(pullChan, finisherBufferChan)
	go f.finisherRoutine(ignore.New(defaultFs), finisherChan, dbUpdateChan, make(chan string))

	f.handleFile(file, copyChan, finisherChan, dbUpdateChan)

	// Receive at finisher, we should error out as puller has nowhere to pull
	// from.
	timeout = time.Second
	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state

		t0 := time.Now()
		if ev, err := s.Poll(time.Minute); err != nil {
			t.Fatal("Got error waiting for ItemFinished event:", err)
		} else if n := ev.Data.(map[string]interface{})["item"]; n != state.file.Name {
			t.Fatal("Got ItemFinished event for wrong file:", n)
		}
		t.Log("event took", time.Since(t0))

		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state

		if _, err := s.Poll(time.Second); err != events.ErrTimeout {
			t.Fatal("Expected timeout, not another event", err)
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}

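// TestIssue3164 checks that deleteDir removes a directory even when it still
// contains files matched by a delete-enabled ((?d)) ignore pattern.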
func TestIssue3164(t *testing.T) {
	m, f, tmpDir := setupSendReceiveFolder()
	defer func() {
		os.Remove(m.cfg.ConfigPath())
		os.Remove(tmpDir)
	}()
	ffs := f.Filesystem()

	ignDir := filepath.Join("issue3164", "oktodelete")
	subDir := filepath.Join(ignDir, "foobar")
	if err := ffs.MkdirAll(subDir, 0777); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(filepath.Join(tmpDir, subDir, "file"), []byte("Hello"), 0644); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(filepath.Join(tmpDir, ignDir, "file"), []byte("Hello"), 0644); err != nil {
		t.Fatal(err)
	}

	file := protocol.FileInfo{
		Name: "issue3164",
	}

	matcher := ignore.New(ffs)
	if err := matcher.Parse(bytes.NewBufferString("(?d)oktodelete"), ""); err != nil {
		t.Fatal(err)
	}

	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.deleteDir(file, matcher, dbUpdateChan, make(chan string))

	if _, err := ffs.Stat("issue3164"); !fs.IsNotExist(err) {
		t.Fatal(err)
	}
}

func TestDiff(t *testing.T) {
	for i, test := range diffTestData {
		a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
		b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
		_, d := blockDiff(a, b)
		if len(d) != len(test.d) {
			t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
		} else {
			for j := range test.d {
				if d[j].Offset != test.d[j].Offset {
					t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
				}
				if d[j].Size != test.d[j].Size {
					t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
				}
			}
		}
	}
}

func BenchmarkDiff(b *testing.B) {
	testCases := make([]struct{ a, b []protocol.BlockInfo }, 0, len(diffTestData))
	for _, test := range diffTestData {
		a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
		b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
		testCases = append(testCases, struct{ a, b []protocol.BlockInfo }{a, b})
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for _, tc := range testCases {
			blockDiff(tc.a, tc.b)
		}
	}
}

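// TestDiffEmpty checks blockDiff behaviour when either block list is empty.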
func TestDiffEmpty(t *testing.T) {
	emptyCases := []struct {
		a    []protocol.BlockInfo
		b    []protocol.BlockInfo
		need int
		have int
	}{
		{nil, nil, 0, 0},
		{[]protocol.BlockInfo{{Offset: 3, Size: 1}}, nil, 0, 0},
		{nil, []protocol.BlockInfo{{Offset: 3, Size: 1}}, 1, 0},
	}
	for _, emptyCase := range emptyCases {
		h, n := blockDiff(emptyCase.a, emptyCase.b)
		if len(h) != emptyCase.have {
			t.Errorf("incorrect have: %d != %d", len(h), emptyCase.have)
		}
		if len(n) != emptyCase.need {
			t.Errorf("incorrect need: %d != %d", len(n), emptyCase.need)
		}
	}
}

// TestDeleteIgnorePerms checks that a file gets deleted when the IgnorePerms
// option is true and the permissions do not match between the file on disk and
// in the db.
func TestDeleteIgnorePerms(t *testing.T) {
	m, f, tmpDir := setupSendReceiveFolder()
	defer func() {
		os.Remove(m.cfg.ConfigPath())
		os.Remove(tmpDir)
	}()
	f.IgnorePerms = true
	ffs := f.Filesystem()
	name := "deleteIgnorePerms"
	file, err := ffs.Create(name)
	if err != nil {
		t.Error(err)
	}
	defer file.Close()

	stat, err := file.Stat()
	if err != nil {
		t.Fatal(err)
	}
	fi, err := scanner.CreateFileInfo(stat, name, ffs)
	if err != nil {
		t.Fatal(err)
	}
	ffs.Chmod(name, 0600)

	scanChan := make(chan string)
	finished := make(chan struct{})
	go func() {
		err = f.checkToBeDeleted(fi, scanChan)
		close(finished)
	}()
	select {
	case <-scanChan:
		<-finished
	case <-finished:
	}
	if err != nil {
		t.Fatal(err)
	}
}

func TestCopyOwner(t *testing.T) {
	// Verifies that owner and group are copied from the parent, for both
	// files and directories.

	if runtime.GOOS == "windows" {
		t.Skip("copying owner not supported on Windows")
	}

	const (
		expOwner = 1234
		expGroup = 5678
	)

	// Set up a folder with the CopyParentOwner bit and backed by a fake
	// filesystem.

	m, f, _ := setupSendReceiveFolder()
	defer os.Remove(m.cfg.ConfigPath())
	f.folder.FolderConfiguration = config.NewFolderConfiguration(m.id, f.ID, f.Label, fs.FilesystemTypeFake, "/TestCopyOwner")
	f.folder.FolderConfiguration.CopyOwnershipFromParent = true

	f.fs = f.Filesystem()

	// Create a parent dir with a certain owner/group.

	f.fs.Mkdir("foo", 0755)
	f.fs.Lchown("foo", expOwner, expGroup)

	dir := protocol.FileInfo{
		Name:        "foo/bar",
		Type:        protocol.FileInfoTypeDirectory,
		Permissions: 0755,
	}

	// Have the folder create a subdirectory, verify that it's the correct
	// owner/group.

	dbUpdateChan := make(chan dbUpdateJob, 1)
	defer close(dbUpdateChan)
	f.handleDir(dir, ignore.New(f.fs), dbUpdateChan, nil)
	<-dbUpdateChan // empty the channel for later

	info, err := f.fs.Lstat("foo/bar")
	if err != nil {
		t.Fatal("Unexpected error (dir):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected dir owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}

	// Have the folder create a file, verify it's the correct owner/group.
	// File is zero sized to avoid having to handle copies/pulls.

	file := protocol.FileInfo{
		Name:        "foo/bar/baz",
		Type:        protocol.FileInfoTypeFile,
		Permissions: 0644,
	}

	// Wire some stuff. The flow here is handleFile() -[copierChan]->
	// copierRoutine() -[finisherChan]-> finisherRoutine() -[dbUpdateChan]->
	// back to us and we're done. The copier routine doesn't do anything,
	// but it's the way data is passed around. When the database update
	// comes the finisher is done.

	finisherChan := make(chan *sharedPullerState)
	defer close(finisherChan)
	copierChan := make(chan copyBlocksState)
	defer close(copierChan)
	go f.copierRoutine(copierChan, nil, finisherChan)
	go f.finisherRoutine(nil, finisherChan, dbUpdateChan, nil)
	f.handleFile(file, copierChan, nil, nil)
	<-dbUpdateChan

	info, err = f.fs.Lstat("foo/bar/baz")
	if err != nil {
		t.Fatal("Unexpected error (file):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected file owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}

	// Have the folder create a symlink. Verify it accordingly.
	symlink := protocol.FileInfo{
		Name:          "foo/bar/sym",
		Type:          protocol.FileInfoTypeSymlink,
		Permissions:   0644,
		SymlinkTarget: "over the rainbow",
	}

	f.handleSymlink(symlink, ignore.New(f.fs), dbUpdateChan, nil)
	<-dbUpdateChan

	info, err = f.fs.Lstat("foo/bar/sym")
	if err != nil {
		t.Fatal("Unexpected error (symlink):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected symlink owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}
}

// TestSRConflictReplaceFileByDir checks that a conflict is created when an existing file
// is replaced with a directory and versions are conflicting
func TestSRConflictReplaceFileByDir(t *testing.T) {
	m, f, tmpDir := setupSendReceiveFolder()
	defer func() {
		os.Remove(m.cfg.ConfigPath())
		os.Remove(tmpDir)
	}()
	ffs := f.Filesystem()
	name := "foo"

	// create local file
	file := createFile(t, name, ffs)
	file.Version = protocol.Vector{}.Update(myID.Short())
	m.updateLocalsFromScanning(f.ID, []protocol.FileInfo{file})

	// Simulate remote creating a dir with the same name
	file.Type = protocol.FileInfoTypeDirectory
	rem := device1.Short()
	file.Version = protocol.Vector{}.Update(rem)
	file.ModifiedBy = rem

	dbUpdateChan := make(chan dbUpdateJob, 1)
	scanChan := make(chan string, 1)

	f.handleDir(file, ignore.New(f.fs), dbUpdateChan, scanChan)

	if confls := existingConflicts(name, ffs); len(confls) != 1 {
		t.Fatal("Expected one conflict, got", len(confls))
	} else if scan := <-scanChan; confls[0] != scan {
		t.Fatal("Expected request to scan", confls[0], "got", scan)
	}
}

// TestSRConflictReplaceFileByLink checks that a conflict is created when an existing file
// is replaced with a link and versions are conflicting
func TestSRConflictReplaceFileByLink(t *testing.T) {
	m, f, tmpDir := setupSendReceiveFolder()
	defer func() {
		os.Remove(m.cfg.ConfigPath())
		os.Remove(tmpDir)
	}()
	ffs := f.Filesystem()
	name := "foo"

	// create local file
	file := createFile(t, name, ffs)
	file.Version = protocol.Vector{}.Update(myID.Short())
	m.updateLocalsFromScanning(f.ID, []protocol.FileInfo{file})

	// Simulate remote creating a symlink with the same name
	file.Type = protocol.FileInfoTypeSymlink
	file.SymlinkTarget = "bar"
	rem := device1.Short()
	file.Version = protocol.Vector{}.Update(rem)
	file.ModifiedBy = rem

	dbUpdateChan := make(chan dbUpdateJob, 1)
	scanChan := make(chan string, 1)

	f.handleSymlink(file, ignore.New(f.fs), dbUpdateChan, scanChan)

	if confls := existingConflicts(name, ffs); len(confls) != 1 {
		t.Fatal("Expected one conflict, got", len(confls))
	} else if scan := <-scanChan; confls[0] != scan {
		t.Fatal("Expected request to scan", confls[0], "got", scan)
	}
}