folder_sendrecv_test.go 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038
  1. // Copyright (C) 2014 The Syncthing Authors.
  2. //
  3. // This Source Code Form is subject to the terms of the Mozilla Public
  4. // License, v. 2.0. If a copy of the MPL was not distributed with this file,
  5. // You can obtain one at https://mozilla.org/MPL/2.0/.
  6. package model
  7. import (
  8. "bytes"
  9. "context"
  10. "crypto/rand"
  11. "io"
  12. "io/ioutil"
  13. "os"
  14. "path/filepath"
  15. "runtime"
  16. "testing"
  17. "time"
  18. "github.com/syncthing/syncthing/lib/config"
  19. "github.com/syncthing/syncthing/lib/db"
  20. "github.com/syncthing/syncthing/lib/db/backend"
  21. "github.com/syncthing/syncthing/lib/events"
  22. "github.com/syncthing/syncthing/lib/fs"
  23. "github.com/syncthing/syncthing/lib/ignore"
  24. "github.com/syncthing/syncthing/lib/osutil"
  25. "github.com/syncthing/syncthing/lib/protocol"
  26. "github.com/syncthing/syncthing/lib/scanner"
  27. "github.com/syncthing/syncthing/lib/sync"
  28. )
// blocks is the block-hash fixture used throughout these tests. Entry 0 is
// the hash of an all-zero block; entries 1-8 describe consecutive
// 0x20000-byte (128 KiB) blocks. Tests refer to blocks by these indexes.
var blocks = []protocol.BlockInfo{
	{Hash: []uint8{0xfa, 0x43, 0x23, 0x9b, 0xce, 0xe7, 0xb9, 0x7c, 0xa6, 0x2f, 0x0, 0x7c, 0xc6, 0x84, 0x87, 0x56, 0xa, 0x39, 0xe1, 0x9f, 0x74, 0xf3, 0xdd, 0xe7, 0x48, 0x6d, 0xb3, 0xf9, 0x8d, 0xf8, 0xe4, 0x71}}, // Zero'ed out block
	{Offset: 0, Size: 0x20000, Hash: []uint8{0x7e, 0xad, 0xbc, 0x36, 0xae, 0xbb, 0xcf, 0x74, 0x43, 0xe2, 0x7a, 0x5a, 0x4b, 0xb8, 0x5b, 0xce, 0xe6, 0x9e, 0x1e, 0x10, 0xf9, 0x8a, 0xbc, 0x77, 0x95, 0x2, 0x29, 0x60, 0x9e, 0x96, 0xae, 0x6c}},
	{Offset: 131072, Size: 0x20000, Hash: []uint8{0x3c, 0xc4, 0x20, 0xf4, 0xb, 0x2e, 0xcb, 0xb9, 0x5d, 0xce, 0x34, 0xa8, 0xc3, 0x92, 0xea, 0xf3, 0xda, 0x88, 0x33, 0xee, 0x7a, 0xb6, 0xe, 0xf1, 0x82, 0x5e, 0xb0, 0xa9, 0x26, 0xa9, 0xc0, 0xef}},
	{Offset: 262144, Size: 0x20000, Hash: []uint8{0x76, 0xa8, 0xc, 0x69, 0xd7, 0x5c, 0x52, 0xfd, 0xdf, 0x55, 0xef, 0x44, 0xc1, 0xd6, 0x25, 0x48, 0x4d, 0x98, 0x48, 0x4d, 0xaa, 0x50, 0xf6, 0x6b, 0x32, 0x47, 0x55, 0x81, 0x6b, 0xed, 0xee, 0xfb}},
	{Offset: 393216, Size: 0x20000, Hash: []uint8{0x44, 0x1e, 0xa4, 0xf2, 0x8d, 0x1f, 0xc3, 0x1b, 0x9d, 0xa5, 0x18, 0x5e, 0x59, 0x1b, 0xd8, 0x5c, 0xba, 0x7d, 0xb9, 0x8d, 0x70, 0x11, 0x5c, 0xea, 0xa1, 0x57, 0x4d, 0xcb, 0x3c, 0x5b, 0xf8, 0x6c}},
	{Offset: 524288, Size: 0x20000, Hash: []uint8{0x8, 0x40, 0xd0, 0x5e, 0x80, 0x0, 0x0, 0x7c, 0x8b, 0xb3, 0x8b, 0xf7, 0x7b, 0x23, 0x26, 0x28, 0xab, 0xda, 0xcf, 0x86, 0x8f, 0xc2, 0x8a, 0x39, 0xc6, 0xe6, 0x69, 0x59, 0x97, 0xb6, 0x1a, 0x43}},
	{Offset: 655360, Size: 0x20000, Hash: []uint8{0x38, 0x8e, 0x44, 0xcb, 0x30, 0xd8, 0x90, 0xf, 0xce, 0x7, 0x4b, 0x58, 0x86, 0xde, 0xce, 0x59, 0xa2, 0x46, 0xd2, 0xf9, 0xba, 0xaf, 0x35, 0x87, 0x38, 0xdf, 0xd2, 0xd, 0xf9, 0x45, 0xed, 0x91}},
	{Offset: 786432, Size: 0x20000, Hash: []uint8{0x32, 0x28, 0xcd, 0xf, 0x37, 0x21, 0xe5, 0xd4, 0x1e, 0x58, 0x87, 0x73, 0x8e, 0x36, 0xdf, 0xb2, 0x70, 0x78, 0x56, 0xc3, 0x42, 0xff, 0xf7, 0x8f, 0x37, 0x95, 0x0, 0x26, 0xa, 0xac, 0x54, 0x72}},
	{Offset: 917504, Size: 0x20000, Hash: []uint8{0x96, 0x6b, 0x15, 0x6b, 0xc4, 0xf, 0x19, 0x18, 0xca, 0xbb, 0x5f, 0xd6, 0xbb, 0xa2, 0xc6, 0x2a, 0xac, 0xbb, 0x8a, 0xb9, 0xce, 0xec, 0x4c, 0xdb, 0x78, 0xec, 0x57, 0x5d, 0x33, 0xf9, 0x8e, 0xaf}},
}
// folders is the folder ID list passed to finder.Iterate in these tests.
var folders = []string{"default"}
// diffTestData drives TestDiff and BenchmarkDiff. For inputs a and b hashed
// at block size s, d is the expected set of blocks that must change to turn
// a into b.
var diffTestData = []struct {
	a string
	b string
	s int
	d []protocol.BlockInfo
}{
	{"contents", "contents", 1024, []protocol.BlockInfo{}},
	{"", "", 1024, []protocol.BlockInfo{}},
	{"contents", "contents", 3, []protocol.BlockInfo{}},
	{"contents", "cantents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}}},
	{"contents", "contants", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}}},
	{"contents", "cantants", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}}},
	{"contents", "", 3, []protocol.BlockInfo{{Offset: 0, Size: 0}}},
	{"", "contents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"con", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"contents", "con", 3, nil},
	{"contents", "cont", 3, []protocol.BlockInfo{{Offset: 3, Size: 1}}},
	{"cont", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
}
  60. func setupFile(filename string, blockNumbers []int) protocol.FileInfo {
  61. // Create existing file
  62. existingBlocks := make([]protocol.BlockInfo, len(blockNumbers))
  63. for i := range blockNumbers {
  64. existingBlocks[i] = blocks[blockNumbers[i]]
  65. }
  66. return protocol.FileInfo{
  67. Name: filename,
  68. Blocks: existingBlocks,
  69. }
  70. }
  71. func createFile(t *testing.T, name string, fs fs.Filesystem) protocol.FileInfo {
  72. t.Helper()
  73. f, err := fs.Create(name)
  74. must(t, err)
  75. f.Close()
  76. fi, err := fs.Stat(name)
  77. must(t, err)
  78. file, err := scanner.CreateFileInfo(fi, name, fs)
  79. must(t, err)
  80. return file
  81. }
// setupSendReceiveFolder creates a model backed by an in-memory database and
// a sendReceiveFolder wired up just enough for the tests in this file; the
// folder is never actually started. The given files, if any, are inserted
// into the index as if found by a scan. Callers must tear down with
// cleanupSRFolder.
func setupSendReceiveFolder(files ...protocol.FileInfo) (*model, *sendReceiveFolder) {
	w := createTmpWrapper(defaultCfg)
	model := newModel(w, myID, "syncthing", "dev", db.NewLowlevel(backend.OpenMemory()), nil)
	fcfg := testFolderConfigTmp()
	model.addFolder(fcfg)

	f := &sendReceiveFolder{
		folder: folder{
			stateTracker:        newStateTracker("default", model.evLogger),
			model:               model,
			fset:                model.folderFiles[fcfg.ID],
			initialScanFinished: make(chan struct{}),
			ctx:                 context.TODO(),
			FolderConfiguration: fcfg,
		},
		queue:         newJobQueue(),
		pullErrors:    make(map[string]string),
		pullErrorsMut: sync.NewMutex(),
	}
	// Wrap the folder filesystem so mtimes are tracked in the database,
	// matching what a real folder does.
	f.fs = fs.NewMtimeFS(f.Filesystem(), db.NewNamespacedKV(model.db, "mtime"))

	// Update index
	if files != nil {
		f.updateLocalsFromScanning(files)
	}

	// Folders are never actually started, so no initial scan will be done
	close(f.initialScanFinished)

	return model, f
}
// cleanupSRFolder tears down the state created by setupSendReceiveFolder:
// it stops the event logger and removes the temporary config file and the
// folder directory from disk. Removal errors are deliberately ignored —
// this is best-effort test teardown.
func cleanupSRFolder(f *sendReceiveFolder, m *model) {
	m.evLogger.Stop()
	os.Remove(m.cfg.ConfigPath())
	os.RemoveAll(f.Filesystem().URI())
}
  114. // Layout of the files: (indexes from the above array)
  115. // 12345678 - Required file
  116. // 02005008 - Existing file (currently in the index)
  117. // 02340070 - Temp file on the disk
  118. func TestHandleFile(t *testing.T) {
  119. // After the diff between required and existing we should:
  120. // Copy: 2, 5, 8
  121. // Pull: 1, 3, 4, 6, 7
  122. existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
  123. existingFile := setupFile("filex", existingBlocks)
  124. requiredFile := existingFile
  125. requiredFile.Blocks = blocks[1:]
  126. m, f := setupSendReceiveFolder(existingFile)
  127. defer cleanupSRFolder(f, m)
  128. copyChan := make(chan copyBlocksState, 1)
  129. dbUpdateChan := make(chan dbUpdateJob, 1)
  130. f.handleFile(requiredFile, copyChan, dbUpdateChan)
  131. // Receive the results
  132. toCopy := <-copyChan
  133. if len(toCopy.blocks) != 8 {
  134. t.Errorf("Unexpected count of copy blocks: %d != 8", len(toCopy.blocks))
  135. }
  136. for _, block := range blocks[1:] {
  137. found := false
  138. for _, toCopyBlock := range toCopy.blocks {
  139. if string(toCopyBlock.Hash) == string(block.Hash) {
  140. found = true
  141. break
  142. }
  143. }
  144. if !found {
  145. t.Errorf("Did not find block %s", block.String())
  146. }
  147. }
  148. }
  149. func TestHandleFileWithTemp(t *testing.T) {
  150. // After diff between required and existing we should:
  151. // Copy: 2, 5, 8
  152. // Pull: 1, 3, 4, 6, 7
  153. // After dropping out blocks already on the temp file we should:
  154. // Copy: 5, 8
  155. // Pull: 1, 6
  156. existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
  157. existingFile := setupFile("file", existingBlocks)
  158. requiredFile := existingFile
  159. requiredFile.Blocks = blocks[1:]
  160. m, f := setupSendReceiveFolder(existingFile)
  161. defer cleanupSRFolder(f, m)
  162. if _, err := prepareTmpFile(f.Filesystem()); err != nil {
  163. t.Fatal(err)
  164. }
  165. copyChan := make(chan copyBlocksState, 1)
  166. dbUpdateChan := make(chan dbUpdateJob, 1)
  167. f.handleFile(requiredFile, copyChan, dbUpdateChan)
  168. // Receive the results
  169. toCopy := <-copyChan
  170. if len(toCopy.blocks) != 4 {
  171. t.Errorf("Unexpected count of copy blocks: %d != 4", len(toCopy.blocks))
  172. }
  173. for _, idx := range []int{1, 5, 6, 8} {
  174. found := false
  175. block := blocks[idx]
  176. for _, toCopyBlock := range toCopy.blocks {
  177. if string(toCopyBlock.Hash) == string(block.Hash) {
  178. found = true
  179. break
  180. }
  181. }
  182. if !found {
  183. t.Errorf("Did not find block %s", block.String())
  184. }
  185. }
  186. }
  187. func TestCopierFinder(t *testing.T) {
  188. // After diff between required and existing we should:
  189. // Copy: 1, 2, 3, 4, 6, 7, 8
  190. // Since there is no existing file, nor a temp file
  191. // After dropping out blocks found locally:
  192. // Pull: 1, 5, 6, 8
  193. tempFile := fs.TempName("file2")
  194. existingBlocks := []int{0, 2, 3, 4, 0, 0, 7, 0}
  195. existingFile := setupFile(fs.TempName("file"), existingBlocks)
  196. requiredFile := existingFile
  197. requiredFile.Blocks = blocks[1:]
  198. requiredFile.Name = "file2"
  199. m, f := setupSendReceiveFolder(existingFile)
  200. defer cleanupSRFolder(f, m)
  201. if _, err := prepareTmpFile(f.Filesystem()); err != nil {
  202. t.Fatal(err)
  203. }
  204. copyChan := make(chan copyBlocksState)
  205. pullChan := make(chan pullBlockState, 4)
  206. finisherChan := make(chan *sharedPullerState, 1)
  207. dbUpdateChan := make(chan dbUpdateJob, 1)
  208. // Run a single fetcher routine
  209. go f.copierRoutine(copyChan, pullChan, finisherChan)
  210. defer close(copyChan)
  211. f.handleFile(requiredFile, copyChan, dbUpdateChan)
  212. timeout := time.After(10 * time.Second)
  213. pulls := make([]pullBlockState, 4)
  214. for i := 0; i < 4; i++ {
  215. select {
  216. case pulls[i] = <-pullChan:
  217. case <-timeout:
  218. t.Fatalf("Timed out before receiving all 4 states on pullChan (already got %v)", i)
  219. }
  220. }
  221. var finish *sharedPullerState
  222. select {
  223. case finish = <-finisherChan:
  224. case <-timeout:
  225. t.Fatal("Timed out before receiving 4 states on pullChan")
  226. }
  227. defer cleanupSharedPullerState(finish)
  228. select {
  229. case <-pullChan:
  230. t.Fatal("Pull channel has data to be read")
  231. case <-finisherChan:
  232. t.Fatal("Finisher channel has data to be read")
  233. default:
  234. }
  235. // Verify that the right blocks went into the pull list.
  236. // They are pulled in random order.
  237. for _, idx := range []int{1, 5, 6, 8} {
  238. found := false
  239. block := blocks[idx]
  240. for _, pulledBlock := range pulls {
  241. if string(pulledBlock.block.Hash) == string(block.Hash) {
  242. found = true
  243. break
  244. }
  245. }
  246. if !found {
  247. t.Errorf("Did not find block %s", block.String())
  248. }
  249. if string(finish.file.Blocks[idx-1].Hash) != string(blocks[idx].Hash) {
  250. t.Errorf("Block %d mismatch: %s != %s", idx, finish.file.Blocks[idx-1].String(), blocks[idx].String())
  251. }
  252. }
  253. // Verify that the fetched blocks have actually been written to the temp file
  254. blks, err := scanner.HashFile(context.TODO(), f.Filesystem(), tempFile, protocol.MinBlockSize, nil, false)
  255. if err != nil {
  256. t.Log(err)
  257. }
  258. for _, eq := range []int{2, 3, 4, 7} {
  259. if string(blks[eq-1].Hash) != string(blocks[eq].Hash) {
  260. t.Errorf("Block %d mismatch: %s != %s", eq, blks[eq-1].String(), blocks[eq].String())
  261. }
  262. }
  263. }
  264. func TestWeakHash(t *testing.T) {
  265. // Setup the model/pull environment
  266. model, fo := setupSendReceiveFolder()
  267. defer cleanupSRFolder(fo, model)
  268. ffs := fo.Filesystem()
  269. tempFile := fs.TempName("weakhash")
  270. var shift int64 = 10
  271. var size int64 = 1 << 20
  272. expectBlocks := int(size / protocol.MinBlockSize)
  273. expectPulls := int(shift / protocol.MinBlockSize)
  274. if shift > 0 {
  275. expectPulls++
  276. }
  277. f, err := ffs.Create("weakhash")
  278. must(t, err)
  279. defer f.Close()
  280. _, err = io.CopyN(f, rand.Reader, size)
  281. if err != nil {
  282. t.Error(err)
  283. }
  284. info, err := f.Stat()
  285. if err != nil {
  286. t.Error(err)
  287. }
  288. // Create two files, second file has `shifted` bytes random prefix, yet
  289. // both are of the same length, for example:
  290. // File 1: abcdefgh
  291. // File 2: xyabcdef
  292. f.Seek(0, os.SEEK_SET)
  293. existing, err := scanner.Blocks(context.TODO(), f, protocol.MinBlockSize, size, nil, true)
  294. if err != nil {
  295. t.Error(err)
  296. }
  297. f.Seek(0, os.SEEK_SET)
  298. remainder := io.LimitReader(f, size-shift)
  299. prefix := io.LimitReader(rand.Reader, shift)
  300. nf := io.MultiReader(prefix, remainder)
  301. desired, err := scanner.Blocks(context.TODO(), nf, protocol.MinBlockSize, size, nil, true)
  302. if err != nil {
  303. t.Error(err)
  304. }
  305. existingFile := protocol.FileInfo{
  306. Name: "weakhash",
  307. Blocks: existing,
  308. Size: size,
  309. ModifiedS: info.ModTime().Unix(),
  310. ModifiedNs: int32(info.ModTime().Nanosecond()),
  311. }
  312. desiredFile := protocol.FileInfo{
  313. Name: "weakhash",
  314. Size: size,
  315. Blocks: desired,
  316. ModifiedS: info.ModTime().Unix() + 1,
  317. }
  318. fo.updateLocalsFromScanning([]protocol.FileInfo{existingFile})
  319. copyChan := make(chan copyBlocksState)
  320. pullChan := make(chan pullBlockState, expectBlocks)
  321. finisherChan := make(chan *sharedPullerState, 1)
  322. dbUpdateChan := make(chan dbUpdateJob, 1)
  323. // Run a single fetcher routine
  324. go fo.copierRoutine(copyChan, pullChan, finisherChan)
  325. defer close(copyChan)
  326. // Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
  327. fo.WeakHashThresholdPct = 101
  328. fo.handleFile(desiredFile, copyChan, dbUpdateChan)
  329. var pulls []pullBlockState
  330. timeout := time.After(10 * time.Second)
  331. for len(pulls) < expectBlocks {
  332. select {
  333. case pull := <-pullChan:
  334. pulls = append(pulls, pull)
  335. case <-timeout:
  336. t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
  337. }
  338. }
  339. finish := <-finisherChan
  340. select {
  341. case <-pullChan:
  342. t.Fatal("Pull channel has data to be read")
  343. case <-finisherChan:
  344. t.Fatal("Finisher channel has data to be read")
  345. default:
  346. }
  347. cleanupSharedPullerState(finish)
  348. if err := ffs.Remove(tempFile); err != nil {
  349. t.Fatal(err)
  350. }
  351. // Test 2 - using weak hash, expectPulls blocks pulled.
  352. fo.WeakHashThresholdPct = -1
  353. fo.handleFile(desiredFile, copyChan, dbUpdateChan)
  354. pulls = pulls[:0]
  355. for len(pulls) < expectPulls {
  356. select {
  357. case pull := <-pullChan:
  358. pulls = append(pulls, pull)
  359. case <-time.After(10 * time.Second):
  360. t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
  361. }
  362. }
  363. finish = <-finisherChan
  364. cleanupSharedPullerState(finish)
  365. expectShifted := expectBlocks - expectPulls
  366. if finish.copyOriginShifted != expectShifted {
  367. t.Errorf("did not copy %d shifted", expectShifted)
  368. }
  369. }
  370. // Test that updating a file removes its old blocks from the blockmap
  371. func TestCopierCleanup(t *testing.T) {
  372. iterFn := func(folder, file string, index int32) bool {
  373. return true
  374. }
  375. // Create a file
  376. file := setupFile("test", []int{0})
  377. m, f := setupSendReceiveFolder(file)
  378. defer cleanupSRFolder(f, m)
  379. file.Blocks = []protocol.BlockInfo{blocks[1]}
  380. file.Version = file.Version.Update(myID.Short())
  381. // Update index (removing old blocks)
  382. f.updateLocalsFromScanning([]protocol.FileInfo{file})
  383. if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
  384. t.Error("Unexpected block found")
  385. }
  386. if !m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
  387. t.Error("Expected block not found")
  388. }
  389. file.Blocks = []protocol.BlockInfo{blocks[0]}
  390. file.Version = file.Version.Update(myID.Short())
  391. // Update index (removing old blocks)
  392. f.updateLocalsFromScanning([]protocol.FileInfo{file})
  393. if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
  394. t.Error("Unexpected block found")
  395. }
  396. if m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
  397. t.Error("Expected block not found")
  398. }
  399. }
// TestDeregisterOnFailInCopy checks that a file failed during the copy phase
// is fully deregistered: dropped from the progress emitter's registry and the
// job queue, with exactly one ItemFinished event emitted, and that feeding the
// same failed state to the finisher a second time has no further effect.
func TestDeregisterOnFailInCopy(t *testing.T) {
	file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})

	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)

	// Set up our event subscription early
	s := m.evLogger.Subscribe(events.ItemFinished)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()
	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	copyChan, copyWg := startCopier(f, pullChan, finisherBufferChan)
	go f.finisherRoutine(finisherChan, dbUpdateChan, make(chan string))
	defer func() {
		close(copyChan)
		copyWg.Wait()
		close(pullChan)
		close(finisherBufferChan)
		close(finisherChan)
	}()

	f.handleFile(file, copyChan, dbUpdateChan)

	// Receive a block at puller, to indicate that at least a single copier
	// loop has been performed.
	toPull := <-pullChan

	// Unblock copier: drain any further pull requests so the copier never
	// blocks sending on the unbuffered pullChan.
	go func() {
		for range pullChan {
		}
	}()

	// Close the file, causing errors on further access
	toPull.sharedPullerState.fail(os.ErrNotExist)

	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state

		t0 := time.Now()
		if ev, err := s.Poll(time.Minute); err != nil {
			t.Fatal("Got error waiting for ItemFinished event:", err)
		} else if n := ev.Data.(map[string]interface{})["item"]; n != state.file.Name {
			t.Fatal("Got ItemFinished event for wrong file:", n)
		}
		t.Log("event took", time.Since(t0))

		// A nil writer indicates the finisher closed the temp file.
		state.mut.Lock()
		stateWriter := state.writer
		state.mut.Unlock()
		if stateWriter != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state

		if _, err := s.Poll(time.Second); err != events.ErrTimeout {
			t.Fatal("Expected timeout, not another event", err)
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(5 * time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
// TestDeregisterOnFailInPull checks that a file failed during the pull phase
// (the puller has no peers to pull from) is fully deregistered: dropped from
// the progress emitter's registry and the job queue, with exactly one
// ItemFinished event emitted, and a repeated finish is a no-op.
func TestDeregisterOnFailInPull(t *testing.T) {
	file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})

	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)

	// Set up our event subscription early
	s := m.evLogger.Subscribe(events.ItemFinished)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()
	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	copyChan, copyWg := startCopier(f, pullChan, finisherBufferChan)
	pullWg := sync.NewWaitGroup()
	pullWg.Add(1)
	go func() {
		f.pullerRoutine(pullChan, finisherBufferChan)
		pullWg.Done()
	}()
	go f.finisherRoutine(finisherChan, dbUpdateChan, make(chan string))
	defer func() {
		// Unblock copier and puller
		go func() {
			for range finisherBufferChan {
			}
		}()
		close(copyChan)
		copyWg.Wait()
		close(pullChan)
		pullWg.Wait()
		close(finisherBufferChan)
		close(finisherChan)
	}()

	f.handleFile(file, copyChan, dbUpdateChan)

	// Receive at finisher, we should error out as puller has nowhere to pull
	// from.
	// NOTE(review): this assigns a package-level variable declared elsewhere
	// in the package; the waits below use their own timers, so it looks
	// unused here — confirm it is still needed.
	timeout = time.Second

	// Both the puller and copier may send to the finisherBufferChan.
	var state *sharedPullerState
	after := time.After(5 * time.Second)
	for {
		select {
		case state = <-finisherBufferChan:
		case <-after:
			t.Fatal("Didn't get failed state to the finisher")
		}
		if state.failed() != nil {
			break
		}
	}

	// At this point the file should still be registered with both the job
	// queue, and the progress emitter. Verify this.
	if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
		t.Fatal("Could not find file")
	}

	// Pass the file down the real finisher, and give it time to consume
	finisherChan <- state

	t0 := time.Now()
	if ev, err := s.Poll(time.Minute); err != nil {
		t.Fatal("Got error waiting for ItemFinished event:", err)
	} else if n := ev.Data.(map[string]interface{})["item"]; n != state.file.Name {
		t.Fatal("Got ItemFinished event for wrong file:", n)
	}
	t.Log("event took", time.Since(t0))

	// A nil writer indicates the finisher closed the temp file.
	state.mut.Lock()
	stateWriter := state.writer
	state.mut.Unlock()
	if stateWriter != nil {
		t.Fatal("File not closed?")
	}

	if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
		t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
	}

	// Doing it again should have no effect
	finisherChan <- state

	if _, err := s.Poll(time.Second); err != events.ErrTimeout {
		t.Fatal("Expected timeout, not another event", err)
	}

	if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
		t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
	}
}
  559. func TestIssue3164(t *testing.T) {
  560. m, f := setupSendReceiveFolder()
  561. defer cleanupSRFolder(f, m)
  562. ffs := f.Filesystem()
  563. tmpDir := ffs.URI()
  564. ignDir := filepath.Join("issue3164", "oktodelete")
  565. subDir := filepath.Join(ignDir, "foobar")
  566. must(t, ffs.MkdirAll(subDir, 0777))
  567. must(t, ioutil.WriteFile(filepath.Join(tmpDir, subDir, "file"), []byte("Hello"), 0644))
  568. must(t, ioutil.WriteFile(filepath.Join(tmpDir, ignDir, "file"), []byte("Hello"), 0644))
  569. file := protocol.FileInfo{
  570. Name: "issue3164",
  571. }
  572. matcher := ignore.New(ffs)
  573. must(t, matcher.Parse(bytes.NewBufferString("(?d)oktodelete"), ""))
  574. f.ignores = matcher
  575. dbUpdateChan := make(chan dbUpdateJob, 1)
  576. f.deleteDir(file, dbUpdateChan, make(chan string))
  577. if _, err := ffs.Stat("issue3164"); !fs.IsNotExist(err) {
  578. t.Fatal(err)
  579. }
  580. }
  581. func TestDiff(t *testing.T) {
  582. for i, test := range diffTestData {
  583. a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
  584. b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
  585. _, d := blockDiff(a, b)
  586. if len(d) != len(test.d) {
  587. t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
  588. } else {
  589. for j := range test.d {
  590. if d[j].Offset != test.d[j].Offset {
  591. t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
  592. }
  593. if d[j].Size != test.d[j].Size {
  594. t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
  595. }
  596. }
  597. }
  598. }
  599. }
  600. func BenchmarkDiff(b *testing.B) {
  601. testCases := make([]struct{ a, b []protocol.BlockInfo }, 0, len(diffTestData))
  602. for _, test := range diffTestData {
  603. a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
  604. b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
  605. testCases = append(testCases, struct{ a, b []protocol.BlockInfo }{a, b})
  606. }
  607. b.ReportAllocs()
  608. b.ResetTimer()
  609. for i := 0; i < b.N; i++ {
  610. for _, tc := range testCases {
  611. blockDiff(tc.a, tc.b)
  612. }
  613. }
  614. }
  615. func TestDiffEmpty(t *testing.T) {
  616. emptyCases := []struct {
  617. a []protocol.BlockInfo
  618. b []protocol.BlockInfo
  619. need int
  620. have int
  621. }{
  622. {nil, nil, 0, 0},
  623. {[]protocol.BlockInfo{{Offset: 3, Size: 1}}, nil, 0, 0},
  624. {nil, []protocol.BlockInfo{{Offset: 3, Size: 1}}, 1, 0},
  625. }
  626. for _, emptyCase := range emptyCases {
  627. h, n := blockDiff(emptyCase.a, emptyCase.b)
  628. if len(h) != emptyCase.have {
  629. t.Errorf("incorrect have: %d != %d", len(h), emptyCase.have)
  630. }
  631. if len(n) != emptyCase.need {
  632. t.Errorf("incorrect have: %d != %d", len(h), emptyCase.have)
  633. }
  634. }
  635. }
  636. // TestDeleteIgnorePerms checks, that a file gets deleted when the IgnorePerms
  637. // option is true and the permissions do not match between the file on disk and
  638. // in the db.
  639. func TestDeleteIgnorePerms(t *testing.T) {
  640. m, f := setupSendReceiveFolder()
  641. defer cleanupSRFolder(f, m)
  642. ffs := f.Filesystem()
  643. f.IgnorePerms = true
  644. name := "deleteIgnorePerms"
  645. file, err := ffs.Create(name)
  646. if err != nil {
  647. t.Error(err)
  648. }
  649. defer file.Close()
  650. stat, err := file.Stat()
  651. must(t, err)
  652. fi, err := scanner.CreateFileInfo(stat, name, ffs)
  653. must(t, err)
  654. ffs.Chmod(name, 0600)
  655. scanChan := make(chan string)
  656. finished := make(chan struct{})
  657. go func() {
  658. err = f.checkToBeDeleted(fi, scanChan)
  659. close(finished)
  660. }()
  661. select {
  662. case <-scanChan:
  663. <-finished
  664. case <-finished:
  665. }
  666. must(t, err)
  667. }
// TestCopyOwner verifies that, with CopyOwnershipFromParent set, the folder
// applies the parent directory's owner/group to newly created directories,
// files and symlinks.
func TestCopyOwner(t *testing.T) {
	// Verifies that owner and group are copied from the parent, for both
	// files and directories.
	if runtime.GOOS == "windows" {
		t.Skip("copying owner not supported on Windows")
	}

	// Arbitrary owner/group IDs set on the parent; children must inherit them.
	const (
		expOwner = 1234
		expGroup = 5678
	)

	// Set up a folder with the CopyParentOwner bit and backed by a fake
	// filesystem.
	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	f.folder.FolderConfiguration = config.NewFolderConfiguration(m.id, f.ID, f.Label, fs.FilesystemTypeFake, "/TestCopyOwner")
	f.folder.FolderConfiguration.CopyOwnershipFromParent = true
	f.fs = f.Filesystem()

	// Create a parent dir with a certain owner/group.
	// NOTE(review): errors ignored here — presumably safe on the fake
	// filesystem, but worth confirming.
	f.fs.Mkdir("foo", 0755)
	f.fs.Lchown("foo", expOwner, expGroup)

	dir := protocol.FileInfo{
		Name:        "foo/bar",
		Type:        protocol.FileInfoTypeDirectory,
		Permissions: 0755,
	}

	// Have the folder create a subdirectory, verify that it's the correct
	// owner/group.
	dbUpdateChan := make(chan dbUpdateJob, 1)
	defer close(dbUpdateChan)
	f.handleDir(dir, dbUpdateChan, nil)
	<-dbUpdateChan // empty the channel for later

	info, err := f.fs.Lstat("foo/bar")
	if err != nil {
		t.Fatal("Unexpected error (dir):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected dir owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}

	// Have the folder create a file, verify it's the correct owner/group.
	// File is zero sized to avoid having to handle copies/pulls.
	file := protocol.FileInfo{
		Name:        "foo/bar/baz",
		Type:        protocol.FileInfoTypeFile,
		Permissions: 0644,
	}

	// Wire some stuff. The flow here is handleFile() -[copierChan]->
	// copierRoutine() -[finisherChan]-> finisherRoutine() -[dbUpdateChan]->
	// back to us and we're done. The copier routine doesn't do anything,
	// but it's the way data is passed around. When the database update
	// comes the finisher is done.
	finisherChan := make(chan *sharedPullerState)
	copierChan, copyWg := startCopier(f, nil, finisherChan)
	go f.finisherRoutine(finisherChan, dbUpdateChan, nil)
	defer func() {
		// Tear down in pipeline order: stop the copier, wait for it to
		// exit, then stop the finisher.
		close(copierChan)
		copyWg.Wait()
		close(finisherChan)
	}()

	f.handleFile(file, copierChan, nil)
	<-dbUpdateChan

	info, err = f.fs.Lstat("foo/bar/baz")
	if err != nil {
		t.Fatal("Unexpected error (file):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected file owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}

	// Have the folder create a symlink. Verify it accordingly.
	symlink := protocol.FileInfo{
		Name:          "foo/bar/sym",
		Type:          protocol.FileInfoTypeSymlink,
		Permissions:   0644,
		SymlinkTarget: "over the rainbow",
	}
	f.handleSymlink(symlink, dbUpdateChan, nil)
	<-dbUpdateChan

	info, err = f.fs.Lstat("foo/bar/sym")
	if err != nil {
		t.Fatal("Unexpected error (file):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected symlink owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}
}
  752. // TestSRConflictReplaceFileByDir checks that a conflict is created when an existing file
  753. // is replaced with a directory and versions are conflicting
  754. func TestSRConflictReplaceFileByDir(t *testing.T) {
  755. m, f := setupSendReceiveFolder()
  756. defer cleanupSRFolder(f, m)
  757. ffs := f.Filesystem()
  758. name := "foo"
  759. // create local file
  760. file := createFile(t, name, ffs)
  761. file.Version = protocol.Vector{}.Update(myID.Short())
  762. f.updateLocalsFromScanning([]protocol.FileInfo{file})
  763. // Simulate remote creating a dir with the same name
  764. file.Type = protocol.FileInfoTypeDirectory
  765. rem := device1.Short()
  766. file.Version = protocol.Vector{}.Update(rem)
  767. file.ModifiedBy = rem
  768. dbUpdateChan := make(chan dbUpdateJob, 1)
  769. scanChan := make(chan string, 1)
  770. f.handleDir(file, dbUpdateChan, scanChan)
  771. if confls := existingConflicts(name, ffs); len(confls) != 1 {
  772. t.Fatal("Expected one conflict, got", len(confls))
  773. } else if scan := <-scanChan; confls[0] != scan {
  774. t.Fatal("Expected request to scan", confls[0], "got", scan)
  775. }
  776. }
  777. // TestSRConflictReplaceFileByLink checks that a conflict is created when an existing file
  778. // is replaced with a link and versions are conflicting
  779. func TestSRConflictReplaceFileByLink(t *testing.T) {
  780. m, f := setupSendReceiveFolder()
  781. defer cleanupSRFolder(f, m)
  782. ffs := f.Filesystem()
  783. name := "foo"
  784. // create local file
  785. file := createFile(t, name, ffs)
  786. file.Version = protocol.Vector{}.Update(myID.Short())
  787. f.updateLocalsFromScanning([]protocol.FileInfo{file})
  788. // Simulate remote creating a symlink with the same name
  789. file.Type = protocol.FileInfoTypeSymlink
  790. file.SymlinkTarget = "bar"
  791. rem := device1.Short()
  792. file.Version = protocol.Vector{}.Update(rem)
  793. file.ModifiedBy = rem
  794. dbUpdateChan := make(chan dbUpdateJob, 1)
  795. scanChan := make(chan string, 1)
  796. f.handleSymlink(file, dbUpdateChan, scanChan)
  797. if confls := existingConflicts(name, ffs); len(confls) != 1 {
  798. t.Fatal("Expected one conflict, got", len(confls))
  799. } else if scan := <-scanChan; confls[0] != scan {
  800. t.Fatal("Expected request to scan", confls[0], "got", scan)
  801. }
  802. }
  803. // TestDeleteBehindSymlink checks that we don't delete or schedule a scan
  804. // when trying to delete a file behind a symlink.
  805. func TestDeleteBehindSymlink(t *testing.T) {
  806. m, f := setupSendReceiveFolder()
  807. defer cleanupSRFolder(f, m)
  808. ffs := f.Filesystem()
  809. destDir := createTmpDir()
  810. defer os.RemoveAll(destDir)
  811. destFs := fs.NewFilesystem(fs.FilesystemTypeBasic, destDir)
  812. link := "link"
  813. file := filepath.Join(link, "file")
  814. must(t, ffs.MkdirAll(link, 0755))
  815. fi := createFile(t, file, ffs)
  816. f.updateLocalsFromScanning([]protocol.FileInfo{fi})
  817. must(t, osutil.RenameOrCopy(ffs, destFs, file, "file"))
  818. must(t, ffs.RemoveAll(link))
  819. if err := osutil.DebugSymlinkForTestsOnly(destFs.URI(), filepath.Join(ffs.URI(), link)); err != nil {
  820. if runtime.GOOS == "windows" {
  821. // Probably we require permissions we don't have.
  822. t.Skip("Need admin permissions or developer mode to run symlink test on Windows: " + err.Error())
  823. } else {
  824. t.Fatal(err)
  825. }
  826. }
  827. fi.Deleted = true
  828. fi.Version = fi.Version.Update(device1.Short())
  829. scanChan := make(chan string, 1)
  830. dbUpdateChan := make(chan dbUpdateJob, 1)
  831. f.deleteFile(fi, dbUpdateChan, scanChan)
  832. select {
  833. case f := <-scanChan:
  834. t.Fatalf("Received %v on scanChan", f)
  835. case u := <-dbUpdateChan:
  836. if u.jobType != dbUpdateDeleteFile {
  837. t.Errorf("Expected jobType %v, got %v", dbUpdateDeleteFile, u.jobType)
  838. }
  839. if u.file.Name != fi.Name {
  840. t.Errorf("Expected update for %v, got %v", fi.Name, u.file.Name)
  841. }
  842. default:
  843. t.Fatalf("No db update received")
  844. }
  845. if _, err := destFs.Stat("file"); err != nil {
  846. t.Errorf("Expected no error when stating file behind symlink, got %v", err)
  847. }
  848. }
  849. func cleanupSharedPullerState(s *sharedPullerState) {
  850. s.mut.Lock()
  851. defer s.mut.Unlock()
  852. if s.writer == nil {
  853. return
  854. }
  855. s.writer.mut.Lock()
  856. s.writer.fd.Close()
  857. s.writer.mut.Unlock()
  858. }
  859. func startCopier(f *sendReceiveFolder, pullChan chan<- pullBlockState, finisherChan chan<- *sharedPullerState) (chan copyBlocksState, sync.WaitGroup) {
  860. copyChan := make(chan copyBlocksState)
  861. wg := sync.NewWaitGroup()
  862. wg.Add(1)
  863. go func() {
  864. f.copierRoutine(copyChan, pullChan, finisherChan)
  865. wg.Done()
  866. }()
  867. return copyChan, wg
  868. }