model.go

// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

package model

import (
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"path/filepath"
	"strconv"
	"sync"
	"time"

	"github.com/calmh/syncthing/config"
	"github.com/calmh/syncthing/events"
	"github.com/calmh/syncthing/files"
	"github.com/calmh/syncthing/lamport"
	"github.com/calmh/syncthing/protocol"
	"github.com/calmh/syncthing/scanner"
	"github.com/syndtr/goleveldb/leveldb"
)

type repoState int

const (
	RepoIdle repoState = iota
	RepoScanning
	RepoSyncing
	RepoCleaning
)

func (s repoState) String() string {
	switch s {
	case RepoIdle:
		return "idle"
	case RepoScanning:
		return "scanning"
	case RepoCleaning:
		return "cleaning"
	case RepoSyncing:
		return "syncing"
	default:
		return "unknown"
	}
}

// Somewhat arbitrary amount of bytes that we choose to let represent the size
// of an unsynchronized directory entry or a deleted file. We need it to be
// larger than zero so that it's visible that there is some amount of bytes to
// transfer to bring the systems into synchronization.
const zeroEntrySize = 128

// How many files to send in each Index/IndexUpdate message.
const indexBatchSize = 1000

type Model struct {
	indexDir string
	cfg      *config.Configuration
	db       *leveldb.DB

	clientName    string
	clientVersion string

	repoCfgs   map[string]config.RepositoryConfiguration // repo -> cfg
	repoFiles  map[string]*files.Set                     // repo -> files
	repoNodes  map[string][]protocol.NodeID              // repo -> nodeIDs
	nodeRepos  map[protocol.NodeID][]string              // nodeID -> repos
	suppressor map[string]*suppressor                    // repo -> suppressor
	rmut       sync.RWMutex                              // protects the above

	repoState        map[string]repoState // repo -> state
	repoStateChanged map[string]time.Time // repo -> time when state changed
	smut             sync.RWMutex

	protoConn map[protocol.NodeID]protocol.Connection
	rawConn   map[protocol.NodeID]io.Closer
	nodeVer   map[protocol.NodeID]string
	pmut      sync.RWMutex // protects protoConn and rawConn

	sentLocalVer map[protocol.NodeID]map[string]uint64
	slMut        sync.Mutex

	sup suppressor

	addedRepo bool
	started   bool
}

var (
	ErrNoSuchFile = errors.New("no such file")
	ErrInvalid    = errors.New("file is invalid")
)

// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local repository in any way.
func NewModel(indexDir string, cfg *config.Configuration, clientName, clientVersion string, db *leveldb.DB) *Model {
	m := &Model{
		indexDir:         indexDir,
		cfg:              cfg,
		db:               db,
		clientName:       clientName,
		clientVersion:    clientVersion,
		repoCfgs:         make(map[string]config.RepositoryConfiguration),
		repoFiles:        make(map[string]*files.Set),
		repoNodes:        make(map[string][]protocol.NodeID),
		nodeRepos:        make(map[protocol.NodeID][]string),
		repoState:        make(map[string]repoState),
		repoStateChanged: make(map[string]time.Time),
		suppressor:       make(map[string]*suppressor),
		protoConn:        make(map[protocol.NodeID]protocol.Connection),
		rawConn:          make(map[protocol.NodeID]io.Closer),
		nodeVer:          make(map[protocol.NodeID]string),
		sentLocalVer:     make(map[protocol.NodeID]map[string]uint64),
		sup:              suppressor{threshold: int64(cfg.Options.MaxChangeKbps)},
	}

	var timeout = 20 * 60 // seconds
	if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
		it, err := strconv.Atoi(t)
		if err == nil {
			timeout = it
		}
	}
	deadlockDetect(&m.rmut, time.Duration(timeout)*time.Second)
	deadlockDetect(&m.smut, time.Duration(timeout)*time.Second)
	deadlockDetect(&m.pmut, time.Duration(timeout)*time.Second)

	return m
}
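
// Usage sketch (not part of the original file): wiring up a Model at program
// startup. The confDir and Version names are hypothetical; cfg, db and the
// Repositories field are assumed to come from the usual config.Load and
// leveldb.OpenFile plumbing elsewhere in the program.
//
//	db, _ := leveldb.OpenFile(filepath.Join(confDir, "index"), nil)
//	m := NewModel(confDir, cfg, "syncthing", Version, db)
//	for _, repoCfg := range cfg.Repositories {
//		m.AddRepo(repoCfg)
//	}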
// StartRepoRW starts read/write processing on the current model. When in
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer nodes.
func (m *Model) StartRepoRW(repo string, threads int) {
	m.rmut.RLock()
	defer m.rmut.RUnlock()

	if cfg, ok := m.repoCfgs[repo]; !ok {
		panic("cannot start without repo")
	} else {
		newPuller(cfg, m, threads, m.cfg)
	}
}

// StartRepoRO starts read only processing on the current model. When in
// read only mode the model will announce files to the cluster but not
// pull in any external changes.
func (m *Model) StartRepoRO(repo string) {
	m.StartRepoRW(repo, 0) // zero threads => read only
}
type ConnectionInfo struct {
	protocol.Statistics
	Address       string
	ClientVersion string
	Completion    int
}

// ConnectionStats returns a map with connection statistics for each connected node.
func (m *Model) ConnectionStats() map[string]ConnectionInfo {
	type remoteAddrer interface {
		RemoteAddr() net.Addr
	}

	m.pmut.RLock()
	m.rmut.RLock()

	var res = make(map[string]ConnectionInfo)
	for node, conn := range m.protoConn {
		ci := ConnectionInfo{
			Statistics:    conn.Statistics(),
			ClientVersion: m.nodeVer[node],
		}
		if nc, ok := m.rawConn[node].(remoteAddrer); ok {
			ci.Address = nc.RemoteAddr().String()
		}

		var tot int64
		var have int64

		for _, repo := range m.nodeRepos[node] {
			m.repoFiles[repo].WithGlobal(func(f protocol.FileInfo) bool {
				if !protocol.IsDeleted(f.Flags) {
					var size int64
					if protocol.IsDirectory(f.Flags) {
						size = zeroEntrySize
					} else {
						size = f.Size()
					}
					tot += size
					have += size
				}
				return true
			})

			m.repoFiles[repo].WithNeed(node, func(f protocol.FileInfo) bool {
				if !protocol.IsDeleted(f.Flags) {
					var size int64
					if protocol.IsDirectory(f.Flags) {
						size = zeroEntrySize
					} else {
						size = f.Size()
					}
					have -= size
				}
				return true
			})
		}

		ci.Completion = 100
		if tot != 0 {
			ci.Completion = int(100 * have / tot)
		}

		res[node.String()] = ci
	}

	m.rmut.RUnlock()
	m.pmut.RUnlock()

	in, out := protocol.TotalInOut()
	res["total"] = ConnectionInfo{
		Statistics: protocol.Statistics{
			At:            time.Now(),
			InBytesTotal:  in,
			OutBytesTotal: out,
		},
	}

	return res
}
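
// Sketch (assumed caller, e.g. a status report elsewhere in the program):
// logging per-node completion from ConnectionStats, skipping the synthetic
// "total" entry.
//
//	for id, ci := range m.ConnectionStats() {
//		if id == "total" {
//			continue
//		}
//		l.Infof("%s at %s: %d%% in sync", id, ci.Address, ci.Completion)
//	}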
func sizeOf(fs []protocol.FileInfo) (files, deleted int, bytes int64) {
	for _, f := range fs {
		fs, de, by := sizeOfFile(f)
		files += fs
		deleted += de
		bytes += by
	}
	return
}

func sizeOfFile(f protocol.FileInfo) (files, deleted int, bytes int64) {
	if !protocol.IsDeleted(f.Flags) {
		files++
		if !protocol.IsDirectory(f.Flags) {
			bytes += f.Size()
		} else {
			bytes += zeroEntrySize
		}
	} else {
		deleted++
		bytes += zeroEntrySize
	}
	return
}

// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
func (m *Model) GlobalSize(repo string) (files, deleted int, bytes int64) {
	m.rmut.RLock()
	defer m.rmut.RUnlock()
	if rf, ok := m.repoFiles[repo]; ok {
		rf.WithGlobal(func(f protocol.FileInfo) bool {
			fs, de, by := sizeOfFile(f)
			files += fs
			deleted += de
			bytes += by
			return true
		})
	}
	return
}

// LocalSize returns the number of files, deleted files and total bytes for all
// files in the local repository.
func (m *Model) LocalSize(repo string) (files, deleted int, bytes int64) {
	m.rmut.RLock()
	defer m.rmut.RUnlock()
	if rf, ok := m.repoFiles[repo]; ok {
		rf.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
			fs, de, by := sizeOfFile(f)
			files += fs
			deleted += de
			bytes += by
			return true
		})
	}
	return
}

// NeedSize returns the number and total size of currently needed files.
func (m *Model) NeedSize(repo string) (files int, bytes int64) {
	m.rmut.RLock()
	defer m.rmut.RUnlock()
	if rf, ok := m.repoFiles[repo]; ok {
		rf.WithNeed(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
			fs, de, by := sizeOfFile(f)
			files += fs + de
			bytes += by
			return true
		})
	}
	return
}
// NeedFilesRepo returns the list of currently needed files, capped at
// indexBatchSize entries.
func (m *Model) NeedFilesRepo(repo string) []protocol.FileInfo {
	m.rmut.RLock()
	defer m.rmut.RUnlock()
	if rf, ok := m.repoFiles[repo]; ok {
		fs := make([]protocol.FileInfo, 0, indexBatchSize)
		rf.WithNeed(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
			fs = append(fs, f)
			return len(fs) < indexBatchSize
		})
		return fs
	}
	return nil
}
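
// Sketch (assumed caller): deriving a local completion percentage for a
// repository from the size helpers above; "default" is an illustrative repo ID.
//
//	_, _, globBytes := m.GlobalSize("default")
//	_, needBytes := m.NeedSize("default")
//	pct := 100.0
//	if globBytes > 0 {
//		pct = 100 * float64(globBytes-needBytes) / float64(globBytes)
//	}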
// Index is called when a new node is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(nodeID protocol.NodeID, repo string, fs []protocol.FileInfo) {
	if debug {
		l.Debugf("IDX(in): %s %q: %d files", nodeID, repo, len(fs))
	}

	if !m.repoSharedWith(repo, nodeID) {
		l.Warnf("Unexpected repository ID %q sent from node %q; ensure that the repository exists and that this node is selected under \"Share With\" in the repository configuration.", repo, nodeID)
		return
	}

	for i := range fs {
		lamport.Default.Tick(fs[i].Version)
	}

	m.rmut.RLock()
	r, ok := m.repoFiles[repo]
	m.rmut.RUnlock()
	if ok {
		r.Replace(nodeID, fs)
	} else {
		l.Fatalf("Index for nonexistent repo %q", repo)
	}

	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"node":    nodeID.String(),
		"repo":    repo,
		"items":   len(fs),
		"version": r.LocalVersion(nodeID),
	})
}

// IndexUpdate is called for incremental updates to connected nodes' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(nodeID protocol.NodeID, repo string, fs []protocol.FileInfo) {
	if debug {
		l.Debugf("IDXUP(in): %s / %q: %d files", nodeID, repo, len(fs))
	}

	if !m.repoSharedWith(repo, nodeID) {
		l.Warnf("Unexpected repository ID %q sent from node %q; ensure that the repository exists and that this node is selected under \"Share With\" in the repository configuration.", repo, nodeID)
		return
	}

	for i := range fs {
		lamport.Default.Tick(fs[i].Version)
	}

	m.rmut.RLock()
	r, ok := m.repoFiles[repo]
	m.rmut.RUnlock()
	if ok {
		r.Update(nodeID, fs)
	} else {
		l.Fatalf("IndexUpdate for nonexistent repo %q", repo)
	}

	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"node":    nodeID.String(),
		"repo":    repo,
		"items":   len(fs),
		"version": r.LocalVersion(nodeID),
	})
}
func (m *Model) repoSharedWith(repo string, nodeID protocol.NodeID) bool {
	m.rmut.RLock()
	defer m.rmut.RUnlock()
	for _, nrepo := range m.nodeRepos[nodeID] {
		if nrepo == repo {
			return true
		}
	}
	return false
}

func (m *Model) ClusterConfig(nodeID protocol.NodeID, config protocol.ClusterConfigMessage) {
	compErr := compareClusterConfig(m.clusterConfig(nodeID), config)
	if debug {
		l.Debugf("ClusterConfig: %s: %#v", nodeID, config)
		l.Debugf(" ... compare: %s: %v", nodeID, compErr)
	}
	if compErr != nil {
		l.Warnf("%s: %v", nodeID, compErr)
		m.Close(nodeID, compErr)
	}

	m.pmut.Lock()
	if config.ClientName == "syncthing" {
		m.nodeVer[nodeID] = config.ClientVersion
	} else {
		m.nodeVer[nodeID] = config.ClientName + " " + config.ClientVersion
	}
	m.pmut.Unlock()

	l.Infof(`Node %s client is "%s %s"`, nodeID, config.ClientName, config.ClientVersion)
}

// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(node protocol.NodeID, err error) {
	l.Infof("Connection to %s closed: %v", node, err)
	events.Default.Log(events.NodeDisconnected, map[string]string{
		"id":    node.String(),
		"error": err.Error(),
	})

	m.pmut.Lock()
	m.rmut.RLock()
	for _, repo := range m.nodeRepos[node] {
		m.repoFiles[repo].Replace(node, nil)
	}
	m.rmut.RUnlock()

	conn, ok := m.rawConn[node]
	if ok {
		conn.Close()
	}
	delete(m.protoConn, node)
	delete(m.rawConn, node)
	delete(m.nodeVer, node)
	m.pmut.Unlock()
}
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(nodeID protocol.NodeID, repo, name string, offset int64, size int) ([]byte, error) {
	// Verify that the requested file exists in the local model.
	m.rmut.RLock()
	r, ok := m.repoFiles[repo]
	m.rmut.RUnlock()

	if !ok {
		l.Warnf("Request from %s for file %s in nonexistent repo %q", nodeID, name, repo)
		return nil, ErrNoSuchFile
	}

	lf := r.Get(protocol.LocalNodeID, name)
	if protocol.IsInvalid(lf.Flags) || protocol.IsDeleted(lf.Flags) {
		if debug {
			l.Debugf("REQ(in): %s: %q / %q o=%d s=%d; invalid: %v", nodeID, repo, name, offset, size, lf)
		}
		return nil, ErrInvalid
	}
	if offset > lf.Size() {
		if debug {
			l.Debugf("REQ(in; nonexistent): %s: %q o=%d s=%d", nodeID, name, offset, size)
		}
		return nil, ErrNoSuchFile
	}

	if debug && nodeID != protocol.LocalNodeID {
		l.Debugf("REQ(in): %s: %q / %q o=%d s=%d", nodeID, repo, name, offset, size)
	}

	m.rmut.RLock()
	fn := filepath.Join(m.repoCfgs[repo].Directory, name)
	m.rmut.RUnlock()

	fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
	if err != nil {
		return nil, err
	}
	defer fd.Close()

	buf := make([]byte, size)
	_, err = fd.ReadAt(buf, offset)
	if err != nil {
		return nil, err
	}

	return buf, nil
}
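
// Sketch (assumed caller): reading one block of a local file through the same
// code path by requesting it as the local node. The repo ID, file name and
// offset are illustrative only.
//
//	buf, err := m.Request(protocol.LocalNodeID, "default", "docs/readme.txt", 0, scanner.StandardBlockSize)
//	if err != nil {
//		// file is missing, deleted or invalid
//	}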
// ReplaceLocal replaces the local repository index with the given list of files.
func (m *Model) ReplaceLocal(repo string, fs []protocol.FileInfo) {
	m.rmut.RLock()
	m.repoFiles[repo].ReplaceWithDelete(protocol.LocalNodeID, fs)
	m.rmut.RUnlock()
}

func (m *Model) CurrentRepoFile(repo string, file string) protocol.FileInfo {
	m.rmut.RLock()
	f := m.repoFiles[repo].Get(protocol.LocalNodeID, file)
	m.rmut.RUnlock()
	return f
}

func (m *Model) CurrentGlobalFile(repo string, file string) protocol.FileInfo {
	m.rmut.RLock()
	f := m.repoFiles[repo].GetGlobal(file)
	m.rmut.RUnlock()
	return f
}

type cFiler struct {
	m *Model
	r string
}

// Implements scanner.CurrentFiler
func (cf cFiler) CurrentFile(file string) protocol.FileInfo {
	return cf.m.CurrentRepoFile(cf.r, file)
}

// ConnectedTo returns true if we are connected to the named node.
func (m *Model) ConnectedTo(nodeID protocol.NodeID) bool {
	m.pmut.RLock()
	_, ok := m.protoConn[nodeID]
	m.pmut.RUnlock()
	return ok
}

// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// repository changes.
func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection) {
	nodeID := protoConn.ID()
	m.pmut.Lock()
	if _, ok := m.protoConn[nodeID]; ok {
		panic("add existing node")
	}
	m.protoConn[nodeID] = protoConn
	if _, ok := m.rawConn[nodeID]; ok {
		panic("add existing node")
	}
	m.rawConn[nodeID] = rawConn

	cm := m.clusterConfig(nodeID)
	protoConn.ClusterConfig(cm)

	m.rmut.RLock()
	for _, repo := range m.nodeRepos[nodeID] {
		fs := m.repoFiles[repo]
		go sendIndexes(protoConn, repo, fs)
	}
	m.rmut.RUnlock()
	m.pmut.Unlock()
}
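
// Sketch (assumed caller in the connection-handling code, not in this file):
// after the handshake, the new connection is handed to the model. remoteID,
// tlsConn and protoConn are hypothetical names for whatever the dialer or
// listener produced.
//
//	if m.ConnectedTo(remoteID) {
//		tlsConn.Close() // already connected to this node
//	} else {
//		m.AddConnection(tlsConn, protoConn)
//	}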
func sendIndexes(conn protocol.Connection, repo string, fs *files.Set) {
	nodeID := conn.ID()
	name := conn.Name()

	if debug {
		l.Debugf("sendIndexes for %s-%s@/%q starting", nodeID, name, repo)
	}

	initial := true
	minLocalVer := uint64(0)
	var err error

	defer func() {
		if debug {
			l.Debugf("sendIndexes for %s-%s@/%q exiting: %v", nodeID, name, repo, err)
		}
	}()

	for err == nil {
		if !initial && fs.LocalVersion(protocol.LocalNodeID) <= minLocalVer {
			time.Sleep(1 * time.Second)
			continue
		}

		batch := make([]protocol.FileInfo, 0, indexBatchSize)
		maxLocalVer := uint64(0)

		fs.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
			if f.LocalVersion <= minLocalVer {
				return true
			}

			if f.LocalVersion > maxLocalVer {
				maxLocalVer = f.LocalVersion
			}

			if len(batch) == indexBatchSize {
				if initial {
					if err = conn.Index(repo, batch); err != nil {
						return false
					}
					if debug {
						l.Debugf("sendIndexes for %s-%s/%q: %d files (initial index)", nodeID, name, repo, len(batch))
					}
					initial = false
				} else {
					if err = conn.IndexUpdate(repo, batch); err != nil {
						return false
					}
					if debug {
						l.Debugf("sendIndexes for %s-%s/%q: %d files (batched update)", nodeID, name, repo, len(batch))
					}
				}

				batch = make([]protocol.FileInfo, 0, indexBatchSize)
			}

			batch = append(batch, f)
			return true
		})

		if initial {
			err = conn.Index(repo, batch)
			if debug && err == nil {
				l.Debugf("sendIndexes for %s-%s/%q: %d files (small initial index)", nodeID, name, repo, len(batch))
			}
			initial = false
		} else if len(batch) > 0 {
			err = conn.IndexUpdate(repo, batch)
			if debug && err == nil {
				l.Debugf("sendIndexes for %s-%s/%q: %d files (last batch)", nodeID, name, repo, len(batch))
			}
		}

		minLocalVer = maxLocalVer
	}
}
func (m *Model) updateLocal(repo string, f protocol.FileInfo) {
	f.LocalVersion = 0
	m.rmut.RLock()
	m.repoFiles[repo].Update(protocol.LocalNodeID, []protocol.FileInfo{f})
	m.rmut.RUnlock()
	events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
		"repo":     repo,
		"name":     f.Name,
		"modified": time.Unix(f.Modified, 0),
		"flags":    fmt.Sprintf("0%o", f.Flags),
		"size":     f.Size(),
	})
}

func (m *Model) requestGlobal(nodeID protocol.NodeID, repo, name string, offset int64, size int, hash []byte) ([]byte, error) {
	m.pmut.RLock()
	nc, ok := m.protoConn[nodeID]
	m.pmut.RUnlock()

	if !ok {
		return nil, fmt.Errorf("requestGlobal: no such node: %s", nodeID)
	}

	if debug {
		l.Debugf("REQ(out): %s: %q / %q o=%d s=%d h=%x", nodeID, repo, name, offset, size, hash)
	}

	return nc.Request(repo, name, offset, size)
}

func (m *Model) AddRepo(cfg config.RepositoryConfiguration) {
	if m.started {
		panic("cannot add repo to started model")
	}
	if len(cfg.ID) == 0 {
		panic("cannot add empty repo id")
	}

	m.rmut.Lock()
	m.repoCfgs[cfg.ID] = cfg
	m.repoFiles[cfg.ID] = files.NewSet(cfg.ID, m.db)
	m.suppressor[cfg.ID] = &suppressor{threshold: int64(m.cfg.Options.MaxChangeKbps)}

	m.repoNodes[cfg.ID] = make([]protocol.NodeID, len(cfg.Nodes))
	for i, node := range cfg.Nodes {
		m.repoNodes[cfg.ID][i] = node.NodeID
		m.nodeRepos[node.NodeID] = append(m.nodeRepos[node.NodeID], cfg.ID)
	}

	m.addedRepo = true
	m.rmut.Unlock()
}
func (m *Model) ScanRepos() {
	m.rmut.RLock()
	var repos = make([]string, 0, len(m.repoCfgs))
	for repo := range m.repoCfgs {
		repos = append(repos, repo)
	}
	m.rmut.RUnlock()

	var wg sync.WaitGroup
	wg.Add(len(repos))
	for _, repo := range repos {
		repo := repo
		go func() {
			err := m.ScanRepo(repo)
			if err != nil {
				invalidateRepo(m.cfg, repo, err)
			}
			wg.Done()
		}()
	}
	wg.Wait()
}

func (m *Model) CleanRepos() {
	m.rmut.RLock()
	var dirs = make([]string, 0, len(m.repoCfgs))
	for _, cfg := range m.repoCfgs {
		dirs = append(dirs, cfg.Directory)
	}
	m.rmut.RUnlock()

	var wg sync.WaitGroup
	wg.Add(len(dirs))
	for _, dir := range dirs {
		w := &scanner.Walker{
			Dir:       dir,
			TempNamer: defTempNamer,
		}
		go func() {
			w.CleanTempFiles()
			wg.Done()
		}()
	}
	wg.Wait()
}
func (m *Model) ScanRepo(repo string) error {
	m.rmut.RLock()
	fs := m.repoFiles[repo]
	dir := m.repoCfgs[repo].Directory

	w := &scanner.Walker{
		Dir:          dir,
		IgnoreFile:   ".stignore",
		BlockSize:    scanner.StandardBlockSize,
		TempNamer:    defTempNamer,
		Suppressor:   m.suppressor[repo],
		CurrentFiler: cFiler{m, repo},
		IgnorePerms:  m.repoCfgs[repo].IgnorePerms,
	}
	m.rmut.RUnlock()

	m.setState(repo, RepoScanning)

	fchan, _, err := w.Walk()
	if err != nil {
		return err
	}

	batchSize := 100
	batch := make([]protocol.FileInfo, 0, batchSize)
	for f := range fchan {
		if len(batch) == batchSize {
			fs.Update(protocol.LocalNodeID, batch)
			batch = batch[:0]
		}
		batch = append(batch, f)
	}
	if len(batch) > 0 {
		fs.Update(protocol.LocalNodeID, batch)
	}

	batch = batch[:0]
	fs.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
		if !protocol.IsDeleted(f.Flags) {
			if len(batch) == batchSize {
				fs.Update(protocol.LocalNodeID, batch)
				batch = batch[:0]
			}
			if _, err := os.Stat(filepath.Join(dir, f.Name)); err != nil && os.IsNotExist(err) {
				// File has been deleted
				f.Blocks = nil
				f.Flags |= protocol.FlagDeleted
				f.Version = lamport.Default.Tick(f.Version)
				f.LocalVersion = 0
				batch = append(batch, f)
			}
		}
		return true
	})
	if len(batch) > 0 {
		fs.Update(protocol.LocalNodeID, batch)
	}

	m.setState(repo, RepoIdle)
	return nil
}
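
// Sketch (assumed caller, e.g. a rescan API handler): rescanning a single
// repository and reporting its state afterwards; "default" is an illustrative
// repo ID.
//
//	if err := m.ScanRepo("default"); err != nil {
//		l.Warnf("rescan: %v", err)
//	}
//	state, since := m.State("default")
//	l.Infof("repo %q is now %s (since %v)", "default", state, since)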
// clusterConfig returns a ClusterConfigMessage that is correct for the given peer node
func (m *Model) clusterConfig(node protocol.NodeID) protocol.ClusterConfigMessage {
	cm := protocol.ClusterConfigMessage{
		ClientName:    m.clientName,
		ClientVersion: m.clientVersion,
	}

	m.rmut.RLock()
	for _, repo := range m.nodeRepos[node] {
		cr := protocol.Repository{
			ID: repo,
		}
		for _, node := range m.repoNodes[repo] {
			// TODO: Set read only bit when relevant
			cr.Nodes = append(cr.Nodes, protocol.Node{
				ID:    node[:],
				Flags: protocol.FlagShareTrusted,
			})
		}
		cm.Repositories = append(cm.Repositories, cr)
	}
	m.rmut.RUnlock()

	return cm
}

func (m *Model) setState(repo string, state repoState) {
	m.smut.Lock()
	oldState := m.repoState[repo]
	changed, ok := m.repoStateChanged[repo]
	if state != oldState {
		m.repoState[repo] = state
		m.repoStateChanged[repo] = time.Now()
		eventData := map[string]interface{}{
			"repo": repo,
			"to":   state.String(),
		}
		if ok {
			eventData["duration"] = time.Since(changed).Seconds()
			eventData["from"] = oldState.String()
		}
		events.Default.Log(events.StateChanged, eventData)
	}
	m.smut.Unlock()
}

func (m *Model) State(repo string) (string, time.Time) {
	m.smut.RLock()
	state := m.repoState[repo]
	changed := m.repoStateChanged[repo]
	m.smut.RUnlock()
	return state.String(), changed
}
// Override reasserts the local state of the repository as the authoritative
// one: needed files we already have locally are re-announced with a bumped
// version, and needed files we are missing are marked as deleted.
func (m *Model) Override(repo string) {
	m.rmut.RLock()
	fs := m.repoFiles[repo]
	m.rmut.RUnlock()

	batch := make([]protocol.FileInfo, 0, indexBatchSize)
	fs.WithNeed(protocol.LocalNodeID, func(need protocol.FileInfo) bool {
		if len(batch) == indexBatchSize {
			fs.Update(protocol.LocalNodeID, batch)
			batch = batch[:0]
		}
		have := fs.Get(protocol.LocalNodeID, need.Name)
		if have.Name != need.Name {
			// We are missing the file
			need.Flags |= protocol.FlagDeleted
			need.Blocks = nil
		} else {
			// We have the file, replace with our version
			need = have
		}
		need.Version = lamport.Default.Tick(need.Version)
		need.LocalVersion = 0
		batch = append(batch, need)
		return true
	})
	if len(batch) > 0 {
		fs.Update(protocol.LocalNodeID, batch)
	}
}
// LocalVersion returns the change version for the given repository. This is
// guaranteed to increment if the contents of the local or global repository
// has changed.
func (m *Model) LocalVersion(repo string) uint64 {
	m.rmut.Lock()
	defer m.rmut.Unlock()

	fs, ok := m.repoFiles[repo]
	if !ok {
		return 0
	}

	ver := fs.LocalVersion(protocol.LocalNodeID)
	for _, n := range m.repoNodes[repo] {
		ver += fs.LocalVersion(n)
	}

	return ver
}
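
// Sketch (assumed caller, e.g. an event or long-poll endpoint): detecting that
// a repository changed by comparing successive LocalVersion values.
//
//	last := m.LocalVersion("default")
//	for {
//		time.Sleep(time.Second)
//		if cur := m.LocalVersion("default"); cur != last {
//			last = cur
//			// repository contents changed; refresh whatever depends on it
//		}
//	}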