model.go

// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

package model

import (
    "errors"
    "fmt"
    "io"
    "net"
    "os"
    "path/filepath"
    "strconv"
    "sync"
    "time"

    "github.com/calmh/syncthing/config"
    "github.com/calmh/syncthing/events"
    "github.com/calmh/syncthing/files"
    "github.com/calmh/syncthing/lamport"
    "github.com/calmh/syncthing/protocol"
    "github.com/calmh/syncthing/scanner"
    "github.com/syndtr/goleveldb/leveldb"
)

type repoState int

const (
    RepoIdle repoState = iota
    RepoScanning
    RepoSyncing
    RepoCleaning
)
func (s repoState) String() string {
    switch s {
    case RepoIdle:
        return "idle"
    case RepoScanning:
        return "scanning"
    case RepoCleaning:
        return "cleaning"
    case RepoSyncing:
        return "syncing"
    default:
        return "unknown"
    }
}
// Somewhat arbitrary amount of bytes that we choose to let represent the size
// of an unsynchronized directory entry or a deleted file. We need it to be
// larger than zero so that it's visible that there is some amount of bytes to
// transfer to bring the systems into synchronization.
const zeroEntrySize = 128

// How many files to send in each Index/IndexUpdate message.
const indexBatchSize = 1000
type Model struct {
    indexDir string
    cfg      *config.Configuration
    db       *leveldb.DB

    clientName    string
    clientVersion string

    repoCfgs   map[string]config.RepositoryConfiguration // repo -> cfg
    repoFiles  map[string]*files.Set                      // repo -> files
    repoNodes  map[string][]protocol.NodeID               // repo -> nodeIDs
    nodeRepos  map[protocol.NodeID][]string               // nodeID -> repos
    suppressor map[string]*suppressor                     // repo -> suppressor
    rmut       sync.RWMutex                               // protects the above

    repoState        map[string]repoState // repo -> state
    repoStateChanged map[string]time.Time // repo -> time when state changed
    smut             sync.RWMutex

    protoConn map[protocol.NodeID]protocol.Connection
    rawConn   map[protocol.NodeID]io.Closer
    nodeVer   map[protocol.NodeID]string
    pmut      sync.RWMutex // protects protoConn and rawConn

    sentLocalVer map[protocol.NodeID]map[string]uint64
    slMut        sync.Mutex

    sup suppressor

    addedRepo bool
    started   bool
}

var (
    ErrNoSuchFile = errors.New("no such file")
    ErrInvalid    = errors.New("file is invalid")
)
// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local repository in any way.
func NewModel(indexDir string, cfg *config.Configuration, clientName, clientVersion string, db *leveldb.DB) *Model {
    m := &Model{
        indexDir:         indexDir,
        cfg:              cfg,
        db:               db,
        clientName:       clientName,
        clientVersion:    clientVersion,
        repoCfgs:         make(map[string]config.RepositoryConfiguration),
        repoFiles:        make(map[string]*files.Set),
        repoNodes:        make(map[string][]protocol.NodeID),
        nodeRepos:        make(map[protocol.NodeID][]string),
        repoState:        make(map[string]repoState),
        repoStateChanged: make(map[string]time.Time),
        suppressor:       make(map[string]*suppressor),
        protoConn:        make(map[protocol.NodeID]protocol.Connection),
        rawConn:          make(map[protocol.NodeID]io.Closer),
        nodeVer:          make(map[protocol.NodeID]string),
        sentLocalVer:     make(map[protocol.NodeID]map[string]uint64),
        sup:              suppressor{threshold: int64(cfg.Options.MaxChangeKbps)},
    }

    var timeout = 20 * 60 // seconds
    if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
        it, err := strconv.Atoi(t)
        if err == nil {
            timeout = it
        }
    }
    deadlockDetect(&m.rmut, time.Duration(timeout)*time.Second)
    deadlockDetect(&m.smut, time.Duration(timeout)*time.Second)
    deadlockDetect(&m.pmut, time.Duration(timeout)*time.Second)

    return m
}
// StartRepoRW starts read/write processing on the current model. When in
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer nodes.
func (m *Model) StartRepoRW(repo string, threads int) {
    m.rmut.RLock()
    defer m.rmut.RUnlock()

    if cfg, ok := m.repoCfgs[repo]; !ok {
        panic("cannot start without repo")
    } else {
        newPuller(cfg, m, threads, m.cfg)
    }
}

// StartRepoRO starts read only processing on the current model. When in
// read only mode the model will announce files to the cluster but not
// pull in any external changes.
func (m *Model) StartRepoRO(repo string) {
    m.StartRepoRW(repo, 0) // zero threads => read only
}
type ConnectionInfo struct {
    protocol.Statistics
    Address       string
    ClientVersion string
}

// ConnectionStats returns a map with connection statistics for each connected node.
func (m *Model) ConnectionStats() map[string]ConnectionInfo {
    type remoteAddrer interface {
        RemoteAddr() net.Addr
    }

    m.pmut.RLock()
    m.rmut.RLock()

    var res = make(map[string]ConnectionInfo)
    for node, conn := range m.protoConn {
        ci := ConnectionInfo{
            Statistics:    conn.Statistics(),
            ClientVersion: m.nodeVer[node],
        }
        if nc, ok := m.rawConn[node].(remoteAddrer); ok {
            ci.Address = nc.RemoteAddr().String()
        }

        res[node.String()] = ci
    }

    m.rmut.RUnlock()
    m.pmut.RUnlock()

    in, out := protocol.TotalInOut()
    res["total"] = ConnectionInfo{
        Statistics: protocol.Statistics{
            At:            time.Now(),
            InBytesTotal:  in,
            OutBytesTotal: out,
        },
    }

    return res
}
// Completion returns the completion status, in percent, for the given node and repo.
func (m *Model) Completion(node protocol.NodeID, repo string) float64 {
    var tot int64
    m.repoFiles[repo].WithGlobal(func(f protocol.FileInfo) bool {
        if !protocol.IsDeleted(f.Flags) {
            var size int64
            if protocol.IsDirectory(f.Flags) {
                size = zeroEntrySize
            } else {
                size = f.Size()
            }
            tot += size
        }
        return true
    })

    var need int64
    m.repoFiles[repo].WithNeed(node, func(f protocol.FileInfo) bool {
        if !protocol.IsDeleted(f.Flags) {
            var size int64
            if protocol.IsDirectory(f.Flags) {
                size = zeroEntrySize
            } else {
                size = f.Size()
            }
            need += size
        }
        return true
    })

    return 100 * (1 - float64(need)/float64(tot))
}
func sizeOf(fs []protocol.FileInfo) (files, deleted int, bytes int64) {
    for _, f := range fs {
        fs, de, by := sizeOfFile(f)
        files += fs
        deleted += de
        bytes += by
    }
    return
}

func sizeOfFile(f protocol.FileInfo) (files, deleted int, bytes int64) {
    if !protocol.IsDeleted(f.Flags) {
        files++
        if !protocol.IsDirectory(f.Flags) {
            bytes += f.Size()
        } else {
            bytes += zeroEntrySize
        }
    } else {
        deleted++
        bytes += zeroEntrySize
    }
    return
}
// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
func (m *Model) GlobalSize(repo string) (files, deleted int, bytes int64) {
    m.rmut.RLock()
    defer m.rmut.RUnlock()
    if rf, ok := m.repoFiles[repo]; ok {
        rf.WithGlobal(func(f protocol.FileInfo) bool {
            fs, de, by := sizeOfFile(f)
            files += fs
            deleted += de
            bytes += by
            return true
        })
    }
    return
}

// LocalSize returns the number of files, deleted files and total bytes for all
// files in the local repository.
func (m *Model) LocalSize(repo string) (files, deleted int, bytes int64) {
    m.rmut.RLock()
    defer m.rmut.RUnlock()
    if rf, ok := m.repoFiles[repo]; ok {
        rf.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
            fs, de, by := sizeOfFile(f)
            files += fs
            deleted += de
            bytes += by
            return true
        })
    }
    return
}

// NeedSize returns the number and total size of currently needed files.
func (m *Model) NeedSize(repo string) (files int, bytes int64) {
    m.rmut.RLock()
    defer m.rmut.RUnlock()
    if rf, ok := m.repoFiles[repo]; ok {
        rf.WithNeed(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
            fs, de, by := sizeOfFile(f)
            files += fs + de
            bytes += by
            return true
        })
    }
    return
}

// NeedFilesRepo returns the list of currently needed files, at most
// indexBatchSize of them.
func (m *Model) NeedFilesRepo(repo string) []protocol.FileInfo {
    m.rmut.RLock()
    defer m.rmut.RUnlock()
    if rf, ok := m.repoFiles[repo]; ok {
        fs := make([]protocol.FileInfo, 0, indexBatchSize)
        rf.WithNeed(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
            fs = append(fs, f)
            return len(fs) < indexBatchSize
        })
        return fs
    }
    return nil
}
// Index is called when a new node is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(nodeID protocol.NodeID, repo string, fs []protocol.FileInfo) {
    if debug {
        l.Debugf("IDX(in): %s %q: %d files", nodeID, repo, len(fs))
    }

    if !m.repoSharedWith(repo, nodeID) {
        l.Warnf("Unexpected repository ID %q sent from node %q; ensure that the repository exists and that this node is selected under \"Share With\" in the repository configuration.", repo, nodeID)
        return
    }

    for i := range fs {
        lamport.Default.Tick(fs[i].Version)
    }

    m.rmut.RLock()
    r, ok := m.repoFiles[repo]
    m.rmut.RUnlock()
    if ok {
        r.Replace(nodeID, fs)
    } else {
        l.Fatalf("Index for nonexistent repo %q", repo)
    }

    events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
        "node":    nodeID.String(),
        "repo":    repo,
        "items":   len(fs),
        "version": r.LocalVersion(nodeID),
    })
}

// IndexUpdate is called for incremental updates to connected nodes' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(nodeID protocol.NodeID, repo string, fs []protocol.FileInfo) {
    if debug {
        l.Debugf("IDXUP(in): %s / %q: %d files", nodeID, repo, len(fs))
    }

    if !m.repoSharedWith(repo, nodeID) {
        l.Warnf("Unexpected repository ID %q sent from node %q; ensure that the repository exists and that this node is selected under \"Share With\" in the repository configuration.", repo, nodeID)
        return
    }

    for i := range fs {
        lamport.Default.Tick(fs[i].Version)
    }

    m.rmut.RLock()
    r, ok := m.repoFiles[repo]
    m.rmut.RUnlock()
    if ok {
        r.Update(nodeID, fs)
    } else {
        l.Fatalf("IndexUpdate for nonexistent repo %q", repo)
    }

    events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
        "node":    nodeID.String(),
        "repo":    repo,
        "items":   len(fs),
        "version": r.LocalVersion(nodeID),
    })
}
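// repoSharedWith returns true if the given repository is shared with the given
// node, i.e. the repository is listed among the repositories configured for
// that node.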
func (m *Model) repoSharedWith(repo string, nodeID protocol.NodeID) bool {
    m.rmut.RLock()
    defer m.rmut.RUnlock()
    for _, nrepo := range m.nodeRepos[nodeID] {
        if nrepo == repo {
            return true
        }
    }
    return false
}
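// ClusterConfig is called when a cluster configuration message is received
// from a connected node; it records the remote client name and version.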
func (m *Model) ClusterConfig(nodeID protocol.NodeID, config protocol.ClusterConfigMessage) {
    m.pmut.Lock()
    if config.ClientName == "syncthing" {
        m.nodeVer[nodeID] = config.ClientVersion
    } else {
        m.nodeVer[nodeID] = config.ClientName + " " + config.ClientVersion
    }
    m.pmut.Unlock()

    l.Infof(`Node %s client is "%s %s"`, nodeID, config.ClientName, config.ClientVersion)
}

// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(node protocol.NodeID, err error) {
    l.Infof("Connection to %s closed: %v", node, err)
    events.Default.Log(events.NodeDisconnected, map[string]string{
        "id":    node.String(),
        "error": err.Error(),
    })

    m.pmut.Lock()
    m.rmut.RLock()
    for _, repo := range m.nodeRepos[node] {
        m.repoFiles[repo].Replace(node, nil)
    }
    m.rmut.RUnlock()

    conn, ok := m.rawConn[node]
    if ok {
        conn.Close()
    }

    delete(m.protoConn, node)
    delete(m.rawConn, node)
    delete(m.nodeVer, node)
    m.pmut.Unlock()
}
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(nodeID protocol.NodeID, repo, name string, offset int64, size int) ([]byte, error) {
    // Verify that the requested file exists in the local model.
    m.rmut.RLock()
    r, ok := m.repoFiles[repo]
    m.rmut.RUnlock()

    if !ok {
        l.Warnf("Request from %s for file %s in nonexistent repo %q", nodeID, name, repo)
        return nil, ErrNoSuchFile
    }

    lf := r.Get(protocol.LocalNodeID, name)
    if protocol.IsInvalid(lf.Flags) || protocol.IsDeleted(lf.Flags) {
        if debug {
            l.Debugf("REQ(in): %s: %q / %q o=%d s=%d; invalid: %v", nodeID, repo, name, offset, size, lf)
        }
        return nil, ErrInvalid
    }

    if offset > lf.Size() {
        if debug {
            l.Debugf("REQ(in; nonexistent): %s: %q o=%d s=%d", nodeID, name, offset, size)
        }
        return nil, ErrNoSuchFile
    }

    if debug && nodeID != protocol.LocalNodeID {
        l.Debugf("REQ(in): %s: %q / %q o=%d s=%d", nodeID, repo, name, offset, size)
    }
    m.rmut.RLock()
    fn := filepath.Join(m.repoCfgs[repo].Directory, name)
    m.rmut.RUnlock()

    fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
    if err != nil {
        return nil, err
    }
    defer fd.Close()

    buf := make([]byte, size)
    _, err = fd.ReadAt(buf, offset)
    if err != nil {
        return nil, err
    }

    return buf, nil
}
// ReplaceLocal replaces the local repository index with the given list of files.
func (m *Model) ReplaceLocal(repo string, fs []protocol.FileInfo) {
    m.rmut.RLock()
    m.repoFiles[repo].ReplaceWithDelete(protocol.LocalNodeID, fs)
    m.rmut.RUnlock()
}
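// CurrentRepoFile returns the given file as it exists in the local index.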
func (m *Model) CurrentRepoFile(repo string, file string) protocol.FileInfo {
    m.rmut.RLock()
    f := m.repoFiles[repo].Get(protocol.LocalNodeID, file)
    m.rmut.RUnlock()
    return f
}
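// CurrentGlobalFile returns the given file as it exists in the global index.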
func (m *Model) CurrentGlobalFile(repo string, file string) protocol.FileInfo {
    m.rmut.RLock()
    f := m.repoFiles[repo].GetGlobal(file)
    m.rmut.RUnlock()
    return f
}

type cFiler struct {
    m *Model
    r string
}

// Implements scanner.CurrentFiler
func (cf cFiler) CurrentFile(file string) protocol.FileInfo {
    return cf.m.CurrentRepoFile(cf.r, file)
}

// ConnectedTo returns true if we are connected to the named node.
func (m *Model) ConnectedTo(nodeID protocol.NodeID) bool {
    m.pmut.RLock()
    _, ok := m.protoConn[nodeID]
    m.pmut.RUnlock()
    return ok
}
// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// repository changes.
func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection) {
    nodeID := protoConn.ID()
    m.pmut.Lock()
    if _, ok := m.protoConn[nodeID]; ok {
        panic("add existing node")
    }
    m.protoConn[nodeID] = protoConn
    if _, ok := m.rawConn[nodeID]; ok {
        panic("add existing node")
    }
    m.rawConn[nodeID] = rawConn

    cm := m.clusterConfig(nodeID)
    protoConn.ClusterConfig(cm)

    m.rmut.RLock()
    for _, repo := range m.nodeRepos[nodeID] {
        fs := m.repoFiles[repo]
        go sendIndexes(protoConn, repo, fs)
    }
    m.rmut.RUnlock()
    m.pmut.Unlock()
}
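// sendIndexes sends the full index for the given repository to the connection,
// then loops, sending incremental index updates whenever the local version
// advances. It returns (ending the goroutine) when a send fails.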
func sendIndexes(conn protocol.Connection, repo string, fs *files.Set) {
    nodeID := conn.ID()
    name := conn.Name()
    var err error

    if debug {
        l.Debugf("sendIndexes for %s-%s@/%q starting", nodeID, name, repo)
    }

    defer func() {
        if debug {
            l.Debugf("sendIndexes for %s-%s@/%q exiting: %v", nodeID, name, repo, err)
        }
    }()

    minLocalVer, err := sendIndexTo(true, 0, conn, repo, fs)

    for err == nil {
        time.Sleep(5 * time.Second)
        if fs.LocalVersion(protocol.LocalNodeID) <= minLocalVer {
            continue
        }

        minLocalVer, err = sendIndexTo(false, minLocalVer, conn, repo, fs)
    }
}
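// sendIndexTo sends the local index (a full Index when initial is true,
// otherwise IndexUpdates covering files with a local version above
// minLocalVer) in batches of indexBatchSize, and returns the highest local
// version seen.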
func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, repo string, fs *files.Set) (uint64, error) {
    nodeID := conn.ID()
    name := conn.Name()
    batch := make([]protocol.FileInfo, 0, indexBatchSize)
    maxLocalVer := uint64(0)
    var err error

    fs.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
        if f.LocalVersion <= minLocalVer {
            return true
        }

        if f.LocalVersion > maxLocalVer {
            maxLocalVer = f.LocalVersion
        }

        if len(batch) == indexBatchSize {
            if initial {
                if err = conn.Index(repo, batch); err != nil {
                    return false
                }
                if debug {
                    l.Debugf("sendIndexes for %s-%s/%q: %d files (initial index)", nodeID, name, repo, len(batch))
                }
                initial = false
            } else {
                if err = conn.IndexUpdate(repo, batch); err != nil {
                    return false
                }
                if debug {
                    l.Debugf("sendIndexes for %s-%s/%q: %d files (batched update)", nodeID, name, repo, len(batch))
                }
            }

            batch = make([]protocol.FileInfo, 0, indexBatchSize)
        }

        batch = append(batch, f)
        return true
    })

    if initial && err == nil {
        err = conn.Index(repo, batch)
        if debug && err == nil {
            l.Debugf("sendIndexes for %s-%s/%q: %d files (small initial index)", nodeID, name, repo, len(batch))
        }
    } else if len(batch) > 0 && err == nil {
        err = conn.IndexUpdate(repo, batch)
        if debug && err == nil {
            l.Debugf("sendIndexes for %s-%s/%q: %d files (last batch)", nodeID, name, repo, len(batch))
        }
    }

    return maxLocalVer, err
}
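// updateLocal records a change to a single local file in the index and emits a
// LocalIndexUpdated event.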
func (m *Model) updateLocal(repo string, f protocol.FileInfo) {
    f.LocalVersion = 0
    m.rmut.RLock()
    m.repoFiles[repo].Update(protocol.LocalNodeID, []protocol.FileInfo{f})
    m.rmut.RUnlock()
    events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
        "repo":     repo,
        "name":     f.Name,
        "modified": time.Unix(f.Modified, 0),
        "flags":    fmt.Sprintf("0%o", f.Flags),
        "size":     f.Size(),
    })
}
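// requestGlobal requests the given data block from the named node over its
// existing protocol connection.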
func (m *Model) requestGlobal(nodeID protocol.NodeID, repo, name string, offset int64, size int, hash []byte) ([]byte, error) {
    m.pmut.RLock()
    nc, ok := m.protoConn[nodeID]
    m.pmut.RUnlock()

    if !ok {
        return nil, fmt.Errorf("requestGlobal: no such node: %s", nodeID)
    }

    if debug {
        l.Debugf("REQ(out): %s: %q / %q o=%d s=%d h=%x", nodeID, repo, name, offset, size, hash)
    }

    return nc.Request(repo, name, offset, size)
}
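// AddRepo registers a repository configuration with the model. It must be
// called before the model is started.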
func (m *Model) AddRepo(cfg config.RepositoryConfiguration) {
    if m.started {
        panic("cannot add repo to started model")
    }
    if len(cfg.ID) == 0 {
        panic("cannot add empty repo id")
    }

    m.rmut.Lock()
    m.repoCfgs[cfg.ID] = cfg
    m.repoFiles[cfg.ID] = files.NewSet(cfg.ID, m.db)
    m.suppressor[cfg.ID] = &suppressor{threshold: int64(m.cfg.Options.MaxChangeKbps)}

    m.repoNodes[cfg.ID] = make([]protocol.NodeID, len(cfg.Nodes))
    for i, node := range cfg.Nodes {
        m.repoNodes[cfg.ID][i] = node.NodeID
        m.nodeRepos[node.NodeID] = append(m.nodeRepos[node.NodeID], cfg.ID)
    }

    m.addedRepo = true
    m.rmut.Unlock()
}
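// ScanRepos scans all configured repositories in parallel, marking any
// repository whose scan fails as invalid in the configuration.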
func (m *Model) ScanRepos() {
    m.rmut.RLock()
    var repos = make([]string, 0, len(m.repoCfgs))
    for repo := range m.repoCfgs {
        repos = append(repos, repo)
    }
    m.rmut.RUnlock()

    var wg sync.WaitGroup
    wg.Add(len(repos))
    for _, repo := range repos {
        repo := repo
        go func() {
            err := m.ScanRepo(repo)
            if err != nil {
                invalidateRepo(m.cfg, repo, err)
            }
            wg.Done()
        }()
    }
    wg.Wait()
}
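// CleanRepos removes leftover temporary files from all repository directories,
// in parallel.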
func (m *Model) CleanRepos() {
    m.rmut.RLock()
    var dirs = make([]string, 0, len(m.repoCfgs))
    for _, cfg := range m.repoCfgs {
        dirs = append(dirs, cfg.Directory)
    }
    m.rmut.RUnlock()

    var wg sync.WaitGroup
    wg.Add(len(dirs))
    for _, dir := range dirs {
        w := &scanner.Walker{
            Dir:       dir,
            TempNamer: defTempNamer,
        }
        go func() {
            w.CleanTempFiles()
            wg.Done()
        }()
    }
    wg.Wait()
}
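// ScanRepo walks the repository directory, updating the local index with new
// or changed files and marking files that no longer exist on disk as deleted.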
func (m *Model) ScanRepo(repo string) error {
    m.rmut.RLock()
    fs := m.repoFiles[repo]
    dir := m.repoCfgs[repo].Directory

    w := &scanner.Walker{
        Dir:          dir,
        IgnoreFile:   ".stignore",
        BlockSize:    scanner.StandardBlockSize,
        TempNamer:    defTempNamer,
        Suppressor:   m.suppressor[repo],
        CurrentFiler: cFiler{m, repo},
        IgnorePerms:  m.repoCfgs[repo].IgnorePerms,
    }
    m.rmut.RUnlock()

    m.setState(repo, RepoScanning)

    fchan, _, err := w.Walk()
    if err != nil {
        return err
    }

    batchSize := 100
    batch := make([]protocol.FileInfo, 0, batchSize)
    for f := range fchan {
        events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
            "repo":     repo,
            "name":     f.Name,
            "modified": time.Unix(f.Modified, 0),
            "flags":    fmt.Sprintf("0%o", f.Flags),
            "size":     f.Size(),
        })
        if len(batch) == batchSize {
            fs.Update(protocol.LocalNodeID, batch)
            batch = batch[:0]
        }
        batch = append(batch, f)
    }
    if len(batch) > 0 {
        fs.Update(protocol.LocalNodeID, batch)
    }

    batch = batch[:0]
    fs.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
        if !protocol.IsDeleted(f.Flags) {
            if len(batch) == batchSize {
                fs.Update(protocol.LocalNodeID, batch)
                batch = batch[:0]
            }
            if _, err := os.Stat(filepath.Join(dir, f.Name)); err != nil && os.IsNotExist(err) {
                // File has been deleted
                f.Blocks = nil
                f.Flags |= protocol.FlagDeleted
                f.Version = lamport.Default.Tick(f.Version)
                f.LocalVersion = 0
                events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
                    "repo":     repo,
                    "name":     f.Name,
                    "modified": time.Unix(f.Modified, 0),
                    "flags":    fmt.Sprintf("0%o", f.Flags),
                    "size":     f.Size(),
                })
                batch = append(batch, f)
            }
        }
        return true
    })
    if len(batch) > 0 {
        fs.Update(protocol.LocalNodeID, batch)
    }

    m.setState(repo, RepoIdle)
    return nil
}
// clusterConfig returns a ClusterConfigMessage that is correct for the given peer node
func (m *Model) clusterConfig(node protocol.NodeID) protocol.ClusterConfigMessage {
    cm := protocol.ClusterConfigMessage{
        ClientName:    m.clientName,
        ClientVersion: m.clientVersion,
    }

    m.rmut.RLock()
    for _, repo := range m.nodeRepos[node] {
        cr := protocol.Repository{
            ID: repo,
        }
        for _, node := range m.repoNodes[repo] {
            // TODO: Set read only bit when relevant
            cr.Nodes = append(cr.Nodes, protocol.Node{
                ID:    node[:],
                Flags: protocol.FlagShareTrusted,
            })
        }
        cm.Repositories = append(cm.Repositories, cr)
    }
    m.rmut.RUnlock()

    return cm
}
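// setState sets the current state for the repository and emits a StateChanged
// event if the state actually changed.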
func (m *Model) setState(repo string, state repoState) {
    m.smut.Lock()
    oldState := m.repoState[repo]
    changed, ok := m.repoStateChanged[repo]
    if state != oldState {
        m.repoState[repo] = state
        m.repoStateChanged[repo] = time.Now()
        eventData := map[string]interface{}{
            "repo": repo,
            "to":   state.String(),
        }
        if ok {
            eventData["duration"] = time.Since(changed).Seconds()
            eventData["from"] = oldState.String()
        }
        events.Default.Log(events.StateChanged, eventData)
    }
    m.smut.Unlock()
}
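// State returns the current state of the given repository and the time at
// which it last changed.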
func (m *Model) State(repo string) (string, time.Time) {
    m.smut.RLock()
    state := m.repoState[repo]
    changed := m.repoStateChanged[repo]
    m.smut.RUnlock()
    return state.String(), changed
}
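// Override makes the local version of each needed file the winning version:
// files we have replace the global version, and files we lack are marked as
// deleted, so that our local state gets announced to the rest of the cluster.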
func (m *Model) Override(repo string) {
    m.rmut.RLock()
    fs := m.repoFiles[repo]
    m.rmut.RUnlock()

    batch := make([]protocol.FileInfo, 0, indexBatchSize)
    fs.WithNeed(protocol.LocalNodeID, func(need protocol.FileInfo) bool {
        if len(batch) == indexBatchSize {
            fs.Update(protocol.LocalNodeID, batch)
            batch = batch[:0]
        }

        have := fs.Get(protocol.LocalNodeID, need.Name)
        if have.Name != need.Name {
            // We are missing the file
            need.Flags |= protocol.FlagDeleted
            need.Blocks = nil
        } else {
            // We have the file, replace with our version
            need = have
        }
        need.Version = lamport.Default.Tick(need.Version)
        need.LocalVersion = 0
        batch = append(batch, need)
        return true
    })
    if len(batch) > 0 {
        fs.Update(protocol.LocalNodeID, batch)
    }
}
// LocalVersion returns the change version for the given repository. This is
// guaranteed to increment if the contents of the local or global repository
// have changed.
func (m *Model) LocalVersion(repo string) uint64 {
    m.rmut.Lock()
    defer m.rmut.Unlock()

    fs, ok := m.repoFiles[repo]
    if !ok {
        return 0
    }

    ver := fs.LocalVersion(protocol.LocalNodeID)
    for _, n := range m.repoNodes[repo] {
        ver += fs.LocalVersion(n)
    }

    return ver
}