// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
// Use of this source code is governed by an MIT-style license that can be
// found in the LICENSE file.

package protocol

import (
	"bufio"
	"compress/flate"
	"errors"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/calmh/syncthing/xdr"
)

const BlockSize = 128 * 1024

const (
	messageTypeClusterConfig = 0
	messageTypeIndex         = 1
	messageTypeRequest       = 2
	messageTypeResponse      = 3
	messageTypePing          = 4
	messageTypePong          = 5
	messageTypeIndexUpdate   = 6
)

const (
	FlagDeleted    uint32 = 1 << 12
	FlagInvalid           = 1 << 13
	FlagDirectory         = 1 << 14
	FlagNoPermBits        = 1 << 15
)

const (
	FlagShareTrusted  uint32 = 1 << 0
	FlagShareReadOnly        = 1 << 1
	FlagShareBits            = 0x000000ff
)

var (
	ErrClusterHash = fmt.Errorf("configuration error: mismatched cluster hash")
	ErrClosed      = errors.New("connection closed")
)

type Model interface {
	// An index was received from the peer node
	Index(nodeID NodeID, repo string, files []FileInfo)
	// An index update was received from the peer node
	IndexUpdate(nodeID NodeID, repo string, files []FileInfo)
	// A request was made by the peer node
	Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error)
	// A cluster configuration message was received
	ClusterConfig(nodeID NodeID, config ClusterConfigMessage)
	// The peer node closed the connection
	Close(nodeID NodeID, err error)
}

type Connection interface {
	ID() NodeID
	Index(repo string, files []FileInfo)
	Request(repo string, name string, offset int64, size int) ([]byte, error)
	ClusterConfig(config ClusterConfigMessage)
	Statistics() Statistics
}

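// The following sketch is illustrative only and not part of the original file:
// a minimal Model implementation that merely logs what it receives and serves
// no data. A real receiver would store the announced indexes and answer
// Request calls with the block read from disk.
//
//	type loggingModel struct{}
//
//	func (loggingModel) Index(nodeID NodeID, repo string, files []FileInfo) {
//		l.Debugln("index from", nodeID, "for", repo, ":", len(files), "files")
//	}
//
//	func (loggingModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
//		l.Debugln("index update from", nodeID, "for", repo, ":", len(files), "files")
//	}
//
//	func (loggingModel) Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error) {
//		return nil, errors.New("no data available")
//	}
//
//	func (loggingModel) ClusterConfig(nodeID NodeID, config ClusterConfigMessage) {}
//
//	func (loggingModel) Close(nodeID NodeID, err error) {}
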
type rawConnection struct {
	id       NodeID
	receiver Model

	reader io.ReadCloser
	cr     *countingReader
	xr     *xdr.Reader
	writer io.WriteCloser
	cw     *countingWriter
	wb     *bufio.Writer
	xw     *xdr.Writer
	wmut   sync.Mutex

	indexSent map[string]map[string]uint64
	awaiting  []chan asyncResult
	imut      sync.Mutex

	idxMut sync.Mutex // ensures serialization of Index calls

	nextID chan int
	outbox chan []encodable
	closed chan struct{}
}

type asyncResult struct {
	val []byte
	err error
}

const (
	pingTimeout  = 30 * time.Second
	pingIdleTime = 60 * time.Second
)

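// NewConnection wraps the given reader and writer (typically the two halves of
// a TLS connection to the peer) in the wire protocol: traffic is DEFLATE
// compressed and XDR encoded, and incoming messages are dispatched to the
// given Model.
//
// The usage sketch below is illustrative only and not part of the original
// file; tlsCfg, deriveNodeID, myModel, myClusterConfig and myFiles are
// hypothetical stand-ins for the caller's own TLS configuration, NodeID
// derivation, Model implementation and data.
//
//	conn, err := tls.Dial("tcp", "peer.example.com:22000", tlsCfg)
//	if err != nil {
//		// handle the dial error
//	}
//	nodeID := deriveNodeID(conn) // e.g. from the peer certificate
//	pc := NewConnection(nodeID, conn, conn, myModel)
//	pc.ClusterConfig(myClusterConfig)
//	pc.Index("default", myFiles)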
func NewConnection(nodeID NodeID, reader io.Reader, writer io.Writer, receiver Model) Connection {
	cr := &countingReader{Reader: reader}
	cw := &countingWriter{Writer: writer}

	flrd := flate.NewReader(cr)
	flwr, err := flate.NewWriter(cw, flate.BestSpeed)
	if err != nil {
		panic(err)
	}
	wb := bufio.NewWriter(flwr)

	c := rawConnection{
		id:        nodeID,
		receiver:  nativeModel{receiver},
		reader:    flrd,
		cr:        cr,
		xr:        xdr.NewReader(flrd),
		writer:    flwr,
		cw:        cw,
		wb:        wb,
		xw:        xdr.NewWriter(wb),
		awaiting:  make([]chan asyncResult, 0x1000),
		indexSent: make(map[string]map[string]uint64),
		outbox:    make(chan []encodable),
		nextID:    make(chan int),
		closed:    make(chan struct{}),
	}

	go c.indexSerializerLoop()
	go c.readerLoop()
	go c.writerLoop()
	go c.pingerLoop()
	go c.idGenerator()

	return wireFormatConnection{&c}
}

func (c *rawConnection) ID() NodeID {
	return c.id
}

// Index writes the list of file information to the connected peer node.
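// The first call for a repository sends the full index; subsequent calls
// compare against what has already been sent and transmit only the changed
// entries as an index update. Illustrative call sequence (identifiers are
// hypothetical, not from the original source):
//
//	pc.Index("default", allFiles)     // first call: full index message
//	pc.Index("default", allFiles)     // nothing changed: no message is sent
//	pc.Index("default", updatedFiles) // only entries with new versions go out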
func (c *rawConnection) Index(repo string, idx []FileInfo) {
	c.idxMut.Lock()
	defer c.idxMut.Unlock()

	c.imut.Lock()
	var msgType int
	if c.indexSent[repo] == nil {
		// This is the first time we send an index.
		msgType = messageTypeIndex

		c.indexSent[repo] = make(map[string]uint64)
		for _, f := range idx {
			c.indexSent[repo][f.Name] = f.Version
		}
	} else {
		// We have sent one full index. Only send updates now.
		msgType = messageTypeIndexUpdate

		var diff []FileInfo
		for _, f := range idx {
			if vs, ok := c.indexSent[repo][f.Name]; !ok || f.Version != vs {
				diff = append(diff, f)
				c.indexSent[repo][f.Name] = f.Version
			}
		}
		idx = diff
	}
	c.imut.Unlock()

	if len(idx) > 0 {
		c.send(header{0, -1, msgType}, IndexMessage{repo, idx})
	}
}

// Request returns the bytes for the specified block after fetching them from
// the connected peer.
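// The call blocks until the response arrives or the connection is closed, and
// requests may be issued concurrently from several goroutines. Illustrative
// sketch (hypothetical identifiers, not from the original source):
//
//	buf, err := pc.Request("default", "some/file", 0, BlockSize)
//	if err != nil {
//		// the peer rejected the request or the connection was closed
//	}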
func (c *rawConnection) Request(repo string, name string, offset int64, size int) ([]byte, error) {
	var id int
	select {
	case id = <-c.nextID:
	case <-c.closed:
		return nil, ErrClosed
	}

	c.imut.Lock()
	if ch := c.awaiting[id]; ch != nil {
		panic("id taken")
	}
	rc := make(chan asyncResult)
	c.awaiting[id] = rc
	c.imut.Unlock()

	ok := c.send(header{0, id, messageTypeRequest},
		RequestMessage{repo, name, uint64(offset), uint32(size)})
	if !ok {
		return nil, ErrClosed
	}

	res, ok := <-rc
	if !ok {
		return nil, ErrClosed
	}
	return res.val, res.err
}

// ClusterConfig sends the cluster configuration message to the peer node.
func (c *rawConnection) ClusterConfig(config ClusterConfigMessage) {
	c.send(header{0, -1, messageTypeClusterConfig}, config)
}

func (c *rawConnection) ping() bool {
	var id int
	select {
	case id = <-c.nextID:
	case <-c.closed:
		return false
	}

	rc := make(chan asyncResult, 1)
	c.imut.Lock()
	c.awaiting[id] = rc
	c.imut.Unlock()

	ok := c.send(header{0, id, messageTypePing})
	if !ok {
		return false
	}

	res, ok := <-rc
	return ok && res.err == nil
}

func (c *rawConnection) readerLoop() (err error) {
	defer func() {
		c.close(err)
	}()

	for {
		select {
		case <-c.closed:
			return ErrClosed
		default:
		}

		var hdr header
		hdr.decodeXDR(c.xr)
		if err := c.xr.Error(); err != nil {
			return err
		}
		if hdr.version != 0 {
			return fmt.Errorf("protocol error: %s: unknown message version %#x", c.id, hdr.version)
		}

		switch hdr.msgType {
		case messageTypeIndex:
			if err := c.handleIndex(); err != nil {
				return err
			}

		case messageTypeIndexUpdate:
			if err := c.handleIndexUpdate(); err != nil {
				return err
			}

		case messageTypeRequest:
			if err := c.handleRequest(hdr); err != nil {
				return err
			}

		case messageTypeResponse:
			if err := c.handleResponse(hdr); err != nil {
				return err
			}

		case messageTypePing:
			c.send(header{0, hdr.msgID, messageTypePong})

		case messageTypePong:
			c.handlePong(hdr)

		case messageTypeClusterConfig:
			if err := c.handleClusterConfig(); err != nil {
				return err
			}

		default:
			return fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType)
		}
	}
}

type incomingIndex struct {
	update bool
	id     NodeID
	repo   string
	files  []FileInfo
}

var incomingIndexes = make(chan incomingIndex, 100) // should be enough for anyone, right?

func (c *rawConnection) indexSerializerLoop() {
	// We must avoid blocking the reader loop when processing large indexes.
	// There is otherwise a potential deadlock where both sides have the model
	// locked because they are sending a large index update and can't receive
	// the large index update from the other side. But we must also ensure that
	// indexes are processed in the order they are received, hence the separate
	// routine and buffered channel.
	for ii := range incomingIndexes {
		if ii.update {
			c.receiver.IndexUpdate(ii.id, ii.repo, ii.files)
		} else {
			c.receiver.Index(ii.id, ii.repo, ii.files)
		}
	}
}

func (c *rawConnection) handleIndex() error {
	var im IndexMessage
	im.decodeXDR(c.xr)
	if err := c.xr.Error(); err != nil {
		return err
	} else {
		// We hand this (and the corresponding update, below) off to the
		// index serializer loop instead of calling the model directly,
		// to avoid blocking the read loop. There is otherwise a
		// potential deadlock where both sides have the model locked
		// because they are sending a large index update and can't
		// receive the large index update from the other side.
		incomingIndexes <- incomingIndex{false, c.id, im.Repository, im.Files}
	}
	return nil
}

func (c *rawConnection) handleIndexUpdate() error {
	var im IndexMessage
	im.decodeXDR(c.xr)
	if err := c.xr.Error(); err != nil {
		return err
	} else {
		incomingIndexes <- incomingIndex{true, c.id, im.Repository, im.Files}
	}
	return nil
}

func (c *rawConnection) handleRequest(hdr header) error {
	var req RequestMessage
	req.decodeXDR(c.xr)
	if err := c.xr.Error(); err != nil {
		return err
	}
	go c.processRequest(hdr.msgID, req)
	return nil
}

func (c *rawConnection) handleResponse(hdr header) error {
	data := c.xr.ReadBytesMax(256 * 1024) // Sufficiently larger than max expected block size

	if err := c.xr.Error(); err != nil {
		return err
	}

	go func(hdr header, err error) {
		c.imut.Lock()
		rc := c.awaiting[hdr.msgID]
		c.awaiting[hdr.msgID] = nil
		c.imut.Unlock()

		if rc != nil {
			rc <- asyncResult{data, err}
			close(rc)
		}
	}(hdr, c.xr.Error())

	return nil
}

func (c *rawConnection) handlePong(hdr header) {
	c.imut.Lock()
	if rc := c.awaiting[hdr.msgID]; rc != nil {
		go func() {
			rc <- asyncResult{}
			close(rc)
		}()
		c.awaiting[hdr.msgID] = nil
	}
	c.imut.Unlock()
}

func (c *rawConnection) handleClusterConfig() error {
	var cm ClusterConfigMessage
	cm.decodeXDR(c.xr)
	if err := c.xr.Error(); err != nil {
		return err
	} else {
		go c.receiver.ClusterConfig(c.id, cm)
	}
	return nil
}

type encodable interface {
	encodeXDR(*xdr.Writer) (int, error)
}

type encodableBytes []byte

func (e encodableBytes) encodeXDR(xw *xdr.Writer) (int, error) {
	return xw.WriteBytes(e)
}

func (c *rawConnection) send(h header, es ...encodable) bool {
	if h.msgID < 0 {
		select {
		case id := <-c.nextID:
			h.msgID = id
		case <-c.closed:
			return false
		}
	}
	msg := append([]encodable{h}, es...)

	select {
	case c.outbox <- msg:
		return true
	case <-c.closed:
		return false
	}
}

func (c *rawConnection) writerLoop() {
	var err error
	for es := range c.outbox {
		c.wmut.Lock()
		for _, e := range es {
			e.encodeXDR(c.xw)
		}

		if err = c.flush(); err != nil {
			c.wmut.Unlock()
			c.close(err)
			return
		}
		c.wmut.Unlock()
	}
}

type flusher interface {
	Flush() error
}

func (c *rawConnection) flush() error {
	if err := c.xw.Error(); err != nil {
		return err
	}

	if err := c.wb.Flush(); err != nil {
		return err
	}

	if f, ok := c.writer.(flusher); ok {
		return f.Flush()
	}

	return nil
}

func (c *rawConnection) close(err error) {
	c.imut.Lock()
	c.wmut.Lock()
	defer c.imut.Unlock()
	defer c.wmut.Unlock()

	select {
	case <-c.closed:
		return
	default:
		close(c.closed)

		for i, ch := range c.awaiting {
			if ch != nil {
				close(ch)
				c.awaiting[i] = nil
			}
		}

		c.writer.Close()
		c.reader.Close()

		go c.receiver.Close(c.id, err)
	}
}

func (c *rawConnection) idGenerator() {
	nextID := 0
	for {
		nextID = (nextID + 1) & 0xfff
		select {
		case c.nextID <- nextID:
		case <-c.closed:
			return
		}
	}
}

func (c *rawConnection) pingerLoop() {
	var rc = make(chan bool, 1)
	ticker := time.Tick(pingIdleTime / 2)
	for {
		select {
		case <-ticker:
			if d := time.Since(c.xr.LastRead()); d < pingIdleTime {
				if debug {
					l.Debugln(c.id, "ping skipped after rd", d)
				}
				continue
			}
			if d := time.Since(c.xw.LastWrite()); d < pingIdleTime {
				if debug {
					l.Debugln(c.id, "ping skipped after wr", d)
				}
				continue
			}
			go func() {
				if debug {
					l.Debugln(c.id, "ping ->")
				}
				rc <- c.ping()
			}()
			select {
			case ok := <-rc:
				if debug {
					l.Debugln(c.id, "<- pong")
				}
				if !ok {
					c.close(fmt.Errorf("ping failure"))
				}
			case <-time.After(pingTimeout):
				c.close(fmt.Errorf("ping timeout"))
			case <-c.closed:
				return
			}

		case <-c.closed:
			return
		}
	}
}

func (c *rawConnection) processRequest(msgID int, req RequestMessage) {
	data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))

	c.send(header{0, msgID, messageTypeResponse},
		encodableBytes(data))
}

type Statistics struct {
	At            time.Time
	InBytesTotal  uint64
	OutBytesTotal uint64
}

func (c *rawConnection) Statistics() Statistics {
	return Statistics{
		At:            time.Now(),
		InBytesTotal:  c.cr.Tot(),
		OutBytesTotal: c.cw.Tot(),
	}
}

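// Illustrative sketch, not part of the original file: sampling Statistics
// twice to estimate transfer rates. The counters reflect bytes on the wire,
// i.e. after compression; pc is a hypothetical Connection value.
//
//	s0 := pc.Statistics()
//	time.Sleep(10 * time.Second)
//	s1 := pc.Statistics()
//	secs := s1.At.Sub(s0.At).Seconds()
//	inRate := float64(s1.InBytesTotal-s0.InBytesTotal) / secs
//	outRate := float64(s1.OutBytesTotal-s0.OutBytesTotal) / secs
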
func IsDeleted(bits uint32) bool {
	return bits&FlagDeleted != 0
}

func IsInvalid(bits uint32) bool {
	return bits&FlagInvalid != 0
}

func IsDirectory(bits uint32) bool {
	return bits&FlagDirectory != 0
}

func HasPermissionBits(bits uint32) bool {
	return bits&FlagNoPermBits == 0
}