blockmap.go

// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// Package db provides a set type to track local/remote files with newness
// checks. We must do a certain amount of normalization in here. We will get
// fed paths with either native or wire-format separators and encodings
// depending on who calls us. We transform paths to wire-format (NFC and
// slashes) on the way to the database, and transform to native format
// (varying separator and encoding) on the way back out.
package db

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)
// blockFinder is the package-level BlockFinder singleton returned by
// NewBlockFinder once it has been set.
var blockFinder *BlockFinder

// maxBatchSize bounds how large a batch may grow before it is flushed to the
// database mid-operation.
const maxBatchSize = 256 << 10
// BlockMap maps block hashes to the files and block indexes in which they
// occur, within a single folder.
type BlockMap struct {
	db     *Instance
	folder string
}

func NewBlockMap(db *Instance, folder string) *BlockMap {
	return &BlockMap{
		db:     db,
		folder: folder,
	}
}
// Add adds files to the block map, ignoring any deleted or invalid files.
func (m *BlockMap) Add(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	buf := make([]byte, 4)
	var key []byte
	for _, file := range files {
		// Flush the batch periodically so it doesn't grow without bound.
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		if file.IsDirectory() || file.IsDeleted() || file.IsInvalid() {
			continue
		}

		for i, block := range file.Blocks {
			binary.BigEndian.PutUint32(buf, uint32(i))
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Put(key, buf)
		}
	}
	return m.db.Write(batch, nil)
}
// Update updates the block map, adding blocks for changed files and removing
// entries for any deleted or invalid files.
func (m *BlockMap) Update(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	buf := make([]byte, 4)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		if file.IsDirectory() {
			continue
		}

		if file.IsDeleted() || file.IsInvalid() {
			for _, block := range file.Blocks {
				key = m.blockKeyInto(key, block.Hash, file.Name)
				batch.Delete(key)
			}
			continue
		}

		for i, block := range file.Blocks {
			binary.BigEndian.PutUint32(buf, uint32(i))
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Put(key, buf)
		}
	}
	return m.db.Write(batch, nil)
}
// Discard removes the blocks of the given files from the block map.
func (m *BlockMap) Discard(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		for _, block := range file.Blocks {
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Delete(key)
		}
	}
	return m.db.Write(batch, nil)
}
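
// exampleBlockMapUsage is a minimal usage sketch showing how a caller might
// maintain a BlockMap across a scan cycle; the parameter names and the
// "default" folder ID are hypothetical, not taken from the callers of this
// package.
func exampleBlockMapUsage(ldb *Instance, scanned, changed, removed []protocol.FileInfo) error {
	bm := NewBlockMap(ldb, "default")
	if err := bm.Add(scanned); err != nil { // index blocks found in an initial scan
		return err
	}
	if err := bm.Update(changed); err != nil { // refresh entries for files that changed
		return err
	}
	return bm.Discard(removed) // drop entries for files that were removed
}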
// Drop removes all entries related to this block map from the database.
func (m *BlockMap) Drop() error {
	batch := new(leveldb.Batch)
	// Iterate over every key carrying this folder's prefix: the key type byte
	// followed by the zero-padded 64-byte folder name.
	iter := m.db.NewIterator(util.BytesPrefix(m.blockKeyInto(nil, nil, "")[:1+64]), nil)
	defer iter.Release()
	for iter.Next() {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		batch.Delete(iter.Key())
	}
	if iter.Error() != nil {
		return iter.Error()
	}
	return m.db.Write(batch, nil)
}
func (m *BlockMap) blockKeyInto(o, hash []byte, file string) []byte {
	return blockKeyInto(o, hash, m.folder, file)
}
// BlockFinder looks up the files and block indexes in which a given block
// hash occurs.
type BlockFinder struct {
	db *Instance
}

// NewBlockFinder returns the package-level BlockFinder if one has been set,
// otherwise a new BlockFinder backed by the given database.
func NewBlockFinder(db *Instance) *BlockFinder {
	if blockFinder != nil {
		return blockFinder
	}

	f := &BlockFinder{
		db: db,
	}

	return f
}

func (f *BlockFinder) String() string {
	return fmt.Sprintf("BlockFinder@%p", f)
}
// Iterate takes an iterator function which is called for each block matching
// the given hash, in each of the given folders. The iterator function should
// return true when it is satisfied with a block (which stops the iteration)
// or false to keep iterating. Iterate returns true if a satisfying block was
// eventually found, false otherwise.
func (f *BlockFinder) Iterate(folders []string, hash []byte, iterFn func(string, string, int32) bool) bool {
	var key []byte
	for _, folder := range folders {
		key = blockKeyInto(key, hash, folder, "")
		iter := f.db.NewIterator(util.BytesPrefix(key), nil)
		defer iter.Release()

		for iter.Next() && iter.Error() == nil {
			folder, file := fromBlockKey(iter.Key())
			index := int32(binary.BigEndian.Uint32(iter.Value()))
			if iterFn(folder, osutil.NativeFilename(file), index) {
				return true
			}
		}
	}
	return false
}
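
// exampleFindBlock is a minimal sketch of one way to use Iterate: stop at the
// first folder/file/index that holds the wanted block. The function and
// parameter names are hypothetical.
func exampleFindBlock(f *BlockFinder, folders []string, hash []byte) (folder, file string, index int32, found bool) {
	found = f.Iterate(folders, hash, func(fld, name string, idx int32) bool {
		folder, file, index = fld, name, idx
		return true // satisfied with the first match, stop iterating
	})
	return
}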
// Fix repairs an incorrect block map entry, removing the old entry and
// replacing it with a new entry for the given block.
func (f *BlockFinder) Fix(folder, file string, index int32, oldHash, newHash []byte) error {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(index))

	batch := new(leveldb.Batch)
	batch.Delete(blockKeyInto(nil, oldHash, folder, file))
	batch.Put(blockKeyInto(nil, newHash, folder, file), buf)
	return f.db.Write(batch, nil)
}
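
// exampleRepairEntry is a minimal sketch of when Fix might be invoked: after
// re-verifying a block on disk and finding that its actual hash differs from
// the recorded one. The function and parameter names are hypothetical.
func exampleRepairEntry(f *BlockFinder, folder, file string, index int32, recorded, actual []byte) error {
	if bytes.Equal(recorded, actual) {
		return nil // the entry is already correct
	}
	return f.Fix(folder, file, index, recorded, actual)
}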
// blockKeyInto encodes into o (reallocating it if needed) a byte slice with
// the following layout:
// keyTypeBlock (1 byte)
// folder (64 bytes)
// block hash (32 bytes)
// file name (variable size)
func blockKeyInto(o, hash []byte, folder, file string) []byte {
	reqLen := 1 + 64 + 32 + len(file)
	if cap(o) < reqLen {
		o = make([]byte, reqLen)
	} else {
		o = o[:reqLen]
	}
	o[0] = KeyTypeBlock
	copy(o[1:], []byte(folder))
	copy(o[1+64:], []byte(hash))
	copy(o[1+64+32:], []byte(file))
	return o
}
// fromBlockKey returns the folder and file name encoded in the given block
// key.
func fromBlockKey(data []byte) (string, string) {
	if len(data) < 1+64+32+1 {
		panic("Incorrect key length")
	}
	if data[0] != KeyTypeBlock {
		panic("Incorrect key type")
	}

	file := string(data[1+64+32:])

	slice := data[1 : 1+64]
	izero := bytes.IndexByte(slice, 0)
	if izero > -1 {
		return string(slice[:izero]), file
	}
	return string(slice), file
}
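
// exampleKeyRoundTrip is a minimal sketch demonstrating that the key layout
// round-trips through fromBlockKey; the folder ID and file name are
// hypothetical, and the folder ID is assumed to fit within the 64-byte field.
func exampleKeyRoundTrip() {
	hash := make([]byte, 32) // stands in for a 32-byte block hash
	key := blockKeyInto(nil, hash, "default", "docs/readme.txt")
	folder, file := fromBlockKey(key)
	fmt.Println(folder, file) // prints "default docs/readme.txt"
}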