blockmap.go

// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package db

import (
	"encoding/binary"
	"fmt"

	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

var blockFinder *BlockFinder

const maxBatchSize = 256 << 10

type BlockMap struct {
	db     *Instance
	folder uint32
}

func NewBlockMap(db *Instance, folder uint32) *BlockMap {
	return &BlockMap{
		db:     db,
		folder: folder,
	}
}
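
// The sketch below is an editorial addition, not part of the original file:
// it shows one way a caller could construct a BlockMap for a folder known by
// name, using the same folderIdx lookup that BlockFinder.Iterate performs
// further down in this file.
func exampleNewBlockMap(db *Instance, folder string) *BlockMap {
	// folderIdx.ID maps the folder name to the numeric folder ID used in block keys.
	return NewBlockMap(db, db.folderIdx.ID([]byte(folder)))
}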

// Add files to the block map, ignoring any deleted or invalid files.
func (m *BlockMap) Add(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	buf := make([]byte, 4)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		if file.IsDirectory() || file.IsDeleted() || file.IsInvalid() {
			continue
		}

		for i, block := range file.Blocks {
			binary.BigEndian.PutUint32(buf, uint32(i))
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Put(key, buf)
		}
	}
	return m.db.Write(batch, nil)
}

// Update block map state, removing any deleted or invalid files.
func (m *BlockMap) Update(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	buf := make([]byte, 4)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		if file.IsDirectory() {
			continue
		}

		if file.IsDeleted() || file.IsInvalid() {
			for _, block := range file.Blocks {
				key = m.blockKeyInto(key, block.Hash, file.Name)
				batch.Delete(key)
			}
			continue
		}

		for i, block := range file.Blocks {
			binary.BigEndian.PutUint32(buf, uint32(i))
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Put(key, buf)
		}
	}
	return m.db.Write(batch, nil)
}

// Discard block map state, removing the given files.
func (m *BlockMap) Discard(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		for _, block := range file.Blocks {
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Delete(key)
		}
	}
	return m.db.Write(batch, nil)
}
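
// The sketch below is an editorial addition, not part of the original file:
// it illustrates the intended call pattern of the three methods above. Add is
// for files that are new to the block map, Update for files whose metadata
// changed (which also drops entries for files that became deleted or
// invalid), and Discard for files whose entries should be removed outright.
func exampleMaintainBlockMap(m *BlockMap, added, changed, removed []protocol.FileInfo) error {
	if err := m.Add(added); err != nil {
		return err
	}
	if err := m.Update(changed); err != nil {
		return err
	}
	return m.Discard(removed)
}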

// Drop block map, removing all entries related to this block map from the db.
func (m *BlockMap) Drop() error {
	batch := new(leveldb.Batch)
	iter := m.db.NewIterator(util.BytesPrefix(m.blockKeyInto(nil, nil, "")[:keyPrefixLen+keyFolderLen]), nil)
	defer iter.Release()
	for iter.Next() {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		batch.Delete(iter.Key())
	}
	if iter.Error() != nil {
		return iter.Error()
	}
	return m.db.Write(batch, nil)
}

func (m *BlockMap) blockKeyInto(o, hash []byte, file string) []byte {
	return blockKeyInto(o, hash, m.folder, file)
}

type BlockFinder struct {
	db *Instance
}

func NewBlockFinder(db *Instance) *BlockFinder {
	if blockFinder != nil {
		return blockFinder
	}

	f := &BlockFinder{
		db: db,
	}

	return f
}

func (f *BlockFinder) String() string {
	return fmt.Sprintf("BlockFinder@%p", f)
}

// Iterate calls the given iterator function for every block matching the
// given hash in the given folders. The iterator function should return true
// when it is satisfied with a block (which stops the iteration) or false to
// keep iterating. Iterate itself returns whether a satisfying block was
// eventually found.
func (f *BlockFinder) Iterate(folders []string, hash []byte, iterFn func(string, string, int32) bool) bool {
	var key []byte
	for _, folder := range folders {
		folderID := f.db.folderIdx.ID([]byte(folder))
		key = blockKeyInto(key, hash, folderID, "")
		iter := f.db.NewIterator(util.BytesPrefix(key), nil)
		defer iter.Release()

		for iter.Next() && iter.Error() == nil {
			file := blockKeyName(iter.Key())
			index := int32(binary.BigEndian.Uint32(iter.Value()))
			if iterFn(folder, osutil.NativeFilename(file), index) {
				return true
			}
		}
	}
	return false
}
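
// The sketch below is an editorial addition, not part of the original file:
// a caller that wants the first known location of a block with a given hash
// could use Iterate like this, stopping at the first match by returning true
// from the callback.
func exampleFindBlock(f *BlockFinder, folders []string, hash []byte) (folder, file string, index int32, found bool) {
	f.Iterate(folders, hash, func(fld, name string, idx int32) bool {
		folder, file, index, found = fld, name, idx, true
		return true // satisfied with the first matching block
	})
	return folder, file, index, found
}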

// Fix repairs incorrect blockmap entries, removing the old entry and
// replacing it with a new entry for the given block.
func (f *BlockFinder) Fix(folder, file string, index int32, oldHash, newHash []byte) error {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(index))

	folderID := f.db.folderIdx.ID([]byte(folder))
	batch := new(leveldb.Batch)
	batch.Delete(blockKeyInto(nil, oldHash, folderID, file))
	batch.Put(blockKeyInto(nil, newHash, folderID, file), buf)
	return f.db.Write(batch, nil)
}
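
// The sketch below is an editorial addition, not part of the original file:
// if a block located via Iterate turns out to hash differently on disk than
// the value recorded in the block map, Fix replaces the stale entry so later
// lookups point at the hash that was actually observed.
func exampleRepairStaleEntry(f *BlockFinder, folder, file string, index int32, recordedHash, observedHash []byte) error {
	return f.Fix(folder, file, index, recordedHash, observedHash)
}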

// blockKeyInto returns a byte slice (reusing o when it has enough capacity)
// encoding the following information:
//
//	KeyTypeBlock (1 byte)
//	folder (4 bytes)
//	block hash (32 bytes)
//	file name (variable size)
func blockKeyInto(o, hash []byte, folder uint32, file string) []byte {
	reqLen := keyPrefixLen + keyFolderLen + keyHashLen + len(file)
	if cap(o) < reqLen {
		o = make([]byte, reqLen)
	} else {
		o = o[:reqLen]
	}
	o[0] = KeyTypeBlock
	binary.BigEndian.PutUint32(o[keyPrefixLen:], folder)
	copy(o[keyPrefixLen+keyFolderLen:], hash)
	copy(o[keyPrefixLen+keyFolderLen+keyHashLen:], []byte(file))
	return o
}

// blockKeyName returns the file name from the block key.
func blockKeyName(data []byte) string {
	if len(data) < keyPrefixLen+keyFolderLen+keyHashLen+1 {
		panic("Incorrect key length")
	}
	if data[0] != KeyTypeBlock {
		panic("Incorrect key type")
	}

	file := string(data[keyPrefixLen+keyFolderLen+keyHashLen:])
	return file
}
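
// The sketch below is an editorial addition, not part of the original file:
// it decodes the remaining fixed-width fields of the key layout documented
// above blockKeyInto, mirroring blockKeyName but also returning the folder ID
// and block hash.
func exampleDecodeBlockKey(key []byte) (folder uint32, hash []byte, file string) {
	folder = binary.BigEndian.Uint32(key[keyPrefixLen : keyPrefixLen+keyFolderLen])
	hash = key[keyPrefixLen+keyFolderLen : keyPrefixLen+keyFolderLen+keyHashLen]
	file = string(key[keyPrefixLen+keyFolderLen+keyHashLen:])
	return folder, hash, file
}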