leveldb_open.go

// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package backend

import (
    "fmt"
    "os"
    "strconv"
    "strings"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/errors"
    "github.com/syndtr/goleveldb/leveldb/opt"
    "github.com/syndtr/goleveldb/leveldb/storage"
    "github.com/syndtr/goleveldb/leveldb/util"
)

const (
    dbMaxOpenFiles = 100

    // A large database is > 200 MiB. It's a mostly arbitrary value, but
    // it's also the case that each file is 2 MiB by default and when we
    // have dbMaxOpenFiles of them we will need to start thrashing file
    // descriptors. Switching to large database settings causes larger
    // files to be used when compacting, reducing the number.
    dbLargeThreshold = dbMaxOpenFiles * (2 << MiB)

    // Binary prefix shift amounts: 1<<KiB == 1024, 1<<MiB == 1024*1024.
    KiB = 10
    MiB = 20
)

// OpenLevelDB attempts to open the database at the given location, and runs
// recovery on it if opening fails. Worst case, if recovery is not possible,
// the database is erased and created from scratch.
func OpenLevelDB(location string, tuning Tuning) (Backend, error) {
    opts := optsFor(location, tuning)
    ldb, err := open(location, opts)
    if err != nil {
        return nil, err
    }
    return newLeveldbBackend(ldb, location), nil
}

// OpenLevelDBAuto is OpenLevelDB with TuningAuto tuning.
func OpenLevelDBAuto(location string) (Backend, error) {
    return OpenLevelDB(location, TuningAuto)
}

// OpenLevelDBRO attempts to open the database at the given location, read
// only.
func OpenLevelDBRO(location string) (Backend, error) {
    opts := &opt.Options{
        OpenFilesCacheCapacity: dbMaxOpenFiles,
        ReadOnly:               true,
    }
    ldb, err := open(location, opts)
    if err != nil {
        return nil, err
    }
    return newLeveldbBackend(ldb, location), nil
}

// OpenLevelDBMemory returns a new Backend referencing an in-memory database.
func OpenLevelDBMemory() Backend {
    // The error is ignored: opening a fresh in-memory storage does not
    // fail in practice.
    ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)
    return newLeveldbBackend(ldb, "")
}

// optsFor returns the database options to use when opening a database with
// the given location and tuning. Settings can be overridden by debug
// environment variables.
func optsFor(location string, tuning Tuning) *opt.Options {
    large := false
    switch tuning {
    case TuningLarge:
        large = true
    case TuningAuto:
        large = dbIsLarge(location)
    }

    var (
        // Set defaults used for small databases.
        defaultBlockCacheCapacity            = 0 // 0 means let leveldb use default
        defaultBlockSize                     = 0
        defaultCompactionTableSize           = 0
        defaultCompactionTableSizeMultiplier = 0
        defaultWriteBuffer                   = 16 << MiB                      // increased from leveldb default of 4 MiB
        defaultCompactionL0Trigger           = opt.DefaultCompactionL0Trigger // explicit because we use it as base for other stuff
    )

    if large {
        // Change the parameters for better throughput at the price of some
        // RAM and larger files. This results in larger batches of writes
        // and compaction at a lower frequency.
        l.Infoln("Using large-database tuning")

        defaultBlockCacheCapacity = 64 << MiB
        defaultBlockSize = 64 << KiB
        defaultCompactionTableSize = 16 << MiB
        defaultCompactionTableSizeMultiplier = 20 // 2.0 after division by ten
        defaultWriteBuffer = 64 << MiB
        defaultCompactionL0Trigger = 8 // number of l0 files
    }
    opts := &opt.Options{
        BlockCacheCapacity:          debugEnvValue("BlockCacheCapacity", defaultBlockCacheCapacity),
        BlockCacheEvictRemoved:      debugEnvValue("BlockCacheEvictRemoved", 0) != 0,
        BlockRestartInterval:        debugEnvValue("BlockRestartInterval", 0),
        BlockSize:                   debugEnvValue("BlockSize", defaultBlockSize),
        CompactionExpandLimitFactor: debugEnvValue("CompactionExpandLimitFactor", 0),
        CompactionGPOverlapsFactor:  debugEnvValue("CompactionGPOverlapsFactor", 0),
        CompactionL0Trigger:         debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger),
        CompactionSourceLimitFactor: debugEnvValue("CompactionSourceLimitFactor", 0),
        CompactionTableSize:         debugEnvValue("CompactionTableSize", defaultCompactionTableSize),
        // The multipliers are specified in tenths in the environment, so
        // e.g. 20 becomes 2.0 after the division below.
        CompactionTableSizeMultiplier: float64(debugEnvValue("CompactionTableSizeMultiplier", defaultCompactionTableSizeMultiplier)) / 10.0,
        CompactionTotalSize:           debugEnvValue("CompactionTotalSize", 0),
        CompactionTotalSizeMultiplier: float64(debugEnvValue("CompactionTotalSizeMultiplier", 0)) / 10.0,
        DisableBufferPool:             debugEnvValue("DisableBufferPool", 0) != 0,
        DisableBlockCache:             debugEnvValue("DisableBlockCache", 0) != 0,
        DisableCompactionBackoff:      debugEnvValue("DisableCompactionBackoff", 0) != 0,
        DisableLargeBatchTransaction:  debugEnvValue("DisableLargeBatchTransaction", 0) != 0,
        NoSync:                        debugEnvValue("NoSync", 0) != 0,
        NoWriteMerge:                  debugEnvValue("NoWriteMerge", 0) != 0,
        OpenFilesCacheCapacity:        debugEnvValue("OpenFilesCacheCapacity", dbMaxOpenFiles),
        WriteBuffer:                   debugEnvValue("WriteBuffer", defaultWriteBuffer),
        // The write slowdown and pause can be overridden, but even if they
        // are not and the compaction trigger is overridden we need to
        // adjust so that we don't pause writes for L0 compaction before we
        // even *start* L0 compaction...
        WriteL0SlowdownTrigger: debugEnvValue("WriteL0SlowdownTrigger", 2*debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger)),
        WriteL0PauseTrigger:    debugEnvValue("WriteL0PauseTrigger", 3*debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger)),
    }

    return opts
}

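// open opens the database at location, attempting leveldb's recovery if the
// first open reports corruption and, as a last resort, deleting the database
// and starting over from scratch.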
func open(location string, opts *opt.Options) (*leveldb.DB, error) {
    db, err := leveldb.OpenFile(location, opts)
    if leveldbIsCorrupted(err) {
        db, err = leveldb.RecoverFile(location, opts)
    }
    if leveldbIsCorrupted(err) {
        // The database is corrupted, and we've tried to recover it but it
        // didn't work. At this point there isn't much to do beyond dropping
        // the database and reindexing...
        l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
        if err := os.RemoveAll(location); err != nil {
            return nil, &errorSuggestion{err, "failed to delete corrupted database"}
        }
        db, err = leveldb.OpenFile(location, opts)
    }
    if err != nil {
        return nil, &errorSuggestion{err, "is another instance of Syncthing running?"}
    }

    if debugEnvValue("CompactEverything", 0) != 0 {
        if err := db.CompactRange(util.Range{}); err != nil {
            l.Warnln("Compacting database:", err)
        }
    }

    return db, nil
}

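// debugEnvValue returns the integer value of the environment variable
// STDEBUG_<key>, or def when the variable is unset or not a valid integer.
// For example, STDEBUG_WriteBuffer=33554432 overrides the write buffer size.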
func debugEnvValue(key string, def int) int {
    v, err := strconv.ParseInt(os.Getenv("STDEBUG_"+key), 10, 63)
    if err != nil {
        return def
    }
    return int(v)
}

  156. // A "better" version of leveldb's errors.IsCorrupted.
  157. func leveldbIsCorrupted(err error) bool {
  158. switch {
  159. case err == nil:
  160. return false
  161. case errors.IsCorrupted(err):
  162. return true
  163. case strings.Contains(err.Error(), "corrupted"):
  164. return true
  165. }
  166. return false
  167. }
// dbIsLarge returns whether the estimated size of the database at location
// is large enough to warrant optimization for large databases.
func dbIsLarge(location string) bool {
    if ^uint(0)>>63 == 0 {
        // We're compiled for a 32 bit architecture. We've seen trouble with
        // large settings there.
        // (https://forum.syncthing.net/t/many-small-ldb-files-with-database-tuning/13842)
        return false
    }

    dir, err := os.Open(location)
    if err != nil {
        return false
    }
    defer dir.Close()

    fis, err := dir.Readdir(-1)
    if err != nil {
        return false
    }

    var size int64
    for _, fi := range fis {
        if fi.Name() == "LOG" {
            // don't count the size of the log file
            continue
        }
        size += fi.Size()
    }

    return size > dbLargeThreshold
}

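// errorSuggestion wraps an error together with a human readable hint about
// what might be done about it.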
type errorSuggestion struct {
    inner      error
    suggestion string
}

func (e *errorSuggestion) Error() string {
    return fmt.Sprintf("%s (%s)", e.inner.Error(), e.suggestion)
}