leveldb_open.go

// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package backend

import (
	"fmt"
	"os"
	"strconv"
	"strings"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

const (
	dbMaxOpenFiles = 100

	// A large database is > 200 MiB. It's a mostly arbitrary value, but
	// it's also the case that each file is 2 MiB by default and when we
	// have dbMaxOpenFiles of them we will need to start thrashing fds.
	// Switching to large database settings causes larger files to be used
	// when compacting, reducing the number.
	dbLargeThreshold = dbMaxOpenFiles * (2 << MiB)

	KiB = 10
	MiB = 20
)
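
// Note that KiB and MiB are shift amounts, not byte counts: 64 << MiB is
// 64 * 2^20 bytes, and dbLargeThreshold works out to 100 * (2 << 20),
// i.e. 200 MiB.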

// OpenLevelDB attempts to open the database at the given location, and runs
// recovery on it if opening fails. Worst case, if recovery is not possible,
// the database is erased and created from scratch.
func OpenLevelDB(location string, tuning Tuning) (Backend, error) {
	opts := optsFor(location, tuning)
	ldb, err := open(location, opts)
	if err != nil {
		return nil, err
	}
	return &leveldbBackend{ldb: ldb}, nil
}

// OpenLevelDBRO attempts to open the database at the given location, read
// only.
func OpenLevelDBRO(location string) (Backend, error) {
	opts := &opt.Options{
		OpenFilesCacheCapacity: dbMaxOpenFiles,
		ReadOnly:               true,
	}
	ldb, err := open(location, opts)
	if err != nil {
		return nil, err
	}
	return &leveldbBackend{ldb: ldb}, nil
}

// OpenLevelDBMemory returns a Backend referencing an in-memory database.
func OpenLevelDBMemory() Backend {
	ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)
	return &leveldbBackend{ldb: ldb}
}
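
// A minimal usage sketch (hypothetical path; assumes the returned Backend
// exposes Close, which is defined elsewhere in this package):
//
//	db, err := OpenLevelDB("/path/to/index.db", TuningAuto)
//	if err != nil {
//		// handle the error
//	}
//	defer db.Close()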

// optsFor returns the database options to use when opening a database with
// the given location and tuning. Settings can be overridden by debug
// environment variables.
func optsFor(location string, tuning Tuning) *opt.Options {
	large := false
	switch tuning {
	case TuningLarge:
		large = true
	case TuningAuto:
		large = dbIsLarge(location)
	}

	var (
		// Set defaults used for small databases.
		defaultBlockCacheCapacity            = 0 // 0 means let leveldb use default
		defaultBlockSize                     = 0
		defaultCompactionTableSize           = 0
		defaultCompactionTableSizeMultiplier = 0
		defaultWriteBuffer                   = 16 << MiB                      // increased from leveldb default of 4 MiB
		defaultCompactionL0Trigger           = opt.DefaultCompactionL0Trigger // explicit because we use it as base for other stuff
	)

	if large {
		// Change the parameters for better throughput at the price of some
		// RAM and larger files. This results in larger batches of writes
		// and compaction at a lower frequency.
		l.Infoln("Using large-database tuning")

		defaultBlockCacheCapacity = 64 << MiB
		defaultBlockSize = 64 << KiB
		defaultCompactionTableSize = 16 << MiB
		defaultCompactionTableSizeMultiplier = 20 // 2.0 after division by ten
		defaultWriteBuffer = 64 << MiB
		defaultCompactionL0Trigger = 8 // number of L0 files
	}

	opts := &opt.Options{
		BlockCacheCapacity:            debugEnvValue("BlockCacheCapacity", defaultBlockCacheCapacity),
		BlockCacheEvictRemoved:        debugEnvValue("BlockCacheEvictRemoved", 0) != 0,
		BlockRestartInterval:          debugEnvValue("BlockRestartInterval", 0),
		BlockSize:                     debugEnvValue("BlockSize", defaultBlockSize),
		CompactionExpandLimitFactor:   debugEnvValue("CompactionExpandLimitFactor", 0),
		CompactionGPOverlapsFactor:    debugEnvValue("CompactionGPOverlapsFactor", 0),
		CompactionL0Trigger:           debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger),
		CompactionSourceLimitFactor:   debugEnvValue("CompactionSourceLimitFactor", 0),
		CompactionTableSize:           debugEnvValue("CompactionTableSize", defaultCompactionTableSize),
		CompactionTableSizeMultiplier: float64(debugEnvValue("CompactionTableSizeMultiplier", defaultCompactionTableSizeMultiplier)) / 10.0,
		CompactionTotalSize:           debugEnvValue("CompactionTotalSize", 0),
		CompactionTotalSizeMultiplier: float64(debugEnvValue("CompactionTotalSizeMultiplier", 0)) / 10.0,
		DisableBufferPool:             debugEnvValue("DisableBufferPool", 0) != 0,
		DisableBlockCache:             debugEnvValue("DisableBlockCache", 0) != 0,
		DisableCompactionBackoff:      debugEnvValue("DisableCompactionBackoff", 0) != 0,
		DisableLargeBatchTransaction:  debugEnvValue("DisableLargeBatchTransaction", 0) != 0,
		NoSync:                        debugEnvValue("NoSync", 0) != 0,
		NoWriteMerge:                  debugEnvValue("NoWriteMerge", 0) != 0,
		OpenFilesCacheCapacity:        debugEnvValue("OpenFilesCacheCapacity", dbMaxOpenFiles),
		WriteBuffer:                   debugEnvValue("WriteBuffer", defaultWriteBuffer),
		// The write slowdown and pause can be overridden, but even if they
		// are not and the compaction trigger is overridden we need to
		// adjust so that we don't pause writes for L0 compaction before we
		// even *start* L0 compaction...
		WriteL0SlowdownTrigger: debugEnvValue("WriteL0SlowdownTrigger", 2*debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger)),
		WriteL0PauseTrigger:    debugEnvValue("WriteL0PauseTrigger", 3*debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger)),
	}

	return opts
}
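
// open opens the database at location, running leveldb recovery if the
// plain open fails on corruption and, as a last resort, deleting the
// database and starting over from scratch. Setting
// STDEBUG_CompactEverything=1 additionally forces a full compaction once
// the database is open.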
func open(location string, opts *opt.Options) (*leveldb.DB, error) {
	db, err := leveldb.OpenFile(location, opts)
	if leveldbIsCorrupted(err) {
		db, err = leveldb.RecoverFile(location, opts)
	}
	if leveldbIsCorrupted(err) {
		// The database is corrupted, and we've tried to recover it but it
		// didn't work. At this point there isn't much to do beyond dropping
		// the database and reindexing...
		l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
		if err := os.RemoveAll(location); err != nil {
			return nil, errorSuggestion{err, "failed to delete corrupted database"}
		}
		db, err = leveldb.OpenFile(location, opts)
	}
	if err != nil {
		return nil, errorSuggestion{err, "is another instance of Syncthing running?"}
	}

	if debugEnvValue("CompactEverything", 0) != 0 {
		if err := db.CompactRange(util.Range{}); err != nil {
			l.Warnln("Compacting database:", err)
		}
	}

	return db, nil
}
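
// debugEnvValue returns the integer value of the environment variable
// STDEBUG_<key>, or def when it is unset or not a valid integer. For
// example, STDEBUG_WriteBuffer=33554432 (32 MiB) overrides the write
// buffer size chosen in optsFor.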
func debugEnvValue(key string, def int) int {
	v, err := strconv.ParseInt(os.Getenv("STDEBUG_"+key), 10, 63)
	if err != nil {
		return def
	}
	return int(v)
}

// A "better" version of leveldb's errors.IsCorrupted.
func leveldbIsCorrupted(err error) bool {
	switch {
	case err == nil:
		return false
	case errors.IsCorrupted(err):
		return true
	case strings.Contains(err.Error(), "corrupted"):
		return true
	}
	return false
}

// dbIsLarge returns whether the estimated size of the database at location
// is large enough to warrant optimization for large databases.
func dbIsLarge(location string) bool {
	if ^uint(0)>>63 == 0 {
		// uint is 32 bits wide, so we're compiled for a 32 bit
		// architecture. We've seen trouble with large settings there.
		// (https://forum.syncthing.net/t/many-small-ldb-files-with-database-tuning/13842)
		return false
	}

	dir, err := os.Open(location)
	if err != nil {
		return false
	}
	defer dir.Close()

	fis, err := dir.Readdir(-1)
	if err != nil {
		return false
	}

	var size int64
	for _, fi := range fis {
		if fi.Name() == "LOG" {
			// don't count the size of the LOG file
			continue
		}
		size += fi.Size()
	}

	return size > dbLargeThreshold
}

type errorSuggestion struct {
	inner      error
	suggestion string
}

func (e errorSuggestion) Error() string {
	return fmt.Sprintf("%s (%s)", e.inner.Error(), e.suggestion)
}
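
// An errorSuggestion renders as "<inner error> (<suggestion>)"; for
// instance, failing to lock an already-open database might read something
// like "resource temporarily unavailable (is another instance of Syncthing
// running?)". The exact inner message is OS and situation dependent.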