@@ -30,9 +30,10 @@ type DB struct {
// Need 64-bit alignment.
seq uint64

+ // Session.
s *session

- // MemDB
+ // MemDB.
memMu sync.RWMutex
mem *memdb.DB
frozenMem *memdb.DB
@@ -42,11 +43,11 @@ type DB struct {
frozenJournalFile storage.File
frozenSeq uint64

- // Snapshot
+ // Snapshot.
snapsMu sync.Mutex
snapsRoot snapshotElement

- // Write
+ // Write.
writeC chan *Batch
writeMergedC chan bool
writeLockC chan struct{}
@@ -54,7 +55,7 @@ type DB struct {
journalC chan *Batch
journalAckC chan error

- // Compaction
+ // Compaction.
tcompCmdC chan cCmd
tcompPauseC chan chan<- struct{}
tcompTriggerC chan struct{}
@@ -64,7 +65,7 @@ type DB struct {
compErrSetC chan error
compStats [kNumLevels]cStats

- // Close
+ // Close.
closeW sync.WaitGroup
closeC chan struct{}
closed uint32
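The "Need 64-bit alignment." comment exists because Go's sync/atomic requires 64-bit operands to be 64-bit aligned on 32-bit platforms, and the first word of an allocated struct is guaranteed to be aligned; keeping seq at the top of DB satisfies that. A minimal, self-contained sketch of the same layout rule (the struct and fields besides seq are illustrative, not part of this package):

package main

import (
    "fmt"
    "sync/atomic"
)

// example keeps the atomically accessed uint64 as the first field so it is
// 64-bit aligned even on 32-bit targets, mirroring DB.seq above.
type example struct {
    seq  uint64 // accessed via sync/atomic; keep first for alignment
    name string // illustrative extra field
}

func main() {
    e := &example{}
    atomic.AddUint64(&e.seq, 1)
    fmt.Println(atomic.LoadUint64(&e.seq)) // prints 1
}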
@@ -135,9 +136,10 @@ func openDB(s *session) (*DB, error) {
// detected in the DB. Corrupted DB can be recovered with Recover
// function.
//
+// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
-func Open(p storage.Storage, o *opt.Options) (db *DB, err error) {
- s, err := newSession(p, o)
+func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+ s, err := newSession(stor, o)
if err != nil {
return
}
@@ -177,6 +179,7 @@ func Open(p storage.Storage, o *opt.Options) (db *DB, err error) {
// detected in the DB. Corrupted DB can be recovered with Recover
// function.
//
+// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
func OpenFile(path string, o *opt.Options) (db *DB, err error) {
stor, err := storage.OpenFile(path)
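As a usage sketch of the documented contract (goroutine-safe handle, must be closed after use); the path and key are illustrative and Put is assumed from the package's public API:

package main

import (
    "log"

    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    // Open (or create) a file-system backed database.
    db, err := leveldb.OpenFile("/tmp/exampledb", nil)
    if err != nil {
        log.Fatal(err)
    }
    // The returned *DB is goroutine-safe and must be closed after use.
    defer db.Close()

    if err := db.Put([]byte("key"), []byte("value"), nil); err != nil {
        log.Fatal(err)
    }
    value, err := db.Get([]byte("key"), nil)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("key = %s", value)
}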
@@ -197,9 +200,10 @@ func OpenFile(path string, o *opt.Options) (db *DB, err error) {
// The DB must already exist or it will return an error.
// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
//
+// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
-func Recover(p storage.Storage, o *opt.Options) (db *DB, err error) {
- s, err := newSession(p, o)
+func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+ s, err := newSession(stor, o)
if err != nil {
return
}
@@ -225,6 +229,7 @@ func Recover(p storage.Storage, o *opt.Options) (db *DB, err error) {
// RecoverFile uses standard file-system backed storage implementation as described
// in the leveldb/storage package.
//
+// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
stor, err := storage.OpenFile(path)
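A hedged sketch of how Recover/RecoverFile is typically used as a fallback when a normal open fails because of a missing or corrupted manifest (the path is illustrative):

package main

import (
    "log"

    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    path := "/tmp/exampledb"
    db, err := leveldb.OpenFile(path, nil)
    if err != nil {
        // Rebuild the manifest from the journals and tables still on disk.
        log.Printf("open failed (%v), attempting recovery", err)
        db, err = leveldb.RecoverFile(path, nil)
        if err != nil {
            log.Fatal(err)
        }
    }
    defer db.Close()
}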
@@ -241,12 +246,13 @@ func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
}

func recoverTable(s *session, o *opt.Options) error {
- ff0, err := s.getFiles(storage.TypeTable)
+ // Get all tables and sort them by file number.
+ tableFiles_, err := s.getFiles(storage.TypeTable)
if err != nil {
return err
}
- ff1 := files(ff0)
- ff1.sort()
+ tableFiles := files(tableFiles_)
+ tableFiles.sort()

var mSeq uint64
var good, corrupted int
@@ -264,8 +270,9 @@ func recoverTable(s *session, o *opt.Options) error {
tmp = nil
}
}()
+
+ // Copy entries.
tw := table.NewWriter(writer, o)
- // Copy records.
for iter.Next() {
key := iter.Key()
if validIkey(key) {
@@ -297,20 +304,23 @@ func recoverTable(s *session, o *opt.Options) error {
return err
}
defer reader.Close()
+
// Get file size.
size, err := reader.Seek(0, 2)
if err != nil {
return err
}
+
var tSeq uint64
var tgood, tcorrupted, blockerr int
- var min, max []byte
+ var imin, imax []byte
tr := table.NewReader(reader, size, nil, o)
iter := tr.NewIterator(nil, nil)
iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) {
s.logf("table@recovery found error @%d %q", file.Num(), err)
blockerr++
})
+
// Scan the table.
for iter.Next() {
key := iter.Key()
@@ -323,16 +333,17 @@ func recoverTable(s *session, o *opt.Options) error {
if seq > tSeq {
tSeq = seq
}
- if min == nil {
- min = append([]byte{}, key...)
+ if imin == nil {
+ imin = append([]byte{}, key...)
}
- max = append(max[:0], key...)
+ imax = append(imax[:0], key...)
}
if err := iter.Error(); err != nil {
iter.Release()
return err
}
iter.Release()
+
if tgood > 0 {
if tcorrupted > 0 || blockerr > 0 {
// Rebuild the table.
@@ -353,7 +364,7 @@ func recoverTable(s *session, o *opt.Options) error {
mSeq = tSeq
}
// Add table to level 0.
- rec.addTable(0, file.Num(), uint64(size), min, max)
+ rec.addTable(0, file.Num(), uint64(size), imin, imax)
s.logf("table@recovery recovered @%d N·%d C·%d B·%d S·%d Q·%d", file.Num(), tgood, tcorrupted, blockerr, size, tSeq)
} else {
s.logf("table@recovery unrecoverable @%d C·%d B·%d S·%d", file.Num(), tcorrupted, blockerr, size)
@@ -364,41 +375,56 @@ func recoverTable(s *session, o *opt.Options) error {

return nil
}
+
// Recover all tables.
- if len(ff1) > 0 {
- s.logf("table@recovery F·%d", len(ff1))
- s.markFileNum(ff1[len(ff1)-1].Num())
- for _, file := range ff1 {
+ if len(tableFiles) > 0 {
+ s.logf("table@recovery F·%d", len(tableFiles))
+
+ // Mark file number as used.
+ s.markFileNum(tableFiles[len(tableFiles)-1].Num())
+
+ for _, file := range tableFiles {
if err := recoverTable(file); err != nil {
return err
}
}
- s.logf("table@recovery recovered F·%d N·%d C·%d Q·%d", len(ff1), good, corrupted, mSeq)
+
+ s.logf("table@recovery recovered F·%d N·%d C·%d Q·%d", len(tableFiles), good, corrupted, mSeq)
}
+
// Set sequence number.
rec.setSeq(mSeq + 1)
+
// Create new manifest.
if err := s.create(); err != nil {
return err
}
+
// Commit.
return s.commit(rec)
}

-func (d *DB) recoverJournal() error {
- s := d.s
-
- ff0, err := s.getFiles(storage.TypeJournal)
+func (db *DB) recoverJournal() error {
+ // Get all journals and sort them by file number.
+ journalFiles_, err := db.s.getFiles(storage.TypeJournal)
if err != nil {
return err
}
- ff1 := files(ff0)
- ff1.sort()
- ff2 := make([]storage.File, 0, len(ff1))
- for _, file := range ff1 {
- if file.Num() >= s.stJournalNum || file.Num() == s.stPrevJournalNum {
- s.markFileNum(file.Num())
- ff2 = append(ff2, file)
+ journalFiles := files(journalFiles_)
+ journalFiles.sort()
+
+ // Discard older journals.
+ prev := -1
+ for i, file := range journalFiles {
+ if file.Num() >= db.s.stJournalNum {
+ if prev >= 0 {
+ i--
+ journalFiles[i] = journalFiles[prev]
+ }
+ journalFiles = journalFiles[i:]
+ break
+ } else if file.Num() == db.s.stPrevJournalNum {
+ prev = i
}
}

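The new selection loop keeps every journal whose number is at or above stJournalNum, preceded by the journal matching stPrevJournalNum when one exists. A self-contained illustration of the same slice manipulation on plain file numbers (keepJournals and the sample numbers are hypothetical, for exposition only):

package main

import "fmt"

// keepJournals mimics the loop above: keep all entries >= stJournalNum and,
// if present, move the entry equal to stPrevJournalNum directly before them.
func keepJournals(nums []int, stJournalNum, stPrevJournalNum int) []int {
    prev := -1
    for i, n := range nums {
        if n >= stJournalNum {
            if prev >= 0 {
                i--
                nums[i] = nums[prev]
            }
            nums = nums[i:]
            break
        } else if n == stPrevJournalNum {
            prev = i
        }
    }
    return nums
}

func main() {
    // Journal 3 is the recorded "previous" journal, 5 is the current one.
    fmt.Println(keepJournals([]int{1, 2, 3, 5, 6}, 5, 3)) // [3 5 6]
    fmt.Println(keepJournals([]int{1, 2, 5, 6}, 5, 0))    // [5 6]
}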
@@ -406,38 +432,43 @@ func (d *DB) recoverJournal() error {
var of storage.File
var mem *memdb.DB
batch := new(Batch)
- cm := newCMem(s)
+ cm := newCMem(db.s)
buf := new(util.Buffer)
// Options.
- strict := s.o.GetStrict(opt.StrictJournal)
- checksum := s.o.GetStrict(opt.StrictJournalChecksum)
- writeBuffer := s.o.GetWriteBuffer()
+ strict := db.s.o.GetStrict(opt.StrictJournal)
+ checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
+ writeBuffer := db.s.o.GetWriteBuffer()
recoverJournal := func(file storage.File) error {
- s.logf("journal@recovery recovering @%d", file.Num())
+ db.logf("journal@recovery recovering @%d", file.Num())
reader, err := file.Open()
if err != nil {
return err
}
defer reader.Close()
+
+ // Create/reset journal reader instance.
if jr == nil {
- jr = journal.NewReader(reader, dropper{s, file}, strict, checksum)
+ jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
} else {
- jr.Reset(reader, dropper{s, file}, strict, checksum)
+ jr.Reset(reader, dropper{db.s, file}, strict, checksum)
}
+
+ // Flush memdb and remove obsolete journal file.
if of != nil {
if mem.Len() > 0 {
if err := cm.flush(mem, 0); err != nil {
return err
}
}
- if err := cm.commit(file.Num(), d.seq); err != nil {
+ if err := cm.commit(file.Num(), db.seq); err != nil {
return err
}
cm.reset()
of.Remove()
of = nil
}
- // Reset memdb.
+
+ // Replay journal to memdb.
mem.Reset()
for {
r, err := jr.Next()
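The recovery behavior above is driven by the opt package: the Strict flags decide whether journal corruption aborts recovery, and the write buffer size decides when a replayed memdb is flushed to a table. A hedged sketch of supplying those options at open time (the exact Options fields and values are assumptions to check against the opt package in use):

package main

import (
    "log"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
    o := &opt.Options{
        // Flush a memdb to an sstable once it grows past ~8 MiB.
        WriteBuffer: 8 << 20,
        // Treat journal corruption and checksum mismatches as hard errors.
        Strict: opt.StrictJournal | opt.StrictJournalChecksum,
    }
    db, err := leveldb.OpenFile("/tmp/exampledb", o)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
}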
@@ -447,6 +478,7 @@ func (d *DB) recoverJournal() error {
}
return err
}
+
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if strict {
@@ -460,28 +492,37 @@ func (d *DB) recoverJournal() error {
if err := batch.memReplay(mem); err != nil {
return err
}
- d.seq = batch.seq + uint64(batch.len())
+
+ // Save sequence number.
+ db.seq = batch.seq + uint64(batch.len())
+
+ // Flush it if large enough.
if mem.Size() >= writeBuffer {
- // Large enough, flush it.
if err := cm.flush(mem, 0); err != nil {
return err
}
- // Reset memdb.
mem.Reset()
}
}
+
of = file
return nil
}
+
// Recover all journals.
- if len(ff2) > 0 {
- s.logf("journal@recovery F·%d", len(ff2))
- mem = memdb.New(s.icmp, writeBuffer)
- for _, file := range ff2 {
+ if len(journalFiles) > 0 {
+ db.logf("journal@recovery F·%d", len(journalFiles))
+
+ // Mark file number as used.
+ db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())
+
+ mem = memdb.New(db.s.icmp, writeBuffer)
+ for _, file := range journalFiles {
if err := recoverJournal(file); err != nil {
return err
}
}
+
// Flush the last journal.
if mem.Len() > 0 {
if err := cm.flush(mem, 0); err != nil {
@@ -489,35 +530,43 @@ func (d *DB) recoverJournal() error {
}
}
}
+
// Create a new journal.
- if _, err := d.newMem(0); err != nil {
+ if _, err := db.newMem(0); err != nil {
return err
}
+
// Commit.
- if err := cm.commit(d.journalFile.Num(), d.seq); err != nil {
+ if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
+ // Close journal.
+ if db.journal != nil {
+ db.journal.Close()
+ db.journalWriter.Close()
+ }
return err
}
- // Remove the last journal.
+
+ // Remove the last obsolete journal file.
if of != nil {
of.Remove()
}
+
return nil
}

-func (d *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
- s := d.s
-
+func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
ikey := newIKey(key, seq, tSeek)

- em, fm := d.getMems()
+ em, fm := db.getMems()
for _, m := range [...]*memdb.DB{em, fm} {
if m == nil {
continue
}
+
mk, mv, me := m.Find(ikey)
if me == nil {
ukey, _, t, ok := parseIkey(mk)
- if ok && s.icmp.uCompare(ukey, key) == 0 {
+ if ok && db.s.icmp.uCompare(ukey, key) == 0 {
if t == tDel {
return nil, ErrNotFound
}
@@ -528,12 +577,12 @@ func (d *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err
}
}

- v := s.version()
+ v := db.s.version()
value, cSched, err := v.get(ikey, ro)
v.release()
if cSched {
// Trigger table compaction.
- d.compTrigger(d.tcompTriggerC)
+ db.compTrigger(db.tcompTriggerC)
}
return
}
@@ -543,13 +592,13 @@ func (d *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err
//
// The caller should not modify the contents of the returned slice, but
// it is safe to modify the contents of the argument after Get returns.
-func (d *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
- err = d.ok()
+func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ err = db.ok()
if err != nil {
return
}

- return d.get(key, d.getSeq(), ro)
+ return db.get(key, db.getSeq(), ro)
}

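A usage sketch of Get and the ErrNotFound behavior implemented above (key and path are illustrative):

package main

import (
    "log"

    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    db, err := leveldb.OpenFile("/tmp/exampledb", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    value, err := db.Get([]byte("some-key"), nil)
    switch err {
    case nil:
        log.Printf("found: %s", value)
    case leveldb.ErrNotFound:
        // Deleted or never-written keys report ErrNotFound, not an empty value.
        log.Print("not found")
    default:
        log.Fatal(err)
    }
}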
// NewIterator returns an iterator for the latest snapshot of the
@@ -568,14 +617,14 @@ func (d *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
// The iterator must be released after use, by calling Release method.
//
// Also read Iterator documentation of the leveldb/iterator package.
-func (d *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
- if err := d.ok(); err != nil {
+func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ if err := db.ok(); err != nil {
return iterator.NewEmptyIterator(err)
}

- p := d.newSnapshot()
- defer p.Release()
- return p.NewIterator(slice, ro)
+ snap := db.newSnapshot()
+ defer snap.Release()
+ return snap.NewIterator(slice, ro)
}

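A usage sketch of the iterator contract described above: iterate, release, then check the error (the key range is illustrative):

package main

import (
    "log"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
    db, err := leveldb.OpenFile("/tmp/exampledb", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Iterate over keys in ["a", "z"); the iterator sees a consistent snapshot.
    iter := db.NewIterator(&util.Range{Start: []byte("a"), Limit: []byte("z")}, nil)
    for iter.Next() {
        log.Printf("%s = %s", iter.Key(), iter.Value())
    }
    iter.Release()
    if err := iter.Error(); err != nil {
        log.Fatal(err)
    }
}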
// GetSnapshot returns the latest snapshot of the underlying DB. A snapshot
@@ -583,12 +632,12 @@ func (d *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterat
// contents of the snapshot are guaranteed to be consistent.
//
// The snapshot must be released after use, by calling Release method.
-func (d *DB) GetSnapshot() (*Snapshot, error) {
- if err := d.ok(); err != nil {
+func (db *DB) GetSnapshot() (*Snapshot, error) {
+ if err := db.ok(); err != nil {
return nil, err
}

- return d.newSnapshot(), nil
+ return db.newSnapshot(), nil
}

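A sketch of the snapshot lifecycle documented above; Snapshot.Get is assumed from the package's public API:

package main

import (
    "log"

    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    db, err := leveldb.OpenFile("/tmp/exampledb", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Freeze a consistent view of the DB; later writes are not visible through it.
    snap, err := db.GetSnapshot()
    if err != nil {
        log.Fatal(err)
    }
    defer snap.Release()

    value, err := snap.Get([]byte("key"), nil)
    if err != nil && err != leveldb.ErrNotFound {
        log.Fatal(err)
    }
    log.Printf("value: %s", value)
}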
// GetProperty returns value of the given property name.
@@ -600,8 +649,8 @@ func (d *DB) GetSnapshot() (*Snapshot, error) {
// Returns statistics of the underlying DB.
// leveldb.sstables
// Returns sstables list for each level.
-func (d *DB) GetProperty(name string) (value string, err error) {
- err = d.ok()
+func (db *DB) GetProperty(name string) (value string, err error) {
+ err = db.ok()
if err != nil {
return
}
@@ -610,11 +659,9 @@ func (d *DB) GetProperty(name string) (value string, err error) {
if !strings.HasPrefix(name, prefix) {
return "", errors.New("leveldb: GetProperty: unknown property: " + name)
}
-
p := name[len(prefix):]

- s := d.s
- v := s.version()
+ v := db.s.version()
defer v.release()

switch {
@@ -631,20 +678,20 @@ func (d *DB) GetProperty(name string) (value string, err error) {
value = "Compactions\n" +
" Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" +
"-------+------------+---------------+---------------+---------------+---------------\n"
- for level, tt := range v.tables {
- duration, read, write := d.compStats[level].get()
- if len(tt) == 0 && duration == 0 {
+ for level, tables := range v.tables {
+ duration, read, write := db.compStats[level].get()
+ if len(tables) == 0 && duration == 0 {
continue
}
value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
- level, len(tt), float64(tt.size())/1048576.0, duration.Seconds(),
+ level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
float64(read)/1048576.0, float64(write)/1048576.0)
}
case p == "sstables":
- for level, tt := range v.tables {
+ for level, tables := range v.tables {
value += fmt.Sprintf("--- level %d ---\n", level)
- for _, t := range tt {
- value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.min, t.max)
+ for _, t := range tables {
+ value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax)
}
}
default:
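The property names documented above can be queried directly; a short sketch (output formats follow the stats and sstables cases in the switch):

package main

import (
    "fmt"
    "log"

    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    db, err := leveldb.OpenFile("/tmp/exampledb", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Per-level compaction statistics, formatted by the "stats" case above.
    stats, err := db.GetProperty("leveldb.stats")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(stats)

    // One line per table: fileNum:size[imin .. imax], grouped by level.
    sstables, err := db.GetProperty("leveldb.sstables")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(sstables)
}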
@@ -660,23 +707,23 @@ func (d *DB) GetProperty(name string) (value string, err error) {
// data compresses by a factor of ten, the returned sizes will be one-tenth
// the size of the corresponding user data size.
// The results may not include the sizes of recently written data.
-func (d *DB) SizeOf(ranges []util.Range) (Sizes, error) {
- if err := d.ok(); err != nil {
+func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
+ if err := db.ok(); err != nil {
return nil, err
}

- v := d.s.version()
+ v := db.s.version()
defer v.release()

sizes := make(Sizes, 0, len(ranges))
for _, r := range ranges {
- min := newIKey(r.Start, kMaxSeq, tSeek)
- max := newIKey(r.Limit, kMaxSeq, tSeek)
- start, err := v.offsetOf(min)
+ imin := newIKey(r.Start, kMaxSeq, tSeek)
+ imax := newIKey(r.Limit, kMaxSeq, tSeek)
+ start, err := v.offsetOf(imin)
if err != nil {
return nil, err
}
- limit, err := v.offsetOf(max)
+ limit, err := v.offsetOf(imax)
if err != nil {
return nil, err
}
@@ -690,61 +737,63 @@ func (d *DB) SizeOf(ranges []util.Range) (Sizes, error) {
return sizes, nil
}

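A sketch of querying approximate on-disk sizes for a key range (the range bounds are illustrative):

package main

import (
    "log"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
    db, err := leveldb.OpenFile("/tmp/exampledb", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Approximate (compressed) size of the data covering keys in ["a", "m").
    sizes, err := db.SizeOf([]util.Range{
        {Start: []byte("a"), Limit: []byte("m")},
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("approximate size: %d bytes", sizes[0])
}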
-// Close closes the DB. This will also releases any outstanding snapshot.
+// Close closes the DB. This will also release any outstanding snapshot and
+// abort any in-flight compaction.
//
// It is not safe to close a DB until all outstanding iterators are released.
// It is valid to call Close multiple times. Other methods should not be
// called after the DB has been closed.
-func (d *DB) Close() error {
- if !d.setClosed() {
+func (db *DB) Close() error {
+ if !db.setClosed() {
return ErrClosed
}

- s := d.s
start := time.Now()
- s.log("db@close closing")
+ db.log("db@close closing")

// Clear the finalizer.
- runtime.SetFinalizer(d, nil)
+ runtime.SetFinalizer(db, nil)

// Get compaction error.
var err error
select {
- case err = <-d.compErrC:
+ case err = <-db.compErrC:
default:
}

- close(d.closeC)
+ close(db.closeC)

// Wait for the close WaitGroup.
- d.closeW.Wait()
+ db.closeW.Wait()

// Close journal.
- if d.journal != nil {
- d.journal.Close()
- d.journalWriter.Close()
+ db.writeLockC <- struct{}{}
+ if db.journal != nil {
+ db.journal.Close()
+ db.journalWriter.Close()
}

// Close session.
- s.close()
- s.logf("db@close done T·%v", time.Since(start))
- s.release()
+ db.s.close()
+ db.logf("db@close done T·%v", time.Since(start))
+ db.s.release()

- if d.closer != nil {
- if err1 := d.closer.Close(); err == nil {
+ if db.closer != nil {
+ if err1 := db.closer.Close(); err == nil {
err = err1
}
}

- d.s = nil
- d.mem = nil
- d.frozenMem = nil
- d.journal = nil
- d.journalWriter = nil
- d.journalFile = nil
- d.frozenJournalFile = nil
- d.snapsRoot = snapshotElement{}
- d.closer = nil
+ // NIL'ing pointers.
+ db.s = nil
+ db.mem = nil
+ db.frozenMem = nil
+ db.journal = nil
+ db.journalWriter = nil
+ db.journalFile = nil
+ db.frozenJournalFile = nil
+ db.snapsRoot = snapshotElement{}
+ db.closer = nil

return err
}
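A sketch of the shutdown ordering the doc comment asks for: release iterators and snapshots first, then Close, which also releases outstanding snapshots and aborts in-flight compactions:

package main

import (
    "log"

    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    db, err := leveldb.OpenFile("/tmp/exampledb", nil)
    if err != nil {
        log.Fatal(err)
    }

    iter := db.NewIterator(nil, nil)
    for iter.Next() {
        _ = iter.Key() // use the entry
    }
    // Release the iterator before closing; Close is not safe while iterators
    // are still outstanding.
    iter.Release()

    if err := db.Close(); err != nil {
        log.Fatal(err)
    }
}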