- // Copyright (C) 2014 The Syncthing Authors.
- //
- // This Source Code Form is subject to the terms of the Mozilla Public
- // License, v. 2.0. If a copy of the MPL was not distributed with this file,
- // You can obtain one at https://mozilla.org/MPL/2.0/.
- package db
- import (
- "bytes"
- "encoding/binary"
- "os"
- "sort"
- "strings"
- "sync/atomic"
- "github.com/syncthing/syncthing/lib/protocol"
- "github.com/syncthing/syncthing/lib/sync"
- "github.com/syndtr/goleveldb/leveldb"
- "github.com/syndtr/goleveldb/leveldb/errors"
- "github.com/syndtr/goleveldb/leveldb/iterator"
- "github.com/syndtr/goleveldb/leveldb/opt"
- "github.com/syndtr/goleveldb/leveldb/storage"
- "github.com/syndtr/goleveldb/leveldb/util"
- )
- type deletionHandler func(t readWriteTransaction, folder, device, name []byte, dbi iterator.Iterator)
- type Instance struct {
- committed int64 // this must be the first field in the struct to ensure 64 bit alignment on 32 bit platforms
- *leveldb.DB
- location string
- folderIdx *smallIndex
- deviceIdx *smallIndex
- }
- const (
- keyPrefixLen = 1
- keyFolderLen = 4 // indexed: the folder label is replaced by a uint32 via folderIdx
- keyDeviceLen = 4 // indexed: the device ID is replaced by a uint32 via deviceIdx
- keySequenceLen = 8
- keyHashLen = 32
- maxInt64 int64 = 1<<63 - 1
- )
- func Open(file string) (*Instance, error) {
- opts := &opt.Options{
- OpenFilesCacheCapacity: 100,
- WriteBuffer: 4 << 20,
- }
- db, err := leveldb.OpenFile(file, opts)
- if leveldbIsCorrupted(err) {
- db, err = leveldb.RecoverFile(file, opts)
- }
- if leveldbIsCorrupted(err) {
- // The database is corrupted, and we've tried to recover it but it
- // didn't work. At this point there isn't much to do beyond dropping
- // the database and reindexing...
- l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
- if err := os.RemoveAll(file); err != nil {
- return nil, err
- }
- db, err = leveldb.OpenFile(file, opts)
- }
- if err != nil {
- return nil, err
- }
- return newDBInstance(db, file), nil
- }
- func OpenMemory() *Instance {
- db, _ := leveldb.Open(storage.NewMemStorage(), nil)
- return newDBInstance(db, "<memory>")
- }
- func newDBInstance(db *leveldb.DB, location string) *Instance {
- i := &Instance{
- DB: db,
- location: location,
- }
- i.folderIdx = newSmallIndex(i, []byte{KeyTypeFolderIdx})
- i.deviceIdx = newSmallIndex(i, []byte{KeyTypeDeviceIdx})
- return i
- }
- // UpdateSchema performs the transitions to the current database schema version, if necessary
- func (db *Instance) UpdateSchema() {
- miscDB := NewNamespacedKV(db, string(KeyTypeMiscData))
- prevVersion, _ := miscDB.Int64("dbVersion")
- if prevVersion >= dbVersion {
- return
- }
- l.Infof("Updating database schema version from %v to %v...", prevVersion, dbVersion)
- if prevVersion == 0 {
- db.updateSchema0to1()
- }
- if prevVersion <= 1 {
- db.updateSchema1to2()
- }
- if prevVersion <= 2 {
- db.updateSchema2to3()
- }
- l.Infof("Finished updating database schema version from %v to %v", prevVersion, dbVersion)
- miscDB.PutInt64("dbVersion", dbVersion)
- }
- // Committed returns the number of items committed to the database since startup
- func (db *Instance) Committed() int64 {
- return atomic.LoadInt64(&db.committed)
- }
- // Location returns the filesystem path where the database is stored
- func (db *Instance) Location() string {
- return db.location
- }
- func (db *Instance) updateFiles(folder, device []byte, fs []protocol.FileInfo, meta *metadataTracker) {
- t := db.newReadWriteTransaction()
- defer t.close()
- var fk []byte
- var gk []byte
- for _, f := range fs {
- name := []byte(f.Name)
- fk = db.deviceKeyInto(fk, folder, device, name)
- // Get and unmarshal the file entry. If it doesn't exist or can't be
- // unmarshalled we'll add it as a new entry.
- bs, err := t.Get(fk, nil)
- var ef FileInfoTruncated
- if err == nil {
- err = ef.Unmarshal(bs)
- }
- // The Invalid flag might change without the version being bumped.
- if err == nil && ef.Version.Equal(f.Version) && ef.Invalid == f.Invalid {
- continue
- }
- devID := protocol.DeviceIDFromBytes(device)
- if err == nil {
- meta.removeFile(devID, ef)
- }
- meta.addFile(devID, f)
- t.insertFile(fk, folder, device, f)
- gk = db.globalKeyInto(gk, folder, name)
- t.updateGlobal(gk, folder, device, f, meta)
- // Write out and reuse the batch every few records, to avoid the batch
- // growing too large and allocating an unnecessary amount of memory.
- t.checkFlush()
- }
- }
- func (db *Instance) addSequences(folder []byte, fs []protocol.FileInfo) {
- t := db.newReadWriteTransaction()
- defer t.close()
- var sk []byte
- var dk []byte
- for _, f := range fs {
- sk = db.sequenceKeyInto(sk, folder, f.Sequence)
- dk = db.deviceKeyInto(dk, folder, protocol.LocalDeviceID[:], []byte(f.Name))
- t.Put(sk, dk)
- l.Debugf("adding sequence; folder=%q sequence=%v %v", folder, f.Sequence, f.Name)
- t.checkFlush()
- }
- }
- func (db *Instance) removeSequences(folder []byte, fs []protocol.FileInfo) {
- t := db.newReadWriteTransaction()
- defer t.close()
- var sk []byte
- for _, f := range fs {
- sk = db.sequenceKeyInto(sk, folder, f.Sequence)
- t.Delete(sk)
- l.Debugf("removing sequence; folder=%q sequence=%v %v", folder, f.Sequence, f.Name)
- t.checkFlush()
- }
- }
- func (db *Instance) withHave(folder, device, prefix []byte, truncate bool, fn Iterator) {
- if len(prefix) > 0 {
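- // A prefix should match the exact name as well as anything below it,
- // but not unrelated names that merely share the prefix string (e.g.
- // "foo" matches "foo" and "foo/bar" but not "foobar"): handle the
- // exact name here, then add a trailing slash for the iteration below.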
- unslashedPrefix := prefix
- if bytes.HasSuffix(prefix, []byte{'/'}) {
- unslashedPrefix = unslashedPrefix[:len(unslashedPrefix)-1]
- } else {
- prefix = append(prefix, '/')
- }
- if f, ok := db.getFileTrunc(db.deviceKey(folder, device, unslashedPrefix), true); ok && !fn(f) {
- return
- }
- }
- t := db.newReadOnlyTransaction()
- defer t.close()
- dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, prefix)[:keyPrefixLen+keyFolderLen+keyDeviceLen+len(prefix)]), nil)
- defer dbi.Release()
- for dbi.Next() {
- name := db.deviceKeyName(dbi.Key())
- if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) {
- return
- }
- // The iterator function may keep a reference to the unmarshalled
- // struct, which in turn references the buffer it was unmarshalled
- // from. dbi.Value() just returns an internal slice that it reuses, so
- // we need to copy it.
- f, err := unmarshalTrunc(append([]byte{}, dbi.Value()...), truncate)
- if err != nil {
- l.Debugln("unmarshal error:", err)
- continue
- }
- if !fn(f) {
- return
- }
- }
- }
- func (db *Instance) withHaveSequence(folder []byte, startSeq int64, fn Iterator) {
- t := db.newReadOnlyTransaction()
- defer t.close()
- dbi := t.NewIterator(&util.Range{Start: db.sequenceKey(folder, startSeq), Limit: db.sequenceKey(folder, maxInt64)}, nil)
- defer dbi.Release()
- for dbi.Next() {
- f, ok := db.getFile(dbi.Value())
- if !ok {
- l.Debugln("missing file for sequence number", db.sequenceKeySequence(dbi.Key()))
- continue
- }
- if !fn(f) {
- return
- }
- }
- }
- func (db *Instance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
- t := db.newReadWriteTransaction()
- defer t.close()
- dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, nil, nil)[:keyPrefixLen+keyFolderLen]), nil)
- defer dbi.Release()
- var gk []byte
- for dbi.Next() {
- device := db.deviceKeyDevice(dbi.Key())
- var f FileInfoTruncated
- // The iterator function may keep a reference to the unmarshalled
- // struct, which in turn references the buffer it was unmarshalled
- // from. dbi.Value() just returns an internal slice that it reuses, so
- // we need to copy it.
- err := f.Unmarshal(append([]byte{}, dbi.Value()...))
- if err != nil {
- l.Debugln("unmarshal error:", err)
- continue
- }
- switch f.Name {
- case "", ".", "..", "/": // A few obviously invalid filenames
- l.Infof("Dropping invalid filename %q from database", f.Name)
- name := []byte(f.Name)
- gk = db.globalKeyInto(gk, folder, name)
- t.removeFromGlobal(gk, folder, device, name, nil)
- t.Delete(dbi.Key())
- t.checkFlush()
- continue
- }
- if !fn(device, f) {
- return
- }
- }
- }
- func (db *Instance) getFile(key []byte) (protocol.FileInfo, bool) {
- if f, ok := db.getFileTrunc(key, false); ok {
- return f.(protocol.FileInfo), true
- }
- return protocol.FileInfo{}, false
- }
- func (db *Instance) getFileTrunc(key []byte, trunc bool) (FileIntf, bool) {
- bs, err := db.Get(key, nil)
- if err == leveldb.ErrNotFound {
- return nil, false
- }
- if err != nil {
- l.Debugln("surprise error:", err)
- return nil, false
- }
- f, err := unmarshalTrunc(bs, trunc)
- if err != nil {
- l.Debugln("unmarshal error:", err)
- return nil, false
- }
- return f, true
- }
- func (db *Instance) getGlobal(folder, file []byte, truncate bool) (FileIntf, bool) {
- t := db.newReadOnlyTransaction()
- defer t.close()
- _, _, f, ok := db.getGlobalInto(t, nil, nil, folder, file, truncate)
- return f, ok
- }
- func (db *Instance) getGlobalInto(t readOnlyTransaction, gk, dk, folder, file []byte, truncate bool) ([]byte, []byte, FileIntf, bool) {
- gk = db.globalKeyInto(gk, folder, file)
- bs, err := t.Get(gk, nil)
- if err != nil {
- return gk, dk, nil, false
- }
- vl, ok := unmarshalVersionList(bs)
- if !ok {
- return gk, dk, nil, false
- }
- dk = db.deviceKeyInto(dk, folder, vl.Versions[0].Device, file)
- if fi, ok := db.getFileTrunc(dk, truncate); ok {
- return gk, dk, fi, true
- }
- return gk, dk, nil, false
- }
- func (db *Instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) {
- if len(prefix) > 0 {
- unslashedPrefix := prefix
- if bytes.HasSuffix(prefix, []byte{'/'}) {
- unslashedPrefix = unslashedPrefix[:len(unslashedPrefix)-1]
- } else {
- prefix = append(prefix, '/')
- }
- if f, ok := db.getGlobal(folder, unslashedPrefix, truncate); ok && !fn(f) {
- return
- }
- }
- t := db.newReadOnlyTransaction()
- defer t.close()
- dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, prefix)), nil)
- defer dbi.Release()
- var fk []byte
- for dbi.Next() {
- name := db.globalKeyName(dbi.Key())
- if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) {
- return
- }
- vl, ok := unmarshalVersionList(dbi.Value())
- if !ok {
- continue
- }
- fk = db.deviceKeyInto(fk, folder, vl.Versions[0].Device, name)
- f, ok := db.getFileTrunc(fk, truncate)
- if !ok {
- continue
- }
- if !fn(f) {
- return
- }
- }
- }
- func (db *Instance) availability(folder, file []byte) []protocol.DeviceID {
- k := db.globalKey(folder, file)
- bs, err := db.Get(k, nil)
- if err == leveldb.ErrNotFound {
- return nil
- }
- if err != nil {
- l.Debugln("surprise error:", err)
- return nil
- }
- vl, ok := unmarshalVersionList(bs)
- if !ok {
- return nil
- }
- var devices []protocol.DeviceID
- for _, v := range vl.Versions {
- if !v.Version.Equal(vl.Versions[0].Version) {
- break
- }
- if v.Invalid {
- continue
- }
- n := protocol.DeviceIDFromBytes(v.Device)
- devices = append(devices, n)
- }
- return devices
- }
- func (db *Instance) withNeed(folder, device []byte, truncate bool, fn Iterator) {
- if bytes.Equal(device, protocol.LocalDeviceID[:]) {
- db.withNeedLocal(folder, truncate, fn)
- return
- }
- t := db.newReadOnlyTransaction()
- defer t.close()
- dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, nil)[:keyPrefixLen+keyFolderLen]), nil)
- defer dbi.Release()
- var fk []byte
- for dbi.Next() {
- vl, ok := unmarshalVersionList(dbi.Value())
- if !ok {
- continue
- }
- haveFV, have := vl.Get(device)
- // XXX: This marks Concurrent (i.e. conflicting) changes as
- // needs. Maybe we should do that, but it needs special
- // handling in the puller.
- if have && haveFV.Version.GreaterEqual(vl.Versions[0].Version) {
- continue
- }
- name := db.globalKeyName(dbi.Key())
- needVersion := vl.Versions[0].Version
- needDevice := protocol.DeviceIDFromBytes(vl.Versions[0].Device)
- for i := range vl.Versions {
- if !vl.Versions[i].Version.Equal(needVersion) {
- // We haven't found a valid copy of the file with the needed version.
- break
- }
- if vl.Versions[i].Invalid {
- // The file is marked invalid, don't use it.
- continue
- }
- fk = db.deviceKeyInto(fk, folder, vl.Versions[i].Device, name)
- bs, err := t.Get(fk, nil)
- if err != nil {
- l.Debugln("surprise error:", err)
- continue
- }
- gf, err := unmarshalTrunc(bs, truncate)
- if err != nil {
- l.Debugln("unmarshal error:", err)
- continue
- }
- if gf.IsDeleted() && !have {
- // We don't need deleted files that we don't have
- break
- }
- l.Debugf("need folder=%q device=%v name=%q have=%v invalid=%v haveV=%v globalV=%v globalDev=%v", folder, protocol.DeviceIDFromBytes(device), name, have, haveFV.Invalid, haveFV.Version, needVersion, needDevice)
- if !fn(gf) {
- return
- }
- // This file is handled, no need to look further in the version list
- break
- }
- }
- }
- func (db *Instance) withNeedLocal(folder []byte, truncate bool, fn Iterator) {
- t := db.newReadOnlyTransaction()
- defer t.close()
- dbi := t.NewIterator(util.BytesPrefix(db.needKey(folder, nil)[:keyPrefixLen+keyFolderLen]), nil)
- defer dbi.Release()
- var dk []byte
- var gk []byte
- var f FileIntf
- var ok bool
- for dbi.Next() {
- gk, dk, f, ok = db.getGlobalInto(t, gk, dk, folder, db.globalKeyName(dbi.Key()), truncate)
- if !ok {
- continue
- }
- if !fn(f) {
- return
- }
- }
- }
- func (db *Instance) ListFolders() []string {
- t := db.newReadOnlyTransaction()
- defer t.close()
- dbi := t.NewIterator(util.BytesPrefix([]byte{KeyTypeGlobal}), nil)
- defer dbi.Release()
- folderExists := make(map[string]bool)
- for dbi.Next() {
- folder, ok := db.globalKeyFolder(dbi.Key())
- if ok && !folderExists[string(folder)] {
- folderExists[string(folder)] = true
- }
- }
- folders := make([]string, 0, len(folderExists))
- for k := range folderExists {
- folders = append(folders, k)
- }
- sort.Strings(folders)
- return folders
- }
- func (db *Instance) dropFolder(folder []byte) {
- t := db.newReadWriteTransaction()
- defer t.close()
- for _, key := range [][]byte{
- // Remove all items related to the given folder from the device->file bucket
- db.deviceKey(folder, nil, nil)[:keyPrefixLen+keyFolderLen],
- // Remove all sequences related to the folder
- db.sequenceKey(folder, 0)[:keyPrefixLen+keyFolderLen],
- // Remove all items related to the given folder from the global bucket
- db.globalKey(folder, nil)[:keyPrefixLen+keyFolderLen],
- // Remove all needs related to the folder
- db.needKey(folder, nil)[:keyPrefixLen+keyFolderLen],
- } {
- t.deleteKeyPrefix(key)
- }
- }
- func (db *Instance) dropDeviceFolder(device, folder []byte, meta *metadataTracker) {
- t := db.newReadWriteTransaction()
- defer t.close()
- dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, nil)), nil)
- defer dbi.Release()
- var gk []byte
- for dbi.Next() {
- key := dbi.Key()
- name := db.deviceKeyName(key)
- gk = db.globalKeyInto(gk, folder, name)
- t.removeFromGlobal(gk, folder, device, name, meta)
- t.Delete(key)
- t.checkFlush()
- }
- }
- func (db *Instance) checkGlobals(folder []byte, meta *metadataTracker) {
- t := db.newReadWriteTransaction()
- defer t.close()
- dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, nil)[:keyPrefixLen+keyFolderLen]), nil)
- defer dbi.Release()
- var fk []byte
- for dbi.Next() {
- vl, ok := unmarshalVersionList(dbi.Value())
- if !ok {
- continue
- }
- // Check the global version list for consistency. An issue in previous
- // versions of goleveldb could result in reordered writes so that
- // there are global entries pointing to no longer existing files. Here
- // we find those and clear them out.
- name := db.globalKeyName(dbi.Key())
- var newVL VersionList
- for i, version := range vl.Versions {
- fk = db.deviceKeyInto(fk, folder, version.Device, name)
- _, err := t.Get(fk, nil)
- if err == leveldb.ErrNotFound {
- continue
- }
- if err != nil {
- l.Debugln("surprise error:", err)
- return
- }
- newVL.Versions = append(newVL.Versions, version)
- if i == 0 {
- if fi, ok := db.getFile(fk); ok {
- meta.addFile(globalDeviceID, fi)
- }
- }
- }
- if len(newVL.Versions) != len(vl.Versions) {
- t.Put(dbi.Key(), mustMarshal(&newVL))
- t.checkFlush()
- }
- }
- l.Debugf("db check completed for %q", folder)
- }
- func (db *Instance) updateSchema0to1() {
- t := db.newReadWriteTransaction()
- defer t.close()
- dbi := t.NewIterator(util.BytesPrefix([]byte{KeyTypeDevice}), nil)
- defer dbi.Release()
- symlinkConv := 0
- changedFolders := make(map[string]struct{})
- ignAdded := 0
- meta := newMetadataTracker() // dummy metadata tracker
- var gk []byte
- for dbi.Next() {
- folder := db.deviceKeyFolder(dbi.Key())
- device := db.deviceKeyDevice(dbi.Key())
- name := db.deviceKeyName(dbi.Key())
- // Remove files with absolute path (see #4799)
- if strings.HasPrefix(string(name), "/") {
- if _, ok := changedFolders[string(folder)]; !ok {
- changedFolders[string(folder)] = struct{}{}
- }
- gk = db.globalKeyInto(gk, folder, name)
- t.removeFromGlobal(gk, folder, device, nil, nil)
- t.Delete(dbi.Key())
- t.checkFlush()
- continue
- }
- // Change SYMLINK_FILE and SYMLINK_DIRECTORY types to the current SYMLINK
- // type (previously SYMLINK_UNKNOWN). It does this for all devices, both
- // local and remote, and does not reset delta indexes. It shouldn't really
- // matter what the symlink type is, but this cleans it up for a possible
- // future when SYMLINK_FILE and SYMLINK_DIRECTORY are no longer understood.
- var f protocol.FileInfo
- if err := f.Unmarshal(dbi.Value()); err != nil {
- // probably can't happen
- continue
- }
- if f.Type == protocol.FileInfoTypeDeprecatedSymlinkDirectory || f.Type == protocol.FileInfoTypeDeprecatedSymlinkFile {
- f.Type = protocol.FileInfoTypeSymlink
- bs, err := f.Marshal()
- if err != nil {
- panic("can't happen: " + err.Error())
- }
- t.Put(dbi.Key(), bs)
- t.checkFlush()
- symlinkConv++
- }
- // Add invalid files to global list
- if f.Invalid {
- gk = db.globalKeyInto(gk, folder, name)
- if t.updateGlobal(gk, folder, device, f, meta) {
- if _, ok := changedFolders[string(folder)]; !ok {
- changedFolders[string(folder)] = struct{}{}
- }
- ignAdded++
- }
- }
- }
- for folder := range changedFolders {
- db.dropFolderMeta([]byte(folder))
- }
- l.Infof("Updated symlink type for %d index entries and added %d invalid files to global list", symlinkConv, ignAdded)
- }
- // updateSchema1to2 introduces a sequenceKey->deviceKey bucket for local items
- // to allow iteration in sequence order (simplifies sending indexes).
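- // Illustratively, each locally held file gets one extra entry:
- // sequenceKey(folder, f.Sequence) -> deviceKey(folder, LocalDeviceID, f.Name).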
- func (db *Instance) updateSchema1to2() {
- t := db.newReadWriteTransaction()
- defer t.close()
- var sk []byte
- var dk []byte
- for _, folderStr := range db.ListFolders() {
- folder := []byte(folderStr)
- db.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(f FileIntf) bool {
- sk = db.sequenceKeyInto(sk, folder, f.SequenceNo())
- dk = db.deviceKeyInto(dk, folder, protocol.LocalDeviceID[:], []byte(f.FileName()))
- t.Put(sk, dk)
- t.checkFlush()
- return true
- })
- }
- }
- // updateSchema2to3 introduces a needKey->nil bucket for locally needed files.
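- // Illustratively, needKey(folder, name) -> nil is written for every file
- // whose locally available version differs from (or lacks) the global one.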
- func (db *Instance) updateSchema2to3() {
- t := db.newReadWriteTransaction()
- defer t.close()
- var nk []byte
- var dk []byte
- for _, folderStr := range db.ListFolders() {
- folder := []byte(folderStr)
- db.withGlobal(folder, nil, true, func(f FileIntf) bool {
- name := []byte(f.FileName())
- dk = db.deviceKeyInto(dk, folder, protocol.LocalDeviceID[:], name)
- var v protocol.Vector
- haveFile, ok := db.getFileTrunc(dk, true)
- if ok {
- v = haveFile.FileVersion()
- }
- if !need(f, ok, v) {
- return true
- }
- nk = t.db.needKeyInto(nk, folder, []byte(f.FileName()))
- t.Put(nk, nil)
- t.checkFlush()
- return true
- })
- }
- }
- // deviceKey returns a byte slice encoding the following information:
- // KeyTypeDevice (1 byte)
- // folder (4 bytes)
- // device (4 bytes)
- // name (variable size)
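- //
- // For example (index values are illustrative), with folder index 1 and
- // device index 2 the key for "foo/bar" would be:
- // [KeyTypeDevice] [0 0 0 1] [0 0 0 2] ['f' 'o' 'o' '/' 'b' 'a' 'r']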
- func (db *Instance) deviceKey(folder, device, file []byte) []byte {
- return db.deviceKeyInto(nil, folder, device, file)
- }
- func (db *Instance) deviceKeyInto(k, folder, device, file []byte) []byte {
- reqLen := keyPrefixLen + keyFolderLen + keyDeviceLen + len(file)
- k = resize(k, reqLen)
- k[0] = KeyTypeDevice
- binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
- binary.BigEndian.PutUint32(k[keyPrefixLen+keyFolderLen:], db.deviceIdx.ID(device))
- copy(k[keyPrefixLen+keyFolderLen+keyDeviceLen:], file)
- return k
- }
- // deviceKeyName returns the file name from the key
- func (db *Instance) deviceKeyName(key []byte) []byte {
- return key[keyPrefixLen+keyFolderLen+keyDeviceLen:]
- }
- // deviceKeyFolder returns the folder name from the key
- func (db *Instance) deviceKeyFolder(key []byte) []byte {
- folder, ok := db.folderIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
- if !ok {
- panic("bug: lookup of nonexistent folder ID")
- }
- return folder
- }
- // deviceKeyDevice returns the device ID from the key
- func (db *Instance) deviceKeyDevice(key []byte) []byte {
- device, ok := db.deviceIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen+keyFolderLen:]))
- if !ok {
- panic("bug: lookup of nonexistent device ID")
- }
- return device
- }
- // globalKey returns a byte slice encoding the following information:
- // KeyTypeGlobal (1 byte)
- // folder (4 bytes)
- // name (variable size)
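- //
- // For example (folder index illustrative), "foo/bar" in folder index 1 is
- // keyed as [KeyTypeGlobal] [0 0 0 1] ['f' 'o' 'o' '/' 'b' 'a' 'r'].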
- func (db *Instance) globalKey(folder, file []byte) []byte {
- return db.globalKeyInto(nil, folder, file)
- }
- func (db *Instance) globalKeyInto(gk, folder, file []byte) []byte {
- reqLen := keyPrefixLen + keyFolderLen + len(file)
- gk = resize(gk, reqLen)
- gk[0] = KeyTypeGlobal
- binary.BigEndian.PutUint32(gk[keyPrefixLen:], db.folderIdx.ID(folder))
- copy(gk[keyPrefixLen+keyFolderLen:], file)
- return gk
- }
- // globalKeyName returns the filename from the key
- func (db *Instance) globalKeyName(key []byte) []byte {
- return key[keyPrefixLen+keyFolderLen:]
- }
- // globalKeyFolder returns the folder name from the key
- func (db *Instance) globalKeyFolder(key []byte) ([]byte, bool) {
- return db.folderIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
- }
- // needKey is a globalKey with a different prefix
- func (db *Instance) needKey(folder, file []byte) []byte {
- return db.needKeyInto(nil, folder, file)
- }
- func (db *Instance) needKeyInto(k, folder, file []byte) []byte {
- k = db.globalKeyInto(k, folder, file)
- k[0] = KeyTypeNeed
- return k
- }
- // sequenceKey returns a byte slice encoding the following information:
- // KeyTypeSequence (1 byte)
- // folder (4 bytes)
- // sequence number (8 bytes)
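- //
- // For example (folder index illustrative), sequence 7 in folder index 1 is
- // keyed as [KeyTypeSequence] [0 0 0 1] [0 0 0 0 0 0 0 7].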
- func (db *Instance) sequenceKey(folder []byte, seq int64) []byte {
- return db.sequenceKeyInto(nil, folder, seq)
- }
- func (db *Instance) sequenceKeyInto(k []byte, folder []byte, seq int64) []byte {
- reqLen := keyPrefixLen + keyFolderLen + keySequenceLen
- k = resize(k, reqLen)
- k[0] = KeyTypeSequence
- binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
- binary.BigEndian.PutUint64(k[keyPrefixLen+keyFolderLen:], uint64(seq))
- return k
- }
- // sequenceKeySequence returns the sequence number from the key
- func (db *Instance) sequenceKeySequence(key []byte) int64 {
- return int64(binary.BigEndian.Uint64(key[keyPrefixLen+keyFolderLen:]))
- }
- func (db *Instance) getIndexID(device, folder []byte) protocol.IndexID {
- key := db.indexIDKey(device, folder)
- cur, err := db.Get(key, nil)
- if err != nil {
- return 0
- }
- var id protocol.IndexID
- if err := id.Unmarshal(cur); err != nil {
- return 0
- }
- return id
- }
- func (db *Instance) setIndexID(device, folder []byte, id protocol.IndexID) {
- key := db.indexIDKey(device, folder)
- bs, _ := id.Marshal() // marshalling can't fail
- if err := db.Put(key, bs, nil); err != nil {
- panic("storing index ID: " + err.Error())
- }
- }
- func (db *Instance) indexIDKey(device, folder []byte) []byte {
- k := make([]byte, keyPrefixLen+keyDeviceLen+keyFolderLen)
- k[0] = KeyTypeIndexID
- binary.BigEndian.PutUint32(k[keyPrefixLen:], db.deviceIdx.ID(device))
- binary.BigEndian.PutUint32(k[keyPrefixLen+keyDeviceLen:], db.folderIdx.ID(folder))
- return k
- }
- func (db *Instance) indexIDDevice(key []byte) []byte {
- device, ok := db.deviceIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
- if !ok {
- // unknown device index; shouldn't happen
- return nil
- }
- return device
- }
- func (db *Instance) mtimesKey(folder []byte) []byte {
- prefix := make([]byte, 5) // key type + 4 bytes folder idx number
- prefix[0] = KeyTypeVirtualMtime
- binary.BigEndian.PutUint32(prefix[1:], db.folderIdx.ID(folder))
- return prefix
- }
- func (db *Instance) folderMetaKey(folder []byte) []byte {
- prefix := make([]byte, 5) // key type + 4 bytes folder idx number
- prefix[0] = KeyTypeFolderMeta
- binary.BigEndian.PutUint32(prefix[1:], db.folderIdx.ID(folder))
- return prefix
- }
- // DropLocalDeltaIndexIDs removes all index IDs for the local device ID from
- // the database. This will cause a full index transmission on the next
- // connection.
- func (db *Instance) DropLocalDeltaIndexIDs() {
- db.dropDeltaIndexIDs(true)
- }
- // DropRemoteDeltaIndexIDs removes all index IDs for the other devices than
- // the local one from the database. This will cause them to send us a full
- // index on the next connection.
- func (db *Instance) DropRemoteDeltaIndexIDs() {
- db.dropDeltaIndexIDs(false)
- }
- func (db *Instance) dropDeltaIndexIDs(local bool) {
- t := db.newReadWriteTransaction()
- defer t.close()
- dbi := t.NewIterator(util.BytesPrefix([]byte{KeyTypeIndexID}), nil)
- defer dbi.Release()
- for dbi.Next() {
- device := db.indexIDDevice(dbi.Key())
- if bytes.Equal(device, protocol.LocalDeviceID[:]) == local {
- t.Delete(dbi.Key())
- }
- }
- }
- func (db *Instance) dropMtimes(folder []byte) {
- db.dropPrefix(db.mtimesKey(folder))
- }
- func (db *Instance) dropFolderMeta(folder []byte) {
- db.dropPrefix(db.folderMetaKey(folder))
- }
- func (db *Instance) dropPrefix(prefix []byte) {
- t := db.newReadWriteTransaction()
- defer t.close()
- dbi := t.NewIterator(util.BytesPrefix(prefix), nil)
- defer dbi.Release()
- for dbi.Next() {
- t.Delete(dbi.Key())
- }
- }
- func unmarshalTrunc(bs []byte, truncate bool) (FileIntf, error) {
- if truncate {
- var tf FileInfoTruncated
- err := tf.Unmarshal(bs)
- return tf, err
- }
- var tf protocol.FileInfo
- err := tf.Unmarshal(bs)
- return tf, err
- }
- func unmarshalVersionList(data []byte) (VersionList, bool) {
- var vl VersionList
- if err := vl.Unmarshal(data); err != nil {
- l.Debugln("unmarshal error:", err)
- return VersionList{}, false
- }
- if len(vl.Versions) == 0 {
- l.Debugln("empty version list")
- return VersionList{}, false
- }
- return vl, true
- }
- // A "better" version of leveldb's errors.IsCorrupted.
- func leveldbIsCorrupted(err error) bool {
- switch {
- case err == nil:
- return false
- case errors.IsCorrupted(err):
- return true
- case strings.Contains(err.Error(), "corrupted"):
- return true
- }
- return false
- }
- // A smallIndex is an in memory bidirectional []byte to uint32 map. It gives
- // fast lookups in both directions and persists to the database. Don't use for
- // storing more items than fit comfortably in RAM.
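- //
- // For example, on an empty index ID([]byte("default")) allocates ID 0,
- // persists it under the prefix, and Val(0) then returns []byte("default").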
- type smallIndex struct {
- db *Instance
- prefix []byte
- id2val map[uint32]string
- val2id map[string]uint32
- nextID uint32
- mut sync.Mutex
- }
- func newSmallIndex(db *Instance, prefix []byte) *smallIndex {
- idx := &smallIndex{
- db: db,
- prefix: prefix,
- id2val: make(map[uint32]string),
- val2id: make(map[string]uint32),
- mut: sync.NewMutex(),
- }
- idx.load()
- return idx
- }
- // load iterates over the prefix space in the database and populates the in
- // memory maps.
- func (i *smallIndex) load() {
- tr := i.db.newReadOnlyTransaction()
- it := tr.NewIterator(util.BytesPrefix(i.prefix), nil)
- for it.Next() {
- val := string(it.Value())
- id := binary.BigEndian.Uint32(it.Key()[len(i.prefix):])
- i.id2val[id] = val
- i.val2id[val] = id
- if id >= i.nextID {
- i.nextID = id + 1
- }
- }
- it.Release()
- tr.close()
- }
- // ID returns the index number for the given byte slice, allocating a new one
- // and persisting this to the database if necessary.
- func (i *smallIndex) ID(val []byte) uint32 {
- i.mut.Lock()
- // intentionally avoiding defer here as we want this call to be as fast as
- // possible in the general case (folder ID already exists). The map lookup
- // with the conversion of []byte to string is compiler optimized to not
- // copy the []byte, which is why we don't assign it to a temp variable
- // here.
- if id, ok := i.val2id[string(val)]; ok {
- i.mut.Unlock()
- return id
- }
- id := i.nextID
- i.nextID++
- valStr := string(val)
- i.val2id[valStr] = id
- i.id2val[id] = valStr
- key := make([]byte, len(i.prefix)+8) // prefix plus 8 bytes, of which the first four hold the uint32 ID
- copy(key, i.prefix)
- binary.BigEndian.PutUint32(key[len(i.prefix):], id)
- i.db.Put(key, val, nil)
- i.mut.Unlock()
- return id
- }
- // Val returns the value for the given index number, or (nil, false) if there
- // is no such index number.
- func (i *smallIndex) Val(id uint32) ([]byte, bool) {
- i.mut.Lock()
- val, ok := i.id2val[id]
- i.mut.Unlock()
- if !ok {
- return nil, false
- }
- return []byte(val), true
- }
- // resize returns a byte slice of length reqLen, reusing k if possible
- func resize(k []byte, reqLen int) []byte {
- if cap(k) < reqLen {
- return make([]byte, reqLen)
- }
- return k[:reqLen]
- }