@@ -39,7 +39,7 @@ func max(x, y int) int {
 }
 
 type block struct {
-	tr *Reader
+	bpool *util.BufferPool
 	data []byte
 	restartsLen int
 	restartsOffset int
@@ -47,14 +47,14 @@ type block struct {
 	checksum bool
 }
 
-func (b *block) seek(rstart, rlimit int, key []byte) (index, offset int, err error) {
+func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) {
 	index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
 		offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):]))
 		offset += 1 // shared always zero, since this is a restart point
 		v1, n1 := binary.Uvarint(b.data[offset:]) // key length
 		_, n2 := binary.Uvarint(b.data[offset+n1:]) // value length
 		m := offset + n1 + n2
-		return b.tr.cmp.Compare(b.data[m:m+int(v1)], key) > 0
+		return cmp.Compare(b.data[m:m+int(v1)], key) > 0
 	}) + rstart - 1
 	if index < rstart {
 		// The smallest key is greater-than key sought.
@@ -96,48 +96,9 @@ func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error)
 	return
 }
 
-func (b *block) newIterator(slice *util.Range, inclLimit bool, cache util.Releaser) *blockIter {
-	bi := &blockIter{
-		block: b,
-		cache: cache,
-		// Valid key should never be nil.
-		key: make([]byte, 0),
-		dir: dirSOI,
-		riStart: 0,
-		riLimit: b.restartsLen,
-		offsetStart: 0,
-		offsetRealStart: 0,
-		offsetLimit: b.restartsOffset,
-	}
-	if slice != nil {
-		if slice.Start != nil {
-			if bi.Seek(slice.Start) {
-				bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
-				bi.offsetStart = b.restartOffset(bi.riStart)
-				bi.offsetRealStart = bi.prevOffset
-			} else {
-				bi.riStart = b.restartsLen
-				bi.offsetStart = b.restartsOffset
-				bi.offsetRealStart = b.restartsOffset
-			}
-		}
-		if slice.Limit != nil {
-			if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
-				bi.offsetLimit = bi.prevOffset
-				bi.riLimit = bi.restartIndex + 1
-			}
-		}
-		bi.reset()
-		if bi.offsetStart > bi.offsetLimit {
-			bi.sErr(errors.New("leveldb/table: Reader: invalid slice range"))
-		}
-	}
-	return bi
-}
-
 func (b *block) Release() {
-	b.tr.bpool.Put(b.data)
-	b.tr = nil
+	b.bpool.Put(b.data)
+	b.bpool = nil
 	b.data = nil
 }
 
@@ -152,10 +113,12 @@ const (
 )
 
 type blockIter struct {
-	block *block
-	cache, releaser util.Releaser
-	key, value []byte
-	offset int
+	tr *Reader
+	block *block
+	blockReleaser util.Releaser
+	releaser util.Releaser
+	key, value []byte
+	offset int
 	// Previous offset, only filled by Next.
 	prevOffset int
 	prevNode []int
@@ -252,7 +215,7 @@ func (i *blockIter) Seek(key []byte) bool {
 		return false
 	}
 
-	ri, offset, err := i.block.seek(i.riStart, i.riLimit, key)
+	ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key)
 	if err != nil {
 		i.sErr(err)
 		return false
@@ -263,7 +226,7 @@ func (i *blockIter) Seek(key []byte) bool {
 		i.dir = dirForward
 	}
 	for i.Next() {
-		if i.block.tr.cmp.Compare(i.key, key) >= 0 {
+		if i.tr.cmp.Compare(i.key, key) >= 0 {
 			return true
 		}
 	}
@@ -440,15 +403,16 @@ func (i *blockIter) Value() []byte {
 
 func (i *blockIter) Release() {
 	if i.dir != dirReleased {
+		i.tr = nil
 		i.block = nil
 		i.prevNode = nil
 		i.prevKeys = nil
 		i.key = nil
 		i.value = nil
 		i.dir = dirReleased
-		if i.cache != nil {
-			i.cache.Release()
-			i.cache = nil
+		if i.blockReleaser != nil {
+			i.blockReleaser.Release()
+			i.blockReleaser = nil
 		}
 		if i.releaser != nil {
 			i.releaser.Release()
@@ -476,21 +440,21 @@ func (i *blockIter) Error() error {
 }
 
 type filterBlock struct {
-	tr *Reader
+	bpool *util.BufferPool
 	data []byte
 	oOffset int
 	baseLg uint
 	filtersNum int
 }
 
-func (b *filterBlock) contains(offset uint64, key []byte) bool {
+func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool {
 	i := int(offset >> b.baseLg)
 	if i < b.filtersNum {
 		o := b.data[b.oOffset+i*4:]
 		n := int(binary.LittleEndian.Uint32(o))
 		m := int(binary.LittleEndian.Uint32(o[4:]))
 		if n < m && m <= b.oOffset {
-			return b.tr.filter.Contains(b.data[n:m], key)
+			return filter.Contains(b.data[n:m], key)
 		} else if n == m {
 			return false
 		}
@@ -499,13 +463,14 @@ func (b *filterBlock) contains(offset uint64, key []byte) bool {
 }
 
 func (b *filterBlock) Release() {
-	b.tr.bpool.Put(b.data)
-	b.tr = nil
+	b.bpool.Put(b.data)
+	b.bpool = nil
 	b.data = nil
 }
 
 type indexIter struct {
 	*blockIter
+	tr *Reader
 	slice *util.Range
 	// Options
 	checksum bool
@@ -526,7 +491,7 @@ func (i *indexIter) Get() iterator.Iterator {
 	if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) {
 		slice = i.slice
 	}
-	return i.blockIter.block.tr.getDataIterErr(dataBH, slice, i.checksum, i.fillCache)
+	return i.tr.getDataIterErr(dataBH, slice, i.checksum, i.fillCache)
 }
 
 // Reader is a table reader.
@@ -594,7 +559,7 @@ func (r *Reader) readBlock(bh blockHandle, checksum bool) (*block, error) {
 	}
 	restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
 	b := &block{
-		tr: r,
+		bpool: r.bpool,
 		data: data,
 		restartsLen: restartsLen,
 		restartsOffset: len(data) - (restartsLen+1)*4,
@@ -655,7 +620,7 @@ func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
 		return nil, errors.New("leveldb/table: Reader: invalid filter block (invalid offset)")
 	}
 	b := &filterBlock{
-		tr: r,
+		bpool: r.bpool,
 		data: data,
 		oOffset: oOffset,
 		baseLg: uint(data[n-1]),
@@ -713,7 +678,7 @@ func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fi
 	if err != nil {
 		return iterator.NewEmptyIterator(err)
 	}
-	return b.newIterator(slice, false, rel)
+	return r.newBlockIter(b, rel, slice, false)
 }
 
 func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator {
@@ -727,6 +692,46 @@ func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, checksum,
 	return r.getDataIter(dataBH, slice, checksum, fillCache)
 }
 
+func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter {
+	bi := &blockIter{
+		tr: r,
+		block: b,
+		blockReleaser: bReleaser,
+		// Valid key should never be nil.
+		key: make([]byte, 0),
+		dir: dirSOI,
+		riStart: 0,
+		riLimit: b.restartsLen,
+		offsetStart: 0,
+		offsetRealStart: 0,
+		offsetLimit: b.restartsOffset,
+	}
+	if slice != nil {
+		if slice.Start != nil {
+			if bi.Seek(slice.Start) {
+				bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
+				bi.offsetStart = b.restartOffset(bi.riStart)
+				bi.offsetRealStart = bi.prevOffset
+			} else {
+				bi.riStart = b.restartsLen
+				bi.offsetStart = b.restartsOffset
+				bi.offsetRealStart = b.restartsOffset
+			}
+		}
+		if slice.Limit != nil {
+			if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
+				bi.offsetLimit = bi.prevOffset
+				bi.riLimit = bi.restartIndex + 1
+			}
+		}
+		bi.reset()
+		if bi.offsetStart > bi.offsetLimit {
+			bi.sErr(errors.New("leveldb/table: Reader: invalid slice range"))
+		}
+	}
+	return bi
+}
+
 // NewIterator creates an iterator from the table.
 //
 // Slice allows slicing the iterator to only contains keys in the given
@@ -752,7 +757,8 @@ func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.It
 		return iterator.NewEmptyIterator(err)
 	}
 	index := &indexIter{
-		blockIter: indexBlock.newIterator(slice, true, rel),
+		blockIter: r.newBlockIter(indexBlock, rel, slice, true),
+		tr: r,
 		slice: slice,
 		checksum: ro.GetStrict(opt.StrictBlockChecksum),
 		fillCache: !ro.GetDontFillCache(),
@@ -781,7 +787,7 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
 	}
 	defer rel.Release()
 
-	index := indexBlock.newIterator(nil, true, nil)
+	index := r.newBlockIter(indexBlock, nil, nil, true)
 	defer index.Release()
 	if !index.Seek(key) {
 		err = index.Error()
@@ -798,7 +804,7 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
 	if r.filter != nil {
 		filterBlock, rel, ferr := r.getFilterBlock(true)
 		if ferr == nil {
-			if !filterBlock.contains(dataBH.offset, key) {
+			if !filterBlock.contains(r.filter, dataBH.offset, key) {
 				rel.Release()
 				return nil, nil, ErrNotFound
 			}
@@ -866,7 +872,7 @@ func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
 	}
 	defer rel.Release()
 
-	index := indexBlock.newIterator(nil, true, nil)
+	index := r.newBlockIter(indexBlock, nil, nil, true)
 	defer index.Release()
 	if index.Seek(key) {
 		dataBH, n := decodeBlockHandle(index.Value())
@@ -956,7 +962,7 @@ func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, bpool *util.Buf
 	}
 	// Set data end.
 	r.dataEnd = int64(metaBH.offset)
-	metaIter := metaBlock.newIterator(nil, false, nil)
+	metaIter := r.newBlockIter(metaBlock, nil, nil, false)
 	for metaIter.Next() {
 		key := string(metaIter.Key())
 		if !strings.HasPrefix(key, "filter.") {