bleve/index/upside_down/dump.go
Marty Schoch 5aa9e95468 major refactor of index/search API
index IDs are now opaque (until finally returned to the top-level user)
 - the TermFieldDocs returned by TermFieldReader no longer contain the doc id
 - instead they return an opaque IndexInternalID
 - items returned are still in the "natural index order"
 - but that is no longer guaranteed to be "doc id order"
 - correct behavior requires that they all follow the same order
 - but not any particular order

 - new API FinalizeDocID which converts index internal IDs to the public string ID

 - APIs used internally which previously took doc id now take IndexInternalID
     - that is DocumentFieldTerms() and DocumentFieldTermsForFields()
 - however, APIs that are used externally do not reflect this change
     - that is Document()

 - DocumentIDReader follows the same changes, but this is less obvious
     - behavior clarified, used to iterate doc ids, BUT NOT in doc id order
     - method STILL available to iterate doc ids in range
     - but again, you won't get them in any meaningful order
     - new method to iterate actual doc ids from list of possible ids
         - this was introduced to make the DocIDSearcher continue working

searchers now work with the new opaque index internal doc ids
 - they return new DocumentMatchInternal (which does not have string ID)
scorers also work with these opaque index internal doc ids
 - they return DocumentMatchInternal (which does not have string ID)
collectors now also perform a final conversion step on the results
 - they STILL return traditional DocumentMatch (with string ID)
 - but they now also require an IndexReader (so that they can do the conversion; a sketch of this step follows below)
2016-07-31 13:46:18 -04:00
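
A minimal sketch of the conversion step described above, assuming the names given in this commit message (IndexInternalID, and FinalizeDocID on the IndexReader); the helper function and the exact FinalizeDocID signature are assumptions, not code from this revision:

package example

import "github.com/blevesearch/bleve/index"

// finalizeIDs illustrates what collectors now do as a last step: resolve each
// opaque IndexInternalID back to the public string doc ID through the IndexReader.
func finalizeIDs(reader index.IndexReader, internalIDs []index.IndexInternalID) ([]string, error) {
	ids := make([]string, 0, len(internalIDs))
	for _, internalID := range internalIDs {
		extID, err := reader.FinalizeDocID(internalID)
		if err != nil {
			return nil, err
		}
		ids = append(ids, extID)
	}
	return ids, nil
}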


// Copyright (c) 2014 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.

package upside_down

import (
	"bytes"
	"sort"

	"github.com/blevesearch/bleve/index/store"
)
// The functions in this file are only intended to be used by the bleve_dump
// utility and the debug http handlers. If your application relies on them,
// you're doing something wrong; they may change or be removed at any time.

func (udc *UpsideDownCouch) dumpPrefix(kvreader store.KVReader, rv chan interface{}, prefix []byte) {
	start := prefix
	if start == nil {
		start = []byte{0}
	}
	it := kvreader.PrefixIterator(start)
	defer func() {
		cerr := it.Close()
		if cerr != nil {
			rv <- cerr
		}
	}()

	key, val, valid := it.Current()
	for valid {
		// copy the key and value so the parsed row does not alias buffers
		// the iterator may reuse on the next call to Next
		ck := make([]byte, len(key))
		copy(ck, key)
		cv := make([]byte, len(val))
		copy(cv, val)
		row, err := ParseFromKeyValue(ck, cv)
		if err != nil {
			rv <- err
			return
		}
		rv <- row
		it.Next()
		key, val, valid = it.Current()
	}
}

func (udc *UpsideDownCouch) dumpRange(kvreader store.KVReader, rv chan interface{}, start, end []byte) {
	it := kvreader.RangeIterator(start, end)
	defer func() {
		cerr := it.Close()
		if cerr != nil {
			rv <- cerr
		}
	}()

	key, val, valid := it.Current()
	for valid {
		ck := make([]byte, len(key))
		copy(ck, key)
		cv := make([]byte, len(val))
		copy(cv, val)
		row, err := ParseFromKeyValue(ck, cv)
		if err != nil {
			rv <- err
			return
		}
		rv <- row
		it.Next()
		key, val, valid = it.Current()
	}
}

// DumpAll returns a channel of every row in the index; any error encountered
// is sent on the same channel.
func (udc *UpsideDownCouch) DumpAll() chan interface{} {
	rv := make(chan interface{})
	go func() {
		defer close(rv)

		// start an isolated reader for use during the dump
		kvreader, err := udc.store.Reader()
		if err != nil {
			rv <- err
			return
		}
		defer func() {
			cerr := kvreader.Close()
			if cerr != nil {
				rv <- cerr
			}
		}()

		udc.dumpRange(kvreader, rv, nil, nil)
	}()
	return rv
}
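
// Illustrative usage sketch (hypothetical caller code, assuming udc is an
// *UpsideDownCouch): items on the channel returned by DumpAll are either an
// error or a parsed row, and the channel should be drained fully so the
// producing goroutine is not left blocked on a send:
//
//	var firstErr error
//	for item := range udc.DumpAll() {
//		if err, ok := item.(error); ok {
//			if firstErr == nil {
//				firstErr = err
//			}
//			continue
//		}
//		// item is a row produced by ParseFromKeyValue; inspect or print it here
//	}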

// DumpFields returns a channel of all field rows (rows with the 'f' key
// prefix); any error encountered is sent on the same channel.
func (udc *UpsideDownCouch) DumpFields() chan interface{} {
	rv := make(chan interface{})
	go func() {
		defer close(rv)

		// start an isolated reader for use during the dump
		kvreader, err := udc.store.Reader()
		if err != nil {
			rv <- err
			return
		}
		defer func() {
			cerr := kvreader.Close()
			if cerr != nil {
				rv <- cerr
			}
		}()

		udc.dumpPrefix(kvreader, rv, []byte{'f'})
	}()
	return rv
}

// keyset implements sort.Interface over raw row keys so they can be walked
// in byte order.
type keyset [][]byte

func (k keyset) Len() int           { return len(k) }
func (k keyset) Swap(i, j int)      { k[i], k[j] = k[j], k[i] }
func (k keyset) Less(i, j int) bool { return bytes.Compare(k[i], k[j]) < 0 }

// DumpDoc returns all rows in the index related to this doc id; any error
// encountered is sent on the same channel.
func (udc *UpsideDownCouch) DumpDoc(id string) chan interface{} {
	idBytes := []byte(id)

	rv := make(chan interface{})
	go func() {
		defer close(rv)

		// start an isolated reader for use during the dump
		kvreader, err := udc.store.Reader()
		if err != nil {
			rv <- err
			return
		}
		defer func() {
			cerr := kvreader.Close()
			if cerr != nil {
				rv <- cerr
			}
		}()

		back, err := udc.backIndexRowForDoc(kvreader, idBytes)
		if err != nil {
			rv <- err
			return
		}

		// no such doc
		if back == nil {
			return
		}

		// build sorted list of term keys
		keys := make(keyset, 0)
		for _, entry := range back.termEntries {
			tfr := NewTermFrequencyRow([]byte(*entry.Term), uint16(*entry.Field), idBytes, 0, 0)
			key := tfr.Key()
			keys = append(keys, key)
		}
		sort.Sort(keys)

		// first add all the stored rows
		storedRowPrefix := NewStoredRow(idBytes, 0, []uint64{}, 'x', []byte{}).ScanPrefixForDoc()
		udc.dumpPrefix(kvreader, rv, storedRowPrefix)

		// now walk term keys in order and add them as well
		if len(keys) > 0 {
			it := kvreader.RangeIterator(keys[0], nil)
			defer func() {
				cerr := it.Close()
				if cerr != nil {
					rv <- cerr
				}
			}()

			for _, key := range keys {
				it.Seek(key)
				rkey, rval, valid := it.Current()
				if !valid {
					break
				}
				// copy the iterator's key (not the seek key) and value before parsing
				rck := make([]byte, len(rkey))
				copy(rck, rkey)
				rcv := make([]byte, len(rval))
				copy(rcv, rval)
				row, err := ParseFromKeyValue(rck, rcv)
				if err != nil {
					rv <- err
					return
				}
				rv <- row
			}
		}
	}()
	return rv
}
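
// Illustrative usage sketch (hypothetical caller code, assuming udc is an
// *UpsideDownCouch): DumpDoc streams the doc's stored rows first, then its term
// rows in sorted key order; drain the channel the same way as for DumpAll:
//
//	for item := range udc.DumpDoc("some-doc-id") {
//		if err, ok := item.(error); ok {
//			// record err; keep ranging so the channel is fully drained
//			continue
//		}
//		// item is a row belonging to this document
//	}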