0
0
Fork 0

BREAKING CHANGE - removed DumpXXX() methods from bleve.Index

The DumpXXX() methods were always documented as internal and
unsupported.  However, now they are being removed from the
public top-level API.  They are still available on the internal
IndexReader, which can be accessed using the Advanced() method.

The DocCount() and DumpXXX() methods on the internal index
have moved to the internal index reader, since they logically
operate on a snapshot of an index.
This commit is contained in:
Marty Schoch 2016-09-13 12:40:01 -04:00
parent e1fb860a86
commit 3fd2a64872
21 changed files with 531 additions and 501 deletions

View File

@ -52,8 +52,19 @@ func (h *DebugDocumentHandler) ServeHTTP(w http.ResponseWriter, req *http.Reques
docID = h.DocIDLookup(req)
}
rv := make([]interface{}, 0)
rowChan := index.DumpDoc(docID)
internalIndex, _, err := index.Advanced()
if err != nil {
showError(w, req, fmt.Sprintf("error getting index: %v", err), 500)
return
}
internalIndexReader, err := internalIndex.Reader()
if err != nil {
showError(w, req, fmt.Sprintf("error operning index reader: %v", err), 500)
return
}
var rv []interface{}
rowChan := internalIndexReader.DumpDoc(docID)
for row := range rowChan {
switch row := row.(type) {
case error:
@ -70,5 +81,10 @@ func (h *DebugDocumentHandler) ServeHTTP(w http.ResponseWriter, req *http.Reques
rv = append(rv, tmp)
}
}
err = internalIndexReader.Close()
if err != nil {
showError(w, req, fmt.Sprintf("error closing index reader: %v", err), 500)
return
}
mustEncode(w, rv)
}

View File

@ -176,24 +176,6 @@ type Index interface {
FieldDictRange(field string, startTerm []byte, endTerm []byte) (index.FieldDict, error)
FieldDictPrefix(field string, termPrefix []byte) (index.FieldDict, error)
// DumpAll returns a channel receiving all index rows as
// UpsideDownCouchRow, in lexicographic byte order. If the enumeration
// fails, an error is sent. The channel is closed once the enumeration
// completes or an error is encountered. The caller must consume all
// channel entries until the channel is closed to ensure the transaction
// and other resources associated with the enumeration are released.
//
// DumpAll exists for debugging and tooling purpose and may change in the
// future.
DumpAll() chan interface{}
// DumpDoc works like DumpAll but returns only StoredRows and
// TermFrequencyRows related to a document.
DumpDoc(id string) chan interface{}
// DumpFields works like DumpAll but returns only FieldRows.
DumpFields() chan interface{}
Close() error
Mapping() *IndexMapping

View File

@ -24,8 +24,6 @@ type Index interface {
Open() error
Close() error
DocCount() (uint64, error)
Update(doc *document.Document) error
Delete(id string) error
Batch(batch *Batch) error
@ -33,10 +31,6 @@ type Index interface {
SetInternal(key, val []byte) error
DeleteInternal(key []byte) error
DumpAll() chan interface{}
DumpDoc(id string) chan interface{}
DumpFields() chan interface{}
// Reader returns a low-level accessor on the index data. Close it to
// release associated resources.
Reader() (IndexReader, error)
@ -71,11 +65,15 @@ type IndexReader interface {
GetInternal(key []byte) ([]byte, error)
DocCount() uint64
DocCount() (uint64, error)
ExternalID(id IndexInternalID) (string, error)
InternalID(id string) (IndexInternalID, error)
DumpAll() chan interface{}
DumpDoc(id string) chan interface{}
DumpFields() chan interface{}
Close() error
}

View File

@ -22,7 +22,7 @@ import (
// if your application relies on them, you're doing something wrong
// they may change or be removed at any time
func (udc *SmolderingCouch) dumpPrefix(kvreader store.KVReader, rv chan interface{}, prefix []byte) {
func dumpPrefix(kvreader store.KVReader, rv chan interface{}, prefix []byte) {
start := prefix
if start == nil {
start = []byte{0}
@ -52,7 +52,7 @@ func (udc *SmolderingCouch) dumpPrefix(kvreader store.KVReader, rv chan interfac
}
}
func (udc *SmolderingCouch) dumpRange(kvreader store.KVReader, rv chan interface{}, start, end []byte) {
func dumpRange(kvreader store.KVReader, rv chan interface{}, start, end []byte) {
it := kvreader.RangeIterator(start, end)
defer func() {
cerr := it.Close()
@ -78,48 +78,20 @@ func (udc *SmolderingCouch) dumpRange(kvreader store.KVReader, rv chan interface
}
}
func (udc *SmolderingCouch) DumpAll() chan interface{} {
func (i *IndexReader) DumpAll() chan interface{} {
rv := make(chan interface{})
go func() {
defer close(rv)
// start an isolated reader for use during the dump
kvreader, err := udc.store.Reader()
if err != nil {
rv <- err
return
}
defer func() {
cerr := kvreader.Close()
if cerr != nil {
rv <- cerr
}
}()
udc.dumpRange(kvreader, rv, nil, nil)
dumpRange(i.kvreader, rv, nil, nil)
}()
return rv
}
func (udc *SmolderingCouch) DumpFields() chan interface{} {
func (i *IndexReader) DumpFields() chan interface{} {
rv := make(chan interface{})
go func() {
defer close(rv)
// start an isolated reader for use during the dump
kvreader, err := udc.store.Reader()
if err != nil {
rv <- err
return
}
defer func() {
cerr := kvreader.Close()
if cerr != nil {
rv <- cerr
}
}()
udc.dumpPrefix(kvreader, rv, []byte{'f'})
dumpPrefix(i.kvreader, rv, []byte{'f'})
}()
return rv
}
@ -131,26 +103,13 @@ func (k keyset) Swap(i, j int) { k[i], k[j] = k[j], k[i] }
func (k keyset) Less(i, j int) bool { return bytes.Compare(k[i], k[j]) < 0 }
// DumpDoc returns all rows in the index related to this doc id
func (udc *SmolderingCouch) DumpDoc(id string) chan interface{} {
func (i *IndexReader) DumpDoc(id string) chan interface{} {
rv := make(chan interface{})
go func() {
defer close(rv)
indexReader, err := udc.Reader()
if err != nil {
rv <- err
return
}
defer func() {
cerr := indexReader.Close()
if cerr != nil {
rv <- cerr
}
}()
back, err := udc.backIndexRowForDoc(indexReader, nil, id)
back, err := i.backIndexRowForDoc(nil, id)
if err != nil {
rv <- err
return
@ -171,16 +130,13 @@ func (udc *SmolderingCouch) DumpDoc(id string) chan interface{} {
}
sort.Sort(keys)
// start an isolated reader for use during the dump
kvreader := indexReader.(*IndexReader).kvreader
// first add all the stored rows
storedRowPrefix := NewStoredRowDocBytes(back.docNumber, 0, []uint64{}, 'x', []byte{}).ScanPrefixForDoc()
udc.dumpPrefix(kvreader, rv, storedRowPrefix)
dumpPrefix(i.kvreader, rv, storedRowPrefix)
// now walk term keys in order and add them as well
if len(keys) > 0 {
it := kvreader.RangeIterator(keys[0], nil)
it := i.kvreader.RangeIterator(keys[0], nil)
defer func() {
cerr := it.Close()
if cerr != nil {

View File

@ -44,13 +44,21 @@ func TestDump(t *testing.T) {
}()
var expectedCount uint64
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
doc := document.NewDocument("1")
doc.AddField(document.NewTextFieldWithIndexingOptions("name", []uint64{}, []byte("test"), document.IndexField|document.StoreField))
@ -79,7 +87,11 @@ func TestDump(t *testing.T) {
}
fieldsCount := 0
fieldsRows := idx.DumpFields()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
fieldsRows := reader.DumpFields()
for range fieldsRows {
fieldsCount++
}
@ -95,7 +107,7 @@ func TestDump(t *testing.T) {
// 1 id stored row
expectedDocRowCount := int(1+(2*(64/document.DefaultPrecisionStep))+3) + 1 + 1
docRowCount := 0
docRows := idx.DumpDoc("1")
docRows := reader.DumpDoc("1")
for range docRows {
docRowCount++
}
@ -104,7 +116,7 @@ func TestDump(t *testing.T) {
}
docRowCount = 0
docRows = idx.DumpDoc("2")
docRows = reader.DumpDoc("2")
for range docRows {
docRowCount++
}
@ -121,11 +133,16 @@ func TestDump(t *testing.T) {
// 16 date term row counts (shared for both docs, same date value)
expectedAllRowCount := int(1 + fieldsCount + (2 * expectedDocRowCount) + 2 + 4 + int((2 * (64 / document.DefaultPrecisionStep))))
allRowCount := 0
allRows := idx.DumpAll()
allRows := reader.DumpAll()
for range allRows {
allRowCount++
}
if allRowCount != expectedAllRowCount {
t.Errorf("expected %d rows for all, got %d", expectedAllRowCount, allRowCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
}

View File

@ -59,7 +59,7 @@ func (i *IndexReader) Document(id string) (doc *document.Document, err error) {
// first hit the back index to confirm doc exists
var backIndexRow *BackIndexRow
backIndexRow, err = i.index.backIndexRowForDoc(i, nil, id)
backIndexRow, err = i.backIndexRowForDoc(nil, id)
if err != nil {
return
}
@ -99,7 +99,7 @@ func (i *IndexReader) Document(id string) (doc *document.Document, err error) {
}
func (i *IndexReader) DocumentFieldTerms(id index.IndexInternalID, fields []string) (index.FieldTerms, error) {
back, err := i.index.backIndexRowForDoc(i, id, "")
back, err := i.backIndexRowForDoc(id, "")
if err != nil {
return nil, err
}
@ -157,8 +157,8 @@ func (i *IndexReader) GetInternal(key []byte) ([]byte, error) {
return i.kvreader.Get(internalRow.Key())
}
func (i *IndexReader) DocCount() uint64 {
return i.docCount
func (i *IndexReader) DocCount() (uint64, error) {
return i.docCount, nil
}
func (i *IndexReader) Close() error {
@ -190,6 +190,50 @@ func (i *IndexReader) InternalID(id string) (index.IndexInternalID, error) {
return tfd.ID, nil
}
// backIndexRowForDoc fetches the back index row for a document, resolving
// the external document ID to an internal one when necessary.
//
// Either docID or externalDocID identifies the document: when docID is nil,
// externalDocID is translated via InternalID. A (nil, nil) return means the
// document has no back index entry.
func (i *IndexReader) backIndexRowForDoc(docID index.IndexInternalID, externalDocID string) (*BackIndexRow, error) {
	// resolve the internal identifier when the caller only knows the
	// external document ID
	if docID == nil {
		internalID, err := i.InternalID(externalDocID)
		if err != nil {
			return nil, err
		}
		docID = internalID
	}
	if len(docID) < 1 {
		return nil, nil
	}
	// build the back index key via a scratch row and a pooled buffer
	scratch := &BackIndexRow{
		docNumber: docID,
	}
	keyBuf := GetRowBuffer()
	if scratch.KeySize() > len(keyBuf) {
		keyBuf = make([]byte, 2*scratch.KeySize())
	}
	defer PutRowBuffer(keyBuf)
	n, err := scratch.KeyTo(keyBuf)
	if err != nil {
		return nil, err
	}
	val, err := i.kvreader.Get(keyBuf[:n])
	if err != nil {
		return nil, err
	}
	// nil value: no back index row stored for this document
	if val == nil {
		return nil, nil
	}
	return NewBackIndexRowKV(keyBuf[:n], val)
}
func incrementBytes(in []byte) []byte {
rv := make([]byte, len(in))
copy(rv, in)

View File

@ -295,12 +295,6 @@ func (udc *SmolderingCouch) batchRows(writer store.KVWriter, addRowsAll [][]Smol
return writer.ExecuteBatch(wb)
}
// DocCount returns the number of documents currently in the index. The
// counter is read under the index's read lock; the error is always nil and
// exists to satisfy the interface.
func (udc *SmolderingCouch) DocCount() (uint64, error) {
	udc.m.RLock()
	rv := udc.docCount
	udc.m.RUnlock()
	return rv, nil
}
func (udc *SmolderingCouch) Open() (err error) {
// acquire the write mutex for the duration of Open()
udc.writeMutex.Lock()
@ -462,7 +456,7 @@ func (udc *SmolderingCouch) Update(doc *document.Document) (err error) {
udc.writeMutex.Lock()
defer udc.writeMutex.Unlock()
indexReader, err := udc.Reader()
indexReader, err := udc.reader()
if err != nil {
return
}
@ -471,7 +465,7 @@ func (udc *SmolderingCouch) Update(doc *document.Document) (err error) {
// lookup the back index row
var backIndexRow *BackIndexRow
if udc.cf.Lookup([]byte(doc.ID)) {
backIndexRow, err = udc.backIndexRowForDoc(indexReader, nil, doc.ID)
backIndexRow, err = indexReader.backIndexRowForDoc(nil, doc.ID)
if err != nil {
_ = indexReader.Close()
atomic.AddUint64(&udc.stats.errors, 1)
@ -674,7 +668,7 @@ func (udc *SmolderingCouch) Delete(id string) (err error) {
udc.writeMutex.Lock()
defer udc.writeMutex.Unlock()
indexReader, err := udc.Reader()
indexReader, err := udc.reader()
if err != nil {
return
}
@ -682,7 +676,7 @@ func (udc *SmolderingCouch) Delete(id string) (err error) {
// first we lookup the backindex row for the doc id if it exists
// lookup the back index row
var backIndexRow *BackIndexRow
backIndexRow, err = udc.backIndexRowForDoc(indexReader, nil, id)
backIndexRow, err = indexReader.backIndexRowForDoc(nil, id)
if err != nil {
_ = indexReader.Close()
atomic.AddUint64(&udc.stats.errors, 1)
@ -751,54 +745,6 @@ func (udc *SmolderingCouch) deleteSingle(id index.IndexInternalID, backIndexRow
return deleteRows
}
// backIndexRowForDoc returns the back index row for a document, identified
// either by its internal docID or, when docID is nil, by externalDocID.
// A (nil, nil) return means the document has no back index entry.
//
// NOTE(review): on an InternalID lookup failure this closes indexReader
// before returning the error, but later failures leave it open — callers
// must not reuse the reader after an error from the ID lookup.
func (udc *SmolderingCouch) backIndexRowForDoc(indexReader index.IndexReader, docID index.IndexInternalID, externalDocID string) (*BackIndexRow, error) {
var err error
// first look up the docID if it isn't known
if docID == nil {
// first get the internal identifier
docID, err = indexReader.InternalID(externalDocID)
if err != nil {
// release the reader; the caller only receives the error
_ = indexReader.Close()
return nil, err
}
}
// an empty internal ID means the document does not exist
if len(docID) < 1 {
return nil, nil
}
// use a temporary row structure to build key
tempRow := &BackIndexRow{
docNumber: docID,
}
// borrow a pooled buffer, growing it when the key will not fit
keyBuf := GetRowBuffer()
if tempRow.KeySize() > len(keyBuf) {
keyBuf = make([]byte, 2*tempRow.KeySize())
}
defer PutRowBuffer(keyBuf)
keySize, err := tempRow.KeyTo(keyBuf)
if err != nil {
return nil, err
}
// open a reader for backindex lookup
var kvreader = indexReader.(*IndexReader).kvreader
value, err := kvreader.Get(keyBuf[:keySize])
if err != nil {
return nil, err
}
// nil value: no back index row stored for this doc
if value == nil {
return nil, nil
}
backIndexRow, err := NewBackIndexRowKV(keyBuf[:keySize], value)
if err != nil {
return nil, err
}
return backIndexRow, nil
}
func decodeFieldType(typ byte, name string, pos []uint64, value []byte) document.Field {
switch typ {
case 't':
@ -908,7 +854,7 @@ func (udc *SmolderingCouch) Batch(batch *index.Batch) (err error) {
// open a reader for backindex lookup
indexReader, err := udc.Reader()
indexReader, err := udc.reader()
if err != nil {
docBackIndexRowErr = err
return
@ -917,7 +863,7 @@ func (udc *SmolderingCouch) Batch(batch *index.Batch) (err error) {
for docID, doc := range batch.IndexOps {
var backIndexRow *BackIndexRow
if udc.cf.Lookup([]byte(docID)) {
backIndexRow, err = udc.backIndexRowForDoc(indexReader, nil, docID)
backIndexRow, err = indexReader.backIndexRowForDoc(nil, docID)
if err != nil {
docBackIndexRowErr = err
return
@ -1094,6 +1040,10 @@ func (udc *SmolderingCouch) DeleteInternal(key []byte) (err error) {
}
// Reader returns a low-level accessor on the index data as an
// index.IndexReader. Close it to release associated resources.
func (udc *SmolderingCouch) Reader() (index.IndexReader, error) {
return udc.reader()
}
func (udc *SmolderingCouch) reader() (*IndexReader, error) {
kvr, err := udc.store.Reader()
if err != nil {
return nil, fmt.Errorf("error opening store reader: %v", err)

View File

@ -51,13 +51,21 @@ func TestIndexOpenReopen(t *testing.T) {
}
var expectedCount uint64
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
// opening the database should have inserted a version and _id field
expectedLength := uint64(2)
@ -116,13 +124,21 @@ func TestIndexInsert(t *testing.T) {
}()
var expectedCount uint64
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
doc := document.NewDocument("1")
doc.AddField(document.NewTextField("name", []uint64{}, []byte("test")))
@ -132,13 +148,21 @@ func TestIndexInsert(t *testing.T) {
}
expectedCount++
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
// should have 4 rows (1 for version, 1 for schema field, and 1 for single term, and 1 for the term count, and 1 for the back index entry)
// +1 for id term
@ -181,13 +205,21 @@ func TestIndexInsertThenDelete(t *testing.T) {
}()
var expectedCount uint64
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
doc := document.NewDocument("1")
doc.AddField(document.NewTextField("name", []uint64{}, []byte("test")))
@ -207,13 +239,21 @@ func TestIndexInsertThenDelete(t *testing.T) {
expectedCount++
expectedRows += 4 // 2 dictionary 2 terms
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
err = idx.Delete("1")
if err != nil {
@ -222,13 +262,21 @@ func TestIndexInsertThenDelete(t *testing.T) {
expectedCount--
expectedRows -= 2 //2 terms
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
err = idx.Delete("2")
if err != nil {
@ -237,13 +285,21 @@ func TestIndexInsertThenDelete(t *testing.T) {
expectedCount--
expectedRows -= 2 //2 terms
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
// should have 2 rows (1 for version, 2 for schema field, 3 for dictionary row garbage)
//expectedLength := uint64(1 + 2 + 3)
@ -303,10 +359,18 @@ func TestIndexInsertThenUpdate(t *testing.T) {
}
if rowCount != expectedLength {
t.Errorf("expected %d rows, got: %d", expectedLength, rowCount)
allRows := idx.DumpAll()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
allRows := reader.DumpAll()
for ar := range allRows {
t.Logf("%v", ar)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
}
// now do another update that should remove one of the terms
@ -325,10 +389,17 @@ func TestIndexInsertThenUpdate(t *testing.T) {
}
if rowCount != expectedLength {
t.Errorf("expected %d rows, got: %d", expectedLength, rowCount)
allRows := idx.DumpAll()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
allRows := reader.DumpAll()
for ar := range allRows {
t.Logf("%v", ar)
}
if err != nil {
t.Fatal(err)
}
}
}
@ -407,12 +478,20 @@ func TestIndexInsertMultiple(t *testing.T) {
}
expectedCount++
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("expected doc count: %d, got %d", expectedCount, docCount)
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
}
@ -441,13 +520,21 @@ func TestIndexInsertWithStore(t *testing.T) {
}()
var expectedCount uint64
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
doc := document.NewDocument("1")
doc.AddField(document.NewTextFieldWithIndexingOptions("name", []uint64{}, []byte("test"), document.IndexField|document.StoreField))
@ -457,13 +544,21 @@ func TestIndexInsertWithStore(t *testing.T) {
}
expectedCount++
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
// should have 6 rows (1 for version, 2 for schema field, and 2 for terms, and 2 for the stored field and 2 for the term counts, and 1 for the back index entry)
expectedLength := uint64(1 + 2 + 2 + 2 + 2 + 1)
@ -675,7 +770,10 @@ func TestIndexBatch(t *testing.T) {
}
}()
docCount := indexReader.DocCount()
docCount, err := indexReader.DocCount()
if err != nil {
t.Fatal(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
@ -702,10 +800,18 @@ func TestIndexBatch(t *testing.T) {
expectedDocIDs := []string{"2", "3"}
if !reflect.DeepEqual(docIDs, expectedDocIDs) {
t.Errorf("expected ids: %v, got ids: %v", expectedDocIDs, docIDs)
allRows := idx.DumpAll()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
allRows := reader.DumpAll()
for ar := range allRows {
t.Logf("%v", ar)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
}
}
@ -734,13 +840,21 @@ func TestIndexInsertUpdateDeleteWithMultipleTypesStored(t *testing.T) {
}()
var expectedCount uint64
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
doc := document.NewDocument("1")
doc.AddField(document.NewTextFieldWithIndexingOptions("name", []uint64{}, []byte("test"), document.IndexField|document.StoreField))
@ -756,13 +870,21 @@ func TestIndexInsertUpdateDeleteWithMultipleTypesStored(t *testing.T) {
}
expectedCount++
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
// should have 78 rows
// 1 for version
@ -861,7 +983,10 @@ func TestIndexInsertUpdateDeleteWithMultipleTypesStored(t *testing.T) {
}
// expected doc count shouldn't have changed
docCount = indexReader2.DocCount()
docCount, err = indexReader2.DocCount()
if err != nil {
t.Fatal(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
@ -911,13 +1036,21 @@ func TestIndexInsertUpdateDeleteWithMultipleTypesStored(t *testing.T) {
expectedCount--
// expected doc count shouldn't have changed
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
}
func TestIndexInsertFields(t *testing.T) {
@ -1365,7 +1498,7 @@ func TestLargeField(t *testing.T) {
}
}()
largeFieldValue := make([]byte, 0)
var largeFieldValue []byte
for len(largeFieldValue) < RowBufferSize {
largeFieldValue = append(largeFieldValue, bleveWikiArticle1K...)
}

View File

@ -21,7 +21,7 @@ import (
// if your application relies on them, you're doing something wrong
// they may change or be removed at any time
func (udc *UpsideDownCouch) dumpPrefix(kvreader store.KVReader, rv chan interface{}, prefix []byte) {
func dumpPrefix(kvreader store.KVReader, rv chan interface{}, prefix []byte) {
start := prefix
if start == nil {
start = []byte{0}
@ -51,7 +51,7 @@ func (udc *UpsideDownCouch) dumpPrefix(kvreader store.KVReader, rv chan interfac
}
}
func (udc *UpsideDownCouch) dumpRange(kvreader store.KVReader, rv chan interface{}, start, end []byte) {
func dumpRange(kvreader store.KVReader, rv chan interface{}, start, end []byte) {
it := kvreader.RangeIterator(start, end)
defer func() {
cerr := it.Close()
@ -77,48 +77,20 @@ func (udc *UpsideDownCouch) dumpRange(kvreader store.KVReader, rv chan interface
}
}
func (udc *UpsideDownCouch) DumpAll() chan interface{} {
func (i *IndexReader) DumpAll() chan interface{} {
rv := make(chan interface{})
go func() {
defer close(rv)
// start an isolated reader for use during the dump
kvreader, err := udc.store.Reader()
if err != nil {
rv <- err
return
}
defer func() {
cerr := kvreader.Close()
if cerr != nil {
rv <- cerr
}
}()
udc.dumpRange(kvreader, rv, nil, nil)
dumpRange(i.kvreader, rv, nil, nil)
}()
return rv
}
func (udc *UpsideDownCouch) DumpFields() chan interface{} {
func (i *IndexReader) DumpFields() chan interface{} {
rv := make(chan interface{})
go func() {
defer close(rv)
// start an isolated reader for use during the dump
kvreader, err := udc.store.Reader()
if err != nil {
rv <- err
return
}
defer func() {
cerr := kvreader.Close()
if cerr != nil {
rv <- cerr
}
}()
udc.dumpPrefix(kvreader, rv, []byte{'f'})
dumpPrefix(i.kvreader, rv, []byte{'f'})
}()
return rv
}
@ -130,7 +102,7 @@ func (k keyset) Swap(i, j int) { k[i], k[j] = k[j], k[i] }
func (k keyset) Less(i, j int) bool { return bytes.Compare(k[i], k[j]) < 0 }
// DumpDoc returns all rows in the index related to this doc id
func (udc *UpsideDownCouch) DumpDoc(id string) chan interface{} {
func (i *IndexReader) DumpDoc(id string) chan interface{} {
idBytes := []byte(id)
rv := make(chan interface{})
@ -138,20 +110,7 @@ func (udc *UpsideDownCouch) DumpDoc(id string) chan interface{} {
go func() {
defer close(rv)
// start an isolated reader for use during the dump
kvreader, err := udc.store.Reader()
if err != nil {
rv <- err
return
}
defer func() {
cerr := kvreader.Close()
if cerr != nil {
rv <- cerr
}
}()
back, err := udc.backIndexRowForDoc(kvreader, []byte(id))
back, err := backIndexRowForDoc(i.kvreader, []byte(id))
if err != nil {
rv <- err
return
@ -172,11 +131,11 @@ func (udc *UpsideDownCouch) DumpDoc(id string) chan interface{} {
// first add all the stored rows
storedRowPrefix := NewStoredRow(idBytes, 0, []uint64{}, 'x', []byte{}).ScanPrefixForDoc()
udc.dumpPrefix(kvreader, rv, storedRowPrefix)
dumpPrefix(i.kvreader, rv, storedRowPrefix)
// now walk term keys in order and add them as well
if len(keys) > 0 {
it := kvreader.RangeIterator(keys[0], nil)
it := i.kvreader.RangeIterator(keys[0], nil)
defer func() {
cerr := it.Close()
if cerr != nil {

View File

@ -44,13 +44,21 @@ func TestDump(t *testing.T) {
}()
var expectedCount uint64
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
doc := document.NewDocument("1")
doc.AddField(document.NewTextFieldWithIndexingOptions("name", []uint64{}, []byte("test"), document.IndexField|document.StoreField))
@ -79,7 +87,11 @@ func TestDump(t *testing.T) {
}
fieldsCount := 0
fieldsRows := idx.DumpFields()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
fieldsRows := reader.DumpFields()
for range fieldsRows {
fieldsCount++
}
@ -93,7 +105,7 @@ func TestDump(t *testing.T) {
// 3 stored fields
expectedDocRowCount := int(1 + (2 * (64 / document.DefaultPrecisionStep)) + 3)
docRowCount := 0
docRows := idx.DumpDoc("1")
docRows := reader.DumpDoc("1")
for range docRows {
docRowCount++
}
@ -102,7 +114,7 @@ func TestDump(t *testing.T) {
}
docRowCount = 0
docRows = idx.DumpDoc("2")
docRows = reader.DumpDoc("2")
for range docRows {
docRowCount++
}
@ -119,11 +131,16 @@ func TestDump(t *testing.T) {
// 16 date term row counts (shared for both docs, same date value)
expectedAllRowCount := int(1 + fieldsCount + (2 * expectedDocRowCount) + 2 + 2 + int((2 * (64 / document.DefaultPrecisionStep))))
allRowCount := 0
allRows := idx.DumpAll()
allRows := reader.DumpAll()
for range allRows {
allRowCount++
}
if allRowCount != expectedAllRowCount {
t.Errorf("expected %d rows for all, got %d", expectedAllRowCount, allRowCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
}

View File

@ -56,7 +56,7 @@ func (i *IndexReader) DocIDReaderOnly(ids []string) (index.DocIDReader, error) {
func (i *IndexReader) Document(id string) (doc *document.Document, err error) {
// first hit the back index to confirm doc exists
var backIndexRow *BackIndexRow
backIndexRow, err = i.index.backIndexRowForDoc(i.kvreader, []byte(id))
backIndexRow, err = backIndexRowForDoc(i.kvreader, []byte(id))
if err != nil {
return
}
@ -97,7 +97,7 @@ func (i *IndexReader) Document(id string) (doc *document.Document, err error) {
}
func (i *IndexReader) DocumentFieldTerms(id index.IndexInternalID, fields []string) (index.FieldTerms, error) {
back, err := i.index.backIndexRowForDoc(i.kvreader, id)
back, err := backIndexRowForDoc(i.kvreader, id)
if err != nil {
return nil, err
}
@ -156,8 +156,8 @@ func (i *IndexReader) GetInternal(key []byte) ([]byte, error) {
return i.kvreader.Get(internalRow.Key())
}
func (i *IndexReader) DocCount() uint64 {
return i.docCount
func (i *IndexReader) DocCount() (uint64, error) {
return i.docCount, nil
}
func (i *IndexReader) Close() error {

View File

@ -285,12 +285,6 @@ func (udc *UpsideDownCouch) batchRows(writer store.KVWriter, addRowsAll [][]Upsi
return writer.ExecuteBatch(wb)
}
// DocCount returns the number of documents currently in the index. The
// counter is read under the index's read lock; the error is always nil and
// exists to satisfy the interface.
func (udc *UpsideDownCouch) DocCount() (uint64, error) {
	udc.m.RLock()
	rv := udc.docCount
	udc.m.RUnlock()
	return rv, nil
}
func (udc *UpsideDownCouch) Open() (err error) {
// acquire the write mutex for the duration of Open()
udc.writeMutex.Lock()
@ -439,7 +433,7 @@ func (udc *UpsideDownCouch) Update(doc *document.Document) (err error) {
// first we lookup the backindex row for the doc id if it exists
// lookup the back index row
var backIndexRow *BackIndexRow
backIndexRow, err = udc.backIndexRowForDoc(kvreader, index.IndexInternalID(doc.ID))
backIndexRow, err = backIndexRowForDoc(kvreader, index.IndexInternalID(doc.ID))
if err != nil {
_ = kvreader.Close()
atomic.AddUint64(&udc.stats.errors, 1)
@ -627,7 +621,7 @@ func (udc *UpsideDownCouch) Delete(id string) (err error) {
// first we lookup the backindex row for the doc id if it exists
// lookup the back index row
var backIndexRow *BackIndexRow
backIndexRow, err = udc.backIndexRowForDoc(kvreader, index.IndexInternalID(id))
backIndexRow, err = backIndexRowForDoc(kvreader, index.IndexInternalID(id))
if err != nil {
_ = kvreader.Close()
atomic.AddUint64(&udc.stats.errors, 1)
@ -695,36 +689,6 @@ func (udc *UpsideDownCouch) deleteSingle(id string, backIndexRow *BackIndexRow,
return deleteRows
}
// backIndexRowForDoc fetches the back index row for docID using the supplied
// KV reader. It returns (nil, nil) when no back index entry exists for the
// document.
func (udc *UpsideDownCouch) backIndexRowForDoc(kvreader store.KVReader, docID index.IndexInternalID) (*BackIndexRow, error) {
// use a temporary row structure to build key
tempRow := &BackIndexRow{
doc: docID,
}
// borrow a pooled buffer, growing it when the key will not fit
keyBuf := GetRowBuffer()
if tempRow.KeySize() > len(keyBuf) {
keyBuf = make([]byte, 2*tempRow.KeySize())
}
defer PutRowBuffer(keyBuf)
keySize, err := tempRow.KeyTo(keyBuf)
if err != nil {
return nil, err
}
value, err := kvreader.Get(keyBuf[:keySize])
if err != nil {
return nil, err
}
// nil value: document has no back index row
if value == nil {
return nil, nil
}
backIndexRow, err := NewBackIndexRowKV(keyBuf[:keySize], value)
if err != nil {
return nil, err
}
return backIndexRow, nil
}
func decodeFieldType(typ byte, name string, pos []uint64, value []byte) document.Field {
switch typ {
case 't':
@ -833,7 +797,7 @@ func (udc *UpsideDownCouch) Batch(batch *index.Batch) (err error) {
}
for docID, doc := range batch.IndexOps {
backIndexRow, err := udc.backIndexRowForDoc(kvreader, index.IndexInternalID(docID))
backIndexRow, err := backIndexRowForDoc(kvreader, index.IndexInternalID(docID))
if err != nil {
docBackIndexRowErr = err
return
@ -1034,3 +998,33 @@ func (udc *UpsideDownCouch) fieldIndexOrNewRow(name string) (uint16, *FieldRow)
func init() {
registry.RegisterIndexType(Name, NewUpsideDownCouch)
}
// backIndexRowForDoc looks up the back index row for the given internal
// document ID using the supplied KV reader. It returns (nil, nil) when no
// back index entry exists for the document.
func backIndexRowForDoc(kvreader store.KVReader, docID index.IndexInternalID) (*BackIndexRow, error) {
	// build the back index key via a scratch row and a pooled buffer
	scratch := &BackIndexRow{
		doc: docID,
	}
	buf := GetRowBuffer()
	if scratch.KeySize() > len(buf) {
		buf = make([]byte, 2*scratch.KeySize())
	}
	defer PutRowBuffer(buf)
	n, err := scratch.KeyTo(buf)
	if err != nil {
		return nil, err
	}
	val, err := kvreader.Get(buf[:n])
	if err != nil {
		return nil, err
	}
	// nil value: no back index row stored for this document
	if val == nil {
		return nil, nil
	}
	return NewBackIndexRowKV(buf[:n], val)
}

View File

@ -51,13 +51,21 @@ func TestIndexOpenReopen(t *testing.T) {
}
var expectedCount uint64
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
// opening the database should have inserted a version
expectedLength := uint64(1)
@ -116,13 +124,21 @@ func TestIndexInsert(t *testing.T) {
}()
var expectedCount uint64
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
doc := document.NewDocument("1")
doc.AddField(document.NewTextField("name", []uint64{}, []byte("test")))
@ -132,13 +148,21 @@ func TestIndexInsert(t *testing.T) {
}
expectedCount++
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
// should have 4 rows (1 for version, 1 for schema field, and 1 for single term, and 1 for the term count, and 1 for the back index entry)
expectedLength := uint64(1 + 1 + 1 + 1 + 1)
@ -176,13 +200,21 @@ func TestIndexInsertThenDelete(t *testing.T) {
}()
var expectedCount uint64
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
doc := document.NewDocument("1")
doc.AddField(document.NewTextField("name", []uint64{}, []byte("test")))
@ -200,13 +232,21 @@ func TestIndexInsertThenDelete(t *testing.T) {
}
expectedCount++
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
err = idx.Delete("1")
if err != nil {
@ -214,13 +254,21 @@ func TestIndexInsertThenDelete(t *testing.T) {
}
expectedCount--
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
err = idx.Delete("2")
if err != nil {
@ -228,13 +276,21 @@ func TestIndexInsertThenDelete(t *testing.T) {
}
expectedCount--
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
// should have 2 rows (1 for version, 1 for schema field, 1 for dictionary row garbage)
expectedLength := uint64(1 + 1 + 1)
@ -390,12 +446,20 @@ func TestIndexInsertMultiple(t *testing.T) {
}
expectedCount++
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("expected doc count: %d, got %d", expectedCount, docCount)
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
}
@ -424,13 +488,21 @@ func TestIndexInsertWithStore(t *testing.T) {
}()
var expectedCount uint64
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
doc := document.NewDocument("1")
doc.AddField(document.NewTextFieldWithIndexingOptions("name", []uint64{}, []byte("test"), document.IndexField|document.StoreField))
@ -440,13 +512,21 @@ func TestIndexInsertWithStore(t *testing.T) {
}
expectedCount++
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
// should have 6 rows (1 for version, 1 for schema field, and 1 for single term, and 1 for the stored field and 1 for the term count, and 1 for the back index entry)
expectedLength := uint64(1 + 1 + 1 + 1 + 1 + 1)
@ -654,7 +734,10 @@ func TestIndexBatch(t *testing.T) {
}
}()
docCount := indexReader.DocCount()
docCount, err := indexReader.DocCount()
if err != nil {
t.Fatal(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
@ -663,7 +746,7 @@ func TestIndexBatch(t *testing.T) {
if err != nil {
t.Error(err)
}
docIds := make([]index.IndexInternalID, 0)
var docIds []index.IndexInternalID
docID, err := docIDReader.Next()
for docID != nil && err == nil {
docIds = append(docIds, docID)
@ -703,13 +786,21 @@ func TestIndexInsertUpdateDeleteWithMultipleTypesStored(t *testing.T) {
}()
var expectedCount uint64
docCount, err := idx.DocCount()
reader, err := idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err := reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
doc := document.NewDocument("1")
doc.AddField(document.NewTextFieldWithIndexingOptions("name", []uint64{}, []byte("test"), document.IndexField|document.StoreField))
@ -725,13 +816,21 @@ func TestIndexInsertUpdateDeleteWithMultipleTypesStored(t *testing.T) {
}
expectedCount++
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
// should have 72 rows
// 1 for version
@ -818,7 +917,10 @@ func TestIndexInsertUpdateDeleteWithMultipleTypesStored(t *testing.T) {
}
// expected doc count shouldn't have changed
docCount = indexReader2.DocCount()
docCount, err = indexReader2.DocCount()
if err != nil {
t.Fatal(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
@ -862,13 +964,21 @@ func TestIndexInsertUpdateDeleteWithMultipleTypesStored(t *testing.T) {
expectedCount--
// expected doc count shouldn't have changed
docCount, err = idx.DocCount()
reader, err = idx.Reader()
if err != nil {
t.Fatal(err)
}
docCount, err = reader.DocCount()
if err != nil {
t.Error(err)
}
if docCount != expectedCount {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
err = reader.Close()
if err != nil {
t.Fatal(err)
}
}
func TestIndexInsertFields(t *testing.T) {
@ -1308,7 +1418,7 @@ func TestLargeField(t *testing.T) {
}
}()
largeFieldValue := make([]byte, 0)
var largeFieldValue []byte
for len(largeFieldValue) < RowBufferSize {
largeFieldValue = append(largeFieldValue, bleveWikiArticle1K...)
}

View File

@ -251,54 +251,6 @@ func (i *indexAliasImpl) FieldDictPrefix(field string, termPrefix []byte) (index
}, nil
}
// DumpAll forwards to the single wrapped index's DumpAll. It returns
// nil when the alias is closed or does not point at exactly one index.
func (i *indexAliasImpl) DumpAll() chan interface{} {
	i.mutex.RLock()
	defer i.mutex.RUnlock()

	if !i.open {
		return nil
	}
	if err := i.isAliasToSingleIndex(); err != nil {
		return nil
	}
	return i.indexes[0].DumpAll()
}
// DumpDoc forwards to the single wrapped index's DumpDoc for id. It
// returns nil when the alias is closed or does not point at exactly
// one index.
func (i *indexAliasImpl) DumpDoc(id string) chan interface{} {
	i.mutex.RLock()
	defer i.mutex.RUnlock()

	if !i.open {
		return nil
	}
	if err := i.isAliasToSingleIndex(); err != nil {
		return nil
	}
	return i.indexes[0].DumpDoc(id)
}
// DumpFields forwards to the single wrapped index's DumpFields. It
// returns nil when the alias is closed or does not point at exactly
// one index.
func (i *indexAliasImpl) DumpFields() chan interface{} {
	i.mutex.RLock()
	defer i.mutex.RUnlock()

	if !i.open {
		return nil
	}
	if err := i.isAliasToSingleIndex(); err != nil {
		return nil
	}
	return i.indexes[0].DumpFields()
}
func (i *indexAliasImpl) Close() error {
i.mutex.Lock()
defer i.mutex.Unlock()

View File

@ -64,29 +64,14 @@ func TestIndexAliasSingle(t *testing.T) {
t.Errorf("expected %v, got %v", expectedError, err)
}
res := alias.DumpAll()
if res != nil {
t.Errorf("expected nil, got %v", res)
}
res = alias.DumpDoc("a")
if res != nil {
t.Errorf("expected nil, got %v", res)
}
res = alias.DumpFields()
if res != nil {
t.Errorf("expected nil, got %v", res)
}
mapping := alias.Mapping()
if mapping != nil {
t.Errorf("expected nil, got %v", res)
t.Errorf("expected nil, got %v", mapping)
}
indexStat := alias.Stats()
if indexStat != nil {
t.Errorf("expected nil, got %v", res)
t.Errorf("expected nil, got %v", indexStat)
}
// now a few things that should work
@ -153,29 +138,14 @@ func TestIndexAliasSingle(t *testing.T) {
t.Errorf("expected %v, got %v", expectedError2, err)
}
res = alias.DumpAll()
if res != nil {
t.Errorf("expected nil, got %v", res)
}
res = alias.DumpDoc("a")
if res != nil {
t.Errorf("expected nil, got %v", res)
}
res = alias.DumpFields()
if res != nil {
t.Errorf("expected nil, got %v", res)
}
mapping = alias.Mapping()
if mapping != nil {
t.Errorf("expected nil, got %v", res)
t.Errorf("expected nil, got %v", mapping)
}
indexStat = alias.Stats()
if indexStat != nil {
t.Errorf("expected nil, got %v", res)
t.Errorf("expected nil, got %v", indexStat)
}
// now a few things that should work
@ -240,29 +210,14 @@ func TestIndexAliasSingle(t *testing.T) {
t.Errorf("expected %v, got %v", expectedError3, err)
}
res = alias.DumpAll()
if res != nil {
t.Errorf("expected nil, got %v", res)
}
res = alias.DumpDoc("a")
if res != nil {
t.Errorf("expected nil, got %v", res)
}
res = alias.DumpFields()
if res != nil {
t.Errorf("expected nil, got %v", res)
}
mapping = alias.Mapping()
if mapping != nil {
t.Errorf("expected nil, got %v", res)
t.Errorf("expected nil, got %v", mapping)
}
indexStat = alias.Stats()
if indexStat != nil {
t.Errorf("expected nil, got %v", res)
t.Errorf("expected nil, got %v", indexStat)
}
// now a few things that should work
@ -328,29 +283,14 @@ func TestIndexAliasClosed(t *testing.T) {
t.Errorf("expected %v, got %v", ErrorIndexClosed, err)
}
res := alias.DumpAll()
if res != nil {
t.Errorf("expected nil, got %v", res)
}
res = alias.DumpDoc("a")
if res != nil {
t.Errorf("expected nil, got %v", res)
}
res = alias.DumpFields()
if res != nil {
t.Errorf("expected nil, got %v", res)
}
mapping := alias.Mapping()
if mapping != nil {
t.Errorf("expected nil, got %v", res)
t.Errorf("expected nil, got %v", mapping)
}
indexStat := alias.Stats()
if indexStat != nil {
t.Errorf("expected nil, got %v", res)
t.Errorf("expected nil, got %v", indexStat)
}
// now a few things that should work
@ -410,29 +350,14 @@ func TestIndexAliasEmpty(t *testing.T) {
t.Errorf("expected %v, got %v", ErrorAliasEmpty, err)
}
res := alias.DumpAll()
if res != nil {
t.Errorf("expected nil, got %v", res)
}
res = alias.DumpDoc("a")
if res != nil {
t.Errorf("expected nil, got %v", res)
}
res = alias.DumpFields()
if res != nil {
t.Errorf("expected nil, got %v", res)
}
mapping := alias.Mapping()
if mapping != nil {
t.Errorf("expected nil, got %v", res)
t.Errorf("expected nil, got %v", mapping)
}
indexStat := alias.Stats()
if indexStat != nil {
t.Errorf("expected nil, got %v", res)
t.Errorf("expected nil, got %v", indexStat)
}
// now a few things that should work
@ -538,29 +463,14 @@ func TestIndexAliasMulti(t *testing.T) {
t.Errorf("expected %v, got %v", ErrorAliasMulti, err)
}
res := alias.DumpAll()
if res != nil {
t.Errorf("expected nil, got %v", res)
}
res = alias.DumpDoc("a")
if res != nil {
t.Errorf("expected nil, got %v", res)
}
res = alias.DumpFields()
if res != nil {
t.Errorf("expected nil, got %v", res)
}
mapping := alias.Mapping()
if mapping != nil {
t.Errorf("expected nil, got %v", res)
t.Errorf("expected nil, got %v", mapping)
}
indexStat := alias.Stats()
if indexStat != nil {
t.Errorf("expected nil, got %v", res)
t.Errorf("expected nil, got %v", indexStat)
}
// now a few things that should work
@ -1355,18 +1265,6 @@ func (i *stubIndex) FieldDictPrefix(field string, termPrefix []byte) (index.Fiel
return nil, i.err
}
// DumpAll is a stub for tests; it always returns a nil channel.
func (i *stubIndex) DumpAll() chan interface{} {
	return nil
}
// DumpDoc is a stub for tests; it always returns a nil channel.
func (i *stubIndex) DumpDoc(id string) chan interface{} {
	return nil
}
// DumpFields is a stub for tests; it always returns a nil channel.
func (i *stubIndex) DumpFields() chan interface{} {
	return nil
}
// Close returns the stub's configured error (nil by default).
func (i *stubIndex) Close() error {
	return i.err
}

View File

@ -355,7 +355,7 @@ func (i *indexImpl) Document(id string) (doc *document.Document, err error) {
// DocCount returns the number of documents in the
// index.
func (i *indexImpl) DocCount() (uint64, error) {
func (i *indexImpl) DocCount() (count uint64, err error) {
i.mutex.RLock()
defer i.mutex.RUnlock()
@ -363,7 +363,19 @@ func (i *indexImpl) DocCount() (uint64, error) {
return 0, ErrorIndexClosed
}
return i.i.DocCount()
// open a reader for this search
indexReader, err := i.i.Reader()
if err != nil {
return 0, fmt.Errorf("error opening index reader %v", err)
}
defer func() {
if cerr := indexReader.Close(); err == nil && cerr != nil {
err = cerr
}
}()
count, err = indexReader.DocCount()
return
}
// Search executes a search request operation.
@ -652,48 +664,6 @@ func (i *indexImpl) FieldDictPrefix(field string, termPrefix []byte) (index.Fiel
}, nil
}
// DumpAll writes all index rows to a channel, or returns nil when the
// index is closed.
// INTERNAL: do not rely on this function, it is
// only intended to be used by the debug utilities
func (i *indexImpl) DumpAll() chan interface{} {
	i.mutex.RLock()
	defer i.mutex.RUnlock()

	if !i.open {
		return nil
	}
	return i.i.DumpAll()
}
// DumpFields writes all field rows in the index
// to a channel, or returns nil when the index is closed.
// INTERNAL: do not rely on this function, it is
// only intended to be used by the debug utilities
func (i *indexImpl) DumpFields() chan interface{} {
	i.mutex.RLock()
	defer i.mutex.RUnlock()

	if !i.open {
		return nil
	}
	return i.i.DumpFields()
}
// DumpDoc writes all rows in the index associated
// with the specified identifier to a channel, or returns nil when the
// index is closed.
// INTERNAL: do not rely on this function, it is
// only intended to be used by the debug utilities
func (i *indexImpl) DumpDoc(id string) chan interface{} {
	i.mutex.RLock()
	defer i.mutex.RUnlock()

	if !i.open {
		return nil
	}
	return i.i.DumpDoc(id)
}
func (i *indexImpl) Close() error {
i.mutex.Lock()
defer i.mutex.Unlock()

View File

@ -111,8 +111,8 @@ func (sr *stubReader) GetInternal(key []byte) ([]byte, error) {
return nil, nil
}
func (sr *stubReader) DocCount() uint64 {
return 0
func (sr *stubReader) DocCount() (uint64, error) {
return 0, nil
}
func (sr *stubReader) ExternalID(id index.IndexInternalID) (string, error) {
@ -123,6 +123,18 @@ func (sr *stubReader) InternalID(id string) (index.IndexInternalID, error) {
return []byte(id), nil
}
// DumpAll is a stub for tests; it always returns a nil channel.
func (sr *stubReader) DumpAll() chan interface{} {
	return nil
}
// DumpDoc is a stub for tests; it always returns a nil channel.
func (sr *stubReader) DumpDoc(id string) chan interface{} {
	return nil
}
// DumpFields is a stub for tests; it always returns a nil channel.
func (sr *stubReader) DumpFields() chan interface{} {
	return nil
}
// Close is a no-op for the stub reader; it never fails.
func (sr *stubReader) Close() error {
	return nil
}

View File

@ -19,6 +19,7 @@ type MatchAllSearcher struct {
indexReader index.IndexReader
reader index.DocIDReader
scorer *scorers.ConstantScorer
count uint64
}
func NewMatchAllSearcher(indexReader index.IndexReader, boost float64, explain bool) (*MatchAllSearcher, error) {
@ -26,16 +27,21 @@ func NewMatchAllSearcher(indexReader index.IndexReader, boost float64, explain b
if err != nil {
return nil, err
}
count, err := indexReader.DocCount()
if err != nil {
return nil, err
}
scorer := scorers.NewConstantScorer(1.0, boost, explain)
return &MatchAllSearcher{
indexReader: indexReader,
reader: reader,
scorer: scorer,
count: count,
}, nil
}
func (s *MatchAllSearcher) Count() uint64 {
return s.indexReader.DocCount()
return s.count
}
func (s *MatchAllSearcher) Weight() float64 {

View File

@ -30,7 +30,11 @@ func NewTermSearcher(indexReader index.IndexReader, term string, field string, b
if err != nil {
return nil, err
}
scorer := scorers.NewTermQueryScorer(term, field, boost, indexReader.DocCount(), reader.Count(), explain)
count, err := indexReader.DocCount()
if err != nil {
return nil, err
}
scorer := scorers.NewTermQueryScorer(term, field, boost, count, reader.Count(), explain)
return &TermSearcher{
indexReader: indexReader,
term: term,

View File

@ -150,7 +150,7 @@ func TestTermSearcher(t *testing.T) {
}()
searcher.SetQueryNorm(2.0)
docCount, err := i.DocCount()
docCount, err := indexReader.DocCount()
if err != nil {
t.Fatal(err)
}

View File

@ -81,19 +81,27 @@ index specified by -index.
return
}
internalIndex, _, err := index.Advanced()
if err != nil {
log.Fatal(err)
}
internalIndexReader, err := internalIndex.Reader()
if err != nil {
log.Fatal(err)
}
var dumpChan chan interface{}
if *docID != "" {
if *fieldsOnly {
log.Fatal("-docID cannot be used with -fields")
}
dumpChan = index.DumpDoc(*docID)
dumpChan = internalIndexReader.DumpDoc(*docID)
} else if *fieldsOnly {
dumpChan = index.DumpFields()
dumpChan = internalIndexReader.DumpFields()
} else if *dictionary != "" {
dumpDictionary(index, *dictionary)
return
} else {
dumpChan = index.DumpAll()
dumpChan = internalIndexReader.DumpAll()
}
for rowOrErr := range dumpChan {
@ -105,6 +113,10 @@ index specified by -index.
fmt.Printf("Key: % -100x\nValue: % -100x\n\n", rowOrErr.Key(), rowOrErr.Value())
}
}
err = internalIndexReader.Close()
if err != nil {
log.Fatal(err)
}
}
func dumpDictionary(index bleve.Index, field string) {