2015-08-25 20:52:42 +02:00
|
|
|
// Copyright (c) 2015 Couchbase, Inc.
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
|
|
|
|
// except in compliance with the License. You may obtain a copy of the License at
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
// Unless required by applicable law or agreed to in writing, software distributed under the
|
|
|
|
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
|
|
|
// either express or implied. See the License for the specific language governing permissions
|
|
|
|
// and limitations under the License.
|
|
|
|
|
|
|
|
package firestorm
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"fmt"
|
|
|
|
"sort"
|
|
|
|
"sync/atomic"
|
|
|
|
|
|
|
|
"github.com/blevesearch/bleve/index/store"
|
|
|
|
)
|
|
|
|
|
|
|
|
// IDFieldName is the reserved field name under which each document's
// identifier is indexed.
const IDFieldName = "_id"
|
|
|
|
|
2015-10-28 16:26:01 +01:00
|
|
|
func (f *Firestorm) bootstrap() (err error) {
|
|
|
|
|
|
|
|
kvwriter, err := f.store.Writer()
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
if cerr := kvwriter.Close(); err == nil && cerr != nil {
|
|
|
|
err = cerr
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
2015-08-25 20:52:42 +02:00
|
|
|
// record version
|
2015-10-28 16:26:01 +01:00
|
|
|
err = f.storeVersion(kvwriter)
|
2015-08-25 20:52:42 +02:00
|
|
|
if err != nil {
|
2015-10-28 16:26:01 +01:00
|
|
|
return
|
2015-08-25 20:52:42 +02:00
|
|
|
}
|
|
|
|
// define _id field
|
|
|
|
_, idFieldRow := f.fieldIndexOrNewRow(IDFieldName)
|
2015-10-28 16:26:01 +01:00
|
|
|
|
|
|
|
wb := kvwriter.NewBatch()
|
|
|
|
wb.Set(idFieldRow.Key(), idFieldRow.Value())
|
|
|
|
err = kvwriter.ExecuteBatch(wb)
|
2015-08-25 20:52:42 +02:00
|
|
|
if err != nil {
|
2015-10-28 16:26:01 +01:00
|
|
|
return
|
2015-08-25 20:52:42 +02:00
|
|
|
}
|
|
|
|
|
2015-10-28 16:26:01 +01:00
|
|
|
return
|
2015-08-25 20:52:42 +02:00
|
|
|
}
|
|
|
|
|
2015-10-28 16:26:01 +01:00
|
|
|
func (f *Firestorm) warmup(reader store.KVReader) error {
|
2015-08-25 20:52:42 +02:00
|
|
|
// load all the existing fields
|
2015-10-28 16:26:01 +01:00
|
|
|
err := f.loadFields(reader)
|
2015-08-25 20:52:42 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// walk the term frequency info for _id
|
|
|
|
// this allows us to find deleted doc numbers
|
|
|
|
// and seed the doc count
|
|
|
|
idField, existed := f.fieldCache.FieldNamed(IDFieldName, false)
|
|
|
|
if !existed {
|
|
|
|
return fmt.Errorf("_id field missing, cannot proceed")
|
|
|
|
}
|
|
|
|
|
|
|
|
tfkPrefix := TermFreqIteratorStart(idField, nil)
|
|
|
|
|
2016-01-07 00:53:12 +01:00
|
|
|
var tfk TermFreqRow
|
2015-08-25 20:52:42 +02:00
|
|
|
var lastDocId []byte
|
|
|
|
lastDocNumbers := make(DocNumberList, 1)
|
2015-10-28 16:26:01 +01:00
|
|
|
err = visitPrefix(reader, tfkPrefix, func(key, val []byte) (bool, error) {
|
2016-01-07 00:53:12 +01:00
|
|
|
err := tfk.ParseKey(key)
|
2015-08-25 20:52:42 +02:00
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
docID := tfk.DocID()
|
|
|
|
docNum := tfk.DocNum()
|
|
|
|
|
|
|
|
if docNum > f.highDocNumber {
|
|
|
|
f.highDocNumber = docNum
|
|
|
|
}
|
|
|
|
if docNum > f.compensator.maxRead {
|
|
|
|
f.compensator.maxRead = docNum
|
|
|
|
}
|
|
|
|
|
|
|
|
// check for consecutive records
|
|
|
|
if bytes.Compare(docID, lastDocId) == 0 {
|
|
|
|
lastDocNumbers = append(lastDocNumbers, docNum)
|
|
|
|
} else {
|
|
|
|
// new doc id
|
2016-03-20 16:02:13 +01:00
|
|
|
atomic.AddUint64(&f.docCount, 1)
|
2015-08-25 20:52:42 +02:00
|
|
|
|
|
|
|
// last docID had multiple doc numbers
|
|
|
|
if len(lastDocNumbers) > 1 {
|
|
|
|
f.addOldDocNumbers(lastDocNumbers, lastDocId)
|
|
|
|
|
|
|
|
// reset size to 1
|
|
|
|
lastDocNumbers = make(DocNumberList, 1)
|
|
|
|
}
|
2015-11-30 16:18:14 +01:00
|
|
|
lastDocNumbers = lastDocNumbers[:1]
|
2015-08-25 20:52:42 +02:00
|
|
|
lastDocNumbers[0] = docNum
|
2015-11-30 16:18:14 +01:00
|
|
|
lastDocId = make([]byte, len(docID))
|
|
|
|
copy(lastDocId, docID)
|
2015-08-25 20:52:42 +02:00
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// be sure to finish up check on final row
|
|
|
|
if len(lastDocNumbers) > 1 {
|
|
|
|
f.addOldDocNumbers(lastDocNumbers, lastDocId)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *Firestorm) addOldDocNumbers(docNumberList DocNumberList, docID []byte) {
|
|
|
|
sort.Sort(docNumberList)
|
|
|
|
// high doc number is OK, rest are deleted
|
|
|
|
for _, dn := range docNumberList[1:] {
|
|
|
|
// f.deletedDocNumbers.Add(dn, docID)
|
|
|
|
f.compensator.deletedDocNumbers.Set(uint(dn))
|
|
|
|
f.garbageCollector.Notify(dn, docID)
|
|
|
|
}
|
|
|
|
}
|