// Copyright (c) 2017 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package zap

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"math"
	"os"
	"sort"

	"github.com/RoaringBitmap/roaring"
	"github.com/Smerity/govarint"
	"github.com/couchbase/vellum"
	"github.com/golang/snappy"
)

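// DefaultFileMergerBufferSize is the size, in bytes, of the bufio
// writer used while merging segment files.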
var DefaultFileMergerBufferSize = 1024 * 1024

const docDropped = math.MaxUint64 // sentinel docNum to represent a deleted doc

// Merge takes a slice of zap segments and bit masks describing which
// documents may be dropped, and creates a new segment containing the
// remaining data. This new segment is built at the specified path,
// with the provided chunkFactor.
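// It returns the new doc numbers for each input segment and the
// number of bytes written to the new segment file.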
func Merge(segments []*Segment, drops []*roaring.Bitmap, path string,
	chunkFactor uint32) ([][]uint64, uint64, error) {
	flag := os.O_RDWR | os.O_CREATE

	f, err := os.OpenFile(path, flag, 0600)
	if err != nil {
		return nil, 0, err
	}

	cleanup := func() {
		_ = f.Close()
		_ = os.Remove(path)
	}

	segmentBases := make([]*SegmentBase, len(segments))
	for segmenti, segment := range segments {
		segmentBases[segmenti] = &segment.SegmentBase
	}

	// buffer the output
	br := bufio.NewWriterSize(f, DefaultFileMergerBufferSize)

	// wrap it for counting (tracking offsets)
	cr := NewCountHashWriter(br)

	newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, _, _, _, err :=
		MergeToWriter(segmentBases, drops, chunkFactor, cr)
	if err != nil {
		cleanup()
		return nil, 0, err
	}

	err = persistFooter(numDocs, storedIndexOffset, fieldsIndexOffset,
		docValueOffset, chunkFactor, cr.Sum32(), cr)
	if err != nil {
		cleanup()
		return nil, 0, err
	}

	err = br.Flush()
	if err != nil {
		cleanup()
		return nil, 0, err
	}

	err = f.Sync()
	if err != nil {
		cleanup()
		return nil, 0, err
	}

	err = f.Close()
	if err != nil {
		cleanup()
		return nil, 0, err
	}

	return newDocNums, uint64(cr.Count()), nil
}

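// MergeToWriter merges the given segments (applying the corresponding
// drop bitmaps) and writes the merged segment data, minus the footer,
// to the provided CountHashWriter, returning the values needed to
// later persist the footer.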
func MergeToWriter(segments []*SegmentBase, drops []*roaring.Bitmap,
	chunkFactor uint32, cr *CountHashWriter) (
	newDocNums [][]uint64,
	numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset uint64,
	dictLocs []uint64, fieldsInv []string, fieldsMap map[string]uint16,
	err error) {
	docValueOffset = uint64(fieldNotUninverted)

	var fieldsSame bool
	fieldsSame, fieldsInv = mergeFields(segments)
	fieldsMap = mapFields(fieldsInv)

	numDocs = computeNewDocCount(segments, drops)
	if numDocs > 0 {
		storedIndexOffset, newDocNums, err = mergeStoredAndRemap(segments, drops,
			fieldsMap, fieldsInv, fieldsSame, numDocs, cr)
		if err != nil {
			return nil, 0, 0, 0, 0, nil, nil, nil, err
		}

		dictLocs, docValueOffset, err = persistMergedRest(segments, drops,
			fieldsInv, fieldsMap, fieldsSame,
			newDocNums, numDocs, chunkFactor, cr)
		if err != nil {
			return nil, 0, 0, 0, 0, nil, nil, nil, err
		}
	} else {
		dictLocs = make([]uint64, len(fieldsInv))
	}

	fieldsIndexOffset, err = persistFields(fieldsInv, cr, dictLocs)
	if err != nil {
		return nil, 0, 0, 0, 0, nil, nil, nil, err
	}

	return newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, dictLocs, fieldsInv, fieldsMap, nil
}

// mapFields takes the fieldsInv list and returns a map of fieldName
// to fieldID+1
func mapFields(fields []string) map[string]uint16 {
	rv := make(map[string]uint16, len(fields))
	for i, fieldName := range fields {
		rv[fieldName] = uint16(i) + 1
	}
	return rv
}

// computeNewDocCount determines how many documents will be in the newly
// merged segment when obsoleted docs are dropped
func computeNewDocCount(segments []*SegmentBase, drops []*roaring.Bitmap) uint64 {
	var newDocCount uint64
	for segI, segment := range segments {
		newDocCount += segment.numDocs
		if drops[segI] != nil {
			newDocCount -= drops[segI].GetCardinality()
		}
	}
	return newDocCount
}

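// persistMergedRest merges and writes out, field by field, the term
// dictionaries, postings lists and doc value data of the input
// segments. It returns the dictionary offset per field and the offset
// of the field doc value locations index.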
func persistMergedRest(segments []*SegmentBase, dropsIn []*roaring.Bitmap,
	fieldsInv []string, fieldsMap map[string]uint16, fieldsSame bool,
	newDocNumsIn [][]uint64, newSegDocCount uint64, chunkFactor uint32,
	w *CountHashWriter) ([]uint64, uint64, error) {

	var bufMaxVarintLen64 []byte = make([]byte, binary.MaxVarintLen64)
	var bufLoc []uint64

	var postings *PostingsList
	var postItr *PostingsIterator

	rv := make([]uint64, len(fieldsInv))
	fieldDvLocs := make([]uint64, len(fieldsInv))

	tfEncoder := newChunkedIntCoder(uint64(chunkFactor), newSegDocCount-1)
	locEncoder := newChunkedIntCoder(uint64(chunkFactor), newSegDocCount-1)

	// docTermMap is keyed by docNum, where the array impl provides
	// better memory usage behavior than a sparse-friendlier hashmap
	// for when docs have much structural similarity (i.e., every doc
	// has a given field)
	var docTermMap [][]byte

	var vellumBuf bytes.Buffer
	newVellum, err := vellum.New(&vellumBuf, nil)
	if err != nil {
		return nil, 0, err
	}

	newRoaring := roaring.NewBitmap()
	newRoaringLocs := roaring.NewBitmap()

	// for each field
	for fieldID, fieldName := range fieldsInv {
		// collect FST iterators from all active segments for this field
		var newDocNums [][]uint64
		var drops []*roaring.Bitmap
		var dicts []*Dictionary
		var itrs []vellum.Iterator

		for segmentI, segment := range segments {
			dict, err2 := segment.dictionary(fieldName)
			if err2 != nil {
				return nil, 0, err2
			}
			if dict != nil && dict.fst != nil {
				itr, err2 := dict.fst.Iterator(nil, nil)
				if err2 != nil && err2 != vellum.ErrIteratorDone {
					return nil, 0, err2
				}
				if itr != nil {
					newDocNums = append(newDocNums, newDocNumsIn[segmentI])
					if dropsIn[segmentI] != nil && !dropsIn[segmentI].IsEmpty() {
						drops = append(drops, dropsIn[segmentI])
					} else {
						drops = append(drops, nil)
					}
					dicts = append(dicts, dict)
					itrs = append(itrs, itr)
				}
			}
		}

		if uint64(cap(docTermMap)) < newSegDocCount {
			docTermMap = make([][]byte, newSegDocCount)
		} else {
			docTermMap = docTermMap[0:newSegDocCount]
			for docNum := range docTermMap { // reset the docTermMap
				docTermMap[docNum] = docTermMap[docNum][:0]
			}
		}

		var prevTerm []byte

		newRoaring.Clear()
		newRoaringLocs.Clear()

		var lastDocNum, lastFreq, lastNorm uint64

		// determines whether to use "1-hit" encoding optimization
		// when a term appears in only 1 doc, with no loc info,
		// has freq of 1, and the docNum fits into 31-bits
		use1HitEncoding := func(termCardinality uint64) (bool, uint64, uint64) {
			if termCardinality == uint64(1) && locEncoder.FinalSize() <= 0 {
				docNum := uint64(newRoaring.Minimum())
				if under32Bits(docNum) && docNum == lastDocNum && lastFreq == 1 {
					return true, docNum, lastNorm
				}
			}
			return false, 0, 0
		}

		finishTerm := func(term []byte) error {
			if term == nil {
				return nil
			}

			tfEncoder.Close()
			locEncoder.Close()

			postingsOffset, err := writePostings(
				newRoaring, newRoaringLocs, tfEncoder, locEncoder,
				use1HitEncoding, w, bufMaxVarintLen64)
			if err != nil {
				return err
			}

			if postingsOffset > 0 {
				err = newVellum.Insert(term, postingsOffset)
				if err != nil {
					return err
				}
			}

			newRoaring.Clear()
			newRoaringLocs.Clear()

			tfEncoder.Reset()
			locEncoder.Reset()

			lastDocNum = 0
			lastFreq = 0
			lastNorm = 0

			return nil
		}

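		// the enumerator walks all of this field's FST iterators
		// together; Current() reports the next term plus which iterator
		// (and thus which dictionary) it came from, and the loop below
		// relies on hits for the same term arriving consecutively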
		enumerator, err := newEnumerator(itrs)

		for err == nil {
			term, itrI, postingsOffset := enumerator.Current()

			if !bytes.Equal(prevTerm, term) {
				// if the term changed, write out the info collected
				// for the previous term
				err2 := finishTerm(prevTerm)
				if err2 != nil {
					return nil, 0, err2
				}
			}

			var err2 error
			postings, err2 = dicts[itrI].postingsListFromOffset(
				postingsOffset, drops[itrI], postings)
			if err2 != nil {
				return nil, 0, err2
			}

			postItr = postings.iterator(postItr)

			if fieldsSame {
				// can optimize by copying freq/norm/loc bytes directly
				lastDocNum, lastFreq, lastNorm, err = mergeTermFreqNormLocsByCopying(
					term, postItr, newDocNums[itrI], newRoaring, newRoaringLocs,
					tfEncoder, locEncoder, docTermMap)
			} else {
				lastDocNum, lastFreq, lastNorm, bufLoc, err = mergeTermFreqNormLocs(
					fieldsMap, term, postItr, newDocNums[itrI], newRoaring, newRoaringLocs,
					tfEncoder, locEncoder, docTermMap, bufLoc)
			}
			if err != nil {
				return nil, 0, err
			}

			prevTerm = prevTerm[:0] // copy to prevTerm in case Next() reuses term mem
			prevTerm = append(prevTerm, term...)

			err = enumerator.Next()
		}
		if err != nil && err != vellum.ErrIteratorDone {
			return nil, 0, err
		}

		err = finishTerm(prevTerm)
		if err != nil {
			return nil, 0, err
		}

		dictOffset := uint64(w.Count())

		err = newVellum.Close()
		if err != nil {
			return nil, 0, err
		}
		vellumData := vellumBuf.Bytes()

		// write out the length of the vellum data
		n := binary.PutUvarint(bufMaxVarintLen64, uint64(len(vellumData)))
		_, err = w.Write(bufMaxVarintLen64[:n])
		if err != nil {
			return nil, 0, err
		}

		// write this vellum to disk
		_, err = w.Write(vellumData)
		if err != nil {
			return nil, 0, err
		}

		rv[fieldID] = dictOffset

		// update the field doc values
		fdvEncoder := newChunkedContentCoder(uint64(chunkFactor), newSegDocCount-1)
		for docNum, docTerms := range docTermMap {
			if len(docTerms) > 0 {
				err = fdvEncoder.Add(uint64(docNum), docTerms)
				if err != nil {
					return nil, 0, err
				}
			}
		}
		err = fdvEncoder.Close()
		if err != nil {
			return nil, 0, err
		}

		// get the field doc value offset
		fieldDvLocs[fieldID] = uint64(w.Count())

		// persist the doc value details for this field
		_, err = fdvEncoder.Write(w)
		if err != nil {
			return nil, 0, err
		}

		// reset vellum buffer and vellum builder
		vellumBuf.Reset()
		err = newVellum.Reset(&vellumBuf)
		if err != nil {
			return nil, 0, err
		}
	}

	fieldDvLocsOffset := uint64(w.Count())

	buf := bufMaxVarintLen64
	for _, offset := range fieldDvLocs {
		n := binary.PutUvarint(buf, uint64(offset))
		_, err := w.Write(buf[:n])
		if err != nil {
			return nil, 0, err
		}
	}

	return rv, fieldDvLocsOffset, nil
}

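// mergeTermFreqNormLocs iterates a term's postings, remaps each hit to
// its new doc number, and re-encodes the freq/norm and location details
// through the provided int coders, while also extending docTermMap for
// later doc value persistence.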
func mergeTermFreqNormLocs(fieldsMap map[string]uint16, term []byte, postItr *PostingsIterator,
	newDocNums []uint64, newRoaring *roaring.Bitmap, newRoaringLocs *roaring.Bitmap,
	tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder, docTermMap [][]byte,
	bufLoc []uint64) (
	lastDocNum uint64, lastFreq uint64, lastNorm uint64, bufLocOut []uint64, err error) {
	next, err := postItr.Next()
	for next != nil && err == nil {
		hitNewDocNum := newDocNums[next.Number()]
		if hitNewDocNum == docDropped {
			return 0, 0, 0, nil, fmt.Errorf("see hit with dropped docNum")
		}

		newRoaring.Add(uint32(hitNewDocNum))

		nextFreq := next.Frequency()
		nextNorm := uint64(math.Float32bits(float32(next.Norm())))

		err = tfEncoder.Add(hitNewDocNum, nextFreq, nextNorm)
		if err != nil {
			return 0, 0, 0, nil, err
		}

		locs := next.Locations()
		if len(locs) > 0 {
			newRoaringLocs.Add(uint32(hitNewDocNum))

			for _, loc := range locs {
				if cap(bufLoc) < 5+len(loc.ArrayPositions()) {
					bufLoc = make([]uint64, 0, 5+len(loc.ArrayPositions()))
				}
				args := bufLoc[0:5]
				args[0] = uint64(fieldsMap[loc.Field()] - 1)
				args[1] = loc.Pos()
				args[2] = loc.Start()
				args[3] = loc.End()
				args[4] = uint64(len(loc.ArrayPositions()))
				args = append(args, loc.ArrayPositions()...)
				err = locEncoder.Add(hitNewDocNum, args...)
				if err != nil {
					return 0, 0, 0, nil, err
				}
			}
		}

		docTermMap[hitNewDocNum] =
			append(append(docTermMap[hitNewDocNum], term...), termSeparator)

		lastDocNum = hitNewDocNum
		lastFreq = nextFreq
		lastNorm = nextNorm

		next, err = postItr.Next()
	}

	return lastDocNum, lastFreq, lastNorm, bufLoc, err
}

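// mergeTermFreqNormLocsByCopying is the fieldsSame fast path: since the
// field numbering does not change, the already-encoded freq/norm and
// location bytes from the source postings can be fed straight into the
// int coders without being decoded and re-encoded.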
func mergeTermFreqNormLocsByCopying(term []byte, postItr *PostingsIterator,
	newDocNums []uint64, newRoaring *roaring.Bitmap, newRoaringLocs *roaring.Bitmap,
	tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder, docTermMap [][]byte) (
	lastDocNum uint64, lastFreq uint64, lastNorm uint64, err error) {
	nextDocNum, nextFreq, nextNorm, nextFreqNormBytes, nextLocBytes, err :=
		postItr.nextBytes()
	for err == nil && len(nextFreqNormBytes) > 0 {
		hitNewDocNum := newDocNums[nextDocNum]
		if hitNewDocNum == docDropped {
			return 0, 0, 0, fmt.Errorf("see hit with dropped doc num")
		}

		newRoaring.Add(uint32(hitNewDocNum))
		err = tfEncoder.AddBytes(hitNewDocNum, nextFreqNormBytes)
		if err != nil {
			return 0, 0, 0, err
		}

		if len(nextLocBytes) > 0 {
			newRoaringLocs.Add(uint32(hitNewDocNum))
			err = locEncoder.AddBytes(hitNewDocNum, nextLocBytes)
			if err != nil {
				return 0, 0, 0, err
			}
		}

		docTermMap[hitNewDocNum] =
			append(append(docTermMap[hitNewDocNum], term...), termSeparator)

		lastDocNum = hitNewDocNum
		lastFreq = nextFreq
		lastNorm = nextNorm

		nextDocNum, nextFreq, nextNorm, nextFreqNormBytes, nextLocBytes, err =
			postItr.nextBytes()
	}

	return lastDocNum, lastFreq, lastNorm, err
}

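// writePostings writes out the freq/norm and location chunks followed
// by the location and postings bitmaps for a single term, returning the
// offset of the postings. When the 1-hit encoding applies, nothing is
// written and the returned value instead encodes the single docNum and
// norm directly (see FSTValEncode1Hit).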
func writePostings(postings, postingLocs *roaring.Bitmap,
	tfEncoder, locEncoder *chunkedIntCoder,
	use1HitEncoding func(uint64) (bool, uint64, uint64),
	w *CountHashWriter, bufMaxVarintLen64 []byte) (
	offset uint64, err error) {
	termCardinality := postings.GetCardinality()
	if termCardinality <= 0 {
		return 0, nil
	}

	if use1HitEncoding != nil {
		encodeAs1Hit, docNum1Hit, normBits1Hit := use1HitEncoding(termCardinality)
		if encodeAs1Hit {
			return FSTValEncode1Hit(docNum1Hit, normBits1Hit), nil
		}
	}

	tfOffset := uint64(w.Count())
	_, err = tfEncoder.Write(w)
	if err != nil {
		return 0, err
	}

	locOffset := uint64(w.Count())
	_, err = locEncoder.Write(w)
	if err != nil {
		return 0, err
	}

	postingLocsOffset := uint64(w.Count())
	_, err = writeRoaringWithLen(postingLocs, w, bufMaxVarintLen64)
	if err != nil {
		return 0, err
	}

	postingsOffset := uint64(w.Count())

	n := binary.PutUvarint(bufMaxVarintLen64, tfOffset)
	_, err = w.Write(bufMaxVarintLen64[:n])
	if err != nil {
		return 0, err
	}

	n = binary.PutUvarint(bufMaxVarintLen64, locOffset)
	_, err = w.Write(bufMaxVarintLen64[:n])
	if err != nil {
		return 0, err
	}

	n = binary.PutUvarint(bufMaxVarintLen64, postingLocsOffset)
	_, err = w.Write(bufMaxVarintLen64[:n])
	if err != nil {
		return 0, err
	}

	_, err = writeRoaringWithLen(postings, w, bufMaxVarintLen64)
	if err != nil {
		return 0, err
	}

	return postingsOffset, nil
}

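// mergeStoredAndRemap writes out the stored document sections of all
// the input segments, skipping dropped docs and assigning contiguous
// new doc numbers. It returns the offset of the stored doc index and,
// per segment, the mapping from old doc numbers to new ones (with
// docDropped marking deleted docs).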
func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap,
	fieldsMap map[string]uint16, fieldsInv []string, fieldsSame bool, newSegDocCount uint64,
	w *CountHashWriter) (uint64, [][]uint64, error) {
	var rv [][]uint64 // The remapped or newDocNums for each segment.

	var newDocNum uint64

	var curr int
	var metaBuf bytes.Buffer
	var data, compressed []byte

	metaEncoder := govarint.NewU64Base128Encoder(&metaBuf)

	vals := make([][][]byte, len(fieldsInv))
	typs := make([][]byte, len(fieldsInv))
	poss := make([][][]uint64, len(fieldsInv))

	docNumOffsets := make([]uint64, newSegDocCount)

	// for each segment
	for segI, segment := range segments {
		segNewDocNums := make([]uint64, segment.numDocs)

		dropsI := drops[segI]

		// optimize when the field mapping is the same across all
		// segments and there are no deletions, via byte-copying
		// of stored docs bytes directly to the writer
		if fieldsSame && (dropsI == nil || dropsI.GetCardinality() == 0) {
			err := segment.copyStoredDocs(newDocNum, docNumOffsets, w)
			if err != nil {
				return 0, nil, err
			}

			for i := uint64(0); i < segment.numDocs; i++ {
				segNewDocNums[i] = newDocNum
				newDocNum++
			}
			rv = append(rv, segNewDocNums)

			continue
		}

		// for each doc num
		for docNum := uint64(0); docNum < segment.numDocs; docNum++ {
			// TODO: roaring's API limits docNums to 32-bits?
			if dropsI != nil && dropsI.Contains(uint32(docNum)) {
				segNewDocNums[docNum] = docDropped
				continue
			}

			segNewDocNums[docNum] = newDocNum

			curr = 0
			metaBuf.Reset()
			data = data[:0]
			compressed = compressed[:0]

			// collect all the data
			for i := 0; i < len(fieldsInv); i++ {
				vals[i] = vals[i][:0]
				typs[i] = typs[i][:0]
				poss[i] = poss[i][:0]
			}
			err := segment.VisitDocument(docNum, func(field string, typ byte, value []byte, pos []uint64) bool {
				fieldID := int(fieldsMap[field]) - 1
				vals[fieldID] = append(vals[fieldID], value)
				typs[fieldID] = append(typs[fieldID], typ)
				poss[fieldID] = append(poss[fieldID], pos)
				return true
			})
			if err != nil {
				return 0, nil, err
			}

			// now walk the fields in order
			for fieldID := range fieldsInv {
				storedFieldValues := vals[int(fieldID)]

				stf := typs[int(fieldID)]
				spf := poss[int(fieldID)]

				var err2 error
				curr, data, err2 = persistStoredFieldValues(fieldID,
					storedFieldValues, stf, spf, curr, metaEncoder, data)
				if err2 != nil {
					return 0, nil, err2
				}
			}

			metaEncoder.Close()
			metaBytes := metaBuf.Bytes()

			compressed = snappy.Encode(compressed, data)

			// record where we're about to start writing
			docNumOffsets[newDocNum] = uint64(w.Count())

			// write out the meta len and compressed data len
			_, err = writeUvarints(w, uint64(len(metaBytes)), uint64(len(compressed)))
			if err != nil {
				return 0, nil, err
			}
			// now write the meta
			_, err = w.Write(metaBytes)
			if err != nil {
				return 0, nil, err
			}
			// now write the compressed data
			_, err = w.Write(compressed)
			if err != nil {
				return 0, nil, err
			}

			newDocNum++
		}

		rv = append(rv, segNewDocNums)
	}

	// return value is the start of the stored index
	storedIndexOffset := uint64(w.Count())

	// now write out the stored doc index
	for _, docNumOffset := range docNumOffsets {
		err := binary.Write(w, binary.BigEndian, docNumOffset)
		if err != nil {
			return 0, nil, err
		}
	}

	return storedIndexOffset, rv, nil
}

// copyStoredDocs writes out a segment's stored doc info, optimized by
// using a single Write() call for the entire set of bytes. The
// newDocNumOffsets array is filled with the new offsets for each doc.
func (s *SegmentBase) copyStoredDocs(newDocNum uint64, newDocNumOffsets []uint64,
	w *CountHashWriter) error {
	if s.numDocs <= 0 {
		return nil
	}

	indexOffset0, storedOffset0, _, _, _ :=
		s.getDocStoredOffsets(0) // the segment's first doc

	indexOffsetN, storedOffsetN, readN, metaLenN, dataLenN :=
		s.getDocStoredOffsets(s.numDocs - 1) // the segment's last doc

	storedOffset0New := uint64(w.Count())

	storedBytes := s.mem[storedOffset0 : storedOffsetN+readN+metaLenN+dataLenN]
	_, err := w.Write(storedBytes)
	if err != nil {
		return err
	}

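	// each stored doc index entry is a fixed-size 8-byte big-endian
	// stored offset, hence the step of 8 in the loop below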
	// remap the storedOffset's for the docs into new offsets relative
	// to storedOffset0New, filling the given newDocNumOffsets array
	for indexOffset := indexOffset0; indexOffset <= indexOffsetN; indexOffset += 8 {
		storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8])
		storedOffsetNew := storedOffset - storedOffset0 + storedOffset0New
		newDocNumOffsets[newDocNum] = storedOffsetNew
		newDocNum += 1
	}

	return nil
}

// mergeFields builds a unified list of fields used across all the
// input segments, and computes whether the fields are the same across
// segments (which depends on the fields being sorted in the same way
// across segments)
func mergeFields(segments []*SegmentBase) (bool, []string) {
	fieldsSame := true

	var segment0Fields []string
	if len(segments) > 0 {
		segment0Fields = segments[0].Fields()
	}

	fieldsExist := map[string]struct{}{}
	for _, segment := range segments {
		fields := segment.Fields()
		for fieldi, field := range fields {
			fieldsExist[field] = struct{}{}
			if len(segment0Fields) != len(fields) || segment0Fields[fieldi] != field {
				fieldsSame = false
			}
		}
	}

	rv := make([]string, 0, len(fieldsExist))
	// ensure _id stays first
	rv = append(rv, "_id")
	for k := range fieldsExist {
		if k != "_id" {
			rv = append(rv, k)
		}
	}

	sort.Strings(rv[1:]) // leave _id as first

	return fieldsSame, rv
}