
Merge pull request #808 from steveyen/more-scorch-optimizing

err fix and more scorch optimizing
Steve Yen 2018-03-07 10:39:20 -08:00 committed by GitHub
commit 0ec4a1935a
4 changed files with 23 additions and 12 deletions


@@ -186,14 +186,14 @@ func (s *Scorch) planMergeAtSnapshot(ourSnapshot *IndexSnapshot,
		newDocNums, nBytes, err := zap.Merge(segmentsToMerge, docsToDrop, path, 1024)
		atomic.AddUint64(&s.stats.TotFileMergeZapEnd, 1)
		atomic.AddUint64(&s.stats.TotFileMergeWrittenBytes, nBytes)
		fileMergeZapTime := uint64(time.Since(fileMergeZapStartTime))
		atomic.AddUint64(&s.stats.TotFileMergeZapTime, fileMergeZapTime)
		if atomic.LoadUint64(&s.stats.MaxFileMergeZapTime) < fileMergeZapTime {
			atomic.StoreUint64(&s.stats.MaxFileMergeZapTime, fileMergeZapTime)
		}
		if err != nil {
			s.unmarkIneligibleForRemoval(filename)
			atomic.AddUint64(&s.stats.TotFileMergePlanTasksErr, 1)
			return fmt.Errorf("merging failed: %v", err)
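This hunk records per-merge timing: add the elapsed zap merge time to a running total, then raise the max if this merge was the slowest observed. A minimal standalone sketch of that pattern follows; totalZapTime, maxZapTime, and recordMergeTime are hypothetical placeholders standing in for the s.stats fields above, not the Scorch API.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// Hypothetical counters standing in for the s.stats fields in planMergeAtSnapshot.
var (
	totalZapTime uint64 // cumulative merge time, in nanoseconds
	maxZapTime   uint64 // slowest single merge observed, in nanoseconds
)

// recordMergeTime mirrors the pattern above: add the elapsed time to the
// running total, then bump the max if this merge was the slowest seen so far.
func recordMergeTime(start time.Time) {
	elapsed := uint64(time.Since(start))
	atomic.AddUint64(&totalZapTime, elapsed)
	if atomic.LoadUint64(&maxZapTime) < elapsed {
		atomic.StoreUint64(&maxZapTime, elapsed)
	}
}

func main() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for the zap.Merge call
	recordMergeTime(start)
	fmt.Println(atomic.LoadUint64(&totalZapTime), atomic.LoadUint64(&maxZapTime))
}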


@@ -266,21 +266,34 @@ func (s *Segment) processDocument(result *index.AnalysisResult) {
		locationBS := s.PostingsLocs[pid]
		if len(tokenFreq.Locations) > 0 {
			locationBS.AddInt(int(docNum))
+			locfields := s.Locfields[pid]
+			locstarts := s.Locstarts[pid]
+			locends := s.Locends[pid]
+			locpos := s.Locpos[pid]
+			locarraypos := s.Locarraypos[pid]
			for _, loc := range tokenFreq.Locations {
				var locf = fieldID
				if loc.Field != "" {
					locf = uint16(s.getOrDefineField(loc.Field))
				}
-				s.Locfields[pid] = append(s.Locfields[pid], locf)
-				s.Locstarts[pid] = append(s.Locstarts[pid], uint64(loc.Start))
-				s.Locends[pid] = append(s.Locends[pid], uint64(loc.End))
-				s.Locpos[pid] = append(s.Locpos[pid], uint64(loc.Position))
+				locfields = append(locfields, locf)
+				locstarts = append(locstarts, uint64(loc.Start))
+				locends = append(locends, uint64(loc.End))
+				locpos = append(locpos, uint64(loc.Position))
				if len(loc.ArrayPositions) > 0 {
-					s.Locarraypos[pid] = append(s.Locarraypos[pid], loc.ArrayPositions)
+					locarraypos = append(locarraypos, loc.ArrayPositions)
				} else {
-					s.Locarraypos[pid] = append(s.Locarraypos[pid], nil)
+					locarraypos = append(locarraypos, nil)
				}
			}
+			s.Locfields[pid] = locfields
+			s.Locstarts[pid] = locstarts
+			s.Locends[pid] = locends
+			s.Locpos[pid] = locpos
+			s.Locarraypos[pid] = locarraypos
		}
	}
}
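The optimization here hoists the repeated s.Loc*[pid] index expressions out of the per-location loop into locals, appends to the locals, and writes the slices back once after the loop. A self-contained sketch of the same hoisting pattern follows; the postings type, its fields, and addLocations are illustrative names, not the mem.Segment API.

package main

import "fmt"

// Illustrative container: slices of per-posting location data, indexed by pid.
type postings struct {
	Locstarts [][]uint64
	Locends   [][]uint64
}

func addLocations(p *postings, pid int, starts, ends []uint64) {
	// Hoist: read the slice headers once instead of re-indexing
	// p.Locstarts[pid] and p.Locends[pid] on every append.
	locstarts := p.Locstarts[pid]
	locends := p.Locends[pid]
	for i := range starts {
		locstarts = append(locstarts, starts[i])
		locends = append(locends, ends[i])
	}
	// Write back once; append may have grown and reallocated the backing arrays.
	p.Locstarts[pid] = locstarts
	p.Locends[pid] = locends
}

func main() {
	p := &postings{Locstarts: make([][]uint64, 1), Locends: make([][]uint64, 1)}
	addLocations(p, 0, []uint64{1, 5}, []uint64{3, 9})
	fmt.Println(p.Locstarts[0], p.Locends[0])
}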


@@ -515,7 +515,7 @@ func persistDocValues(memSegment *mem.Segment, w *CountHashWriter,
		var err1 error
		postings, err1 = dict.(*mem.Dictionary).InitPostingsList(next.Term, nil, postings)
		if err1 != nil {
-			return nil, err
+			return nil, err1
		}
		postingsItr = postings.InitIterator(postingsItr)
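This is the "err fix": the branch checks err1 from InitPostingsList but previously returned the outer err, which would typically be nil at that point and mask the failure. A reduced sketch of that bug shape follows; openDict, initPostings, and persist are hypothetical stand-ins, not the zap functions.

package main

import (
	"errors"
	"fmt"
)

// Hypothetical helpers; only the error-variable mix-up matters here.
func openDict() (string, error)              { return "dict", nil }
func initPostings(dict string) (string, error) { return "", errors.New("init failed") }

func persist() (string, error) {
	d, err := openDict()
	if err != nil {
		return "", err
	}
	p, err1 := initPostings(d)
	if err1 != nil {
		// Before the fix this returned err (nil here), hiding the failure;
		// returning err1 propagates the real error.
		return "", err1
	}
	return p, nil
}

func main() {
	if _, err := persist(); err != nil {
		fmt.Println("persist:", err)
	}
}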


@@ -24,7 +24,6 @@ import (
type chunkedIntCoder struct {
	final     []byte
-	maxDocNum uint64
	chunkSize uint64
	chunkBuf  bytes.Buffer
	encoder   *govarint.Base128Encoder

@@ -41,7 +40,6 @@ func newChunkedIntCoder(chunkSize uint64, maxDocNum uint64) *chunkedIntCoder {
	total := maxDocNum/chunkSize + 1
	rv := &chunkedIntCoder{
		chunkSize: chunkSize,
-		maxDocNum: maxDocNum,
		chunkLens: make([]uint64, total),
		final:     make([]byte, 0, 64),
	}
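These two hunks drop the maxDocNum field from chunkedIntCoder; the value appears to be needed only inside newChunkedIntCoder to size chunkLens, so it no longer has to be retained on the struct. A trimmed sketch of the constructor after the removal follows; unrelated fields are omitted, so this is not the full zap type.

package main

import "fmt"

// Trimmed illustration of the struct after the change; the real zap type also
// carries the chunk buffer, varint encoder, and current-chunk bookkeeping.
type chunkedIntCoder struct {
	final     []byte
	chunkSize uint64
	chunkLens []uint64
}

// newChunkedIntCoder uses maxDocNum only to size chunkLens, so the struct no
// longer keeps it as a field.
func newChunkedIntCoder(chunkSize, maxDocNum uint64) *chunkedIntCoder {
	total := maxDocNum/chunkSize + 1
	return &chunkedIntCoder{
		chunkSize: chunkSize,
		chunkLens: make([]uint64, total),
		final:     make([]byte, 0, 64),
	}
}

func main() {
	c := newChunkedIntCoder(1024, 5000)
	fmt.Println(len(c.chunkLens)) // 5000/1024 + 1 = 5 chunks
}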