scorch zap provide full buffer capacity to snappy Encode/Decode()
The snappy Encode()/Decode() APIs accept an optional destination buffer param where their encoded/decoded output will be placed, but they only check that the buffer has enough len(), rather than enough cap(), before deciding to allocate a new buffer.
parent 84424edcad
commit 6540b197d4
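The diffs below all apply the same idiom: re-slice the reusable temp buffer to its full capacity before handing it to snappy, so the len()-based check sees the whole backing array. A minimal standalone sketch of the encode side (the names buf and src are illustrative, not from this commit), assuming github.com/golang/snappy:

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("the same input, compressed over and over again")

	var buf []byte
	for i := 0; i < 3; i++ {
		// snappy.Encode allocates a fresh slice whenever
		// len(dst) < snappy.MaxEncodedLen(len(src)), so passing nil or
		// buf[:0] would allocate on every pass; buf[:cap(buf)] exposes
		// the whole backing array, and the buffer is reused after the
		// first pass.
		buf = snappy.Encode(buf[:cap(buf)], src)
		fmt.Printf("pass %d: len=%d cap=%d\n", i+1, len(buf), cap(buf))
	}
}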
@@ -42,6 +42,8 @@ type chunkedContentCoder struct {
 	chunkBuf bytes.Buffer
 
 	chunkMeta []MetaData
+
+	compressed []byte // temp buf for snappy compression
 }
 
 // MetaData represents the data information inside a
@@ -105,10 +107,10 @@ func (c *chunkedContentCoder) flushContents() error {
 	metaData := c.chunkMetaBuf.Bytes()
 	c.final = append(c.final, c.chunkMetaBuf.Bytes()...)
 	// write the compressed data to the final data
-	compressedData := snappy.Encode(nil, c.chunkBuf.Bytes())
-	c.final = append(c.final, compressedData...)
+	c.compressed = snappy.Encode(c.compressed[:cap(c.compressed)], c.chunkBuf.Bytes())
+	c.final = append(c.final, c.compressed...)
 
-	c.chunkLens[c.currChunk] = uint64(len(compressedData) + len(metaData))
+	c.chunkLens[c.currChunk] = uint64(len(c.compressed) + len(metaData))
 	return nil
 }
 
@@ -42,6 +42,7 @@ type docValueIterator struct {
 	dvDataLoc      uint64
 	curChunkHeader []MetaData
 	curChunkData   []byte // compressed data cache
+	uncompressed   []byte // temp buf for snappy decompression
 }
 
 func (di *docValueIterator) size() int {
@@ -135,10 +136,11 @@ func (di *docValueIterator) visitDocValues(docNum uint64,
 		return nil
 	}
 	// uncompress the already loaded data
-	uncompressed, err := snappy.Decode(nil, di.curChunkData)
+	uncompressed, err := snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData)
 	if err != nil {
 		return err
 	}
+	di.uncompressed = uncompressed
 
 	// pick the terms for the given docNum
 	uncompressed = uncompressed[start:end]
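The decode side above needs one extra step: snappy.Decode may return a newly allocated slice when the offered buffer is too small, so the result is stored back into di.uncompressed to keep the possibly grown buffer for the next chunk. A small runnable sketch of that idiom (decodeReuse is a hypothetical helper name, not from the commit):

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

// decodeReuse offers snappy the buffer at full capacity; the returned
// slice is either the same backing array or a larger reallocation, and
// the caller should keep it as the new reusable buffer.
func decodeReuse(buf, src []byte) ([]byte, error) {
	return snappy.Decode(buf[:cap(buf)], src)
}

func main() {
	comp := snappy.Encode(nil, []byte("hello, hello, hello, hello"))

	var buf []byte
	for i := 0; i < 2; i++ {
		out, err := decodeReuse(buf, comp)
		if err != nil {
			panic(err)
		}
		buf = out // store back, as visitDocValues does with di.uncompressed
		fmt.Printf("pass %d: len=%d cap=%d\n", i+1, len(buf), cap(buf))
	}
}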
@@ -604,7 +604,6 @@ func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap,
 			curr = 0
 			metaBuf.Reset()
 			data = data[:0]
-			compressed = compressed[:0]
 
 			// collect all the data
 			for i := 0; i < len(fieldsInv); i++ {
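Note: with Encode now given the destination at full capacity at the call site, the per-document compressed = compressed[:0] reset removed above was redundant, since the Encode result is assigned back to compressed anyway; presumably that is why both stored-field writers drop it.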
@@ -641,7 +640,7 @@ func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap,
 			metaEncoder.Close()
 			metaBytes := metaBuf.Bytes()
 
-			compressed = snappy.Encode(compressed, data)
+			compressed = snappy.Encode(compressed[:cap(compressed)], data)
 
 			// record where we're about to start writing
 			docNumOffsets[newDocNum] = uint64(w.Count())
@@ -517,7 +517,6 @@ func (s *interim) writeStoredFields() (
 
 		s.metaBuf.Reset()
 		data = data[:0]
-		compressed = compressed[:0]
 
 		for fieldID := range s.FieldsInv {
 			isf, exists := docStoredFields[uint16(fieldID)]
@@ -534,7 +533,7 @@ func (s *interim) writeStoredFields() (
 		metaEncoder.Close()
 		metaBytes := s.metaBuf.Bytes()
 
-		compressed = snappy.Encode(compressed, data)
+		compressed = snappy.Encode(compressed[:cap(compressed)], data)
 
 		docStoredOffsets[docNum] = uint64(s.w.Count())
 