upside_down dict row value size accounts for large uvarints
This is somewhat unlikely, but if a term is (incredibly) popular, its uvarint count value representation might go beyond 8 bytes. Some KVStore implementations (like forestdb) provide a BatchEx cgo optimization that depends on properly preallocated counting, so this change provides a proper worst-case estimate based on the maximum uvarint length of 10 bytes instead of the previously incorrect 8 bytes.
This commit is contained in:
parent
dd1718fa78
commit
a29dd25a48
|
@ -234,6 +234,8 @@ func NewFieldRowKV(key, value []byte) (*FieldRow, error) {
|
|||
|
||||
// DICTIONARY
|
||||
|
||||
const DictionaryRowMaxValueSize = binary.MaxVarintLen64
|
||||
|
||||
type DictionaryRow struct {
|
||||
field uint16
|
||||
term []byte
|
||||
|
@ -264,7 +266,7 @@ func (dr *DictionaryRow) Value() []byte {
|
|||
}
|
||||
|
||||
func (dr *DictionaryRow) ValueSize() int {
|
||||
return binary.MaxVarintLen64
|
||||
return DictionaryRowMaxValueSize
|
||||
}
|
||||
|
||||
func (dr *DictionaryRow) ValueTo(buf []byte) (int, error) {
|
||||
|
|
|
@ -208,7 +208,7 @@ func (udc *UpsideDownCouch) batchRows(writer store.KVWriter, addRowsAll [][]Upsi
|
|||
|
||||
mergeNum := len(dictionaryDeltas)
|
||||
mergeKeyBytes := 0
|
||||
mergeValBytes := mergeNum * 8
|
||||
mergeValBytes := mergeNum * DictionaryRowMaxValueSize
|
||||
|
||||
for dictRowKey, _ := range dictionaryDeltas {
|
||||
mergeKeyBytes += len(dictRowKey)
|
||||
|
@ -278,8 +278,8 @@ func (udc *UpsideDownCouch) batchRows(writer store.KVWriter, addRowsAll [][]Upsi
|
|||
for dictRowKey, delta := range dictionaryDeltas {
|
||||
dictRowKeyLen := copy(buf, dictRowKey)
|
||||
binary.LittleEndian.PutUint64(buf[dictRowKeyLen:], uint64(delta))
|
||||
wb.Merge(buf[:dictRowKeyLen], buf[dictRowKeyLen:dictRowKeyLen+8])
|
||||
buf = buf[dictRowKeyLen+8:]
|
||||
wb.Merge(buf[:dictRowKeyLen], buf[dictRowKeyLen:dictRowKeyLen+DictionaryRowMaxValueSize])
|
||||
buf = buf[dictRowKeyLen+DictionaryRowMaxValueSize:]
|
||||
}
|
||||
|
||||
// write out the batch
|
||||
|
|
Loading…
Reference in New Issue