
converted ALL_CAPS constants to CamelCase

This commit is contained in:
Marty Schoch 2014-09-03 17:48:40 -04:00
parent 53b25195d6
commit d534b0836b
17 changed files with 153 additions and 147 deletions
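Every hunk below applies the same mechanical pattern, sketched here with a constant this commit actually renames. Go style (Effective Go) prefers MixedCaps over underscore-separated names, and exported visibility depends only on the leading capital, so each rename is semantics-preserving:

package naming

// Before: underscore-separated, discouraged by Go style.
const BYTE_SEPARATOR byte = 0xff

// After: MixedCaps, still exported (leading capital), identical value.
const ByteSeparator byte = 0xff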

View File

@@ -36,7 +36,7 @@ func (p *FlexibleGoDateTimeParser) ParseDateTime(input string) (time.Time, error
return rv, nil
}
}
return time.Time{}, analysis.INVALID_DATETIME
return time.Time{}, analysis.InvalidDateTime
}
func FlexibleGoDateTimeParserConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.DateTimeParser, error) {

View File

@@ -53,7 +53,7 @@ func TestFlexibleDateTimeParser(t *testing.T) {
{
input: "not a date time",
expectedTime: time.Time{},
expectedError: analysis.INVALID_DATETIME,
expectedError: analysis.InvalidDateTime,
},
}

View File

@@ -19,23 +19,23 @@ import (
const NormalizeName = "normalize_ar"
const (
ALEF = '\u0627'
ALEF_MADDA = '\u0622'
ALEF_HAMZA_ABOVE = '\u0623'
ALEF_HAMZA_BELOW = '\u0625'
YEH = '\u064A'
DOTLESS_YEH = '\u0649'
TEH_MARBUTA = '\u0629'
HEH = '\u0647'
TATWEEL = '\u0640'
FATHATAN = '\u064B'
DAMMATAN = '\u064C'
KASRATAN = '\u064D'
FATHA = '\u064E'
DAMMA = '\u064F'
KASRA = '\u0650'
SHADDA = '\u0651'
SUKUN = '\u0652'
Alef = '\u0627'
AlefMadda = '\u0622'
AlefHamzaAbove = '\u0623'
AlefHamzaBelow = '\u0625'
Yeh = '\u064A'
DotlessYeh = '\u0649'
TehMarbuta = '\u0629'
Heh = '\u0647'
Tatweel = '\u0640'
Fathatan = '\u064B'
Dammatan = '\u064C'
Kasratan = '\u064D'
Fatha = '\u064E'
Damma = '\u064F'
Kasra = '\u0650'
Shadda = '\u0651'
Sukun = '\u0652'
)
type ArabicNormalizeFilter struct {
@@ -61,13 +61,13 @@ func normalize(input []byte) []byte {
runes := bytes.Runes(input)
for i := 0; i < len(runes); i++ {
switch runes[i] {
case ALEF_MADDA, ALEF_HAMZA_ABOVE, ALEF_HAMZA_BELOW:
runes[i] = ALEF
case DOTLESS_YEH:
runes[i] = YEH
case TEH_MARBUTA:
runes[i] = HEH
case TATWEEL, KASRATAN, DAMMATAN, FATHATAN, FATHA, DAMMA, KASRA, SHADDA, SUKUN:
case AlefMadda, AlefHamzaAbove, AlefHamzaBelow:
runes[i] = Alef
case DotlessYeh:
runes[i] = Yeh
case TehMarbuta:
runes[i] = Heh
case Tatweel, Kasratan, Dammatan, Fathatan, Fatha, Damma, Kasra, Shadda, Sukun:
runes = analysis.DeleteRune(runes, i)
i--
}
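A minimal self-contained sketch of the folding these rules perform; the helper and sample word are illustrative (not part of the commit) and cover only the alef and diacritic cases:

package main

import "fmt"

// Illustrative subset of the rules above: hamza-carrying alef forms fold
// to bare alef; tatweel and the harakat (diacritics) are deleted.
func normalizeArSketch(in string) string {
	out := make([]rune, 0, len(in))
	for _, r := range in {
		switch r {
		case '\u0622', '\u0623', '\u0625': // AlefMadda, AlefHamzaAbove, AlefHamzaBelow
			out = append(out, '\u0627') // Alef
		case '\u0640', '\u064B', '\u064C', '\u064D', '\u064E',
			'\u064F', '\u0650', '\u0651', '\u0652':
			// dropped entirely
		default:
			out = append(out, r)
		}
	}
	return string(out)
}

func main() {
	fmt.Println(normalizeArSketch("أَحْمَد")) // prints "احمد"
}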

View File

@@ -20,32 +20,32 @@ import (
const NormalizeName = "normalize_ckb"
const (
YEH = '\u064A'
DOTLESS_YEH = '\u0649'
FARSI_YEH = '\u06CC'
Yeh = '\u064A'
DotlessYeh = '\u0649'
FarsiYeh = '\u06CC'
KAF = '\u0643'
KEHEH = '\u06A9'
Kaf = '\u0643'
Keheh = '\u06A9'
HEH = '\u0647'
AE = '\u06D5'
ZWNJ = '\u200C'
HEH_DOACHASHMEE = '\u06BE'
TEH_MARBUTA = '\u0629'
Heh = '\u0647'
Ae = '\u06D5'
Zwnj = '\u200C'
HehDoachashmee = '\u06BE'
TehMarbuta = '\u0629'
REH = '\u0631'
RREH = '\u0695'
RREH_ABOVE = '\u0692'
Reh = '\u0631'
Rreh = '\u0695'
RrehAbove = '\u0692'
TATWEEL = '\u0640'
FATHATAN = '\u064B'
DAMMATAN = '\u064C'
KASRATAN = '\u064D'
FATHA = '\u064E'
DAMMA = '\u064F'
KASRA = '\u0650'
SHADDA = '\u0651'
SUKUN = '\u0652'
Tatweel = '\u0640'
Fathatan = '\u064B'
Dammatan = '\u064C'
Kasratan = '\u064D'
Fatha = '\u064E'
Damma = '\u064F'
Kasra = '\u0650'
Shadda = '\u0651'
Sukun = '\u0652'
)
type SoraniNormalizeFilter struct {
@@ -71,31 +71,31 @@ func normalize(input []byte) []byte {
runes := bytes.Runes(input)
for i := 0; i < len(runes); i++ {
switch runes[i] {
case YEH, DOTLESS_YEH:
runes[i] = FARSI_YEH
case KAF:
runes[i] = KEHEH
case ZWNJ:
if i > 0 && runes[i-1] == HEH {
runes[i-1] = AE
case Yeh, DotlessYeh:
runes[i] = FarsiYeh
case Kaf:
runes[i] = Keheh
case Zwnj:
if i > 0 && runes[i-1] == Heh {
runes[i-1] = Ae
}
runes = analysis.DeleteRune(runes, i)
i--
case HEH:
case Heh:
if i == len(runes)-1 {
runes[i] = AE
runes[i] = Ae
}
case TEH_MARBUTA:
runes[i] = AE
case HEH_DOACHASHMEE:
runes[i] = HEH
case REH:
case TehMarbuta:
runes[i] = Ae
case HehDoachashmee:
runes[i] = Heh
case Reh:
if i == 0 {
runes[i] = RREH
runes[i] = Rreh
}
case RREH_ABOVE:
runes[i] = RREH
case TATWEEL, KASRATAN, DAMMATAN, FATHATAN, FATHA, DAMMA, KASRA, SHADDA, SUKUN:
case RrehAbove:
runes[i] = Rreh
case Tatweel, Kasratan, Dammatan, Fathatan, Fatha, Damma, Kasra, Shadda, Sukun:
runes = analysis.DeleteRune(runes, i)
i--
default:

View File

@@ -19,15 +19,15 @@ import (
const NormalizeName = "normalize_fa"
const (
YEH = '\u064A'
FARSI_YEH = '\u06CC'
YEH_BARREE = '\u06D2'
KEHEH = '\u06A9'
KAF = '\u0643'
HAMZA_ABOVE = '\u0654'
HEH_YEH = '\u06C0'
HEH_GOAL = '\u06C1'
HEH = '\u0647'
Yeh = '\u064A'
FarsiYeh = '\u06CC'
YehBarree = '\u06D2'
Keheh = '\u06A9'
Kaf = '\u0643'
HamzaAbove = '\u0654'
HehYeh = '\u06C0'
HehGoal = '\u06C1'
Heh = '\u0647'
)
type PersianNormalizeFilter struct {
@@ -53,13 +53,13 @@ func normalize(input []byte) []byte {
runes := bytes.Runes(input)
for i := 0; i < len(runes); i++ {
switch runes[i] {
case FARSI_YEH, YEH_BARREE:
runes[i] = YEH
case KEHEH:
runes[i] = KAF
case HEH_YEH, HEH_GOAL:
runes[i] = HEH
case HAMZA_ABOVE: // necessary for HEH + HAMZA
case FarsiYeh, YehBarree:
runes[i] = Yeh
case Keheh:
runes[i] = Kaf
case HehYeh, HehGoal:
runes[i] = Heh
case HamzaAbove: // necessary for HEH + HAMZA
runes = analysis.DeleteRune(runes, i)
i--
}

View File

@@ -18,10 +18,9 @@ import (
const Name = "apostrophe"
const RIGHT_SINGLE_QUOTATION_MARK = "’"
const APOSTROPHE = "'"
const APOSTROPHES = APOSTROPHE + RIGHT_SINGLE_QUOTATION_MARK
const RightSingleQoutationMark = "’"
const Apostrophe = "'"
const Apostrophes = Apostrophe + RightSingleQoutationMark
type ApostropheFilter struct{}
@@ -33,7 +32,7 @@ func (s *ApostropheFilter) Filter(input analysis.TokenStream) analysis.TokenStre
rv := make(analysis.TokenStream, 0)
for _, token := range input {
firstApostrophe := bytes.IndexAny(token.Term, APOSTROPHES)
firstApostrophe := bytes.IndexAny(token.Term, Apostrophes)
if firstApostrophe >= 0 {
// found an apostrophe
token.Term = token.Term[0:firstApostrophe]
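A hedged sketch of this hunk's truncation rule on plain strings rather than an analysis.TokenStream; the helper name is invented for illustration:

package main

import (
	"fmt"
	"strings"
)

// Mirrors token.Term = token.Term[0:firstApostrophe] above: keep only
// the text before the first apostrophe-class character.
func trimAtApostrophe(term string) string {
	// "'" plus U+2019, matching the Apostrophes constant above
	if i := strings.IndexAny(term, "'\u2019"); i >= 0 {
		return term[:i]
	}
	return term
}

func main() {
	fmt.Println(trimAtApostrophe("dog's")) // "dog"
	fmt.Println(trimAtApostrophe("plain")) // "plain"
}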

View File

@@ -19,10 +19,10 @@ import (
const Name = "elision"
const RIGHT_SINGLE_QUOTATION_MARK = "’"
const APOSTROPHE = "'"
const RightSingleQoutationMark = "’"
const Apostrophe = "'"
const APOSTROPHES = APOSTROPHE + RIGHT_SINGLE_QUOTATION_MARK
const Apostrophes = Apostrophe + RightSingleQoutationMark
type ElisionFilter struct {
articles analysis.TokenMap
@@ -38,7 +38,7 @@ func (s *ElisionFilter) Filter(input analysis.TokenStream) analysis.TokenStream
rv := make(analysis.TokenStream, 0)
for _, token := range input {
firstApostrophe := bytes.IndexAny(token.Term, APOSTROPHES)
firstApostrophe := bytes.IndexAny(token.Term, Apostrophes)
if firstApostrophe >= 0 {
// found an apostrophe
prefix := token.Term[0:firstApostrophe]

View File

@@ -70,7 +70,7 @@ func (a *Analyzer) Analyze(input []byte) TokenStream {
return tokens
}
var INVALID_DATETIME = fmt.Errorf("unable to parse datetime with any of the layouts")
var InvalidDateTime = fmt.Errorf("unable to parse datetime with any of the layouts")
type DateTimeParser interface {
ParseDateTime(string) (time.Time, error)
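A small caller-side sketch of the renamed sentinel, assuming some analysis.DateTimeParser in hand; the function is illustrative, and the == comparison mirrors how the parser test above asserts expectedError:

package example

import (
	"fmt"

	"github.com/blevesearch/bleve/analysis"
)

// parseOrReport distinguishes the exported sentinel from other errors.
func parseOrReport(p analysis.DateTimeParser, in string) {
	t, err := p.ParseDateTime(in)
	if err == analysis.InvalidDateTime {
		fmt.Println("no configured layout matched:", in)
		return
	}
	fmt.Println("parsed:", t)
}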

View File

@@ -19,7 +19,7 @@ import (
"code.google.com/p/goprotobuf/proto"
)
const BYTE_SEPARATOR byte = 0xff
const ByteSeparator byte = 0xff
type UpsideDownCouchRowStream chan UpsideDownCouchRow
@@ -134,7 +134,7 @@ func (f *FieldRow) Key() []byte {
}
func (f *FieldRow) Value() []byte {
return append([]byte(f.name), BYTE_SEPARATOR)
return append([]byte(f.name), ByteSeparator)
}
func (f *FieldRow) String() string {
@@ -159,7 +159,7 @@ func NewFieldRowKV(key, value []byte) (*FieldRow, error) {
}
buf = bytes.NewBuffer(value)
rv.name, err = buf.ReadString(BYTE_SEPARATOR)
rv.name, err = buf.ReadString(ByteSeparator)
if err != nil {
return nil, err
}
@@ -210,7 +210,7 @@ func (tfr *TermFrequencyRow) ScanPrefixForFieldTerm() []byte {
buf[0] = 't'
binary.LittleEndian.PutUint16(buf[1:3], tfr.field)
termLen := copy(buf[3:], tfr.term)
buf[3+termLen] = BYTE_SEPARATOR
buf[3+termLen] = ByteSeparator
return buf
}
@@ -219,7 +219,7 @@ func (tfr *TermFrequencyRow) Key() []byte {
buf[0] = 't'
binary.LittleEndian.PutUint16(buf[1:3], tfr.field)
termLen := copy(buf[3:], tfr.term)
buf[3+termLen] = BYTE_SEPARATOR
buf[3+termLen] = ByteSeparator
copy(buf[3+termLen+1:], tfr.doc)
return buf
}
@@ -281,13 +281,13 @@ func NewTermFrequencyRowK(key []byte) (*TermFrequencyRow, error) {
return nil, err
}
rv.term, err = buf.ReadBytes(BYTE_SEPARATOR)
rv.term, err = buf.ReadBytes(ByteSeparator)
if err != nil {
return nil, err
}
rv.term = rv.term[:len(rv.term)-1] // trim off separator byte
doc, err := buf.ReadBytes(BYTE_SEPARATOR)
doc, err := buf.ReadBytes(ByteSeparator)
if err != io.EOF {
return nil, err
}
@@ -413,7 +413,7 @@ func NewBackIndexRowKV(key, value []byte) (*BackIndexRow, error) {
buf.ReadByte() // type
var err error
rv.doc, err = buf.ReadBytes(BYTE_SEPARATOR)
rv.doc, err = buf.ReadBytes(ByteSeparator)
if err == io.EOF && len(rv.doc) < 1 {
err = fmt.Errorf("invalid doc length 0")
}
@@ -446,7 +446,7 @@ func (s *StoredRow) Key() []byte {
buf := new(bytes.Buffer)
buf.WriteByte('s')
buf.Write(s.doc)
buf.WriteByte(BYTE_SEPARATOR)
buf.WriteByte(ByteSeparator)
fieldbuf := make([]byte, 2)
binary.LittleEndian.PutUint16(fieldbuf, s.field)
buf.Write(fieldbuf)
@@ -473,7 +473,7 @@ func (s *StoredRow) ScanPrefixForDoc() []byte {
buf := new(bytes.Buffer)
buf.WriteByte('s')
buf.Write(s.doc)
buf.WriteByte(BYTE_SEPARATOR)
buf.WriteByte(ByteSeparator)
return buf.Bytes()
}
@@ -494,7 +494,7 @@ func NewStoredRowK(key []byte) (*StoredRow, error) {
buf.ReadByte() // type
var err error
rv.doc, err = buf.ReadBytes(BYTE_SEPARATOR)
rv.doc, err = buf.ReadBytes(ByteSeparator)
if len(rv.doc) < 2 { // 1 for min doc id length, 1 for separator
err = fmt.Errorf("invalid doc length 0")
return nil, err
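ByteSeparator's role is clearest in the key layouts. A hedged sketch of the stored-row key that Key() above assembles; 0xff can never occur inside UTF-8 text, so it safely delimits the doc id (the helper is illustrative):

package example

import "bytes"

const ByteSeparator byte = 0xff // as exported above

// storedKeySketch mirrors StoredRow.Key:
// 's' | doc id | 0xff | field number as little-endian uint16
func storedKeySketch(doc []byte, field uint16) []byte {
	buf := new(bytes.Buffer)
	buf.WriteByte('s')
	buf.Write(doc)
	buf.WriteByte(ByteSeparator)
	buf.WriteByte(byte(field))      // low byte first: little-endian
	buf.WriteByte(byte(field >> 8)) // high byte
	return buf.Bytes()
}

storedKeySketch([]byte("budweiser"), 0) yields {'s', 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r', 0xff, 0, 0}, matching the expected StoredRow key in the row tests below.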

View File

@@ -30,31 +30,31 @@ func TestRows(t *testing.T) {
{
NewFieldRow(0, "name"),
[]byte{'f', 0, 0},
[]byte{'n', 'a', 'm', 'e', BYTE_SEPARATOR},
[]byte{'n', 'a', 'm', 'e', ByteSeparator},
},
{
NewFieldRow(1, "desc"),
[]byte{'f', 1, 0},
[]byte{'d', 'e', 's', 'c', BYTE_SEPARATOR},
[]byte{'d', 'e', 's', 'c', ByteSeparator},
},
{
NewFieldRow(513, "style"),
[]byte{'f', 1, 2},
[]byte{'s', 't', 'y', 'l', 'e', BYTE_SEPARATOR},
[]byte{'s', 't', 'y', 'l', 'e', ByteSeparator},
},
{
NewTermFrequencyRow([]byte{'b', 'e', 'e', 'r'}, 0, "", 3, 3.14),
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', BYTE_SEPARATOR},
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator},
[]byte{3, 0, 0, 0, 0, 0, 0, 0, 195, 245, 72, 64},
},
{
NewTermFrequencyRow([]byte{'b', 'e', 'e', 'r'}, 0, "budweiser", 3, 3.14),
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', BYTE_SEPARATOR, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{3, 0, 0, 0, 0, 0, 0, 0, 195, 245, 72, 64},
},
{
NewTermFrequencyRowWithTermVectors([]byte{'b', 'e', 'e', 'r'}, 0, "budweiser", 3, 3.14, []*TermVector{&TermVector{field: 0, pos: 1, start: 3, end: 11}, &TermVector{field: 0, pos: 2, start: 23, end: 31}, &TermVector{field: 0, pos: 3, start: 43, end: 51}}),
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', BYTE_SEPARATOR, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{3, 0, 0, 0, 0, 0, 0, 0, 195, 245, 72, 64, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 0, 51, 0, 0, 0, 0, 0, 0, 0},
},
{
@@ -74,7 +74,7 @@ func TestRows(t *testing.T) {
},
{
NewStoredRow("budweiser", 0, []uint64{}, byte('t'), []byte("an american beer")),
[]byte{'s', 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r', BYTE_SEPARATOR, 0, 0},
[]byte{'s', 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r', ByteSeparator, 0, 0},
[]byte{'t', 'a', 'n', ' ', 'a', 'm', 'e', 'r', 'i', 'c', 'a', 'n', ' ', 'b', 'e', 'e', 'r'},
},
{
@@ -151,43 +151,43 @@ func TestInvalidRows(t *testing.T) {
},
// type t, invalid key (missing id)
{
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', BYTE_SEPARATOR},
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator},
[]byte{},
},
// type t, invalid val (missing freq)
{
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', BYTE_SEPARATOR, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{},
},
// type t, invalid val (missing norm)
{
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', BYTE_SEPARATOR, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{3, 0, 0, 0, 0, 0, 0, 0},
},
// type t, invalid val (half missing tv field, full missing is valid (no term vectors))
{
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', BYTE_SEPARATOR, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{3, 0, 0, 0, 0, 0, 0, 0, 195, 245, 72, 64, 0},
},
// type t, invalid val (missing tv pos)
{
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', BYTE_SEPARATOR, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{3, 0, 0, 0, 0, 0, 0, 0, 195, 245, 72, 64, 0, 0},
},
// type t, invalid val (missing tv start)
{
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', BYTE_SEPARATOR, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{3, 0, 0, 0, 0, 0, 0, 0, 195, 245, 72, 64, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0},
},
// type t, invalid val (missing tv end)
{
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', BYTE_SEPARATOR, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{3, 0, 0, 0, 0, 0, 0, 0, 195, 245, 72, 64, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0},
},
// type b, invalid key (missing id)
{
[]byte{'b'},
[]byte{'b', 'e', 'e', 'r', BYTE_SEPARATOR, 0, 0},
[]byte{'b', 'e', 'e', 'r', ByteSeparator, 0, 0},
},
// type b, invalid val (missing field)
{
@@ -201,7 +201,7 @@ func TestInvalidRows(t *testing.T) {
},
// type b, invalid val (missing field)
{
[]byte{'s', 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r', BYTE_SEPARATOR},
[]byte{'s', 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r', ByteSeparator},
[]byte{'t', 'a', 'n', ' ', 'a', 'm', 'e', 'r', 'i', 'c', 'a', 'n', ' ', 'b', 'e', 'e', 'r'},
},
}

View File

@@ -22,9 +22,9 @@ import (
"code.google.com/p/goprotobuf/proto"
)
var VERSION_KEY []byte = []byte{'v'}
var VersionKey []byte = []byte{'v'}
const VERSION uint8 = 1
const Version uint8 = 1
type UpsideDownCouch struct {
version uint8
@@ -38,7 +38,7 @@ type UpsideDownCouch struct {
func NewUpsideDownCouch(s store.KVStore) *UpsideDownCouch {
return &UpsideDownCouch{
version: VERSION,
version: Version,
analyzer: make(map[string]*analysis.Analyzer),
fieldIndexes: make(map[string]uint16),
store: s,
@@ -167,7 +167,7 @@ func (udc *UpsideDownCouch) DocCount() uint64 {
func (udc *UpsideDownCouch) Open() (err error) {
var value []byte
value, err = udc.store.Get(VERSION_KEY)
value, err = udc.store.Get(VersionKey)
if err != nil {
return
}
@@ -526,7 +526,7 @@ func (udc *UpsideDownCouch) TermFieldReader(term []byte, fieldName string) (inde
if fieldExists {
return newUpsideDownCouchTermFieldReader(udc, term, uint16(fieldIndex))
}
return newUpsideDownCouchTermFieldReader(udc, []byte{BYTE_SEPARATOR}, ^uint16(0))
return newUpsideDownCouchTermFieldReader(udc, []byte{ByteSeparator}, ^uint16(0))
}
func (udc *UpsideDownCouch) FieldReader(fieldName string, startTerm []byte, endTerm []byte) (index.FieldReader, error) {
@@ -534,7 +534,7 @@ func (udc *UpsideDownCouch) FieldReader(fieldName string, startTerm []byte, endT
if fieldExists {
return newUpsideDownCouchFieldReader(udc, uint16(fieldIndex), startTerm, endTerm)
}
return newUpsideDownCouchTermFieldReader(udc, []byte{BYTE_SEPARATOR}, ^uint16(0))
return newUpsideDownCouchTermFieldReader(udc, []byte{ByteSeparator}, ^uint16(0))
}
func (udc *UpsideDownCouch) DocIdReader(start, end string) (index.DocIdReader, error) {

View File

@@ -121,6 +121,15 @@ func (dm *DocumentMapping) AddSubDocumentMapping(property string, sdm *DocumentM
dm.Properties[property] = sdm
}
// AddFieldMappingsAt adds one or more FieldMappings
// at the named sub-document. If the named sub-document
// doesn't yet exist it is created for you.
// This is a convenience function to make most common
// mappings more concise.
// Otherwise, you would:
// subMapping := NewDocumentMapping()
// subMapping.AddFieldMapping(fieldMapping)
// parentMapping.AddSubDocumentMapping(property, subMapping)
func (dm *DocumentMapping) AddFieldMappingsAt(property string, fms ...*FieldMapping) {
if dm.Properties == nil {
dm.Properties = make(map[string]*DocumentMapping)
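A hedged usage sketch (in the same mapping package) of the convenience the new doc comment describes; the field-mapping value is a placeholder:

func exampleAddFieldMappingsAt() {
	parent := NewDocumentMapping()
	fm := &FieldMapping{} // placeholder; any configured *FieldMapping works

	// the convenience form documented above:
	parent.AddFieldMappingsAt("author", fm)

	// the equivalent long form from the doc comment:
	sub := NewDocumentMapping()
	sub.AddFieldMapping(fm)
	parent.AddSubDocumentMapping("author", sub)
}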

View File

@@ -13,7 +13,7 @@ import (
"fmt"
)
const SHIFT_START_INT64 byte = 0x20
const ShiftStartInt64 byte = 0x20
// PrefixCoded is a byte array encoding of
// 64-bit numeric values shifted by 0-63 bits
@@ -26,7 +26,7 @@ func NewPrefixCodedInt64(in int64, shift uint) (PrefixCoded, error) {
nChars := (((63 - shift) * 37) >> 8) + 1
rv := make(PrefixCoded, nChars+1)
rv[0] = SHIFT_START_INT64 + byte(shift)
rv[0] = ShiftStartInt64 + byte(shift)
sortableBits := int64(uint64(in) ^ 0x8000000000000000)
sortableBits = int64(uint64(sortableBits) >> shift)
@@ -52,7 +52,7 @@ func MustNewPrefixCodedInt64(in int64, shift uint) PrefixCoded {
// returns 0 if in uninitialized state
func (p PrefixCoded) Shift() (uint, error) {
if len(p) > 0 {
shift := p[0] - SHIFT_START_INT64
shift := p[0] - ShiftStartInt64
if shift < 0 || shift < 63 {
return uint(shift), nil
}
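A round-trip sketch of the renamed API, assuming it sits alongside the package above (fmt imported). The lengths follow from nChars = ((63-shift)*37)>>8 + 1, and the first byte stores ShiftStartInt64 + shift:

func examplePrefixCoded() {
	fine := MustNewPrefixCodedInt64(42, 0)    // 11 bytes: 1 header + 10 payload
	coarse := MustNewPrefixCodedInt64(42, 32) // 6 bytes: 1 header + 5 payload

	s, _ := fine.Shift()   // 0
	c, _ := coarse.Shift() // 32
	fmt.Println(s, c, len(fine), len(coarse)) // 0 32 11 6
}

Larger shifts give shorter, coarser prefixes that group nearby values together, which is what makes numeric range queries over these terms cheap.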

View File

@@ -17,7 +17,7 @@ import (
const Name = "ansi"
const DEFAULT_ANSI_HIGHLIGHT = bgYellow
const DefaultAnsiHighlight = bgYellow
type ANSIFragmentFormatter struct {
color string
@@ -25,7 +25,7 @@ type ANSIFragmentFormatter struct {
func NewANSIFragmentFormatter() *ANSIFragmentFormatter {
return &ANSIFragmentFormatter{
color: DEFAULT_ANSI_HIGHLIGHT,
color: DefaultAnsiHighlight,
}
}
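A standalone sketch of what this formatter emits around a match, using the escape codes the highlighter test below pins down (\x1b[43m yellow background, \x1b[0m reset); a terminal honoring ANSI SGR sequences is assumed:

package main

import "fmt"

func main() {
	const bgYellow = "\x1b[43m" // SGR 43: yellow background
	const reset = "\x1b[0m"     // SGR 0: reset attributes

	// roughly the fragment the simple-highlighter test below expects:
	fmt.Println("the " + bgYellow + "quick" + reset + " brown fox")
}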

View File

@@ -20,8 +20,8 @@ import (
)
const (
reset = "\x1b[0m"
DEFAULT_ANSI_HIGHLIGHT = "\x1b[43m"
reset = "\x1b[0m"
DefaultAnsiHighlight = "\x1b[43m"
)
func TestSimpleHighlighter(t *testing.T) {
@@ -52,7 +52,7 @@ func TestSimpleHighlighter(t *testing.T) {
},
}
expectedFragment := "the " + DEFAULT_ANSI_HIGHLIGHT + "quick" + reset + " brown " + DEFAULT_ANSI_HIGHLIGHT + "fox" + reset + " jumps over the lazy dog"
expectedFragment := "the " + DefaultAnsiHighlight + "quick" + reset + " brown " + DefaultAnsiHighlight + "fox" + reset + " jumps over the lazy dog"
doc := document.NewDocument("a").AddField(document.NewTextField("desc", []uint64{}, []byte("the quick brown fox jumps over the lazy dog")))
fragment := highlighter.BestFragmentInField(&docMatch, doc, "desc")
@@ -145,11 +145,11 @@ Etiam vel augue vel nisl commodo suscipit et ac nisl. Quisque eros diam, porttit
}
expectedFragments := []string{
"…eros, in iaculis ante laoreet at. Sed " + DEFAULT_ANSI_HIGHLIGHT + "venenatis" + reset + " " + DEFAULT_ANSI_HIGHLIGHT + "interdum" + reset + " " + DEFAULT_ANSI_HIGHLIGHT + "metus" + reset + ", egestas scelerisque orci laoreet ut.…",
"… eros sed " + DEFAULT_ANSI_HIGHLIGHT + "metus" + reset + " aliquet convallis ac eget " + DEFAULT_ANSI_HIGHLIGHT + "metus" + reset + ". Donec eget feugiat sem. Quisque " + DEFAULT_ANSI_HIGHLIGHT + "venenatis" + reset + ", augue et…",
"… odio. Maecenas condimentum felis vitae nibh " + DEFAULT_ANSI_HIGHLIGHT + "venenatis" + reset + ", ut feugiat risus vehicula. Suspendisse non s…",
"… id feugiat lacus egestas. Integer et eleifend " + DEFAULT_ANSI_HIGHLIGHT + "metus" + reset + ". Duis neque tellus, vulputate nec dui eu, euism…",
"… accumsan. Vivamus eros felis, rhoncus vel " + DEFAULT_ANSI_HIGHLIGHT + "interdum" + reset + " bibendum, imperdiet nec diam. Etiam sed eros sed…",
"…eros, in iaculis ante laoreet at. Sed " + DefaultAnsiHighlight + "venenatis" + reset + " " + DefaultAnsiHighlight + "interdum" + reset + " " + DefaultAnsiHighlight + "metus" + reset + ", egestas scelerisque orci laoreet ut.…",
"… eros sed " + DefaultAnsiHighlight + "metus" + reset + " aliquet convallis ac eget " + DefaultAnsiHighlight + "metus" + reset + ". Donec eget feugiat sem. Quisque " + DefaultAnsiHighlight + "venenatis" + reset + ", augue et…",
"… odio. Maecenas condimentum felis vitae nibh " + DefaultAnsiHighlight + "venenatis" + reset + ", ut feugiat risus vehicula. Suspendisse non s…",
"… id feugiat lacus egestas. Integer et eleifend " + DefaultAnsiHighlight + "metus" + reset + ". Duis neque tellus, vulputate nec dui eu, euism…",
"… accumsan. Vivamus eros felis, rhoncus vel " + DefaultAnsiHighlight + "interdum" + reset + " bibendum, imperdiet nec diam. Etiam sed eros sed…",
}
fragmenter := sfrag.NewSimpleFragmenter(100)

View File

@@ -17,8 +17,6 @@ import (
"github.com/blevesearch/bleve/search"
)
const MAX_SCORE_CACHE = 64
type TermQueryScorer struct {
queryTerm string
queryField string
@@ -90,8 +88,8 @@ func (s *TermQueryScorer) Score(termMatch *index.TermFieldDoc) *search.DocumentM
// need to compute score
var tf float64
if termMatch.Freq < MAX_SQRT_CACHE {
tf = SQRT_CACHE[int(termMatch.Freq)]
if termMatch.Freq < MaxSqrtCache {
tf = SqrtCache[int(termMatch.Freq)]
} else {
tf = math.Sqrt(float64(termMatch.Freq))
}

View File

@@ -13,13 +13,13 @@ import (
"math"
)
var SQRT_CACHE map[int]float64
var SqrtCache map[int]float64
const MAX_SQRT_CACHE = 64
const MaxSqrtCache = 64
func init() {
SQRT_CACHE = make(map[int]float64, MAX_SQRT_CACHE)
for i := 0; i < MAX_SQRT_CACHE; i++ {
SQRT_CACHE[i] = math.Sqrt(float64(i))
SqrtCache = make(map[int]float64, MaxSqrtCache)
for i := 0; i < MaxSqrtCache; i++ {
SqrtCache[i] = math.Sqrt(float64(i))
}
}