
gofmt simplifications

Marty Schoch 2016-04-02 21:54:33 -04:00
parent 7594daad01
commit 194ee82c80
25 changed files with 191 additions and 191 deletions
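
All of the changes below are the mechanical output of gofmt's simplify mode (gofmt -s), which makes three kinds of rewrite in this commit: the redundant element type (including &T for pointer elements) is dropped from composite literals nested inside a typed slice or map literal, an unused blank value is dropped from "for k, _ := range", and "for _ = range" collapses to "for range". A minimal self-contained sketch of all three rewrites (the TokenLocation type here is an illustrative stand-in, not the bleve one):

package main

import "fmt"

type TokenLocation struct{ Position int }

func main() {
	// Composite literals: inside []*TokenLocation{...} the element type is
	// redundant, so gofmt -s rewrites &TokenLocation{...} down to {...}.
	locations := []*TokenLocation{
		{Position: 1}, // was: &TokenLocation{Position: 1}
		{Position: 2}, // was: &TokenLocation{Position: 2}
	}

	// Range clauses: an unused value is dropped, so "for k, _ := range m"
	// becomes "for k := range m".
	m := map[string]int{"a": 1, "b": 2}
	for k := range m {
		delete(m, k)
	}

	// When neither key nor value is used, "for _ = range xs" becomes
	// "for range xs" (valid since Go 1.4).
	count := 0
	for range locations {
		count++
	}
	fmt.Println(count) // 2
}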


@@ -33,12 +33,12 @@ func TestTokenFrequency(t *testing.T) {
"water": &TokenFreq{
Term: []byte("water"),
Locations: []*TokenLocation{
-&TokenLocation{
+{
Position: 1,
Start: 0,
End: 5,
},
-&TokenLocation{
+{
Position: 2,
Start: 6,
End: 11,
@@ -58,12 +58,12 @@ func TestTokenFrequenciesMergeAll(t *testing.T) {
"water": &TokenFreq{
Term: []byte("water"),
Locations: []*TokenLocation{
-&TokenLocation{
+{
Position: 1,
Start: 0,
End: 5,
},
-&TokenLocation{
+{
Position: 2,
Start: 6,
End: 11,
@@ -75,12 +75,12 @@ func TestTokenFrequenciesMergeAll(t *testing.T) {
"water": &TokenFreq{
Term: []byte("water"),
Locations: []*TokenLocation{
-&TokenLocation{
+{
Position: 1,
Start: 0,
End: 5,
},
-&TokenLocation{
+{
Position: 2,
Start: 6,
End: 11,
@@ -92,23 +92,23 @@ func TestTokenFrequenciesMergeAll(t *testing.T) {
"water": &TokenFreq{
Term: []byte("water"),
Locations: []*TokenLocation{
-&TokenLocation{
+{
Position: 1,
Start: 0,
End: 5,
},
-&TokenLocation{
+{
Position: 2,
Start: 6,
End: 11,
},
-&TokenLocation{
+{
Field: "tf2",
Position: 1,
Start: 0,
End: 5,
},
-&TokenLocation{
+{
Field: "tf2",
Position: 2,
Start: 6,
@@ -129,12 +129,12 @@ func TestTokenFrequenciesMergeAllLeftEmpty(t *testing.T) {
"water": &TokenFreq{
Term: []byte("water"),
Locations: []*TokenLocation{
-&TokenLocation{
+{
Position: 1,
Start: 0,
End: 5,
},
-&TokenLocation{
+{
Position: 2,
Start: 6,
End: 11,
@@ -146,13 +146,13 @@ func TestTokenFrequenciesMergeAllLeftEmpty(t *testing.T) {
"water": &TokenFreq{
Term: []byte("water"),
Locations: []*TokenLocation{
-&TokenLocation{
+{
Field: "tf2",
Position: 1,
Start: 0,
End: 5,
},
-&TokenLocation{
+{
Field: "tf2",
Position: 2,
Start: 6,


@@ -23,39 +23,39 @@ type ScriptData struct {
}
var scripts = map[*unicode.RangeTable]*ScriptData{
-unicode.Devanagari: &ScriptData{
+unicode.Devanagari: {
flag: 1,
base: 0x0900,
},
-unicode.Bengali: &ScriptData{
+unicode.Bengali: {
flag: 2,
base: 0x0980,
},
-unicode.Gurmukhi: &ScriptData{
+unicode.Gurmukhi: {
flag: 4,
base: 0x0A00,
},
-unicode.Gujarati: &ScriptData{
+unicode.Gujarati: {
flag: 8,
base: 0x0A80,
},
-unicode.Oriya: &ScriptData{
+unicode.Oriya: {
flag: 16,
base: 0x0B00,
},
-unicode.Tamil: &ScriptData{
+unicode.Tamil: {
flag: 32,
base: 0x0B80,
},
-unicode.Telugu: &ScriptData{
+unicode.Telugu: {
flag: 64,
base: 0x0C00,
},
-unicode.Kannada: &ScriptData{
+unicode.Kannada: {
flag: 128,
base: 0x0C80,
},
-unicode.Malayalam: &ScriptData{
+unicode.Malayalam: {
flag: 256,
base: 0x0D00,
},
@@ -226,7 +226,7 @@ func init() {
}
func lookupScript(r rune) *unicode.RangeTable {
-for script, _ := range scripts {
+for script := range scripts {
if unicode.Is(script, r) {
return script
}


@@ -74,7 +74,7 @@ func TestAnalysis(t *testing.T) {
{
d: document.NewDocument("a").
AddField(
-document.NewTextFieldWithIndexingOptions("name", nil, []byte("test"), document.IndexField|document.StoreField|document.IncludeTermVectors)),
+document.NewTextFieldWithIndexingOptions("name", nil, []byte("test"), document.IndexField|document.StoreField|document.IncludeTermVectors)),
r: &index.AnalysisResult{
DocID: "a",
Rows: []index.IndexRow{


@@ -54,7 +54,7 @@ func TestDictUpdater(t *testing.T) {
t.Fatal(err)
}
-for key, _ := range dictBatch {
+for key := range dictBatch {
v, err := reader.Get([]byte(key))
if err != nil {
t.Fatal(err)
@@ -102,7 +102,7 @@ func TestDictUpdater(t *testing.T) {
t.Fatal(err)
}
-for key, _ := range dictBatch {
+for key := range dictBatch {
v, err := reader.Get([]byte(key))
if err != nil {
t.Fatal(err)
@@ -147,7 +147,7 @@ func TestDictUpdater(t *testing.T) {
t.Fatal(err)
}
-for key, _ := range dictBatch {
+for key := range dictBatch {
v, err := reader.Get([]byte(key))
if err != nil {
t.Fatal(err)


@@ -74,7 +74,7 @@ func TestDump(t *testing.T) {
fieldsCount := 0
fieldsRows := idx.DumpFields()
-for _ = range fieldsRows {
+for range fieldsRows {
fieldsCount++
}
if fieldsCount != 4 { // _id field is automatic
@@ -89,7 +89,7 @@ func TestDump(t *testing.T) {
expectedDocRowCount := int(1 + 1 + (2 * (64 / document.DefaultPrecisionStep)) + 3)
docRowCount := 0
docRows := idx.DumpDoc("1")
-for _ = range docRows {
+for range docRows {
docRowCount++
}
if docRowCount != expectedDocRowCount {
@@ -98,7 +98,7 @@ func TestDump(t *testing.T) {
docRowCount = 0
docRows = idx.DumpDoc("2")
-for _ = range docRows {
+for range docRows {
docRowCount++
}
if docRowCount != expectedDocRowCount {
@@ -120,7 +120,7 @@ func TestDump(t *testing.T) {
expectedAllRowCount := int(1 + fieldsCount + (2 * expectedDocRowCount) + 2 + int((2 * (64 / document.DefaultPrecisionStep))))
allRowCount := 0
allRows := idx.DumpAll()
-for _ = range allRows {
+for range allRows {
allRowCount++
}
if allRowCount != expectedAllRowCount {


@@ -176,7 +176,7 @@ func (f *Firestorm) Update(doc *document.Document) (err error) {
}
f.compensator.Mutate([]byte(doc.ID), doc.Number)
-f.lookuper.NotifyBatch([]*InFlightItem{&InFlightItem{[]byte(doc.ID), doc.Number}})
+f.lookuper.NotifyBatch([]*InFlightItem{{[]byte(doc.ID), doc.Number}})
f.dictUpdater.NotifyBatch(dictionaryDeltas)
atomic.AddUint64(&f.stats.indexTime, uint64(time.Since(indexStart)))
@@ -187,7 +187,7 @@ func (f *Firestorm) Update(doc *document.Document) (err error) {
func (f *Firestorm) Delete(id string) error {
indexStart := time.Now()
f.compensator.Mutate([]byte(id), 0)
-f.lookuper.NotifyBatch([]*InFlightItem{&InFlightItem{[]byte(id), 0}})
+f.lookuper.NotifyBatch([]*InFlightItem{{[]byte(id), 0}})
atomic.AddUint64(&f.stats.indexTime, uint64(time.Since(indexStart)))
return nil
}


@@ -195,10 +195,10 @@ func (b *Batch) String() string {
}
func (b *Batch) Reset() {
-for k, _ := range b.IndexOps {
+for k := range b.IndexOps {
delete(b.IndexOps, k)
}
-for k, _ := range b.InternalOps {
+for k := range b.InternalOps {
delete(b.InternalOps, k)
}
}


@@ -80,7 +80,7 @@ func TestDump(t *testing.T) {
fieldsCount := 0
fieldsRows := idx.DumpFields()
-for _ = range fieldsRows {
+for range fieldsRows {
fieldsCount++
}
if fieldsCount != 3 {
@@ -94,7 +94,7 @@ func TestDump(t *testing.T) {
expectedDocRowCount := int(1 + (2 * (64 / document.DefaultPrecisionStep)) + 3)
docRowCount := 0
docRows := idx.DumpDoc("1")
-for _ = range docRows {
+for range docRows {
docRowCount++
}
if docRowCount != expectedDocRowCount {
@@ -103,7 +103,7 @@ func TestDump(t *testing.T) {
docRowCount = 0
docRows = idx.DumpDoc("2")
-for _ = range docRows {
+for range docRows {
docRowCount++
}
if docRowCount != expectedDocRowCount {
@@ -120,7 +120,7 @@ func TestDump(t *testing.T) {
expectedAllRowCount := int(1 + fieldsCount + (2 * expectedDocRowCount) + 2 + 2 + int((2 * (64 / document.DefaultPrecisionStep))))
allRowCount := 0
allRows := idx.DumpAll()
-for _ = range allRows {
+for range allRows {
allRowCount++
}
if allRowCount != expectedAllRowCount {


@@ -115,7 +115,7 @@ func TestIndexReader(t *testing.T) {
Freq: 1,
Norm: 0.5773502588272095,
Vectors: []*index.TermFieldVector{
-&index.TermFieldVector{
+{
Field: "desc",
Pos: 3,
Start: 9,


@@ -59,34 +59,34 @@ func TestRows(t *testing.T) {
[]byte{3, 195, 235, 163, 130, 4},
},
{
-NewTermFrequencyRowWithTermVectors([]byte{'b', 'e', 'e', 'r'}, 0, []byte("budweiser"), 3, 3.14, []*TermVector{&TermVector{field: 0, pos: 1, start: 3, end: 11}, &TermVector{field: 0, pos: 2, start: 23, end: 31}, &TermVector{field: 0, pos: 3, start: 43, end: 51}}),
+NewTermFrequencyRowWithTermVectors([]byte{'b', 'e', 'e', 'r'}, 0, []byte("budweiser"), 3, 3.14, []*TermVector{{field: 0, pos: 1, start: 3, end: 11}, {field: 0, pos: 2, start: 23, end: 31}, {field: 0, pos: 3, start: 43, end: 51}}),
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{3, 195, 235, 163, 130, 4, 0, 1, 3, 11, 0, 0, 2, 23, 31, 0, 0, 3, 43, 51, 0},
},
// test larger varints
{
-NewTermFrequencyRowWithTermVectors([]byte{'b', 'e', 'e', 'r'}, 0, []byte("budweiser"), 25896, 3.14, []*TermVector{&TermVector{field: 255, pos: 1, start: 3, end: 11}, &TermVector{field: 0, pos: 2198, start: 23, end: 31}, &TermVector{field: 0, pos: 3, start: 43, end: 51}}),
+NewTermFrequencyRowWithTermVectors([]byte{'b', 'e', 'e', 'r'}, 0, []byte("budweiser"), 25896, 3.14, []*TermVector{{field: 255, pos: 1, start: 3, end: 11}, {field: 0, pos: 2198, start: 23, end: 31}, {field: 0, pos: 3, start: 43, end: 51}}),
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{168, 202, 1, 195, 235, 163, 130, 4, 255, 1, 1, 3, 11, 0, 0, 150, 17, 23, 31, 0, 0, 3, 43, 51, 0},
},
// test vectors with arrayPositions
{
-NewTermFrequencyRowWithTermVectors([]byte{'b', 'e', 'e', 'r'}, 0, []byte("budweiser"), 25896, 3.14, []*TermVector{&TermVector{field: 255, pos: 1, start: 3, end: 11, arrayPositions: []uint64{0}}, &TermVector{field: 0, pos: 2198, start: 23, end: 31, arrayPositions: []uint64{1, 2}}, &TermVector{field: 0, pos: 3, start: 43, end: 51, arrayPositions: []uint64{3, 4, 5}}}),
+NewTermFrequencyRowWithTermVectors([]byte{'b', 'e', 'e', 'r'}, 0, []byte("budweiser"), 25896, 3.14, []*TermVector{{field: 255, pos: 1, start: 3, end: 11, arrayPositions: []uint64{0}}, {field: 0, pos: 2198, start: 23, end: 31, arrayPositions: []uint64{1, 2}}, {field: 0, pos: 3, start: 43, end: 51, arrayPositions: []uint64{3, 4, 5}}}),
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{168, 202, 1, 195, 235, 163, 130, 4, 255, 1, 1, 3, 11, 1, 0, 0, 150, 17, 23, 31, 2, 1, 2, 0, 3, 43, 51, 3, 3, 4, 5},
},
{
-NewBackIndexRow([]byte("budweiser"), []*BackIndexTermEntry{&BackIndexTermEntry{Term: proto.String("beer"), Field: proto.Uint32(0)}}, nil),
+NewBackIndexRow([]byte("budweiser"), []*BackIndexTermEntry{{Term: proto.String("beer"), Field: proto.Uint32(0)}}, nil),
[]byte{'b', 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{10, 8, 10, 4, 'b', 'e', 'e', 'r', 16, 0},
},
{
-NewBackIndexRow([]byte("budweiser"), []*BackIndexTermEntry{&BackIndexTermEntry{Term: proto.String("beer"), Field: proto.Uint32(0)}, &BackIndexTermEntry{Term: proto.String("beat"), Field: proto.Uint32(1)}}, nil),
+NewBackIndexRow([]byte("budweiser"), []*BackIndexTermEntry{{Term: proto.String("beer"), Field: proto.Uint32(0)}, {Term: proto.String("beat"), Field: proto.Uint32(1)}}, nil),
[]byte{'b', 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{10, 8, 10, 4, 'b', 'e', 'e', 'r', 16, 0, 10, 8, 10, 4, 'b', 'e', 'a', 't', 16, 1},
},
{
-NewBackIndexRow([]byte("budweiser"), []*BackIndexTermEntry{&BackIndexTermEntry{Term: proto.String("beer"), Field: proto.Uint32(0)}, &BackIndexTermEntry{Term: proto.String("beat"), Field: proto.Uint32(1)}}, []*BackIndexStoreEntry{&BackIndexStoreEntry{Field: proto.Uint32(3)}, &BackIndexStoreEntry{Field: proto.Uint32(4)}, &BackIndexStoreEntry{Field: proto.Uint32(5)}}),
+NewBackIndexRow([]byte("budweiser"), []*BackIndexTermEntry{{Term: proto.String("beer"), Field: proto.Uint32(0)}, {Term: proto.String("beat"), Field: proto.Uint32(1)}}, []*BackIndexStoreEntry{{Field: proto.Uint32(3)}, {Field: proto.Uint32(4)}, {Field: proto.Uint32(5)}}),
[]byte{'b', 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{10, 8, 10, 4, 'b', 'e', 'e', 'r', 16, 0, 10, 8, 10, 4, 'b', 'e', 'a', 't', 16, 1, 18, 2, 8, 3, 18, 2, 8, 4, 18, 2, 8, 5},
},
@@ -263,19 +263,19 @@ func BenchmarkTermFrequencyRowEncode(b *testing.B) {
3,
3.14,
[]*TermVector{
-&TermVector{
+{
field: 0,
pos: 1,
start: 3,
end: 11,
},
-&TermVector{
+{
field: 0,
pos: 2,
start: 23,
end: 31,
},
-&TermVector{
+{
field: 0,
pos: 3,
start: 43,
@@ -306,13 +306,13 @@ func BenchmarkBackIndexRowEncode(b *testing.B) {
t1 := "term1"
row := NewBackIndexRow([]byte("beername"),
[]*BackIndexTermEntry{
-&BackIndexTermEntry{
+{
Term: &t1,
Field: &field,
},
},
[]*BackIndexStoreEntry{
-&BackIndexStoreEntry{
+{
Field: &field,
},
})


@@ -82,7 +82,7 @@ func NewUpsideDownCouch(storeName string, storeConfig map[string]interface{}, an
func (udc *UpsideDownCouch) init(kvwriter store.KVWriter) (err error) {
// version marker
rowsAll := [][]UpsideDownCouchRow{
-[]UpsideDownCouchRow{NewVersionRow(udc.version)},
+{NewVersionRow(udc.version)},
}
err = udc.batchRows(kvwriter, nil, rowsAll, nil)
@@ -211,7 +211,7 @@ func (udc *UpsideDownCouch) batchRows(writer store.KVWriter, addRowsAll [][]Upsi
mergeKeyBytes := 0
mergeValBytes := mergeNum * DictionaryRowMaxValueSize
-for dictRowKey, _ := range dictionaryDeltas {
+for dictRowKey := range dictionaryDeltas {
mergeKeyBytes += len(dictRowKey)
}


@@ -460,7 +460,7 @@ func TestIndexAliasMulti(t *testing.T) {
},
Total: 1,
Hits: search.DocumentMatchCollection{
-&search.DocumentMatch{
+{
ID: "a",
Score: 1.0,
},
@@ -479,7 +479,7 @@ func TestIndexAliasMulti(t *testing.T) {
},
Total: 1,
Hits: search.DocumentMatchCollection{
-&search.DocumentMatch{
+{
ID: "b",
Score: 2.0,
},
@@ -566,11 +566,11 @@ func TestIndexAliasMulti(t *testing.T) {
Request: sr,
Total: 2,
Hits: search.DocumentMatchCollection{
-&search.DocumentMatch{
+{
ID: "b",
Score: 2.0,
},
-&search.DocumentMatch{
+{
ID: "a",
Score: 1.0,
},
@@ -603,7 +603,7 @@ func TestMultiSearchNoError(t *testing.T) {
},
Total: 1,
Hits: search.DocumentMatchCollection{
-&search.DocumentMatch{
+{
Index: "1",
ID: "a",
Score: 1.0,
@@ -619,7 +619,7 @@ func TestMultiSearchNoError(t *testing.T) {
},
Total: 1,
Hits: search.DocumentMatchCollection{
-&search.DocumentMatch{
+{
Index: "2",
ID: "b",
Score: 2.0,
@@ -638,12 +638,12 @@ func TestMultiSearchNoError(t *testing.T) {
Request: sr,
Total: 2,
Hits: search.DocumentMatchCollection{
-&search.DocumentMatch{
+{
Index: "2",
ID: "b",
Score: 2.0,
},
-&search.DocumentMatch{
+{
Index: "1",
ID: "a",
Score: 1.0,
@@ -673,7 +673,7 @@ func TestMultiSearchSomeError(t *testing.T) {
},
Total: 1,
Hits: search.DocumentMatchCollection{
-&search.DocumentMatch{
+{
ID: "a",
Score: 1.0,
},
@@ -793,7 +793,7 @@ func TestMultiSearchTimeout(t *testing.T) {
},
Total: 1,
Hits: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
Index: "1",
ID: "a",
Score: 1.0,
@@ -816,7 +816,7 @@ func TestMultiSearchTimeout(t *testing.T) {
},
Total: 1,
Hits: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
Index: "2",
ID: "b",
Score: 2.0,
@@ -914,7 +914,7 @@ func TestMultiSearchTimeoutPartial(t *testing.T) {
},
Total: 1,
Hits: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
Index: "1",
ID: "a",
Score: 1.0,
@@ -933,7 +933,7 @@ func TestMultiSearchTimeoutPartial(t *testing.T) {
},
Total: 1,
Hits: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
Index: "2",
ID: "b",
Score: 2.0,
@@ -957,7 +957,7 @@ func TestMultiSearchTimeoutPartial(t *testing.T) {
},
Total: 1,
Hits: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
Index: "3",
ID: "c",
Score: 3.0,
@@ -983,12 +983,12 @@ func TestMultiSearchTimeoutPartial(t *testing.T) {
Request: sr,
Total: 2,
Hits: search.DocumentMatchCollection{
-&search.DocumentMatch{
+{
Index: "2",
ID: "b",
Score: 2.0,
},
-&search.DocumentMatch{
+{
Index: "1",
ID: "a",
Score: 1.0,
@@ -1019,7 +1019,7 @@ func TestIndexAliasMultipleLayer(t *testing.T) {
},
Total: 1,
Hits: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
Index: "1",
ID: "a",
Score: 1.0,
@@ -1042,7 +1042,7 @@ func TestIndexAliasMultipleLayer(t *testing.T) {
},
Total: 1,
Hits: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
Index: "2",
ID: "b",
Score: 2.0,
@@ -1066,7 +1066,7 @@ func TestIndexAliasMultipleLayer(t *testing.T) {
},
Total: 1,
Hits: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
Index: "3",
ID: "c",
Score: 3.0,
@@ -1086,7 +1086,7 @@ func TestIndexAliasMultipleLayer(t *testing.T) {
},
Total: 1,
Hits: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
Index: "4",
ID: "d",
Score: 4.0,
@@ -1119,12 +1119,12 @@ func TestIndexAliasMultipleLayer(t *testing.T) {
Request: sr,
Total: 2,
Hits: search.DocumentMatchCollection{
-&search.DocumentMatch{
+{
Index: "4",
ID: "d",
Score: 4.0,
},
-&search.DocumentMatch{
+{
Index: "1",
ID: "a",
Score: 1.0,


@@ -50,7 +50,7 @@ func (c *customAnalysis) registerAll(i *IndexMapping) error {
if len(c.Tokenizers) > 0 {
// put all the names in map tracking work to do
todo := map[string]struct{}{}
-for name, _ := range c.Tokenizers {
+for name := range c.Tokenizers {
todo[name] = struct{}{}
}
registered := 1
@@ -59,7 +59,7 @@ func (c *customAnalysis) registerAll(i *IndexMapping) error {
for len(todo) > 0 && registered > 0 {
registered = 0
errs = []error{}
-for name, _ := range todo {
+for name := range todo {
config := c.Tokenizers[name]
_, err := i.cache.DefineTokenizer(name, config)
if err != nil {


@@ -11,7 +11,7 @@ import (
var pcodedvalues []nu.PrefixCoded
func init() {
-pcodedvalues = []nu.PrefixCoded{nu.PrefixCoded{0x20, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, nu.PrefixCoded{0x20, 0x0, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f}, nu.PrefixCoded{0x20, 0x0, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7a, 0x1d, 0xa}, nu.PrefixCoded{0x20, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x16, 0x9, 0x4a, 0x7b}}
+pcodedvalues = []nu.PrefixCoded{{0x20, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, {0x20, 0x0, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f}, {0x20, 0x0, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7a, 0x1d, 0xa}, {0x20, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x16, 0x9, 0x4a, 0x7b}}
}
func BenchmarkNumericFacet10(b *testing.B) {


@@ -13,15 +13,15 @@ func TestTermFacetResultsMerge(t *testing.T) {
Missing: 25,
Other: 25,
Terms: []*TermFacet{
-&TermFacet{
+{
Term: "blog",
Count: 25,
},
-&TermFacet{
+{
Term: "comment",
Count: 24,
},
-&TermFacet{
+{
Term: "feedback",
Count: 1,
},
@@ -33,11 +33,11 @@ func TestTermFacetResultsMerge(t *testing.T) {
Missing: 22,
Other: 15,
Terms: []*TermFacet{
-&TermFacet{
+{
Term: "clothing",
Count: 35,
},
-&TermFacet{
+{
Term: "electronics",
Count: 25,
},
@@ -54,15 +54,15 @@ func TestTermFacetResultsMerge(t *testing.T) {
Missing: 25,
Other: 25,
Terms: []*TermFacet{
-&TermFacet{
+{
Term: "blog",
Count: 25,
},
-&TermFacet{
+{
Term: "comment",
Count: 22,
},
-&TermFacet{
+{
Term: "flag",
Count: 3,
},
@@ -78,15 +78,15 @@ func TestTermFacetResultsMerge(t *testing.T) {
Missing: 50,
Other: 51,
Terms: []*TermFacet{
-&TermFacet{
+{
Term: "blog",
Count: 50,
},
-&TermFacet{
+{
Term: "comment",
Count: 46,
},
-&TermFacet{
+{
Term: "flag",
Count: 3,
},
@@ -116,18 +116,18 @@ func TestNumericFacetResultsMerge(t *testing.T) {
Missing: 25,
Other: 25,
NumericRanges: []*NumericRangeFacet{
-&NumericRangeFacet{
+{
Name: "low",
Max: &lowmed,
Count: 25,
},
-&NumericRangeFacet{
+{
Name: "med",
Count: 24,
Max: &lowmed,
Min: &medhi,
},
-&NumericRangeFacet{
+{
Name: "hi",
Count: 1,
Min: &medhi,
@@ -145,18 +145,18 @@ func TestNumericFacetResultsMerge(t *testing.T) {
Missing: 25,
Other: 25,
NumericRanges: []*NumericRangeFacet{
-&NumericRangeFacet{
+{
Name: "low",
Max: &lowmed,
Count: 25,
},
-&NumericRangeFacet{
+{
Name: "med",
Max: &lowmed,
Min: &medhi,
Count: 22,
},
-&NumericRangeFacet{
+{
Name: "highest",
Min: &hihigher,
Count: 3,
@@ -173,18 +173,18 @@ func TestNumericFacetResultsMerge(t *testing.T) {
Missing: 50,
Other: 51,
NumericRanges: []*NumericRangeFacet{
-&NumericRangeFacet{
+{
Name: "low",
Count: 50,
Max: &lowmed,
},
-&NumericRangeFacet{
+{
Name: "med",
Max: &lowmed,
Min: &medhi,
Count: 46,
},
-&NumericRangeFacet{
+{
Name: "highest",
Min: &hihigher,
Count: 3,
@@ -220,18 +220,18 @@ func TestDateFacetResultsMerge(t *testing.T) {
Missing: 25,
Other: 25,
DateRanges: []*DateRangeFacet{
-&DateRangeFacet{
+{
Name: "low",
End: &lowmed,
Count: 25,
},
-&DateRangeFacet{
+{
Name: "med",
Count: 24,
Start: &lowmed,
End: &medhi,
},
-&DateRangeFacet{
+{
Name: "hi",
Count: 1,
Start: &medhi,
@@ -249,18 +249,18 @@ func TestDateFacetResultsMerge(t *testing.T) {
Missing: 25,
Other: 25,
DateRanges: []*DateRangeFacet{
-&DateRangeFacet{
+{
Name: "low",
End: &lowmed2,
Count: 25,
},
-&DateRangeFacet{
+{
Name: "med",
Start: &lowmed2,
End: &medhi2,
Count: 22,
},
-&DateRangeFacet{
+{
Name: "highest",
Start: &hihigher2,
Count: 3,
@@ -277,18 +277,18 @@ func TestDateFacetResultsMerge(t *testing.T) {
Missing: 50,
Other: 51,
DateRanges: []*DateRangeFacet{
-&DateRangeFacet{
+{
Name: "low",
Count: 50,
End: &lowmed,
},
-&DateRangeFacet{
+{
Name: "med",
Start: &lowmed,
End: &medhi,
Count: 46,
},
-&DateRangeFacet{
+{
Name: "highest",
Start: &hihigher,
Count: 3,


@@ -27,7 +27,7 @@ func TestSimpleFragmenter(t *testing.T) {
{
orig: []byte("this is a test"),
fragments: []*highlight.Fragment{
-&highlight.Fragment{
+{
Orig: []byte("this is a test"),
Start: 0,
End: 14,
@@ -46,7 +46,7 @@ func TestSimpleFragmenter(t *testing.T) {
{
orig: []byte("0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"),
fragments: []*highlight.Fragment{
-&highlight.Fragment{
+{
Orig: []byte("0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"),
Start: 0,
End: 100,
@@ -65,52 +65,52 @@ func TestSimpleFragmenter(t *testing.T) {
{
orig: []byte("01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"),
fragments: []*highlight.Fragment{
-&highlight.Fragment{
+{
Orig: []byte("01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"),
Start: 0,
End: 100,
},
-&highlight.Fragment{
+{
Orig: []byte("01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"),
Start: 10,
End: 101,
},
-&highlight.Fragment{
+{
Orig: []byte("01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"),
Start: 20,
End: 101,
},
-&highlight.Fragment{
+{
Orig: []byte("01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"),
Start: 30,
End: 101,
},
-&highlight.Fragment{
+{
Orig: []byte("01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"),
Start: 40,
End: 101,
},
-&highlight.Fragment{
+{
Orig: []byte("01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"),
Start: 50,
End: 101,
},
-&highlight.Fragment{
+{
Orig: []byte("01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"),
Start: 60,
End: 101,
},
-&highlight.Fragment{
+{
Orig: []byte("01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"),
Start: 70,
End: 101,
},
-&highlight.Fragment{
+{
Orig: []byte("01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"),
Start: 80,
End: 101,
},
-&highlight.Fragment{
+{
Orig: []byte("01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"),
Start: 90,
End: 101,
@@ -183,7 +183,7 @@ func TestSimpleFragmenter(t *testing.T) {
{
orig: []byte("[[पानी का स्वाद]] [[नीलेश रघुवंशी]] का कविता संग्रह हैं। इस कृति के लिए उन्हें २००४ में [[केदार सम्मान]] से सम्मानित किया गया है।{{केदार सम्मान से सम्मानित कृतियाँ}}"),
fragments: []*highlight.Fragment{
-&highlight.Fragment{
+{
Orig: []byte("[[पानी का स्वाद]] [[नीलेश रघुवंशी]] का कविता संग्रह हैं। इस कृति के लिए उन्हें २००४ में [[केदार सम्मान]] से सम्मानित किया गया है।{{केदार सम्मान से सम्मानित कृतियाँ}}"),
Start: 0,
End: 411,
@@ -202,12 +202,12 @@ func TestSimpleFragmenter(t *testing.T) {
{
orig: []byte("交换机"),
fragments: []*highlight.Fragment{
-&highlight.Fragment{
+{
Orig: []byte("交换机"),
Start: 0,
End: 9,
},
-&highlight.Fragment{
+{
Orig: []byte("交换机"),
Start: 3,
End: 9,
@@ -254,12 +254,12 @@ func TestSimpleFragmenterWithSize(t *testing.T) {
{
orig: []byte("this is a test"),
fragments: []*highlight.Fragment{
-&highlight.Fragment{
+{
Orig: []byte("this is a test"),
Start: 0,
End: 5,
},
-&highlight.Fragment{
+{
Orig: []byte("this is a test"),
Start: 9,
End: 14,


@@ -32,7 +32,7 @@ func TestConstantScorer(t *testing.T) {
Freq: 1,
Norm: 1.0,
Vectors: []*index.TermFieldVector{
-&index.TermFieldVector{
+{
Field: "desc",
Pos: 1,
Start: 0,
@@ -83,21 +83,21 @@ func TestConstantScorerWithQueryNorm(t *testing.T) {
Value: 2.0,
Message: "weight(^1.000000), product of:",
Children: []*search.Explanation{
-&search.Explanation{
+{
Value: 2.0,
Message: "ConstantScore()^1.000000, product of:",
Children: []*search.Explanation{
-&search.Explanation{
+{
Value: 1,
Message: "boost",
},
-&search.Explanation{
+{
Value: 2,
Message: "queryNorm",
},
},
},
-&search.Explanation{
+{
Value: 1.0,
Message: "ConstantScore()",
},


@@ -39,7 +39,7 @@ func TestTermScorer(t *testing.T) {
Freq: 1,
Norm: 1.0,
Vectors: []*index.TermFieldVector{
-&index.TermFieldVector{
+{
Field: "desc",
Pos: 1,
Start: 0,
@@ -54,15 +54,15 @@ func TestTermScorer(t *testing.T) {
Value: math.Sqrt(1.0) * idf,
Message: "fieldWeight(desc:beer in one), product of:",
Children: []*search.Explanation{
-&search.Explanation{
+{
Value: 1,
Message: "tf(termFreq(desc:beer)=1",
},
-&search.Explanation{
+{
Value: 1,
Message: "fieldNorm(field=desc, doc=one)",
},
-&search.Explanation{
+{
Value: idf,
Message: "idf(docFreq=9, maxDocs=100)",
},
@@ -95,15 +95,15 @@ func TestTermScorer(t *testing.T) {
Value: math.Sqrt(1.0) * idf,
Message: "fieldWeight(desc:beer in one), product of:",
Children: []*search.Explanation{
-&search.Explanation{
+{
Value: 1,
Message: "tf(termFreq(desc:beer)=1",
},
-&search.Explanation{
+{
Value: 1,
Message: "fieldNorm(field=desc, doc=one)",
},
-&search.Explanation{
+{
Value: idf,
Message: "idf(docFreq=9, maxDocs=100)",
},
@@ -125,15 +125,15 @@ func TestTermScorer(t *testing.T) {
Value: math.Sqrt(65) * idf,
Message: "fieldWeight(desc:beer in one), product of:",
Children: []*search.Explanation{
-&search.Explanation{
+{
Value: math.Sqrt(65),
Message: "tf(termFreq(desc:beer)=65",
},
-&search.Explanation{
+{
Value: 1,
Message: "fieldNorm(field=desc, doc=one)",
},
-&search.Explanation{
+{
Value: idf,
Message: "idf(docFreq=9, maxDocs=100)",
},
@@ -188,37 +188,37 @@ func TestTermScorerWithQueryNorm(t *testing.T) {
Value: math.Sqrt(1.0) * idf * 3.0 * idf * 2.0,
Message: "weight(desc:beer^3.000000 in one), product of:",
Children: []*search.Explanation{
-&search.Explanation{
+{
Value: 2.0 * idf * 3.0,
Message: "queryWeight(desc:beer^3.000000), product of:",
Children: []*search.Explanation{
-&search.Explanation{
+{
Value: 3,
Message: "boost",
},
-&search.Explanation{
+{
Value: idf,
Message: "idf(docFreq=9, maxDocs=100)",
},
-&search.Explanation{
+{
Value: 2,
Message: "queryNorm",
},
},
},
-&search.Explanation{
+{
Value: math.Sqrt(1.0) * idf,
Message: "fieldWeight(desc:beer in one), product of:",
Children: []*search.Explanation{
-&search.Explanation{
+{
Value: 1,
Message: "tf(termFreq(desc:beer)=1",
},
-&search.Explanation{
+{
Value: 1,
Message: "fieldNorm(field=desc, doc=one)",
},
-&search.Explanation{
+{
Value: idf,
Message: "idf(docFreq=9, maxDocs=100)",
},


@@ -247,15 +247,15 @@ func TestBooleanSearch(t *testing.T) {
{
searcher: booleanSearcher,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "1",
Score: 0.9818005051949021,
},
-&search.DocumentMatch{
+{
ID: "3",
Score: 0.808709699395535,
},
-&search.DocumentMatch{
+{
ID: "4",
Score: 0.34618161159873423,
},
@@ -264,11 +264,11 @@ func TestBooleanSearch(t *testing.T) {
{
searcher: booleanSearcher2,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "1",
Score: 0.6775110856165737,
},
-&search.DocumentMatch{
+{
ID: "3",
Score: 0.6775110856165737,
},
@@ -282,15 +282,15 @@ func TestBooleanSearch(t *testing.T) {
{
searcher: booleanSearcher4,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "1",
Score: 1.0,
},
-&search.DocumentMatch{
+{
ID: "3",
Score: 0.5,
},
-&search.DocumentMatch{
+{
ID: "4",
Score: 1.0,
},
@@ -299,11 +299,11 @@ func TestBooleanSearch(t *testing.T) {
{
searcher: booleanSearcher5,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "3",
Score: 0.5,
},
-&search.DocumentMatch{
+{
ID: "4",
Score: 1.0,
},
@@ -317,7 +317,7 @@ func TestBooleanSearch(t *testing.T) {
{
searcher: conjunctionSearcher7,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "1",
Score: 2.0097428702814377,
},
@@ -326,7 +326,7 @@ func TestBooleanSearch(t *testing.T) {
{
searcher: conjunctionSearcher8,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "3",
Score: 2.0681575785068107,
},


@@ -127,7 +127,7 @@ func TestConjunctionSearch(t *testing.T) {
{
searcher: beerAndMartySearcher,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "1",
Score: 2.0097428702814377,
},
@@ -136,7 +136,7 @@ func TestConjunctionSearch(t *testing.T) {
{
searcher: angstAndBeerSearcher,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "2",
Score: 1.0807601687084403,
},
@@ -149,11 +149,11 @@ func TestConjunctionSearch(t *testing.T) {
{
searcher: beerAndMisterSearcher,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "2",
Score: 1.2877980334016337,
},
-&search.DocumentMatch{
+{
ID: "3",
Score: 1.2877980334016337,
},
@@ -162,7 +162,7 @@ func TestConjunctionSearch(t *testing.T) {
{
searcher: couchbaseAndMisterSearcher,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "2",
Score: 1.4436599157093672,
},
@@ -171,7 +171,7 @@ func TestConjunctionSearch(t *testing.T) {
{
searcher: beerAndCouchbaseAndMisterSearcher,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "2",
Score: 1.441614953806971,
},


@@ -70,11 +70,11 @@ func TestDisjunctionSearch(t *testing.T) {
{
searcher: martyOrDustinSearcher,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "1",
Score: 0.6775110856165737,
},
-&search.DocumentMatch{
+{
ID: "3",
Score: 0.6775110856165737,
},
@@ -84,15 +84,15 @@ func TestDisjunctionSearch(t *testing.T) {
{
searcher: nestedRaviOrMartyOrDustinSearcher,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "1",
Score: 0.2765927424732821,
},
-&search.DocumentMatch{
+{
ID: "3",
Score: 0.2765927424732821,
},
-&search.DocumentMatch{
+{
ID: "4",
Score: 0.5531854849465642,
},


@@ -55,19 +55,19 @@ func TestFuzzySearch(t *testing.T) {
{
searcher: fuzzySearcherbeet,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "1",
Score: 1.0,
},
-&search.DocumentMatch{
+{
ID: "2",
Score: 0.5,
},
-&search.DocumentMatch{
+{
ID: "3",
Score: 0.5,
},
-&search.DocumentMatch{
+{
ID: "4",
Score: 0.9999999838027345,
},
@@ -80,7 +80,7 @@ func TestFuzzySearch(t *testing.T) {
{
searcher: fuzzySearcheraplee,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "3",
Score: 0.9581453659370776,
},
@@ -89,7 +89,7 @@ func TestFuzzySearch(t *testing.T) {
{
searcher: fuzzySearcherprefix,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "5",
Score: 1.916290731874155,
},


@@ -47,23 +47,23 @@ func TestMatchAllSearch(t *testing.T) {
searcher: allSearcher,
queryNorm: 1.0,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "1",
Score: 1.0,
},
-&search.DocumentMatch{
+{
ID: "2",
Score: 1.0,
},
-&search.DocumentMatch{
+{
ID: "3",
Score: 1.0,
},
-&search.DocumentMatch{
+{
ID: "4",
Score: 1.0,
},
-&search.DocumentMatch{
+{
ID: "5",
Score: 1.0,
},
@@ -73,23 +73,23 @@ func TestMatchAllSearch(t *testing.T) {
searcher: allSearcher2,
queryNorm: 0.8333333,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "1",
Score: 1.0,
},
-&search.DocumentMatch{
+{
ID: "2",
Score: 1.0,
},
-&search.DocumentMatch{
+{
ID: "3",
Score: 1.0,
},
-&search.DocumentMatch{
+{
ID: "4",
Score: 1.0,
},
-&search.DocumentMatch{
+{
ID: "5",
Score: 1.0,
},


@@ -52,7 +52,7 @@ func TestPhraseSearch(t *testing.T) {
{
searcher: phraseSearcher,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "2",
Score: 1.0807601687084403,
},


@@ -56,7 +56,7 @@ func TestRegexpSearch(t *testing.T) {
{
searcher: regexpSearcher,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "1",
Score: 1.916290731874155,
},
@@ -65,11 +65,11 @@ func TestRegexpSearch(t *testing.T) {
{
searcher: regexpSearcherCo,
results: []*search.DocumentMatch{
-&search.DocumentMatch{
+{
ID: "2",
Score: 0.33875554280828685,
},
-&search.DocumentMatch{
+{
ID: "3",
Score: 0.33875554280828685,
},