// Copyright (c) 2013 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package scorer

import (
"math"
"reflect"
"testing"
"github.com/blevesearch/bleve/index"
"github.com/blevesearch/bleve/search"
)
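
// TestTermScorer runs a table of term matches through the term scorer and
// compares each resulting DocumentMatch, including its explanation tree and
// term locations, against hand-computed tf-idf style expectations.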
func TestTermScorer(t *testing.T) {
var docTotal uint64 = 100
var docTerm uint64 = 9
var queryTerm = "beer"
var queryField = "desc"
var queryBoost = 1.0
scorer := NewTermQueryScorer(queryTerm, queryField, queryBoost, docTotal, docTerm, true)
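// mirror the scorer's idf factor: 1 + ln(docTotal / (docTerm + 1))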
idf := 1.0 + math.Log(float64(docTotal)/float64(docTerm+1.0))
tests := []struct {
termMatch *index.TermFieldDoc
result *search.DocumentMatch
}{
// test some simple math
{
termMatch: &index.TermFieldDoc{
ID: index.IndexInternalID("one"),
Freq: 1,
Norm: 1.0,
Vectors: []*index.TermFieldVector{
{
Field: "desc",
Pos: 1,
Start: 0,
End: 4,
},
},
},
result: &search.DocumentMatch{
IndexInternalID: index.IndexInternalID("one"),
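// expected score: sqrt(freq) * fieldNorm * idf (boost and queryNorm are 1 here)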
Score: math.Sqrt(1.0) * idf,
Sort: []string{},
Expl: &search.Explanation{
Value: math.Sqrt(1.0) * idf,
Message: "fieldWeight(desc:beer in one), product of:",
Children: []*search.Explanation{
{
Value: 1,
Message: "tf(termFreq(desc:beer)=1",
},
{
Value: 1,
Message: "fieldNorm(field=desc, doc=one)",
},
{
Value: idf,
Message: "idf(docFreq=9, maxDocs=100)",
},
},
},
Locations: search.FieldTermLocationMap{
"desc": search.TermLocationMap{
"beer": []*search.Location{
{
Pos: 1,
Start: 0,
End: 4,
},
},
},
},
},
},
// test the same thing again (score should be cached this time)
{
termMatch: &index.TermFieldDoc{
ID: index.IndexInternalID("one"),
Freq: 1,
Norm: 1.0,
},
result: &search.DocumentMatch{
IndexInternalID: index.IndexInternalID("one"),
Score: math.Sqrt(1.0) * idf,
Sort: []string{},
Expl: &search.Explanation{
Value: math.Sqrt(1.0) * idf,
Message: "fieldWeight(desc:beer in one), product of:",
Children: []*search.Explanation{
{
Value: 1,
Message: "tf(termFreq(desc:beer)=1",
},
{
Value: 1,
Message: "fieldNorm(field=desc, doc=one)",
},
{
Value: idf,
Message: "idf(docFreq=9, maxDocs=100)",
},
},
},
},
},
// test a case where the sqrt isn't precalculated
{
termMatch: &index.TermFieldDoc{
ID: index.IndexInternalID("one"),
Freq: 65,
Norm: 1.0,
},
result: &search.DocumentMatch{
IndexInternalID: index.IndexInternalID("one"),
Score: math.Sqrt(65) * idf,
Sort: []string{},
Expl: &search.Explanation{
Value: math.Sqrt(65) * idf,
Message: "fieldWeight(desc:beer in one), product of:",
Children: []*search.Explanation{
{
Value: math.Sqrt(65),
Message: "tf(termFreq(desc:beer)=65",
},
{
Value: 1,
Message: "fieldNorm(field=desc, doc=one)",
},
{
Value: idf,
Message: "idf(docFreq=9, maxDocs=100)",
},
},
},
},
},
}
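// each case is scored through a fresh SearchContext backed by a one-entry DocumentMatch pool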
for _, test := range tests {
ctx := &search.SearchContext{
DocumentMatchPool: search.NewDocumentMatchPool(1, 0),
}
actual := scorer.Score(ctx, test.termMatch)
if !reflect.DeepEqual(actual, test.result) {
t.Errorf("expected %#v got %#v for %#v", test.result, actual, test.termMatch)
}
}
}
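
// TestTermScorerWithQueryNorm checks that boost and query normalization are
// folded into both the reported query weight and the final document score.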
func TestTermScorerWithQueryNorm(t *testing.T) {
var docTotal uint64 = 100
var docTerm uint64 = 9
var queryTerm = "beer"
var queryField = "desc"
var queryBoost = 3.0
scorer := NewTermQueryScorer(queryTerm, queryField, queryBoost, docTotal, docTerm, true)
idf := 1.0 + math.Log(float64(docTotal)/float64(docTerm+1.0))
scorer.SetQueryNorm(2.0)
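// Weight() is expected to report the squared query weight, (boost * idf)^2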
expectedQueryWeight := 3 * idf * 3 * idf
actualQueryWeight := scorer.Weight()
if expectedQueryWeight != actualQueryWeight {
t.Errorf("expected query weight %f, got %f", expectedQueryWeight, actualQueryWeight)
}
tests := []struct {
termMatch *index.TermFieldDoc
result *search.DocumentMatch
}{
{
termMatch: &index.TermFieldDoc{
ID: index.IndexInternalID("one"),
Freq: 1,
Norm: 1.0,
},
result: &search.DocumentMatch{
IndexInternalID: index.IndexInternalID("one"),
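// expected score: fieldWeight (sqrt(freq) * fieldNorm * idf) times the query factors (boost * idf * queryNorm)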
Score: math.Sqrt(1.0) * idf * 3.0 * idf * 2.0,
Sort: []string{},
Expl: &search.Explanation{
Value: math.Sqrt(1.0) * idf * 3.0 * idf * 2.0,
Message: "weight(desc:beer^3.000000 in one), product of:",
Children: []*search.Explanation{
{
Value: 2.0 * idf * 3.0,
Message: "queryWeight(desc:beer^3.000000), product of:",
Children: []*search.Explanation{
{
Value: 3,
Message: "boost",
},
{
Value: idf,
Message: "idf(docFreq=9, maxDocs=100)",
},
{
Value: 2,
Message: "queryNorm",
},
},
},
{
Value: math.Sqrt(1.0) * idf,
Message: "fieldWeight(desc:beer in one), product of:",
Children: []*search.Explanation{
{
Value: 1,
Message: "tf(termFreq(desc:beer)=1",
},
{
Value: 1,
Message: "fieldNorm(field=desc, doc=one)",
},
{
Value: idf,
Message: "idf(docFreq=9, maxDocs=100)",
},
},
},
},
},
},
},
}
for _, test := range tests {
ctx := &search.SearchContext{
DocumentMatchPool: search.NewDocumentMatchPool(1, 0),
}
actual := scorer.Score(ctx, test.termMatch)
if !reflect.DeepEqual(actual, test.result) {
t.Errorf("expected %#v got %#v for %#v", test.result, actual, test.termMatch)
}
}
}