
fix pagination bug introduced by collector optimization

fixes #378

this bug was introduced by:
f2aba116c4

theory of operation for this collector (top N, skip K)

- collect the highest scoring N+K results
- if K > 0, skip K and return the next N

internal details

- the top N+K are kept in a list
- the list is ordered from lowest scoring (first) to highest scoring (last)
- as a hit comes in, we find where this new hit fits into this list
- if this makes the list too big, trim off the head (the lowest scoring hit);
see the sketch below
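
in sketch form (hypothetical simplified code, not the actual bleve
implementation; hit stands in for search.DocumentMatch):

package sketch

import "container/list"

type hit struct {
    id    string
    score float64
}

// insert keeps results ordered from lowest scoring (front) to highest
// scoring (back), trimming the head whenever the list exceeds size
func insert(results *list.List, dm *hit, size int) {
    for e := results.Front(); e != nil; e = e.Next() {
        curr := e.Value.(*hit)
        // note: this < is exactly the comparison this commit
        // changes to <=
        if dm.score < curr.score {
            results.InsertBefore(dm, e)
            // if we just made the list too long, trim off
            // the head (lowest scoring hit)
            if results.Len() > size {
                results.Remove(results.Front())
            }
            return
        }
    }
    // dm was not lower than anything already in the list
    results.PushBack(dm)
    if results.Len() > size {
        results.Remove(results.Front())
    }
}

// topN produces the page: start from the highest scoring end,
// skip K, return the next N
func topN(results *list.List, n, k int) []*hit {
    out := make([]*hit, 0, n)
    e := results.Back()
    for i := 0; i < k && e != nil; i++ {
        e = e.Prev()
    }
    for ; e != nil && len(out) < n; e = e.Prev() {
        out = append(out, e.Value.(*hit))
    }
    return out
}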

theory of the optimization

- we were not tracking the lowest score in the list
- so even when a hit scored lower than the lowest score already collected, we
would still insert it and then immediately trim it back off the head
- by keeping track of the lowest score in the list, we can avoid these ops
entirely (sketched below)
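
in sketch form (again hypothetical; minScore and size are made-up field
names, and it reuses the hit type and insert helper from the sketch above):

type collector struct {
    results  *list.List
    size     int // N + K
    minScore float64
}

func (c *collector) collect(dm *hit) {
    if c.results.Len() >= c.size && dm.score < c.minScore {
        // list is already full and this hit can't make the cut:
        // return early and skip the insert-then-trim churn
        return
    }
    insert(c.results, dm, c.size)
    // the front of the list is always the lowest scoring hit
    c.minScore = c.results.Front().Value.(*hit).score
}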

problem with the optimization

- the optimization worked by returning early
- returning early subtly changed the results for documents with the same score
- the reason is that which docs end up in the top N+K changed by returning early
- why was that? docs come in ordered by key, ascending
- when finding the correct position to insert a hit into the list, we compared
with <, not <=
- this has the subtle effect that docs with the same score end up in reverse order

for example, consider the following in-progress list:

doc ids [   c    a    b  ]
scores  [   1    5    9  ]

if we now see doc d with score 5, we get:

doc ids [   c    a    d    b  ]
scores  [   1    5    5    9  ]

While that appears in order (a, d), it is actually reverse order, because when
we produce the top N we start at the end.

theory of the fix

- previous pagination depended on later hits with the same score "bumping" earlier
hits with the same score off the bottom of the list
- however, if we change the logic to <= instead of <, now the list in the previous
example would look like:

doc ids [   c    d    a    b  ]
scores  [   1    5    5    9  ]

- this small change means that earlier hits (lower IDs) now sort higher, and
thus we no longer depend on later hits bumping things down, which makes
returning early a valid thing to do (see the demonstration below)
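
a quick demonstration (hypothetical code, reusing the hit type and imports
from the earlier sketch; lte selects between the old < and the new <=
comparison):

func pageOrder(lte bool) []string {
    results := list.New()
    // docs arrive in ascending ID order, all with the same score
    for _, id := range []string{"a", "b", "c", "d"} {
        dm := &hit{id: id, score: 5}
        inserted := false
        for e := results.Front(); e != nil; e = e.Next() {
            curr := e.Value.(*hit)
            if (lte && dm.score <= curr.score) ||
                (!lte && dm.score < curr.score) {
                results.InsertBefore(dm, e)
                inserted = true
                break
            }
        }
        if !inserted {
            results.PushBack(dm)
        }
    }
    // produce the page starting from the highest scoring end
    var order []string
    for e := results.Back(); e != nil; e = e.Prev() {
        order = append(order, e.Value.(*hit).id)
    }
    return order
}

// pageOrder(false) -> [d c b a]  (old <, reverse ID order)
// pageOrder(true)  -> [a b c d]  (new <=, ascending ID order)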

NOTE: this does depend on the hits coming back in ascending ID order. that is
not strictly guaranteed, but it is the same assumption that allowed the
original behavior

This also has the side-effect that 2 hits with the same score come back in
ascending ID order, which is somehow more pleasing to me than reverse order.
Marty Schoch 2016-06-01 10:43:14 -04:00
parent 105626269c
commit 2043bb4bf8
4 changed files with 204 additions and 27 deletions

File 1 of 4 (collector implementation)

@@ -105,7 +105,7 @@ func (tksc *TopScoreCollector) collectSingle(dm *search.DocumentMatch) {
     for e := tksc.results.Front(); e != nil; e = e.Next() {
         curr := e.Value.(*search.DocumentMatch)
-        if dm.Score < curr.Score {
+        if dm.Score <= curr.Score {
             tksc.results.InsertBefore(dm, e)
             // if we just made the list too long
File 2 of 4 (collector tests)

@@ -222,6 +222,184 @@ func TestTop10ScoresSkip10(t *testing.T) {
     }
 }
+
+func TestPaginationSameScores(t *testing.T) {
+    // a stub search with more than 10 matches
+    // all documents have the same score
+    searcher := &stubSearcher{
+        matches: search.DocumentMatchCollection{
+            &search.DocumentMatch{
+                ID:    "a",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "b",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "c",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "d",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "e",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "f",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "g",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "h",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "i",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "j",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "k",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "l",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "m",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "n",
+                Score: 5,
+            },
+        },
+    }
+
+    // first get first 5 hits
+    collector := NewTopScorerSkipCollector(5, 0)
+    err := collector.Collect(context.Background(), searcher)
+    if err != nil {
+        t.Fatal(err)
+    }
+    total := collector.Total()
+    if total != 14 {
+        t.Errorf("expected 14 total results, got %d", total)
+    }
+    results := collector.Results()
+    if len(results) != 5 {
+        t.Fatalf("expected 5 results, got %d", len(results))
+    }
+    firstResults := make(map[string]struct{})
+    for _, hit := range results {
+        firstResults[hit.ID] = struct{}{}
+    }
+
+    // a stub search with more than 10 matches
+    // all documents have the same score
+    searcher = &stubSearcher{
+        matches: search.DocumentMatchCollection{
+            &search.DocumentMatch{
+                ID:    "a",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "b",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "c",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "d",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "e",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "f",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "g",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "h",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "i",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "j",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "k",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "l",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "m",
+                Score: 5,
+            },
+            &search.DocumentMatch{
+                ID:    "n",
+                Score: 5,
+            },
+        },
+    }
+
+    // now get next 5 hits
+    collector = NewTopScorerSkipCollector(5, 5)
+    err = collector.Collect(context.Background(), searcher)
+    if err != nil {
+        t.Fatal(err)
+    }
+    total = collector.Total()
+    if total != 14 {
+        t.Errorf("expected 14 total results, got %d", total)
+    }
+    results = collector.Results()
+    if len(results) != 5 {
+        t.Fatalf("expected 5 results, got %d", len(results))
+    }
+
+    // make sure that none of these hits repeat ones we saw in the top 5
+    for _, hit := range results {
+        if _, ok := firstResults[hit.ID]; ok {
+            t.Errorf("doc ID %s is in top 5 and next 5 result sets", hit.ID)
+        }
+    }
+}
+
 func BenchmarkTop10of100000Scores(b *testing.B) {
     benchHelper(10000, NewTopScorerCollector(10), b)
 }

File 3 of 4 (expected search results)

@@ -130,10 +130,10 @@
 "total_hits": 2,
 "hits": [
     {
-        "id": "b"
+        "id": "a"
     },
     {
-        "id": "a"
+        "id": "b"
     }
 ]
 }
@@ -198,10 +198,10 @@
 "total_hits": 2,
 "hits": [
     {
-        "id": "d"
+        "id": "c"
     },
     {
-        "id": "c"
+        "id": "d"
     }
 ]
 }
@@ -416,13 +416,13 @@
 "total_hits": 3,
 "hits": [
     {
-        "id": "d"
+        "id": "b"
     },
     {
         "id": "c"
     },
     {
-        "id": "b"
+        "id": "d"
     }
 ]
 }
@@ -524,16 +524,16 @@
 "total_hits": 4,
 "hits": [
     {
-        "id": "d"
-    },
-    {
-        "id": "c"
+        "id": "a"
     },
     {
         "id": "b"
     },
     {
-        "id": "a"
+        "id": "c"
+    },
+    {
+        "id": "d"
     }
 ]
 }
@@ -551,10 +551,10 @@
 "total_hits": 2,
 "hits": [
     {
-        "id": "c"
+        "id": "b"
     },
     {
-        "id": "b"
+        "id": "c"
     }
 ]
 }
@@ -575,10 +575,10 @@
         "id": "b"
     },
     {
-        "id": "d"
+        "id": "c"
     },
     {
-        "id": "c"
+        "id": "d"
     }
 ]
 }

File 4 of 4 (expected search results, FOSDEM dataset)

@@ -12,17 +12,16 @@
 "total_hits": 4,
 "hits": [
     {
-        "id": "3507@FOSDEM15@fosdem.org"
+        "id": "3492@FOSDEM15@fosdem.org"
     },
     {
+        "id": "3496@FOSDEM15@fosdem.org"
+    },
+    {
         "id": "3505@FOSDEM15@fosdem.org"
     },
     {
-        "id": "3496@FOSDEM15@fosdem.org"
-    }
-    ,
-    {
-        "id": "3492@FOSDEM15@fosdem.org"
+        "id": "3507@FOSDEM15@fosdem.org"
     }
 ]
 }
@@ -39,10 +38,10 @@
 "total_hits": 2,
 "hits": [
     {
-        "id": "3505@FOSDEM15@fosdem.org"
+        "id": "3311@FOSDEM15@fosdem.org"
     },
     {
-        "id": "3311@FOSDEM15@fosdem.org"
+        "id": "3505@FOSDEM15@fosdem.org"
     }
 ]
 }
@@ -94,12 +93,12 @@
 "total_hits": 2,
 "hits": [
     {
-        "id": "3496@FOSDEM15@fosdem.org"
+        "id": "3492@FOSDEM15@fosdem.org"
     },
     {
-        "id": "3492@FOSDEM15@fosdem.org"
+        "id": "3496@FOSDEM15@fosdem.org"
     }
 ]
 }
 }
 ]