2014-07-30 18:30:38 +02:00
|
|
|
// Copyright (c) 2014 Couchbase, Inc.
|
2016-10-02 16:13:14 +02:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
2014-08-25 18:06:10 +02:00
|
|
|
|
2014-07-30 18:30:38 +02:00
|
|
|
package bleve
|
|
|
|
|
|
|
|
import (
|
2015-05-09 20:51:07 +02:00
|
|
|
"fmt"
|
2014-08-20 22:58:20 +02:00
|
|
|
"io/ioutil"
|
2014-12-29 04:34:16 +01:00
|
|
|
"log"
|
2016-08-31 21:21:44 +02:00
|
|
|
"math"
|
2014-07-30 18:30:38 +02:00
|
|
|
"os"
|
2015-03-10 21:22:19 +01:00
|
|
|
"reflect"
|
2015-09-22 19:40:20 +02:00
|
|
|
"sort"
|
2017-04-12 00:20:34 +02:00
|
|
|
"strconv"
|
2015-04-08 17:17:34 +02:00
|
|
|
"strings"
|
2015-05-09 20:51:07 +02:00
|
|
|
"sync"
|
2014-07-30 18:30:38 +02:00
|
|
|
"testing"
|
2014-12-29 04:34:16 +01:00
|
|
|
"time"
|
2015-09-16 23:10:59 +02:00
|
|
|
|
2016-03-02 22:55:02 +01:00
|
|
|
"golang.org/x/net/context"
|
|
|
|
|
2016-10-01 23:20:59 +02:00
|
|
|
"github.com/blevesearch/bleve/analysis/analyzer/keyword"
|
2017-04-12 00:20:34 +02:00
|
|
|
"github.com/blevesearch/bleve/document"
|
2016-02-09 21:48:08 +01:00
|
|
|
"github.com/blevesearch/bleve/index"
|
2016-04-08 21:32:13 +02:00
|
|
|
"github.com/blevesearch/bleve/index/store/null"
|
2016-09-18 15:33:18 +02:00
|
|
|
"github.com/blevesearch/bleve/mapping"
|
2016-02-09 21:48:08 +01:00
|
|
|
"github.com/blevesearch/bleve/search"
|
2016-09-20 03:12:36 +02:00
|
|
|
"github.com/blevesearch/bleve/search/query"
|
2014-07-30 18:30:38 +02:00
|
|
|
)
|
|
|
|
|
2014-11-25 21:56:43 +01:00
|
|
|
func TestCrud(t *testing.T) {
|
2015-04-07 21:39:56 +02:00
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
2014-11-25 21:56:43 +01:00
|
|
|
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
doca := map[string]interface{}{
|
|
|
|
"name": "marty",
|
|
|
|
"desc": "gophercon india",
|
|
|
|
}
|
|
|
|
err = index.Index("a", doca)
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
docy := map[string]interface{}{
|
|
|
|
"name": "jasper",
|
|
|
|
"desc": "clojure",
|
|
|
|
}
|
|
|
|
err = index.Index("y", docy)
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Delete("y")
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
docx := map[string]interface{}{
|
|
|
|
"name": "rose",
|
|
|
|
"desc": "googler",
|
|
|
|
}
|
|
|
|
err = index.Index("x", docx)
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.SetInternal([]byte("status"), []byte("pending"))
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
docb := map[string]interface{}{
|
|
|
|
"name": "steve",
|
|
|
|
"desc": "cbft master",
|
|
|
|
}
|
2015-03-03 19:18:20 +01:00
|
|
|
batch := index.NewBatch()
|
2015-04-07 20:52:00 +02:00
|
|
|
err = batch.Index("b", docb)
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
2014-11-25 21:56:43 +01:00
|
|
|
batch.Delete("x")
|
|
|
|
batch.SetInternal([]byte("batchi"), []byte("batchv"))
|
|
|
|
batch.DeleteInternal([]byte("status"))
|
|
|
|
err = index.Batch(batch)
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
val, err := index.GetInternal([]byte("batchi"))
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
if string(val) != "batchv" {
|
|
|
|
t.Errorf("expected 'batchv', got '%s'", val)
|
|
|
|
}
|
|
|
|
val, err = index.GetInternal([]byte("status"))
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
if val != nil {
|
|
|
|
t.Errorf("expected nil, got '%s'", val)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.SetInternal([]byte("seqno"), []byte("7"))
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
err = index.SetInternal([]byte("status"), []byte("ready"))
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
err = index.DeleteInternal([]byte("status"))
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
val, err = index.GetInternal([]byte("status"))
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
if val != nil {
|
|
|
|
t.Errorf("expected nil, got '%s'", val)
|
|
|
|
}
|
|
|
|
|
|
|
|
val, err = index.GetInternal([]byte("seqno"))
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
if string(val) != "7" {
|
|
|
|
t.Errorf("expected '7', got '%s'", val)
|
|
|
|
}
|
2014-11-25 22:23:48 +01:00
|
|
|
|
|
|
|
// close the index, open it again, and try some more things
|
2015-04-07 20:52:00 +02:00
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2014-11-25 22:23:48 +01:00
|
|
|
|
|
|
|
index, err = Open("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2015-04-08 00:05:41 +02:00
|
|
|
defer func() {
|
|
|
|
err := index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
2014-11-25 22:23:48 +01:00
|
|
|
|
|
|
|
count, err := index.DocCount()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if count != 2 {
|
|
|
|
t.Errorf("expected doc count 2, got %d", count)
|
|
|
|
}
|
|
|
|
|
|
|
|
doc, err := index.Document("a")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if doc == nil {
|
|
|
|
t.Errorf("expected doc not nil, got nil")
|
|
|
|
}
|
|
|
|
foundNameField := false
|
|
|
|
for _, field := range doc.Fields {
|
|
|
|
if field.Name() == "name" && string(field.Value()) == "marty" {
|
|
|
|
foundNameField = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !foundNameField {
|
|
|
|
t.Errorf("expected to find field named 'name' with value 'marty'")
|
|
|
|
}
|
2014-11-26 21:36:58 +01:00
|
|
|
|
|
|
|
fields, err := index.Fields()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
expectedFields := map[string]bool{
|
|
|
|
"_all": false,
|
|
|
|
"name": false,
|
|
|
|
"desc": false,
|
|
|
|
}
|
2016-09-02 01:13:44 +02:00
|
|
|
if len(fields) < len(expectedFields) {
|
2014-11-26 21:36:58 +01:00
|
|
|
t.Fatalf("expected %d fields got %d", len(expectedFields), len(fields))
|
|
|
|
}
|
|
|
|
for _, f := range fields {
|
|
|
|
expectedFields[f] = true
|
|
|
|
}
|
|
|
|
for ef, efp := range expectedFields {
|
|
|
|
if !efp {
|
|
|
|
t.Errorf("field %s is missing", ef)
|
|
|
|
}
|
|
|
|
}
|
2014-11-25 21:56:43 +01:00
|
|
|
}
|
|
|
|
|
2014-08-20 22:58:20 +02:00
|
|
|
func TestIndexCreateNewOverExisting(t *testing.T) {
|
2015-04-07 21:39:56 +02:00
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
2014-08-20 22:58:20 +02:00
|
|
|
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2015-04-07 20:52:00 +02:00
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2014-08-20 22:58:20 +02:00
|
|
|
index, err = New("testidx", NewIndexMapping())
|
2014-09-02 20:14:05 +02:00
|
|
|
if err != ErrorIndexPathExists {
|
2014-08-20 22:58:20 +02:00
|
|
|
t.Fatalf("expected error index path exists, got %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestIndexOpenNonExisting(t *testing.T) {
|
|
|
|
_, err := Open("doesnotexist")
|
2014-09-02 20:14:05 +02:00
|
|
|
if err != ErrorIndexPathDoesNotExist {
|
2014-08-20 22:58:20 +02:00
|
|
|
t.Fatalf("expected error index path does not exist, got %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestIndexOpenMetaMissingOrCorrupt verifies that Open reports distinct,
// specific errors when the index metadata file names an unknown storage
// type, contains invalid JSON, or is missing entirely.
func TestIndexOpenMetaMissingOrCorrupt(t *testing.T) {
	defer func() {
		err := os.RemoveAll("testidx")
		if err != nil {
			t.Fatal(err)
		}
	}()

	// create a valid index, then close it so its files can be tampered with
	index, err := New("testidx", NewIndexMapping())
	if err != nil {
		t.Fatal(err)
	}
	err = index.Close()
	if err != nil {
		t.Fatal(err)
	}

	// now intentionally change the storage type
	err = ioutil.WriteFile("testidx/index_meta.json", []byte(`{"storage":"mystery"}`), 0666)
	if err != nil {
		t.Fatal(err)
	}

	index, err = Open("testidx")
	if err != ErrorUnknownStorageType {
		t.Fatalf("expected error unknown storage type, got %v", err)
	}

	// now intentionally corrupt the metadata
	err = ioutil.WriteFile("testidx/index_meta.json", []byte("corrupted"), 0666)
	if err != nil {
		t.Fatal(err)
	}

	index, err = Open("testidx")
	if err != ErrorIndexMetaCorrupt {
		t.Fatalf("expected error index metadata corrupted, got %v", err)
	}

	// now intentionally remove the metadata
	err = os.Remove("testidx/index_meta.json")
	if err != nil {
		t.Fatal(err)
	}

	index, err = Open("testidx")
	if err != ErrorIndexMetaMissing {
		t.Fatalf("expected error index metadata missing, got %v", err)
	}
}
|
2014-11-21 22:47:20 +01:00
|
|
|
|
|
|
|
func TestInMemIndex(t *testing.T) {
|
|
|
|
|
BREAKING CHANGE - new method to create memory only index
Previously bleve allowed you to create a memory-only index by
simply passing "" as the path argument to the New() method.
This was not clear when reading the code, and led to some
problematic error cases as well.
Now, to create a memory-only index one should use the
NewMemOnly() method. Passing "" as the path argument
to the New() method will now return os.ErrInvalid.
Advanced users calling NewUsing() can create disk-based or
memory-only indexes, but the change here is that pass ""
as the path argument no longer defaults you into getting
a memory-only index. Instead, the KV store is selected
manually, just as it is for the disk-based solutions.
Here is an example use of the NewUsing() method to create
a memory-only index:
NewUsing("", indexMapping, Config.DefaultIndexType,
Config.DefaultMemKVStore, nil)
Config.DefaultMemKVStore is just a new default value
added to the configuration, it currently points to
gtreap.Name (which could have been used directly
instead for more control)
closes #427
2016-09-27 20:05:55 +02:00
|
|
|
index, err := NewMemOnly(NewIndexMapping())
|
2014-11-21 22:47:20 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2015-04-07 20:52:00 +02:00
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2014-11-21 22:47:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestClosedIndex(t *testing.T) {
|
BREAKING CHANGE - new method to create memory only index
Previously bleve allowed you to create a memory-only index by
simply passing "" as the path argument to the New() method.
This was not clear when reading the code, and led to some
problematic error cases as well.
Now, to create a memory-only index one should use the
NewMemOnly() method. Passing "" as the path argument
to the New() method will now return os.ErrInvalid.
Advanced users calling NewUsing() can create disk-based or
memory-only indexes, but the change here is that pass ""
as the path argument no longer defaults you into getting
a memory-only index. Instead, the KV store is selected
manually, just as it is for the disk-based solutions.
Here is an example use of the NewUsing() method to create
a memory-only index:
NewUsing("", indexMapping, Config.DefaultIndexType,
Config.DefaultMemKVStore, nil)
Config.DefaultMemKVStore is just a new default value
added to the configuration, it currently points to
gtreap.Name (which could have been used directly
instead for more control)
closes #427
2016-09-27 20:05:55 +02:00
|
|
|
index, err := NewMemOnly(NewIndexMapping())
|
2014-11-21 22:47:20 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2015-04-07 20:52:00 +02:00
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2014-11-21 22:47:20 +01:00
|
|
|
|
|
|
|
err = index.Index("test", "test")
|
|
|
|
if err != ErrorIndexClosed {
|
|
|
|
t.Errorf("expected error index closed, got %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Delete("test")
|
|
|
|
if err != ErrorIndexClosed {
|
|
|
|
t.Errorf("expected error index closed, got %v", err)
|
|
|
|
}
|
|
|
|
|
2015-03-03 19:18:20 +01:00
|
|
|
b := index.NewBatch()
|
2014-11-21 22:47:20 +01:00
|
|
|
err = index.Batch(b)
|
|
|
|
if err != ErrorIndexClosed {
|
|
|
|
t.Errorf("expected error index closed, got %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = index.Document("test")
|
|
|
|
if err != ErrorIndexClosed {
|
|
|
|
t.Errorf("expected error index closed, got %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = index.DocCount()
|
|
|
|
if err != ErrorIndexClosed {
|
|
|
|
t.Errorf("expected error index closed, got %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = index.Search(NewSearchRequest(NewTermQuery("test")))
|
|
|
|
if err != ErrorIndexClosed {
|
|
|
|
t.Errorf("expected error index closed, got %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = index.Fields()
|
|
|
|
if err != ErrorIndexClosed {
|
|
|
|
t.Errorf("expected error index closed, got %v", err)
|
|
|
|
}
|
|
|
|
}
|
2014-12-29 04:34:16 +01:00
|
|
|
|
2016-02-09 21:48:08 +01:00
|
|
|
// slowQuery wraps another query and artificially delays construction of
// its searcher; used to trigger the slow-search logging path in tests.
type slowQuery struct {
	actual query.Query   // the real query to delegate to
	delay  time.Duration // how long Searcher sleeps before delegating
}
|
|
|
|
|
2017-01-06 02:49:45 +01:00
|
|
|
// Searcher sleeps for the configured delay, then delegates to the
// wrapped query's Searcher, satisfying the query.Query interface.
func (s *slowQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) {
	time.Sleep(s.delay)
	return s.actual.Searcher(i, m, options)
}
|
|
|
|
|
2014-12-29 04:34:16 +01:00
|
|
|
// TestSlowSearch verifies that searches exceeding the configured
// SlowSearchLogThreshold are logged, and faster ones are not, by
// installing a logger that records whether anything was written.
func TestSlowSearch(t *testing.T) {
	defer func() {
		err := os.RemoveAll("testidx")
		if err != nil {
			t.Fatal(err)
		}
	}()

	defer func() {
		// reset logger back to normal
		SetLog(log.New(ioutil.Discard, "bleve", log.LstdFlags))
	}()
	// set custom logger
	var sdw sawDataWriter
	SetLog(log.New(&sdw, "bleve", log.LstdFlags))

	index, err := New("testidx", NewIndexMapping())
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err := index.Close()
		if err != nil {
			t.Fatal(err)
		}
	}()

	// threshold far above any realistic search time: nothing should log
	Config.SlowSearchLogThreshold = 1 * time.Minute

	query := NewTermQuery("water")
	req := NewSearchRequest(query)
	_, err = index.Search(req)
	if err != nil {
		t.Fatal(err)
	}

	if sdw.sawData {
		t.Errorf("expected to not see slow query logged, but did")
	}

	// wrap the query so it sleeps longer than the (now tiny) threshold
	sq := &slowQuery{
		actual: query,
		delay:  50 * time.Millisecond, // on Windows timer resolution is 15ms
	}
	req.Query = sq
	Config.SlowSearchLogThreshold = 1 * time.Microsecond
	_, err = index.Search(req)
	if err != nil {
		t.Fatal(err)
	}

	if !sdw.sawData {
		t.Errorf("expected to see slow query logged, but didn't")
	}
}
|
|
|
|
|
|
|
|
// sawDataWriter is an io.Writer that simply remembers whether any bytes
// were ever written to it; used to detect slow-query log output.
type sawDataWriter struct {
	sawData bool
}

// Write records that data was seen and reports the entire input as
// successfully written.
func (s *sawDataWriter) Write(p []byte) (int, error) {
	s.sawData = true
	return len(p), nil
}
|
2015-01-19 21:40:18 +01:00
|
|
|
|
|
|
|
func TestStoredFieldPreserved(t *testing.T) {
|
2015-04-07 21:39:56 +02:00
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
2015-01-19 21:40:18 +01:00
|
|
|
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2015-04-08 00:05:41 +02:00
|
|
|
defer func() {
|
|
|
|
err := index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
2015-01-19 21:40:18 +01:00
|
|
|
|
|
|
|
doca := map[string]interface{}{
|
|
|
|
"name": "Marty",
|
|
|
|
"desc": "GopherCON India",
|
2016-01-12 02:18:03 +01:00
|
|
|
"bool": true,
|
|
|
|
"num": float64(1),
|
2015-01-19 21:40:18 +01:00
|
|
|
}
|
|
|
|
err = index.Index("a", doca)
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
q := NewTermQuery("marty")
|
|
|
|
req := NewSearchRequest(q)
|
2016-01-12 02:18:03 +01:00
|
|
|
req.Fields = []string{"name", "desc", "bool", "num"}
|
2015-01-19 21:40:18 +01:00
|
|
|
res, err := index.Search(req)
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(res.Hits) != 1 {
|
2015-10-02 15:35:48 +02:00
|
|
|
t.Fatalf("expected 1 hit, got %d", len(res.Hits))
|
2015-01-19 21:40:18 +01:00
|
|
|
}
|
|
|
|
if res.Hits[0].Fields["name"] != "Marty" {
|
|
|
|
t.Errorf("expected 'Marty' got '%s'", res.Hits[0].Fields["name"])
|
|
|
|
}
|
|
|
|
if res.Hits[0].Fields["desc"] != "GopherCON India" {
|
|
|
|
t.Errorf("expected 'GopherCON India' got '%s'", res.Hits[0].Fields["desc"])
|
|
|
|
}
|
2016-01-12 02:18:03 +01:00
|
|
|
if res.Hits[0].Fields["num"] != float64(1) {
|
|
|
|
t.Errorf("expected '1' got '%v'", res.Hits[0].Fields["num"])
|
|
|
|
}
|
|
|
|
if res.Hits[0].Fields["bool"] != true {
|
|
|
|
t.Errorf("expected 'true' got '%v'", res.Hits[0].Fields["bool"])
|
|
|
|
}
|
2015-01-19 21:40:18 +01:00
|
|
|
}
|
2015-03-10 21:22:19 +01:00
|
|
|
|
|
|
|
// TestDict verifies field-dictionary iteration: full iteration over a
// field's terms, a bounded range iteration, and a prefix iteration, plus
// a basic Stats sanity check.
func TestDict(t *testing.T) {
	defer func() {
		err := os.RemoveAll("testidx")
		if err != nil {
			t.Fatal(err)
		}
	}()

	index, err := New("testidx", NewIndexMapping())
	if err != nil {
		t.Fatal(err)
	}

	doca := map[string]interface{}{
		"name": "marty",
		"desc": "gophercon india",
	}
	err = index.Index("a", doca)
	if err != nil {
		t.Error(err)
	}

	docy := map[string]interface{}{
		"name": "jasper",
		"desc": "clojure",
	}
	err = index.Index("y", docy)
	if err != nil {
		t.Error(err)
	}

	docx := map[string]interface{}{
		"name": "rose",
		"desc": "googler",
	}
	err = index.Index("x", docx)
	if err != nil {
		t.Error(err)
	}

	dict, err := index.FieldDict("name")
	if err != nil {
		t.Error(err)
	}

	// drain the dictionary; terms come back in sorted order
	terms := []string{}
	de, err := dict.Next()
	for err == nil && de != nil {
		terms = append(terms, string(de.Term))
		de, err = dict.Next()
	}

	expectedTerms := []string{"jasper", "marty", "rose"}
	if !reflect.DeepEqual(terms, expectedTerms) {
		t.Errorf("expected %v, got %v", expectedTerms, terms)
	}

	err = dict.Close()
	if err != nil {
		t.Fatal(err)
	}

	// test start and end range
	dict, err = index.FieldDictRange("name", []byte("marty"), []byte("rose"))
	if err != nil {
		t.Error(err)
	}

	terms = []string{}
	de, err = dict.Next()
	for err == nil && de != nil {
		terms = append(terms, string(de.Term))
		de, err = dict.Next()
	}

	// range is inclusive of both endpoints
	expectedTerms = []string{"marty", "rose"}
	if !reflect.DeepEqual(terms, expectedTerms) {
		t.Errorf("expected %v, got %v", expectedTerms, terms)
	}

	err = dict.Close()
	if err != nil {
		t.Fatal(err)
	}

	// add a doc whose desc contains several terms sharing the "cat" prefix
	docz := map[string]interface{}{
		"name": "prefix",
		"desc": "bob cat cats catting dog doggy zoo",
	}
	err = index.Index("z", docz)
	if err != nil {
		t.Error(err)
	}

	dict, err = index.FieldDictPrefix("desc", []byte("cat"))
	if err != nil {
		t.Error(err)
	}

	terms = []string{}
	de, err = dict.Next()
	for err == nil && de != nil {
		terms = append(terms, string(de.Term))
		de, err = dict.Next()
	}

	expectedTerms = []string{"cat", "cats", "catting"}
	if !reflect.DeepEqual(terms, expectedTerms) {
		t.Errorf("expected %v, got %v", expectedTerms, terms)
	}

	// Stats must be available while the index is open
	stats := index.Stats()
	if stats == nil {
		t.Errorf("expected IndexStat, got nil")
	}

	err = dict.Close()
	if err != nil {
		t.Fatal(err)
	}

	err = index.Close()
	if err != nil {
		t.Fatal(err)
	}
}
|
2015-04-08 16:41:42 +02:00
|
|
|
|
|
|
|
func TestBatchString(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2016-02-09 19:33:11 +01:00
|
|
|
defer func() {
|
|
|
|
err := index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
2015-04-08 16:41:42 +02:00
|
|
|
|
|
|
|
batch := index.NewBatch()
|
|
|
|
err = batch.Index("a", []byte("{}"))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
batch.Delete("b")
|
|
|
|
batch.SetInternal([]byte("c"), []byte{})
|
|
|
|
batch.DeleteInternal([]byte("d"))
|
|
|
|
|
|
|
|
batchStr := batch.String()
|
2015-04-08 17:17:34 +02:00
|
|
|
if !strings.HasPrefix(batchStr, "Batch (2 ops, 2 internal ops)") {
|
|
|
|
t.Errorf("expected to start with Batch (2 ops, 2 internal ops), did not")
|
2015-04-08 16:41:42 +02:00
|
|
|
}
|
2015-04-08 17:17:34 +02:00
|
|
|
if !strings.Contains(batchStr, "INDEX - 'a'") {
|
|
|
|
t.Errorf("expected to contain INDEX - 'a', did not")
|
|
|
|
}
|
|
|
|
if !strings.Contains(batchStr, "DELETE - 'b'") {
|
|
|
|
t.Errorf("expected to contain DELETE - 'b', did not")
|
|
|
|
}
|
|
|
|
if !strings.Contains(batchStr, "SET INTERNAL - 'c'") {
|
|
|
|
t.Errorf("expected to contain SET INTERNAL - 'c', did not")
|
|
|
|
}
|
|
|
|
if !strings.Contains(batchStr, "DELETE INTERNAL - 'd'") {
|
|
|
|
t.Errorf("expected to contain DELETE INTERNAL - 'd', did not")
|
|
|
|
}
|
|
|
|
|
2015-04-08 16:41:42 +02:00
|
|
|
}
|
2015-05-08 14:07:20 +02:00
|
|
|
|
|
|
|
func TestIndexMetadataRaceBug198(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2016-02-09 19:33:11 +01:00
|
|
|
defer func() {
|
|
|
|
err := index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
2015-05-08 14:07:20 +02:00
|
|
|
|
2017-12-14 13:52:38 +01:00
|
|
|
wg := sync.WaitGroup{}
|
|
|
|
wg.Add(1)
|
2016-02-09 19:33:11 +01:00
|
|
|
done := make(chan struct{})
|
2015-05-08 14:07:20 +02:00
|
|
|
go func() {
|
|
|
|
for {
|
2016-02-09 19:33:11 +01:00
|
|
|
select {
|
|
|
|
case <-done:
|
2017-12-14 13:52:38 +01:00
|
|
|
wg.Done()
|
2016-02-09 19:33:11 +01:00
|
|
|
return
|
|
|
|
default:
|
2017-12-14 13:52:38 +01:00
|
|
|
_, err2 := index.DocCount()
|
|
|
|
if err2 != nil {
|
|
|
|
t.Fatal(err2)
|
2016-02-09 19:33:11 +01:00
|
|
|
}
|
2015-05-08 14:40:46 +02:00
|
|
|
}
|
2015-05-08 14:07:20 +02:00
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
for i := 0; i < 100; i++ {
|
|
|
|
batch := index.NewBatch()
|
|
|
|
err = batch.Index("a", []byte("{}"))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
err = index.Batch(batch)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
2016-02-09 19:33:11 +01:00
|
|
|
close(done)
|
2017-12-14 13:52:38 +01:00
|
|
|
wg.Wait()
|
2015-05-08 14:07:20 +02:00
|
|
|
}
|
2015-05-09 20:51:07 +02:00
|
|
|
|
2016-08-09 15:18:53 +02:00
|
|
|
// TestSortMatchSearch indexes 200 documents built from rotating value
// lists and verifies that a search sorted by ("Day", "Name") returns
// hits in non-decreasing Day order.
func TestSortMatchSearch(t *testing.T) {
	defer func() {
		err := os.RemoveAll("testidx")
		if err != nil {
			t.Fatal(err)
		}
	}()

	index, err := New("testidx", NewIndexMapping())
	if err != nil {
		t.Fatal(err)
	}

	names := []string{"Noam", "Uri", "David", "Yosef", "Eitan", "Itay", "Ariel", "Daniel", "Omer", "Yogev", "Yehonatan", "Moshe", "Mohammed", "Yusuf", "Omar"}
	days := []string{"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}
	numbers := []string{"One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten", "Eleven", "Twelve"}
	for i := 0; i < 200; i++ {
		// cycle through the value lists so field values repeat across docs
		doc := make(map[string]interface{})
		doc["Name"] = names[i%len(names)]
		doc["Day"] = days[i%len(days)]
		doc["Number"] = numbers[i%len(numbers)]
		err = index.Index(fmt.Sprintf("%d", i), doc)
		if err != nil {
			t.Fatal(err)
		}
	}

	req := NewSearchRequest(NewMatchQuery("One"))
	req.SortBy([]string{"Day", "Name"})
	req.Fields = []string{"*"}
	sr, err := index.Search(req)
	if err != nil {
		t.Fatal(err)
	}
	// verify hits are ordered by the primary sort key (lexicographic Day)
	prev := ""
	for _, hit := range sr.Hits {
		val := hit.Fields["Day"].(string)
		if prev > val {
			t.Errorf("Hits must be sorted by 'Day'. Found '%s' before '%s'", prev, val)
		}
		prev = val
	}
	err = index.Close()
	if err != nil {
		t.Fatal(err)
	}
}
|
|
|
|
|
2015-05-09 20:51:07 +02:00
|
|
|
func TestIndexCountMatchSearch(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
wg.Add(1)
|
|
|
|
go func(i int) {
|
|
|
|
b := index.NewBatch()
|
|
|
|
for j := 0; j < 200; j++ {
|
|
|
|
id := fmt.Sprintf("%d", (i*200)+j)
|
|
|
|
doc := struct {
|
|
|
|
Body string
|
|
|
|
}{
|
|
|
|
Body: "match",
|
|
|
|
}
|
|
|
|
err := b.Index(id, doc)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
err := index.Batch(b)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
wg.Done()
|
|
|
|
}(i)
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
|
|
|
|
// search for something that should match all documents
|
|
|
|
sr, err := index.Search(NewSearchRequest(NewMatchQuery("match")))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// get the index document count
|
|
|
|
dc, err := index.DocCount()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// make sure test is working correctly, doc count should 2000
|
|
|
|
if dc != 2000 {
|
|
|
|
t.Errorf("expected doc count 2000, got %d", dc)
|
|
|
|
}
|
|
|
|
|
|
|
|
// make sure our search found all the documents
|
|
|
|
if dc != sr.Total {
|
|
|
|
t.Errorf("expected search result total %d to match doc count %d", sr.Total, dc)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
2015-05-15 21:04:52 +02:00
|
|
|
|
|
|
|
func TestBatchReset(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
batch := index.NewBatch()
|
|
|
|
err = batch.Index("k1", struct {
|
|
|
|
Body string
|
|
|
|
}{
|
|
|
|
Body: "v1",
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
batch.Delete("k2")
|
|
|
|
batch.SetInternal([]byte("k3"), []byte("v3"))
|
|
|
|
batch.DeleteInternal([]byte("k4"))
|
|
|
|
|
|
|
|
if batch.Size() != 4 {
|
|
|
|
t.Logf("%v", batch)
|
|
|
|
t.Errorf("expected batch size 4, got %d", batch.Size())
|
|
|
|
}
|
|
|
|
|
|
|
|
batch.Reset()
|
|
|
|
|
|
|
|
if batch.Size() != 0 {
|
|
|
|
t.Errorf("expected batch size 0 after reset, got %d", batch.Size())
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
2015-05-15 21:47:54 +02:00
|
|
|
|
|
|
|
func TestDocumentFieldArrayPositions(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// index a document with an array of strings
|
|
|
|
err = index.Index("k", struct {
|
|
|
|
Messages []string
|
|
|
|
}{
|
|
|
|
Messages: []string{
|
|
|
|
"first",
|
|
|
|
"second",
|
|
|
|
"third",
|
|
|
|
"last",
|
|
|
|
},
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// load the document
|
|
|
|
doc, err := index.Document("k")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, f := range doc.Fields {
|
|
|
|
if reflect.DeepEqual(f.Value(), []byte("first")) {
|
|
|
|
ap := f.ArrayPositions()
|
|
|
|
if len(ap) < 1 {
|
|
|
|
t.Errorf("expected an array position, got none")
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if ap[0] != 0 {
|
|
|
|
t.Errorf("expected 'first' in array position 0, got %d", ap[0])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if reflect.DeepEqual(f.Value(), []byte("second")) {
|
|
|
|
ap := f.ArrayPositions()
|
|
|
|
if len(ap) < 1 {
|
|
|
|
t.Errorf("expected an array position, got none")
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if ap[0] != 1 {
|
|
|
|
t.Errorf("expected 'second' in array position 1, got %d", ap[0])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if reflect.DeepEqual(f.Value(), []byte("third")) {
|
|
|
|
ap := f.ArrayPositions()
|
|
|
|
if len(ap) < 1 {
|
|
|
|
t.Errorf("expected an array position, got none")
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if ap[0] != 2 {
|
|
|
|
t.Errorf("expected 'third' in array position 2, got %d", ap[0])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if reflect.DeepEqual(f.Value(), []byte("last")) {
|
|
|
|
ap := f.ArrayPositions()
|
|
|
|
if len(ap) < 1 {
|
|
|
|
t.Errorf("expected an array position, got none")
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if ap[0] != 3 {
|
|
|
|
t.Errorf("expected 'last' in array position 3, got %d", ap[0])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// now index a document in the same field with a single string
|
|
|
|
err = index.Index("k2", struct {
|
|
|
|
Messages string
|
|
|
|
}{
|
|
|
|
Messages: "only",
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// load the document
|
|
|
|
doc, err = index.Document("k2")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, f := range doc.Fields {
|
|
|
|
if reflect.DeepEqual(f.Value(), []byte("only")) {
|
|
|
|
ap := f.ArrayPositions()
|
|
|
|
if len(ap) != 0 {
|
|
|
|
t.Errorf("expected no array positions, got %d", len(ap))
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
2015-05-21 13:49:41 +02:00
|
|
|
|
|
|
|
func TestKeywordSearchBug207(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
f := NewTextFieldMapping()
|
2016-09-30 17:18:39 +02:00
|
|
|
f.Analyzer = keyword.Name
|
2015-05-21 13:49:41 +02:00
|
|
|
|
|
|
|
m := NewIndexMapping()
|
|
|
|
m.DefaultMapping = NewDocumentMapping()
|
|
|
|
m.DefaultMapping.AddFieldMappingsAt("Body", f)
|
|
|
|
|
|
|
|
index, err := New("testidx", m)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
doc1 := struct {
|
|
|
|
Body string
|
|
|
|
}{
|
|
|
|
Body: "a555c3bb06f7a127cda000005",
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Index("a", doc1)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
doc2 := struct {
|
|
|
|
Body string
|
|
|
|
}{
|
|
|
|
Body: "555c3bb06f7a127cda000005",
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Index("b", doc2)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// now search for these terms
|
|
|
|
sreq := NewSearchRequest(NewTermQuery("a555c3bb06f7a127cda000005"))
|
|
|
|
sres, err := index.Search(sreq)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if sres.Total != 1 {
|
|
|
|
t.Errorf("expected 1 result, got %d", sres.Total)
|
|
|
|
}
|
|
|
|
if sres.Hits[0].ID != "a" {
|
|
|
|
t.Errorf("expecated id 'a', got '%s'", sres.Hits[0].ID)
|
|
|
|
}
|
|
|
|
|
|
|
|
sreq = NewSearchRequest(NewTermQuery("555c3bb06f7a127cda000005"))
|
|
|
|
sres, err = index.Search(sreq)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if sres.Total != 1 {
|
|
|
|
t.Errorf("expected 1 result, got %d", sres.Total)
|
|
|
|
}
|
|
|
|
if sres.Hits[0].ID != "b" {
|
|
|
|
t.Errorf("expecated id 'b', got '%s'", sres.Hits[0].ID)
|
|
|
|
}
|
|
|
|
|
2015-05-21 21:43:13 +02:00
|
|
|
// now do the same searches using query strings
|
|
|
|
sreq = NewSearchRequest(NewQueryStringQuery("Body:a555c3bb06f7a127cda000005"))
|
|
|
|
sres, err = index.Search(sreq)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if sres.Total != 1 {
|
|
|
|
t.Errorf("expected 1 result, got %d", sres.Total)
|
|
|
|
}
|
|
|
|
if sres.Hits[0].ID != "a" {
|
|
|
|
t.Errorf("expecated id 'a', got '%s'", sres.Hits[0].ID)
|
|
|
|
}
|
|
|
|
|
|
|
|
sreq = NewSearchRequest(NewQueryStringQuery(`Body:555c3bb06f7a127cda000005`))
|
|
|
|
sres, err = index.Search(sreq)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if sres.Total != 1 {
|
|
|
|
t.Errorf("expected 1 result, got %d", sres.Total)
|
|
|
|
}
|
|
|
|
if sres.Hits[0].ID != "b" {
|
|
|
|
t.Errorf("expecated id 'b', got '%s'", sres.Hits[0].ID)
|
|
|
|
}
|
|
|
|
|
2015-05-21 13:49:41 +02:00
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
2015-07-31 17:16:11 +02:00
|
|
|
|
|
|
|
func TestTermVectorArrayPositions(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// index a document with an array of strings
|
|
|
|
err = index.Index("k", struct {
|
|
|
|
Messages []string
|
|
|
|
}{
|
|
|
|
Messages: []string{
|
|
|
|
"first",
|
|
|
|
"second",
|
|
|
|
"third",
|
|
|
|
"last",
|
|
|
|
},
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// search for this document in all field
|
|
|
|
tq := NewTermQuery("second")
|
|
|
|
tsr := NewSearchRequest(tq)
|
2017-01-06 02:49:45 +01:00
|
|
|
tsr.IncludeLocations = true
|
2015-07-31 17:16:11 +02:00
|
|
|
results, err := index.Search(tsr)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if results.Total != 1 {
|
|
|
|
t.Fatalf("expected 1 result, got %d", results.Total)
|
|
|
|
}
|
|
|
|
if len(results.Hits[0].Locations["Messages"]["second"]) < 1 {
|
|
|
|
t.Fatalf("expected at least one location")
|
|
|
|
}
|
|
|
|
if len(results.Hits[0].Locations["Messages"]["second"][0].ArrayPositions) < 1 {
|
|
|
|
t.Fatalf("expected at least one location array position")
|
|
|
|
}
|
|
|
|
if results.Hits[0].Locations["Messages"]["second"][0].ArrayPositions[0] != 1 {
|
2017-02-10 02:02:12 +01:00
|
|
|
t.Fatalf("expected array position 1, got %d", results.Hits[0].Locations["Messages"]["second"][0].ArrayPositions[0])
|
2015-07-31 17:16:11 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// repeat search for this document in Messages field
|
2016-09-23 01:49:31 +02:00
|
|
|
tq2 := NewTermQuery("third")
|
|
|
|
tq2.SetField("Messages")
|
2015-07-31 17:16:11 +02:00
|
|
|
tsr = NewSearchRequest(tq2)
|
2017-01-06 02:49:45 +01:00
|
|
|
tsr.IncludeLocations = true
|
2015-07-31 17:16:11 +02:00
|
|
|
results, err = index.Search(tsr)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if results.Total != 1 {
|
|
|
|
t.Fatalf("expected 1 result, got %d", results.Total)
|
|
|
|
}
|
|
|
|
if len(results.Hits[0].Locations["Messages"]["third"]) < 1 {
|
|
|
|
t.Fatalf("expected at least one location")
|
|
|
|
}
|
|
|
|
if len(results.Hits[0].Locations["Messages"]["third"][0].ArrayPositions) < 1 {
|
|
|
|
t.Fatalf("expected at least one location array position")
|
|
|
|
}
|
|
|
|
if results.Hits[0].Locations["Messages"]["third"][0].ArrayPositions[0] != 2 {
|
2017-02-10 02:02:12 +01:00
|
|
|
t.Fatalf("expected array position 2, got %d", results.Hits[0].Locations["Messages"]["third"][0].ArrayPositions[0])
|
2015-07-31 17:16:11 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
2015-09-28 23:00:08 +02:00
|
|
|
|
2015-09-22 19:40:20 +02:00
|
|
|
func TestDocumentStaticMapping(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
m := NewIndexMapping()
|
|
|
|
m.DefaultMapping = NewDocumentStaticMapping()
|
|
|
|
m.DefaultMapping.AddFieldMappingsAt("Text", NewTextFieldMapping())
|
|
|
|
m.DefaultMapping.AddFieldMappingsAt("Date", NewDateTimeFieldMapping())
|
|
|
|
m.DefaultMapping.AddFieldMappingsAt("Numeric", NewNumericFieldMapping())
|
|
|
|
|
|
|
|
index, err := New("testidx", m)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
doc1 := struct {
|
|
|
|
Text string
|
|
|
|
IgnoredText string
|
|
|
|
Numeric float64
|
|
|
|
IgnoredNumeric float64
|
|
|
|
Date time.Time
|
|
|
|
IgnoredDate time.Time
|
|
|
|
}{
|
|
|
|
Text: "valid text",
|
|
|
|
IgnoredText: "ignored text",
|
|
|
|
Numeric: 10,
|
|
|
|
IgnoredNumeric: 20,
|
|
|
|
Date: time.Unix(1, 0),
|
|
|
|
IgnoredDate: time.Unix(2, 0),
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Index("a", doc1)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
fields, err := index.Fields()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
sort.Strings(fields)
|
|
|
|
expectedFields := []string{"Date", "Numeric", "Text", "_all"}
|
2016-09-02 01:13:44 +02:00
|
|
|
if len(fields) < len(expectedFields) {
|
2015-09-22 19:40:20 +02:00
|
|
|
t.Fatalf("invalid field count: %d", len(fields))
|
|
|
|
}
|
|
|
|
for i, expected := range expectedFields {
|
|
|
|
if expected != fields[i] {
|
|
|
|
t.Fatalf("unexpected field[%d]: %s", i, fields[i])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-28 23:00:08 +02:00
|
|
|
func TestIndexEmptyDocId(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2016-02-09 19:33:11 +01:00
|
|
|
defer func() {
|
|
|
|
err := index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
2015-09-28 23:00:08 +02:00
|
|
|
|
|
|
|
doc := map[string]interface{}{
|
|
|
|
"body": "nodocid",
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Index("", doc)
|
|
|
|
if err != ErrorEmptyID {
|
|
|
|
t.Errorf("expect index empty doc id to fail")
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Delete("")
|
|
|
|
if err != ErrorEmptyID {
|
|
|
|
t.Errorf("expect delete empty doc id to fail")
|
|
|
|
}
|
|
|
|
|
|
|
|
batch := index.NewBatch()
|
|
|
|
err = batch.Index("", doc)
|
|
|
|
if err != ErrorEmptyID {
|
|
|
|
t.Errorf("expect index empty doc id in batch to fail")
|
|
|
|
}
|
|
|
|
|
|
|
|
batch.Delete("")
|
|
|
|
if batch.Size() > 0 {
|
|
|
|
t.Errorf("expect delete empty doc id in batch to be ignored")
|
|
|
|
}
|
|
|
|
}
|
2015-11-21 03:15:35 +01:00
|
|
|
|
2015-11-23 14:41:34 +01:00
|
|
|
func TestDateTimeFieldMappingIssue287(t *testing.T) {
|
2015-11-21 03:15:35 +01:00
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
f := NewDateTimeFieldMapping()
|
|
|
|
|
|
|
|
m := NewIndexMapping()
|
|
|
|
m.DefaultMapping = NewDocumentMapping()
|
|
|
|
m.DefaultMapping.AddFieldMappingsAt("Date", f)
|
|
|
|
|
|
|
|
index, err := New("testidx", m)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
type doc struct {
|
|
|
|
Date time.Time
|
|
|
|
}
|
|
|
|
|
|
|
|
now := time.Now()
|
|
|
|
|
|
|
|
// 3hr ago to 1hr ago
|
|
|
|
for i := 0; i < 3; i++ {
|
2015-11-23 18:28:09 +01:00
|
|
|
d := doc{now.Add(time.Duration((i - 3)) * time.Hour)}
|
2015-11-21 03:15:35 +01:00
|
|
|
|
2016-07-01 16:21:41 +02:00
|
|
|
err = index.Index(strconv.FormatInt(int64(i), 10), d)
|
2015-11-21 03:15:35 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// search range across all docs
|
2016-09-23 03:24:43 +02:00
|
|
|
start := now.Add(-4 * time.Hour)
|
|
|
|
end := now
|
|
|
|
sreq := NewSearchRequest(NewDateRangeQuery(start, end))
|
2015-11-21 03:15:35 +01:00
|
|
|
sres, err := index.Search(sreq)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if sres.Total != 3 {
|
|
|
|
t.Errorf("expected 3 results, got %d", sres.Total)
|
|
|
|
}
|
|
|
|
|
|
|
|
// search range includes only oldest
|
2016-09-23 03:24:43 +02:00
|
|
|
start = now.Add(-4 * time.Hour)
|
|
|
|
end = now.Add(-121 * time.Minute)
|
|
|
|
sreq = NewSearchRequest(NewDateRangeQuery(start, end))
|
2015-11-21 03:15:35 +01:00
|
|
|
sres, err = index.Search(sreq)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if sres.Total != 1 {
|
|
|
|
t.Errorf("expected 1 results, got %d", sres.Total)
|
|
|
|
}
|
|
|
|
if sres.Total > 0 && sres.Hits[0].ID != "0" {
|
|
|
|
t.Errorf("expecated id '0', got '%s'", sres.Hits[0].ID)
|
|
|
|
}
|
|
|
|
|
|
|
|
// search range includes only newest
|
2016-09-23 03:24:43 +02:00
|
|
|
start = now.Add(-61 * time.Minute)
|
|
|
|
end = now
|
|
|
|
sreq = NewSearchRequest(NewDateRangeQuery(start, end))
|
2015-11-21 03:15:35 +01:00
|
|
|
sres, err = index.Search(sreq)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if sres.Total != 1 {
|
|
|
|
t.Errorf("expected 1 results, got %d", sres.Total)
|
|
|
|
}
|
|
|
|
if sres.Total > 0 && sres.Hits[0].ID != "2" {
|
|
|
|
t.Errorf("expecated id '2', got '%s'", sres.Hits[0].ID)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
2015-12-21 20:59:32 +01:00
|
|
|
|
|
|
|
func TestDocumentFieldArrayPositionsBug295(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// index a document with an array of strings
|
|
|
|
err = index.Index("k", struct {
|
|
|
|
Messages []string
|
|
|
|
Another string
|
|
|
|
MoreData []string
|
|
|
|
}{
|
|
|
|
Messages: []string{
|
|
|
|
"bleve",
|
|
|
|
"bleve",
|
|
|
|
},
|
|
|
|
Another: "text",
|
|
|
|
MoreData: []string{
|
|
|
|
"a",
|
|
|
|
"b",
|
|
|
|
"c",
|
|
|
|
"bleve",
|
|
|
|
},
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// search for it in the messages field
|
2016-09-23 01:49:31 +02:00
|
|
|
tq := NewTermQuery("bleve")
|
|
|
|
tq.SetField("Messages")
|
2015-12-21 20:59:32 +01:00
|
|
|
tsr := NewSearchRequest(tq)
|
2017-01-06 02:49:45 +01:00
|
|
|
tsr.IncludeLocations = true
|
2015-12-21 20:59:32 +01:00
|
|
|
results, err := index.Search(tsr)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if results.Total != 1 {
|
|
|
|
t.Fatalf("expected 1 result, got %d", results.Total)
|
|
|
|
}
|
|
|
|
if len(results.Hits[0].Locations["Messages"]["bleve"]) != 2 {
|
|
|
|
t.Fatalf("expected 2 locations of 'bleve', got %d", len(results.Hits[0].Locations["Messages"]["bleve"]))
|
|
|
|
}
|
|
|
|
if results.Hits[0].Locations["Messages"]["bleve"][0].ArrayPositions[0] != 0 {
|
|
|
|
t.Errorf("expected array position to be 0")
|
|
|
|
}
|
|
|
|
if results.Hits[0].Locations["Messages"]["bleve"][1].ArrayPositions[0] != 1 {
|
|
|
|
t.Errorf("expected array position to be 1")
|
|
|
|
}
|
|
|
|
|
|
|
|
// search for it in all
|
|
|
|
tq = NewTermQuery("bleve")
|
|
|
|
tsr = NewSearchRequest(tq)
|
2017-01-06 02:49:45 +01:00
|
|
|
tsr.IncludeLocations = true
|
2015-12-21 20:59:32 +01:00
|
|
|
results, err = index.Search(tsr)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if results.Total != 1 {
|
|
|
|
t.Fatalf("expected 1 result, got %d", results.Total)
|
|
|
|
}
|
|
|
|
if len(results.Hits[0].Locations["Messages"]["bleve"]) != 2 {
|
|
|
|
t.Fatalf("expected 2 locations of 'bleve', got %d", len(results.Hits[0].Locations["Messages"]["bleve"]))
|
|
|
|
}
|
|
|
|
if results.Hits[0].Locations["Messages"]["bleve"][0].ArrayPositions[0] != 0 {
|
|
|
|
t.Errorf("expected array position to be 0")
|
|
|
|
}
|
|
|
|
if results.Hits[0].Locations["Messages"]["bleve"][1].ArrayPositions[0] != 1 {
|
|
|
|
t.Errorf("expected array position to be 1")
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
2016-01-12 02:18:03 +01:00
|
|
|
|
|
|
|
func TestBooleanFieldMappingIssue109(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
m := NewIndexMapping()
|
|
|
|
m.DefaultMapping = NewDocumentMapping()
|
|
|
|
m.DefaultMapping.AddFieldMappingsAt("Bool", NewBooleanFieldMapping())
|
|
|
|
|
|
|
|
index, err := New("testidx", m)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
type doc struct {
|
|
|
|
Bool bool
|
|
|
|
}
|
2016-01-13 23:10:13 +01:00
|
|
|
err = index.Index("true", &doc{Bool: true})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
err = index.Index("false", &doc{Bool: false})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2016-01-12 02:18:03 +01:00
|
|
|
|
2016-09-23 01:49:31 +02:00
|
|
|
q := NewBoolFieldQuery(true)
|
|
|
|
q.SetField("Bool")
|
|
|
|
sreq := NewSearchRequest(q)
|
2016-01-12 02:18:03 +01:00
|
|
|
sres, err := index.Search(sreq)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if sres.Total != 1 {
|
|
|
|
t.Errorf("expected 1 results, got %d", sres.Total)
|
|
|
|
}
|
|
|
|
|
2016-09-23 01:49:31 +02:00
|
|
|
q = NewBoolFieldQuery(false)
|
|
|
|
q.SetField("Bool")
|
|
|
|
sreq = NewSearchRequest(q)
|
2016-01-12 02:18:03 +01:00
|
|
|
sres, err = index.Search(sreq)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if sres.Total != 1 {
|
|
|
|
t.Errorf("expected 1 results, got %d", sres.Total)
|
|
|
|
}
|
|
|
|
|
|
|
|
sreq = NewSearchRequest(NewBoolFieldQuery(true))
|
|
|
|
sres, err = index.Search(sreq)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if sres.Total != 1 {
|
|
|
|
t.Errorf("expected 1 results, got %d", sres.Total)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
2016-03-02 22:55:02 +01:00
|
|
|
|
|
|
|
func TestSearchTimeout(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
err := index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
// first run a search with an absurdly long timeout (should succeeed)
|
|
|
|
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
|
|
|
|
query := NewTermQuery("water")
|
|
|
|
req := NewSearchRequest(query)
|
|
|
|
_, err = index.SearchInContext(ctx, req)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// now run a search again with an absurdly low timeout (should timeout)
|
|
|
|
ctx, _ = context.WithTimeout(context.Background(), 1*time.Microsecond)
|
|
|
|
sq := &slowQuery{
|
|
|
|
actual: query,
|
|
|
|
delay: 50 * time.Millisecond, // on Windows timer resolution is 15ms
|
|
|
|
}
|
|
|
|
req.Query = sq
|
|
|
|
_, err = index.SearchInContext(ctx, req)
|
|
|
|
if err != context.DeadlineExceeded {
|
|
|
|
t.Fatalf("exected %v, got: %v", context.DeadlineExceeded, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// now run a search with a long timeout, but with a long query, and cancel it
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
|
|
|
sq = &slowQuery{
|
|
|
|
actual: query,
|
|
|
|
delay: 100 * time.Millisecond, // on Windows timer resolution is 15ms
|
|
|
|
}
|
|
|
|
req = NewSearchRequest(sq)
|
|
|
|
cancel()
|
|
|
|
_, err = index.SearchInContext(ctx, req)
|
|
|
|
if err != context.Canceled {
|
|
|
|
t.Fatalf("exected %v, got: %v", context.Canceled, err)
|
|
|
|
}
|
|
|
|
}
|
2016-03-07 22:05:34 +01:00
|
|
|
|
|
|
|
// TestConfigCache exposes a concurrent map write with go 1.6
|
|
|
|
func TestConfigCache(t *testing.T) {
|
|
|
|
for i := 0; i < 100; i++ {
|
|
|
|
go func() {
|
|
|
|
_, err := Config.Cache.HighlighterNamed(Config.DefaultHighlighter)
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
}
|
2016-04-08 21:32:13 +02:00
|
|
|
|
|
|
|
func TestBatchRaceBug260(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
i, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
attempt to fix core reference counting issues
Observed problem:
Persisted index state (in root bolt) would contain index snapshots which
pointed to index files that did not exist.
Debugging this uncovered two main problems:
1. At the end of persisting a snapshot, the persister creates a new index
snapshot with the SAME epoch as the current root, only it replaces in-memory
segments with the new disk based ones. This is problematic because reference
counting an index segment triggers "eligible for deletion". And eligible for
deletion is keyed by epoch. So having two separate instances going by the same
epoch is problematic. Specifically, one of them gets to 0 before the other,
and we wrongly conclude it's eligible for deletion, when in fact the "other"
instance with same epoch is actually still in use.
To address this problem, we have modified the behavior of the persister. Now,
upon completion of persistence, ONLY if new files were actually created do we
proceed to introduce a new snapshot. AND, this new snapshot now gets it's own
brand new epoch. BOTH of these are important because since the persister now
also introduces a new epoch, it will see this epoch again in the future AND be
expected to persist it. That is OK (mostly harmless), but we cannot allow it
to form a loop. Checking that new files were actually introduced is what
short-circuits the potential loop. The new epoch introduced by the persister,
if seen again will not have any new segments that actually need persisting to
disk, and the cycle is stopped.
2. The implementation of NumSnapshotsToKeep, and related code to deleted old
snapshots from the root bolt also contains problems. Specifically, the
determination of which snapshots to keep vs delete did not consider which ones
were actually persisted. So, lets say you had set NumSnapshotsToKeep to 3, if
the introducer gets 3 snapshots ahead of the persister, what can happen is that
the three snapshots we choose to keep are all in memory. We now wrongly delete
all of the snapshots from the root bolt. But it gets worse, in this instant of
time, we now have files on disk that nothing in the root bolt points to, so we
also go ahead and delete those files. Those files were still being referenced
by the in-memory snapshots. But, now even if they get persisted to disk, they
simply have references to non-existent files. Opening up one of these indexes
results in lost data (often everything).
To address this problem, we made large change to the way this section of code
operates. First, we now start with a list of all epochs actually persisted in
the root bolt. Second, we set aside NumSnapshotsToKeep of these snapshots to
keep. Third, anything else in the eligibleForRemoval list will be deleted. I
suspect this code is slower and less elegant, but I think it is more correct.
Also, previously NumSnapshotsToKeep defaulted to 0, I have now defaulted it to
1, which feels like saner out-of-the-box behavior (though it's debatable if the
original intent was perhaps instead for "extra" snapshots to keep, but with the
variable named as it is, 1 makes more sense to me)
Other minor changes included in this change:
- Location of 'nextSnapshotEpoch', 'eligibleForRemoval', and
'ineligibleForRemoval' members of Scorch struct were moved into the
paragraph with 'rootLock' to clarify that you must hold the lock to access it.
- TestBatchRaceBug260 was updated to properly Close() the index, which leads to
occasional test failures.
2018-01-02 22:09:55 +01:00
|
|
|
defer func() {
|
|
|
|
err := i.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
2016-04-08 21:32:13 +02:00
|
|
|
b := i.NewBatch()
|
|
|
|
err = b.Index("1", 1)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
err = i.Batch(b)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
b.Reset()
|
|
|
|
err = b.Index("2", 2)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
err = i.Batch(b)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
b.Reset()
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkBatchOverhead(b *testing.B) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
b.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
m := NewIndexMapping()
|
|
|
|
i, err := NewUsing("testidx", m, Config.DefaultIndexType, null.Name, nil)
|
|
|
|
if err != nil {
|
|
|
|
b.Fatal(err)
|
|
|
|
}
|
|
|
|
for n := 0; n < b.N; n++ {
|
|
|
|
// put 1000 items in a batch
|
|
|
|
batch := i.NewBatch()
|
|
|
|
for i := 0; i < 1000; i++ {
|
|
|
|
err = batch.Index(fmt.Sprintf("%d", i), map[string]interface{}{"name": "bleve"})
|
|
|
|
if err != nil {
|
|
|
|
b.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
err = i.Batch(batch)
|
|
|
|
if err != nil {
|
|
|
|
b.Fatal(err)
|
|
|
|
}
|
|
|
|
batch.Reset()
|
|
|
|
}
|
|
|
|
}
|
2016-08-06 16:47:34 +02:00
|
|
|
|
|
|
|
func TestOpenReadonlyMultiple(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
// build an index and close it
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
doca := map[string]interface{}{
|
|
|
|
"name": "marty",
|
|
|
|
"desc": "gophercon india",
|
|
|
|
}
|
|
|
|
err = index.Index("a", doca)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// now open it read-only
|
|
|
|
index, err = OpenUsing("testidx", map[string]interface{}{
|
|
|
|
"read_only": true,
|
|
|
|
})
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// now open it again
|
|
|
|
index2, err := OpenUsing("testidx", map[string]interface{}{
|
|
|
|
"read_only": true,
|
|
|
|
})
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
err = index2.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
2016-08-31 21:21:44 +02:00
|
|
|
|
|
|
|
// TestBug408 tests for VERY large values of size, even though actual result
|
|
|
|
// set may be reasonable size
|
|
|
|
func TestBug408(t *testing.T) {
|
|
|
|
type TestStruct struct {
|
|
|
|
ID string `json:"id"`
|
|
|
|
UserID *string `json:"user_id"`
|
|
|
|
}
|
|
|
|
|
|
|
|
docMapping := NewDocumentMapping()
|
|
|
|
docMapping.AddFieldMappingsAt("id", NewTextFieldMapping())
|
|
|
|
docMapping.AddFieldMappingsAt("user_id", NewTextFieldMapping())
|
|
|
|
|
|
|
|
indexMapping := NewIndexMapping()
|
|
|
|
indexMapping.DefaultMapping = docMapping
|
|
|
|
|
BREAKING CHANGE - new method to create memory only index
Previously bleve allowed you to create a memory-only index by
simply passing "" as the path argument to the New() method.
This was not clear when reading the code, and led to some
problematic error cases as well.
Now, to create a memory-only index one should use the
NewMemOnly() method. Passing "" as the path argument
to the New() method will now return os.ErrInvalid.
Advanced users calling NewUsing() can create disk-based or
memory-only indexes, but the change here is that pass ""
as the path argument no longer defaults you into getting
a memory-only index. Instead, the KV store is selected
manually, just as it is for the disk-based solutions.
Here is an example use of the NewUsing() method to create
a memory-only index:
NewUsing("", indexMapping, Config.DefaultIndexType,
Config.DefaultMemKVStore, nil)
Config.DefaultMemKVStore is just a new default value
added to the configuration, it currently points to
gtreap.Name (which could have been used directly
instead for more control)
closes #427
2016-09-27 20:05:55 +02:00
|
|
|
index, err := NewMemOnly(indexMapping)
|
2016-08-31 21:21:44 +02:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
numToTest := 10
|
|
|
|
matchUserID := "match"
|
|
|
|
noMatchUserID := "no_match"
|
|
|
|
matchingDocIds := make(map[string]struct{})
|
|
|
|
|
|
|
|
for i := 0; i < numToTest; i++ {
|
|
|
|
ds := &TestStruct{"id_" + strconv.Itoa(i), nil}
|
|
|
|
if i%2 == 0 {
|
|
|
|
ds.UserID = &noMatchUserID
|
|
|
|
} else {
|
|
|
|
ds.UserID = &matchUserID
|
|
|
|
matchingDocIds[ds.ID] = struct{}{}
|
|
|
|
}
|
|
|
|
err = index.Index(ds.ID, ds)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cnt, err := index.DocCount()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if int(cnt) != numToTest {
|
|
|
|
t.Fatalf("expected %d documents in index, got %d", numToTest, cnt)
|
|
|
|
}
|
|
|
|
|
2016-09-23 01:49:31 +02:00
|
|
|
q := NewTermQuery(matchUserID)
|
|
|
|
q.SetField("user_id")
|
2016-08-31 21:21:44 +02:00
|
|
|
searchReq := NewSearchRequestOptions(q, math.MaxInt32, 0, false)
|
|
|
|
results, err := index.Search(searchReq)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if int(results.Total) != numToTest/2 {
|
|
|
|
t.Fatalf("expected %d search hits, got %d", numToTest/2, results.Total)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, result := range results.Hits {
|
|
|
|
if _, found := matchingDocIds[result.ID]; !found {
|
|
|
|
t.Fatalf("document with ID %s not in results as expected", result.ID)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-04-12 00:20:34 +02:00
|
|
|
|
|
|
|
func TestIndexAdvancedCountMatchSearch(t *testing.T) {
|
|
|
|
defer func() {
|
|
|
|
err := os.RemoveAll("testidx")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
index, err := New("testidx", NewIndexMapping())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
wg.Add(1)
|
|
|
|
go func(i int) {
|
|
|
|
b := index.NewBatch()
|
|
|
|
for j := 0; j < 200; j++ {
|
|
|
|
id := fmt.Sprintf("%d", (i*200)+j)
|
|
|
|
|
|
|
|
doc := &document.Document{
|
|
|
|
ID: id,
|
|
|
|
Fields: []document.Field{
|
|
|
|
document.NewTextField("body", []uint64{}, []byte("match")),
|
|
|
|
},
|
|
|
|
CompositeFields: []*document.CompositeField{
|
|
|
|
document.NewCompositeField("_all", true, []string{}, []string{}),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
err := b.IndexAdvanced(doc)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
err := index.Batch(b)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
wg.Done()
|
|
|
|
}(i)
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
|
|
|
|
// search for something that should match all documents
|
|
|
|
sr, err := index.Search(NewSearchRequest(NewMatchQuery("match")))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// get the index document count
|
|
|
|
dc, err := index.DocCount()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// make sure test is working correctly, doc count should 2000
|
|
|
|
if dc != 2000 {
|
|
|
|
t.Errorf("expected doc count 2000, got %d", dc)
|
|
|
|
}
|
|
|
|
|
|
|
|
// make sure our search found all the documents
|
|
|
|
if dc != sr.Total {
|
|
|
|
t.Errorf("expected search result total %d to match doc count %d", sr.Total, dc)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = index.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|