From ef18dfe4cd686c7fb5bf3800be0998551f33b043 Mon Sep 17 00:00:00 2001 From: Silvan Jegen Date: Thu, 18 Dec 2014 18:43:12 +0100 Subject: [PATCH] Fix typos in comments and strings --- analysis/token_filters/porter/porter.go | 2 +- .../stemmer_filter/stemmer_filter.go | 2 +- .../unicode_normalize_test.go | 2 +- analysis/token_map.go | 4 ++-- analysis/token_map/custom.go | 4 ++-- index/store/boltdb/batch.go | 2 +- index/store/boltdb/store_test.go | 12 +++++------ index/store/forestdb/batch.go | 2 +- index/store/forestdb/store_test.go | 20 +++++++++---------- index/store/inmem/batch.go | 2 +- index/store/inmem/store_test.go | 12 +++++------ index/store/leveldb/batch.go | 2 +- index/store/leveldb/store_test.go | 12 +++++------ index/store/null/null_test.go | 2 +- index/upside_down/dump.go | 2 +- index/upside_down/field_reader.go | 2 +- index/upside_down/reader_test.go | 2 +- index/upside_down/row_test.go | 2 +- index/upside_down/upside_down.go | 2 +- index/upside_down/upside_down_test.go | 20 +++++++++---------- index_alias.go | 2 +- index_impl.go | 14 ++++++------- index_meta_test.go | 2 +- index_test.go | 2 +- mapping_document.go | 4 ++-- mapping_index.go | 16 +++++++-------- numeric_util/float_test.go | 2 +- numeric_util/prefix_coded_test.go | 2 +- query_boolean.go | 4 ++-- query_date_range.go | 4 ++-- query_match.go | 2 +- query_match_phrase.go | 2 +- search.go | 4 ++-- search/explanation.go | 2 +- .../fragmenters/simple/fragmenter_simple.go | 2 +- .../highlighters/simple/highlighter_simple.go | 2 +- search/searchers/search_boolean.go | 2 +- search/searchers/search_conjunction.go | 6 +++--- search/searchers/search_disjunction.go | 4 ++-- search/searchers/search_numeric_range.go | 2 +- search/searchers/search_phrase.go | 6 +++--- search/util.go | 2 +- test/integration_test.go | 2 +- utils/bleve_query/main.go | 2 +- 44 files changed, 100 insertions(+), 102 deletions(-) diff --git a/analysis/token_filters/porter/porter.go b/analysis/token_filters/porter/porter.go index 
88b22808..75929385 100644 --- a/analysis/token_filters/porter/porter.go +++ b/analysis/token_filters/porter/porter.go @@ -27,7 +27,7 @@ func NewPorterStemmer() *PorterStemmer { func (s *PorterStemmer) Filter(input analysis.TokenStream) analysis.TokenStream { for _, token := range input { - // if not protected keyword, stem it + // if it is not a protected keyword, stem it if !token.KeyWord { stemmed := porterstemmer.StemString(string(token.Term)) token.Term = []byte(stemmed) diff --git a/analysis/token_filters/stemmer_filter/stemmer_filter.go b/analysis/token_filters/stemmer_filter/stemmer_filter.go index 3640681b..44a6e677 100644 --- a/analysis/token_filters/stemmer_filter/stemmer_filter.go +++ b/analysis/token_filters/stemmer_filter/stemmer_filter.go @@ -55,7 +55,7 @@ func (s *StemmerFilter) List() []string { func (s *StemmerFilter) Filter(input analysis.TokenStream) analysis.TokenStream { for _, token := range input { - // if not protected keyword, stem it + // if it is not a protected keyword, stem it if !token.KeyWord { stemmer := <-s.stemmerPool stemmed := stemmer.Stem(string(token.Term)) diff --git a/analysis/token_filters/unicode_normalize/unicode_normalize_test.go b/analysis/token_filters/unicode_normalize/unicode_normalize_test.go index d710cb94..a02f6794 100644 --- a/analysis/token_filters/unicode_normalize/unicode_normalize_test.go +++ b/analysis/token_filters/unicode_normalize/unicode_normalize_test.go @@ -18,7 +18,7 @@ import ( // the following tests come from the lucene // test cases for CJK width filter -// which is our bases for using this +// which is our basis for using this // as a substitute for that func TestUnicodeNormalization(t *testing.T) { diff --git a/analysis/token_map.go b/analysis/token_map.go index ade376c4..c385ae11 100644 --- a/analysis/token_map.go +++ b/analysis/token_map.go @@ -39,7 +39,7 @@ func (t TokenMap) LoadBytes(data []byte) error { t.LoadLine(line) line, err = bufioReader.ReadString('\n') } - // if the err was EOF still 
need to process last value + // if the err was EOF we still need to process the last value if err == io.EOF { t.LoadLine(line) return nil @@ -48,7 +48,7 @@ func (t TokenMap) LoadBytes(data []byte) error { } func (t TokenMap) LoadLine(line string) error { - // find the start of comment, if any + // find the start of a comment, if any startComment := strings.IndexAny(line, "#|") if startComment >= 0 { line = line[:startComment] diff --git a/analysis/token_map/custom.go b/analysis/token_map/custom.go index 889892c2..eb5ccb68 100644 --- a/analysis/token_map/custom.go +++ b/analysis/token_map/custom.go @@ -21,13 +21,13 @@ const Name = "custom" func GenericTokenMapConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenMap, error) { rv := analysis.NewTokenMap() - // first try to load by filename + // first: try to load by filename filename, ok := config["filename"].(string) if ok { err := rv.LoadFile(filename) return rv, err } - // next look for an inline word list + // next: look for an inline word list tokens, ok := config["tokens"].([]interface{}) if ok { for _, token := range tokens { diff --git a/index/store/boltdb/batch.go b/index/store/boltdb/batch.go index 5a6c2645..2f9c4b2f 100644 --- a/index/store/boltdb/batch.go +++ b/index/store/boltdb/batch.go @@ -70,7 +70,7 @@ func (i *Batch) Execute() error { return i.store.db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(i.store.bucket)) - // first processed the merges + // first process the merges for k, mc := range i.merges { val := b.Get([]byte(k)) var err error diff --git a/index/store/boltdb/store_test.go b/index/store/boltdb/store_test.go index ae7d9c1a..80311fad 100644 --- a/index/store/boltdb/store_test.go +++ b/index/store/boltdb/store_test.go @@ -84,7 +84,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) { t.Fatalf("valid false, expected true") } if string(key) != "b" { - t.Fatalf("exepcted key b, got %s", key) + t.Fatalf("expected key b, got %s", key) } if string(val) 
!= "val-b" { t.Fatalf("expected value val-b, got %s", val) @@ -96,7 +96,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) { t.Fatalf("valid false, expected true") } if string(key) != "c" { - t.Fatalf("exepcted key c, got %s", key) + t.Fatalf("expected key c, got %s", key) } if string(val) != "val-c" { t.Fatalf("expected value val-c, got %s", val) @@ -108,7 +108,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) { t.Fatalf("valid false, expected true") } if string(key) != "i" { - t.Fatalf("exepcted key i, got %s", key) + t.Fatalf("expected key i, got %s", key) } if string(val) != "val-i" { t.Fatalf("expected value val-i, got %s", val) @@ -129,14 +129,14 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) { } writer.Close() - // create an isoalted reader + // create an isolated reader reader, err := s.Reader() if err != nil { t.Error(err) } defer reader.Close() - // verify we see the value already inserted + // verify that we see the value already inserted val, err := reader.Get([]byte("a")) if err != nil { t.Error(err) @@ -182,7 +182,7 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) { t.Errorf("expected val-b, got nil") } - // ensure director iterator sees it + // ensure that the director iterator sees it count = 0 it = newReader.Iterator([]byte{0}) defer it.Close() diff --git a/index/store/forestdb/batch.go b/index/store/forestdb/batch.go index 944e331d..67dc399d 100644 --- a/index/store/forestdb/batch.go +++ b/index/store/forestdb/batch.go @@ -69,7 +69,7 @@ func (b *Batch) Execute() error { defer b.store.writer.Unlock() } - // first processed the merges + // first process the merges for k, mc := range b.merges { val, err := b.store.get([]byte(k)) if err != nil { diff --git a/index/store/forestdb/store_test.go b/index/store/forestdb/store_test.go index 2909fca9..69d1ff0d 100644 --- a/index/store/forestdb/store_test.go +++ b/index/store/forestdb/store_test.go @@ -60,7 +60,7 @@ func TestRollbackSameHandle(t *testing.T) { 
t.Fatal(err) } - // create 2 docs a and b + // create 2 docs, a and b err = writer.Set([]byte("a"), []byte("val-a")) if err != nil { t.Error(err) @@ -124,7 +124,7 @@ func TestRollbackSameHandle(t *testing.T) { } // TestRollbackNewHandle tries to rollback the -// database, then open a new handle, and ensure +// database, then opens a new handle, and ensures // that the rollback is reflected there as well func TestRollbackNewHandle(t *testing.T) { defer os.RemoveAll("test") @@ -140,7 +140,7 @@ func TestRollbackNewHandle(t *testing.T) { t.Fatal(err) } - // create 2 docs a and b + // create 2 docs, a and b err = writer.Set([]byte("a"), []byte("val-a")) if err != nil { t.Error(err) @@ -211,7 +211,7 @@ func TestRollbackNewHandle(t *testing.T) { } // TestRollbackOtherHandle tries to create 2 handles -// at the begining, then rollback one of them +// at the beginning, then rollback one of them // and ensure it affects the other func TestRollbackOtherHandle(t *testing.T) { defer os.RemoveAll("test") @@ -234,7 +234,7 @@ func TestRollbackOtherHandle(t *testing.T) { t.Fatal(err) } - // create 2 docs a and b + // create 2 docs, a and b err = writer.Set([]byte("a"), []byte("val-a")) if err != nil { t.Error(err) @@ -344,7 +344,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) { t.Fatalf("valid false, expected true") } if string(key) != "b" { - t.Fatalf("exepcted key b, got %s", key) + t.Fatalf("expected key b, got %s", key) } if string(val) != "val-b" { t.Fatalf("expected value val-b, got %s", val) @@ -356,7 +356,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) { t.Fatalf("valid false, expected true") } if string(key) != "c" { - t.Fatalf("exepcted key c, got %s", key) + t.Fatalf("expected key c, got %s", key) } if string(val) != "val-c" { t.Fatalf("expected value val-c, got %s", val) @@ -368,7 +368,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) { t.Fatalf("valid false, expected true") } if string(key) != "i" { - t.Fatalf("exepcted key i, got %s", 
key) + t.Fatalf("expected key i, got %s", key) } if string(val) != "val-i" { t.Fatalf("expected value val-i, got %s", val) @@ -389,7 +389,7 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) { } writer.Close() - // create an isoalted reader + // create an isolated reader reader, err := s.Reader() if err != nil { t.Error(err) @@ -442,7 +442,7 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) { t.Errorf("expected val-b, got nil") } - // ensure director iterator sees it + // ensure that the director iterator sees it count = 0 it = newReader.Iterator([]byte{0}) defer it.Close() diff --git a/index/store/inmem/batch.go b/index/store/inmem/batch.go index 012b1d00..ffcd330f 100644 --- a/index/store/inmem/batch.go +++ b/index/store/inmem/batch.go @@ -67,7 +67,7 @@ func (i *Batch) Execute() error { defer i.store.writer.Unlock() } - // first processed the merges + // first process the merges for k, mc := range i.merges { val, err := i.store.get([]byte(k)) if err != nil { diff --git a/index/store/inmem/store_test.go b/index/store/inmem/store_test.go index 7f7a350a..06db47a9 100644 --- a/index/store/inmem/store_test.go +++ b/index/store/inmem/store_test.go @@ -73,7 +73,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) { t.Fatalf("valid false, expected true") } if string(key) != "b" { - t.Fatalf("exepcted key b, got %s", key) + t.Fatalf("expected key b, got %s", key) } if string(val) != "val-b" { t.Fatalf("expected value val-b, got %s", val) @@ -85,7 +85,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) { t.Fatalf("valid false, expected true") } if string(key) != "c" { - t.Fatalf("exepcted key c, got %s", key) + t.Fatalf("expected key c, got %s", key) } if string(val) != "val-c" { t.Fatalf("expected value val-c, got %s", val) @@ -97,7 +97,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) { t.Fatalf("valid false, expected true") } if string(key) != "i" { - t.Fatalf("exepcted key i, got %s", key) + t.Fatalf("expected key i, 
got %s", key) } if string(val) != "val-i" { t.Fatalf("expected value val-i, got %s", val) @@ -118,14 +118,14 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) { } writer.Close() - // create an isoalted reader + // create an isolated reader reader, err := s.Reader() if err != nil { t.Error(err) } defer reader.Close() - // verify we see the value already inserted + // verify that we see the value already inserted val, err := reader.Get([]byte("a")) if err != nil { t.Error(err) @@ -171,7 +171,7 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) { t.Errorf("expected val-b, got nil") } - // ensure director iterator sees it + // ensure that the director iterator sees it count = 0 it = newReader.Iterator([]byte{0}) defer it.Close() diff --git a/index/store/leveldb/batch.go b/index/store/leveldb/batch.go index cbe4ecf5..f5a3dfe5 100644 --- a/index/store/leveldb/batch.go +++ b/index/store/leveldb/batch.go @@ -73,7 +73,7 @@ func (ldb *Batch) Execute() error { batch := levigo.NewWriteBatch() defer batch.Close() - // first processed the merges + // first process the merges for k, mc := range ldb.merges { val, err := ldb.store.get([]byte(k)) if err != nil { diff --git a/index/store/leveldb/store_test.go b/index/store/leveldb/store_test.go index cb76a63f..b21f14ca 100644 --- a/index/store/leveldb/store_test.go +++ b/index/store/leveldb/store_test.go @@ -90,7 +90,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) { t.Fatalf("valid false, expected true") } if string(key) != "b" { - t.Fatalf("exepcted key b, got %s", key) + t.Fatalf("expected key b, got %s", key) } if string(val) != "val-b" { t.Fatalf("expected value val-b, got %s", val) @@ -102,7 +102,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) { t.Fatalf("valid false, expected true") } if string(key) != "c" { - t.Fatalf("exepcted key c, got %s", key) + t.Fatalf("expected key c, got %s", key) } if string(val) != "val-c" { t.Fatalf("expected value val-c, got %s", val) @@ -114,7 
+114,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) { t.Fatalf("valid false, expected true") } if string(key) != "i" { - t.Fatalf("exepcted key i, got %s", key) + t.Fatalf("expected key i, got %s", key) } if string(val) != "val-i" { t.Fatalf("expected value val-i, got %s", val) @@ -135,14 +135,14 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) { } writer.Close() - // create an isoalted reader + // create an isolated reader reader, err := s.Reader() if err != nil { t.Error(err) } defer reader.Close() - // verify we see the value already inserted + // verify that we see the value already inserted val, err := reader.Get([]byte("a")) if err != nil { t.Error(err) @@ -188,7 +188,7 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) { t.Errorf("expected val-b, got nil") } - // ensure director iterator sees it + // ensure that the director iterator sees it count = 0 it = newReader.Iterator([]byte{0}) defer it.Close() diff --git a/index/store/null/null_test.go b/index/store/null/null_test.go index 8cc1dd2e..f4a0b890 100644 --- a/index/store/null/null_test.go +++ b/index/store/null/null_test.go @@ -64,7 +64,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) { t.Fatalf("valid true, expected false") } if key != nil { - t.Fatalf("exepcted key nil, got %s", key) + t.Fatalf("expected key nil, got %s", key) } if val != nil { t.Fatalf("expected value nil, got %s", val) diff --git a/index/upside_down/dump.go b/index/upside_down/dump.go index ff4eab20..9b9443f6 100644 --- a/index/upside_down/dump.go +++ b/index/upside_down/dump.go @@ -18,7 +18,7 @@ import ( // the functions in this file are only intended to be used by // the bleve_dump utility and the debug http handlers -// if your application relies on the, you're doing something wrong +// if your application relies on them, you're doing something wrong // they may change or be removed at any time func (udc *UpsideDownCouch) dumpPrefix(kvreader store.KVReader, rv chan interface{}, 
prefix []byte) { diff --git a/index/upside_down/field_reader.go b/index/upside_down/field_reader.go index 3552bf61..6278a48e 100644 --- a/index/upside_down/field_reader.go +++ b/index/upside_down/field_reader.go @@ -80,7 +80,7 @@ func incrementBytes(in []byte) []byte { for i := len(rv) - 1; i >= 0; i-- { rv[i] = rv[i] + 1 if rv[i] != 0 { - // didnt' overflow, so stop + // didn't overflow, so stop break } } diff --git a/index/upside_down/reader_test.go b/index/upside_down/reader_test.go index 6aac6b54..2484a950 100644 --- a/index/upside_down/reader_test.go +++ b/index/upside_down/reader_test.go @@ -55,7 +55,7 @@ func TestIndexReader(t *testing.T) { } defer indexReader.Close() - // first look for a term that doesnt exist + // first look for a term that doesn't exist reader, err := indexReader.TermFieldReader([]byte("nope"), "name") if err != nil { t.Errorf("Error accessing term field reader: %v", err) diff --git a/index/upside_down/row_test.go b/index/upside_down/row_test.go index 743129fb..480ee246 100644 --- a/index/upside_down/row_test.go +++ b/index/upside_down/row_test.go @@ -154,7 +154,7 @@ func TestInvalidRows(t *testing.T) { []byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator}, []byte{}, }, - // type t, invalid val (misisng freq) + // type t, invalid val (missing freq) { []byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'}, []byte{}, diff --git a/index/upside_down/upside_down.go b/index/upside_down/upside_down.go index 5ed36943..62330ae6 100644 --- a/index/upside_down/upside_down.go +++ b/index/upside_down/upside_down.go @@ -571,7 +571,7 @@ func (udc *UpsideDownCouch) Batch(batch *index.Batch) error { for docID, doc := range batch.IndexOps { backIndexRow := backIndexRows[docID] if doc == nil && backIndexRow != nil { - //delete + // delete deleteRows = udc.deleteSingle(docID, backIndexRow, deleteRows) docsDeleted++ } else if doc != nil { diff --git a/index/upside_down/upside_down_test.go 
b/index/upside_down/upside_down_test.go index 01c3f535..a57973cc 100644 --- a/index/upside_down/upside_down_test.go +++ b/index/upside_down/upside_down_test.go @@ -47,7 +47,7 @@ func TestIndexOpenReopen(t *testing.T) { t.Errorf("Expected document count to be %d got %d", expectedCount, docCount) } - // opening database should have inserted version + // opening the database should have inserted a version expectedLength := uint64(1) rowCount := idx.rowCount() if rowCount != expectedLength { @@ -108,7 +108,7 @@ func TestIndexInsert(t *testing.T) { t.Errorf("Expected document count to be %d got %d", expectedCount, docCount) } - // should have 4 rows (1 for version, 1 for schema field, and 1 for single term, and 1 for the term count, and 1 for the back index entry) + // should have 4 rows (1 for version, 1 for schema field, and 1 for single term, and 1 for the term count, and 1 for the back index entry) expectedLength := uint64(1 + 1 + 1 + 1 + 1) rowCount := idx.rowCount() if rowCount != expectedLength { @@ -189,7 +189,7 @@ func TestIndexInsertThenDelete(t *testing.T) { t.Errorf("Expected document count to be %d got %d", expectedCount, docCount) } - // should have 2 row (1 for version, 1 for schema field) + // should have 2 rows (1 for version, 1 for schema field) expectedLength := uint64(1 + 1) rowCount := idx.rowCount() if rowCount != expectedLength { @@ -224,14 +224,14 @@ func TestIndexInsertThenUpdate(t *testing.T) { t.Errorf("Error deleting entry from index: %v", err) } - // should have 2 row (1 for version, 1 for schema field, and 2 for the two term, and 2 for the term counts, and 1 for the back index entry) + // should have 2 rows (1 for version, 1 for schema field, and 2 for the two term, and 2 for the term counts, and 1 for the back index entry) expectedLength := uint64(1 + 1 + 2 + 2 + 1) rowCount := idx.rowCount() if rowCount != expectedLength { t.Errorf("expected %d rows, got: %d", expectedLength, rowCount) } - // now do another update that should remove one 
of term + // now do another update that should remove one of the terms doc = document.NewDocument("1") doc.AddField(document.NewTextField("name", []uint64{}, []byte("fail"))) err = idx.Update(doc) @@ -239,7 +239,7 @@ func TestIndexInsertThenUpdate(t *testing.T) { t.Errorf("Error deleting entry from index: %v", err) } - // should have 2 row (1 for version, 1 for schema field, and 1 for the remaining term, and 1 for the term count, and 1 for the back index entry) + // should have 2 rows (1 for version, 1 for schema field, and 1 for the remaining term, and 1 for the term count, and 1 for the back index entry) expectedLength = uint64(1 + 1 + 1 + 1 + 1) rowCount = idx.rowCount() if rowCount != expectedLength { @@ -276,14 +276,14 @@ func TestIndexInsertMultiple(t *testing.T) { } expectedCount++ - // should have 4 rows (1 for version, 1 for schema field, and 2 for single term, and 1 for the term count, and 2 for the back index entries) + // should have 4 rows (1 for version, 1 for schema field, and 2 for single term, and 1 for the term count, and 2 for the back index entries) expectedLength := uint64(1 + 1 + 2 + 1 + 2) rowCount := idx.rowCount() if rowCount != expectedLength { t.Errorf("expected %d rows, got: %d", expectedLength, rowCount) } - // close and reopen and and one more to testing counting works correctly + // close, reopen and add one more to test that counting works correctly idx.Close() store, err = boltdb.Open("test", "bleve") idx = NewUpsideDownCouch(store, analysisQueue) @@ -350,7 +350,7 @@ func TestIndexInsertWithStore(t *testing.T) { t.Errorf("Expected document count to be %d got %d", expectedCount, docCount) } - // should have 6 rows (1 for version, 1 for schema field, and 1 for single term, and 1 for the stored field and 1 for the term count, and 1 for the back index entry) + // should have 6 rows (1 for version, 1 for schema field, and 1 for single term, and 1 for the stored field and 1 for the term count, and 1 for the back index entry) 
expectedLength := uint64(1 + 1 + 1 + 1 + 1 + 1) rowCount := idx.rowCount() if rowCount != expectedLength { @@ -398,7 +398,7 @@ func TestIndexInternalCRUD(t *testing.T) { } defer indexReader.Close() - // get something that doesnt exist yet + // get something that doesn't exist yet val, err := indexReader.GetInternal([]byte("key")) if err != nil { t.Error(err) diff --git a/index_alias.go b/index_alias.go index 5bf9122d..c46be279 100644 --- a/index_alias.go +++ b/index_alias.go @@ -15,7 +15,7 @@ package bleve // 1. When it points to a single index, ALL index // operations are valid and will be passed through // to the underlying index. -// 2. When it points to more than index, the only +// 2. When it points to more than one index, the only // valid operation is Search. In this case the // search will be performed across all the // underlying indexes and the results merged. diff --git a/index_impl.go b/index_impl.go index 8b5a0fce..1a29c512 100644 --- a/index_impl.go +++ b/index_impl.go @@ -65,7 +65,7 @@ func newMemIndex(mapping *IndexMapping) (*indexImpl, error) { return nil, err } - // open open the index + // open the index rv.i = upside_down.NewUpsideDownCouch(rv.s, Config.analysisQueue) err = rv.i.Open() if err != nil { @@ -111,7 +111,7 @@ func newIndex(path string, mapping *IndexMapping) (*indexImpl, error) { if storeConstructor == nil { return nil, ErrorUnknownStorageType } - // at this point there hope we can be successful, so save index meta + // at this point there is hope that we can be successful, so save index meta err = rv.meta.Save(path) if err != nil { return nil, err @@ -128,7 +128,7 @@ func newIndex(path string, mapping *IndexMapping) (*indexImpl, error) { return nil, err } - // open open the index + // open the index rv.i = upside_down.NewUpsideDownCouch(rv.s, Config.analysisQueue) err = rv.i.Open() if err != nil { @@ -182,7 +182,7 @@ func openIndex(path string) (*indexImpl, error) { return nil, err } - // open open the index + // open the index rv.i 
= upside_down.NewUpsideDownCouch(rv.s, Config.analysisQueue) err = rv.i.Open() if err != nil { @@ -501,7 +501,7 @@ func (i *indexImpl) Fields() ([]string, error) { // DumpAll writes all index rows to a channel. // INTERNAL: do not rely on this function, it is -// only intended to be used by the debug utilties +// only intended to be used by the debug utilities func (i *indexImpl) DumpAll() chan interface{} { i.mutex.RLock() defer i.mutex.RUnlock() @@ -516,7 +516,7 @@ func (i *indexImpl) DumpAll() chan interface{} { // DumpFields writes all field rows in the index // to a channel. // INTERNAL: do not rely on this function, it is -// only intended to be used by the debug utilties +// only intended to be used by the debug utilities func (i *indexImpl) DumpFields() chan interface{} { i.mutex.RLock() defer i.mutex.RUnlock() @@ -530,7 +530,7 @@ func (i *indexImpl) DumpFields() chan interface{} { // DumpDoc writes all rows in the index associated // with the specified identifier to a channel. // INTERNAL: do not rely on this function, it is -// only intended to be used by the debug utilties +// only intended to be used by the debug utilities func (i *indexImpl) DumpDoc(id string) chan interface{} { i.mutex.RLock() defer i.mutex.RUnlock() diff --git a/index_meta_test.go b/index_meta_test.go index e8ea2e3a..69a7a4eb 100644 --- a/index_meta_test.go +++ b/index_meta_test.go @@ -18,7 +18,7 @@ func TestIndexMeta(t *testing.T) { var testIndexPath = "doesnotexit.bleve" defer os.RemoveAll(testIndexPath) - // open non-existant meta should error + // open non-existent meta should give an error _, err := openIndexMeta(testIndexPath) if err == nil { t.Errorf("expected error, got nil") diff --git a/index_test.go b/index_test.go index e9b63306..f5680132 100644 --- a/index_test.go +++ b/index_test.go @@ -218,7 +218,7 @@ func TestIndexOpenMetaMissingOrCorrupt(t *testing.T) { t.Fatalf("expected error index metadata corrupted, got %v", err) } - // no intentionally remove the metadata + // 
now intentionally remove the metadata os.Remove("testidx/index_meta.json") index, err = Open("testidx") diff --git a/mapping_document.go b/mapping_document.go index e316bbb0..604c1eb5 100644 --- a/mapping_document.go +++ b/mapping_document.go @@ -23,7 +23,7 @@ import ( // As documents can be hierarchical, named sub-sections // of documents are mapped using the same structure in // the Properties field. -// Each value inside a document can be index 0 or more +// Each value inside a document can be indexed 0 or more // ways. These index entries are called fields and // are stored in the Fields field. // Entire sections of a document can be ignored or @@ -261,7 +261,7 @@ func (dm *DocumentMapping) processProperty(property interface{}, path []string, // look to see if there is a mapping for this field subDocMapping := dm.documentMappingForPath(pathString) - // check tos see if we even need to do further processing + // check to see if we even need to do further processing if subDocMapping != nil && !subDocMapping.Enabled { return } diff --git a/mapping_index.go b/mapping_index.go index 0471fa12..d2b14c09 100644 --- a/mapping_index.go +++ b/mapping_index.go @@ -86,12 +86,12 @@ func newCustomAnalysis() *customAnalysis { return &rv } -// An IndexMapping controls how objects are place +// An IndexMapping controls how objects are placed // into an index. -// First the type of the object is deteremined. -// Once the type is know, the appropriate/ +// First the type of the object is determined. +// Once the type is known, the appropriate // DocumentMapping is selected by the type. -// If no mapping was described for that type, +// If no mapping was determined for that type, // a DefaultMapping will be used. 
type IndexMapping struct { TypeMapping map[string]*DocumentMapping `json:"types,omitempty"` @@ -106,7 +106,7 @@ type IndexMapping struct { cache *registry.Cache } -// AddCustomCharFilter defines a custom char fitler for use in this mapping +// AddCustomCharFilter defines a custom char filter for use in this mapping func (im *IndexMapping) AddCustomCharFilter(name string, config map[string]interface{}) error { _, err := im.cache.DefineCharFilter(name, config) if err != nil { @@ -184,8 +184,6 @@ func NewIndexMapping() *IndexMapping { // Validate will walk the entire structure ensuring the following // explicitly named and default analyzers can be built -// explicitly named and default date parsers can be built -// field type names are valid func (im *IndexMapping) validate() error { _, err := im.cache.AnalyzerNamed(im.DefaultAnalyzer) if err != nil { @@ -318,7 +316,7 @@ func (im *IndexMapping) determineType(data interface{}) string { return classifier.Type() } - // now see if we can find type using the mapping + // now see if we can find a type using the mapping typ, ok := mustString(lookupPropertyPath(data, im.TypeField)) if ok { return typ @@ -328,7 +326,7 @@ func (im *IndexMapping) determineType(data interface{}) string { } func (im *IndexMapping) mapDocument(doc *document.Document, data interface{}) error { - // see if the top level object is a byte array, and possibly run through conveter + // see if the top level object is a byte array, and possibly run through a converter byteArrayData, ok := data.([]byte) if ok { byteArrayConverterConstructor := registry.ByteArrayConverterByName(im.ByteArrayConverter) diff --git a/numeric_util/float_test.go b/numeric_util/float_test.go index c055e4b1..dc7bf496 100644 --- a/numeric_util/float_test.go +++ b/numeric_util/float_test.go @@ -14,7 +14,7 @@ import ( ) // test that the float/sortable int operations work both ways -// and that the the corresponding integers sort the same as +// and that the corresponding integers sort 
the same as // the original floats would have func TestSortabledFloat64ToInt64(t *testing.T) { tests := []struct { diff --git a/numeric_util/prefix_coded_test.go b/numeric_util/prefix_coded_test.go index f102f8da..59110f45 100644 --- a/numeric_util/prefix_coded_test.go +++ b/numeric_util/prefix_coded_test.go @@ -15,7 +15,7 @@ import ( ) // these array encoding values have been verified manually -// against the lucene imlementation +// against the lucene implementation func TestPrefixCoded(t *testing.T) { tests := []struct { input int64 diff --git a/query_boolean.go b/query_boolean.go index 3b61c962..bf540148 100644 --- a/query_boolean.go +++ b/query_boolean.go @@ -27,9 +27,9 @@ type booleanQuery struct { // NewBooleanQuery creates a compound Query composed // of several other Query objects. -// Result documents must satisify ALL of the +// Result documents must satisfy ALL of the // must Queries. -// Result documents must satsify NONE of the must not +// Result documents must satisfy NONE of the must not // Queries. // If there are any should queries, result documents // must satisfy at least one of them. diff --git a/query_date_range.go b/query_date_range.go index 179be829..8643d83d 100644 --- a/query_date_range.go +++ b/query_date_range.go @@ -31,7 +31,7 @@ type dateRangeQuery struct { // NewDateRangeQuery creates a new Query for ranges // of date values. -// A DateTimeParser is chosed based on the field. +// A DateTimeParser is chosen based on the field. // Either, but not both endpoints can be nil. func NewDateRangeQuery(start, end *string) *dateRangeQuery { return NewDateRangeInclusiveQuery(start, end, nil, nil) @@ -39,7 +39,7 @@ func NewDateRangeQuery(start, end *string) *dateRangeQuery { // NewDateRangeInclusiveQuery creates a new Query for ranges // of date values. -// A DateTimeParser is chosed based on the field. +// A DateTimeParser is chosen based on the field. // Either, but not both endpoints can be nil. 
// startInclusive and endInclusive control inclusion of the endpoints. func NewDateRangeInclusiveQuery(start, end *string, startInclusive, endInclusive *bool) *dateRangeQuery { diff --git a/query_match.go b/query_match.go index bf423f1b..c602982c 100644 --- a/query_match.go +++ b/query_match.go @@ -27,7 +27,7 @@ type matchQuery struct { } // NewMatchQuery creates a Query for matching text. -// An Analyzer is chosed based on the field. +// An Analyzer is chosen based on the field. // Input text is analyzed using this analyzer. // Token terms resulting from this analysis are // used to perform term searches. Result documents diff --git a/query_match_phrase.go b/query_match_phrase.go index c3ed44f8..5eed8a2a 100644 --- a/query_match_phrase.go +++ b/query_match_phrase.go @@ -26,7 +26,7 @@ type matchPhraseQuery struct { // NewMatchPhraseQuery creates a new Query object // for matching phrases in the index. -// An Analyzer is chosed based on the field. +// An Analyzer is chosen based on the field. // Input text is analyzed using this analyzer. // Token terms resulting from this analysis are // used to build a search phrase. Result documents diff --git a/search.go b/search.go index b996a8ef..4d82c594 100644 --- a/search.go +++ b/search.go @@ -70,7 +70,7 @@ func (dr *dateTimeRange) UnmarshalJSON(input []byte) error { return nil } -// A FacetRequest describes an facet or aggregation +// A FacetRequest describes a facet or aggregation // of the result document set you would like to be // built. type FacetRequest struct { @@ -151,7 +151,7 @@ func (h *HighlightRequest) AddField(field string) { // result set to return. // Highlight describes optional search result // highlighting. -// Fields desribed a list of field values whcih +// Fields describes a list of field values which // should be retrieved for result documents. // Facets describe the set of facets to be computed. 
// Explain triggers inclusion of additional search diff --git a/search/explanation.go b/search/explanation.go index 999869b0..fe2d5870 100644 --- a/search/explanation.go +++ b/search/explanation.go @@ -23,7 +23,7 @@ type Explanation struct { func (expl *Explanation) String() string { js, err := json.MarshalIndent(expl, "", " ") if err != nil { - return fmt.Sprintf("error serializing explation to json: %v", err) + return fmt.Sprintf("error serializing explanation to json: %v", err) } return string(js) } diff --git a/search/highlight/fragmenters/simple/fragmenter_simple.go b/search/highlight/fragmenters/simple/fragmenter_simple.go index 46810486..3a53074f 100644 --- a/search/highlight/fragmenters/simple/fragmenter_simple.go +++ b/search/highlight/fragmenters/simple/fragmenter_simple.go @@ -33,7 +33,7 @@ func (s *Fragmenter) Fragment(orig []byte, ot highlight.TermLocations) []*highli maxbegin := 0 for currTermIndex, termLocation := range ot { - // start with with this + // start with this // it should be the highest scoring fragment with this term first start := termLocation.Start end := start + s.fragmentSize diff --git a/search/highlight/highlighters/simple/highlighter_simple.go b/search/highlight/highlighters/simple/highlighter_simple.go index dc5a54e0..93ca9b5c 100644 --- a/search/highlight/highlighters/simple/highlighter_simple.go +++ b/search/highlight/highlighters/simple/highlighter_simple.go @@ -146,7 +146,7 @@ type FragmentQueue []*highlight.Fragment func (fq FragmentQueue) Len() int { return len(fq) } func (fq FragmentQueue) Less(i, j int) bool { - // We want Pop to give us the highest, not lowest, priority so we use greater than here. + // We want Pop to give us the highest, not lowest, priority so we use greater-than here. 
return fq[i].Score > fq[j].Score } diff --git a/search/searchers/search_boolean.go b/search/searchers/search_boolean.go index 91d5b4a7..b7a13ad1 100644 --- a/search/searchers/search_boolean.go +++ b/search/searchers/search_boolean.go @@ -57,7 +57,7 @@ func (s *BooleanSearcher) computeQueryNorm() { // now compute query norm from this s.queryNorm = 1.0 / math.Sqrt(sumOfSquaredWeights) - // finally tell all the downsteam searchers the norm + // finally tell all the downstream searchers the norm if s.mustSearcher != nil { s.mustSearcher.SetQueryNorm(s.queryNorm) } diff --git a/search/searchers/search_conjunction.go b/search/searchers/search_conjunction.go index 9162e754..b56dcb1b 100644 --- a/search/searchers/search_conjunction.go +++ b/search/searchers/search_conjunction.go @@ -30,7 +30,7 @@ type ConjunctionSearcher struct { } func NewConjunctionSearcher(indexReader index.IndexReader, qsearchers []search.Searcher, explain bool) (*ConjunctionSearcher, error) { - // build the downstream searchres + // build the downstream searchers searchers := make(OrderedSearcherList, len(qsearchers)) for i, searcher := range qsearchers { searchers[i] = searcher @@ -57,7 +57,7 @@ func (s *ConjunctionSearcher) computeQueryNorm() { } // now compute query norm from this s.queryNorm = 1.0 / math.Sqrt(sumOfSquaredWeights) - // finally tell all the downsteam searchers the norm + // finally tell all the downstream searchers the norm for _, termSearcher := range s.searchers { termSearcher.SetQueryNorm(s.queryNorm) } @@ -145,7 +145,7 @@ OUTER: } else { s.currentID = s.currs[0].ID } - // don't continue now, wait for next call the Next() + // don't continue now, wait for the next call to Next() break } return rv, nil diff --git a/search/searchers/search_disjunction.go b/search/searchers/search_disjunction.go index a02fc5d9..eb9ed2ed 100644 --- a/search/searchers/search_disjunction.go +++ b/search/searchers/search_disjunction.go @@ -30,7 +30,7 @@ type DisjunctionSearcher struct { } func 
NewDisjunctionSearcher(indexReader index.IndexReader, qsearchers []search.Searcher, min float64, explain bool) (*DisjunctionSearcher, error) { - // build the downstream searchres + // build the downstream searchers searchers := make(OrderedSearcherList, len(qsearchers)) for i, searcher := range qsearchers { searchers[i] = searcher @@ -57,7 +57,7 @@ func (s *DisjunctionSearcher) computeQueryNorm() { } // now compute query norm from this s.queryNorm = 1.0 / math.Sqrt(sumOfSquaredWeights) - // finally tell all the downsteam searchers the norm + // finally tell all the downstream searchers the norm for _, termSearcher := range s.searchers { termSearcher.SetQueryNorm(s.queryNorm) } diff --git a/search/searchers/search_numeric_range.go b/search/searchers/search_numeric_range.go index 3bb074d7..3e2bd945 100644 --- a/search/searchers/search_numeric_range.go +++ b/search/searchers/search_numeric_range.go @@ -54,7 +54,7 @@ func NewNumericRangeSearcher(indexReader index.IndexReader, min *float64, max *f if !*inclusiveMax && maxInt64 != math.MinInt64 { maxInt64-- } - // FIXME hard-coded precion, should match field declaration + // FIXME hard-coded precision, should match field declaration termRanges := splitInt64Range(minInt64, maxInt64, 4) terms := termRanges.Enumerate() // enumerate all the terms in the range diff --git a/search/searchers/search_phrase.go b/search/searchers/search_phrase.go index 80cce312..973958fc 100644 --- a/search/searchers/search_phrase.go +++ b/search/searchers/search_phrase.go @@ -46,7 +46,7 @@ func (s *PhraseSearcher) computeQueryNorm() { // now compute query norm from this s.queryNorm = 1.0 / math.Sqrt(sumOfSquaredWeights) - // finally tell all the downsteam searchers the norm + // finally tell all the downstream searchers the norm if s.mustSearcher != nil { s.mustSearcher.SetQueryNorm(s.queryNorm) } @@ -114,7 +114,7 @@ func (s *PhraseSearcher) Next() (*search.DocumentMatch, error) { for i := 0; i < len(s.terms); i++ { nextTerm := s.terms[i] if 
nextTerm != "" { - // look through all this terms locations + // look through all these term locations // to try and find the correct offsets nextLocations, ok := termLocMap[nextTerm] if ok { @@ -125,7 +125,7 @@ func (s *PhraseSearcher) Next() (*search.DocumentMatch, error) { continue INNER } } - // if we got here we didnt find location match for this term + // if we got here we didn't find a location match for this term continue OUTER } else { continue OUTER diff --git a/search/util.go b/search/util.go index 3dea69aa..6867eee7 100644 --- a/search/util.go +++ b/search/util.go @@ -30,7 +30,7 @@ func MergeLocations(locations []FieldTermLocationMap) FieldTermLocationMap { func MergeTermLocationMaps(rv, other TermLocationMap) TermLocationMap { for term, locationMap := range other { // for a given term/document there cannot be different locations - // if they can back from different clauses, overwrite is ok + // if they came back from different clauses, overwrite is ok rv[term] = locationMap } return rv diff --git a/test/integration_test.go b/test/integration_test.go index 861019fd..9e428b8c 100644 --- a/test/integration_test.go +++ b/test/integration_test.go @@ -80,7 +80,7 @@ func runTestDir(t *testing.T, dir string) { } defer index.Close() - //index data + // index data fis, err := ioutil.ReadDir(dir + string(filepath.Separator) + "data") if err != nil { t.Errorf("error reading data dir: %v", err) diff --git a/utils/bleve_query/main.go b/utils/bleve_query/main.go index 542a2e9f..e1421a0e 100644 --- a/utils/bleve_query/main.go +++ b/utils/bleve_query/main.go @@ -29,7 +29,7 @@ func main() { flag.Parse() if *indexPath == "" { - log.Fatal("specify index to query") + log.Fatal("Specify index to query") } if flag.NArg() < 1 {