
Fix typos in comments and strings

Silvan Jegen 2014-12-18 18:43:12 +01:00
parent fc33752c80
commit ef18dfe4cd
44 changed files with 100 additions and 102 deletions

View File

@ -27,7 +27,7 @@ func NewPorterStemmer() *PorterStemmer {
func (s *PorterStemmer) Filter(input analysis.TokenStream) analysis.TokenStream {
for _, token := range input {
// if not protected keyword, stem it
// if it is not a protected keyword, stem it
if !token.KeyWord {
stemmed := porterstemmer.StemString(string(token.Term))
token.Term = []byte(stemmed)

View File

@ -55,7 +55,7 @@ func (s *StemmerFilter) List() []string {
func (s *StemmerFilter) Filter(input analysis.TokenStream) analysis.TokenStream {
for _, token := range input {
// if not protected keyword, stem it
// if it is not a protected keyword, stem it
if !token.KeyWord {
stemmer := <-s.stemmerPool
stemmed := stemmer.Stem(string(token.Term))

View File

@ -18,7 +18,7 @@ import (
// the following tests come from the lucene
// test cases for CJK width filter
// which is our bases for using this
// which is our basis for using this
// as a substitute for that
func TestUnicodeNormalization(t *testing.T) {

View File

@ -39,7 +39,7 @@ func (t TokenMap) LoadBytes(data []byte) error {
t.LoadLine(line)
line, err = bufioReader.ReadString('\n')
}
// if the err was EOF still need to process last value
// if the err was EOF we still need to process the last value
if err == io.EOF {
t.LoadLine(line)
return nil
@ -48,7 +48,7 @@ func (t TokenMap) LoadBytes(data []byte) error {
}
func (t TokenMap) LoadLine(line string) error {
// find the start of comment, if any
// find the start of a comment, if any
startComment := strings.IndexAny(line, "#|")
if startComment >= 0 {
line = line[:startComment]

View File

@ -21,13 +21,13 @@ const Name = "custom"
func GenericTokenMapConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenMap, error) {
rv := analysis.NewTokenMap()
// first try to load by filename
// first: try to load by filename
filename, ok := config["filename"].(string)
if ok {
err := rv.LoadFile(filename)
return rv, err
}
// next look for an inline word list
// next: look for an inline word list
tokens, ok := config["tokens"].([]interface{})
if ok {
for _, token := range tokens {

View File

@ -70,7 +70,7 @@ func (i *Batch) Execute() error {
return i.store.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(i.store.bucket))
// first processed the merges
// first process the merges
for k, mc := range i.merges {
val := b.Get([]byte(k))
var err error

View File

@ -84,7 +84,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) {
t.Fatalf("valid false, expected true")
}
if string(key) != "b" {
t.Fatalf("exepcted key b, got %s", key)
t.Fatalf("expected key b, got %s", key)
}
if string(val) != "val-b" {
t.Fatalf("expected value val-b, got %s", val)
@ -96,7 +96,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) {
t.Fatalf("valid false, expected true")
}
if string(key) != "c" {
t.Fatalf("exepcted key c, got %s", key)
t.Fatalf("expected key c, got %s", key)
}
if string(val) != "val-c" {
t.Fatalf("expected value val-c, got %s", val)
@ -108,7 +108,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) {
t.Fatalf("valid false, expected true")
}
if string(key) != "i" {
t.Fatalf("exepcted key i, got %s", key)
t.Fatalf("expected key i, got %s", key)
}
if string(val) != "val-i" {
t.Fatalf("expected value val-i, got %s", val)
@ -129,14 +129,14 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) {
}
writer.Close()
// create an isoalted reader
// create an isolated reader
reader, err := s.Reader()
if err != nil {
t.Error(err)
}
defer reader.Close()
// verify we see the value already inserted
// verify that we see the value already inserted
val, err := reader.Get([]byte("a"))
if err != nil {
t.Error(err)
@ -182,7 +182,7 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) {
t.Errorf("expected val-b, got nil")
}
// ensure director iterator sees it
// ensure that the director iterator sees it
count = 0
it = newReader.Iterator([]byte{0})
defer it.Close()

View File

@ -69,7 +69,7 @@ func (b *Batch) Execute() error {
defer b.store.writer.Unlock()
}
// first processed the merges
// first process the merges
for k, mc := range b.merges {
val, err := b.store.get([]byte(k))
if err != nil {

View File

@ -60,7 +60,7 @@ func TestRollbackSameHandle(t *testing.T) {
t.Fatal(err)
}
// create 2 docs a and b
// create 2 docs, a and b
err = writer.Set([]byte("a"), []byte("val-a"))
if err != nil {
t.Error(err)
@ -124,7 +124,7 @@ func TestRollbackSameHandle(t *testing.T) {
}
// TestRollbackNewHandle tries to rollback the
// database, then open a new handle, and ensure
// database, then opens a new handle, and ensures
// that the rollback is reflected there as well
func TestRollbackNewHandle(t *testing.T) {
defer os.RemoveAll("test")
@ -140,7 +140,7 @@ func TestRollbackNewHandle(t *testing.T) {
t.Fatal(err)
}
// create 2 docs a and b
// create 2 docs, a and b
err = writer.Set([]byte("a"), []byte("val-a"))
if err != nil {
t.Error(err)
@ -211,7 +211,7 @@ func TestRollbackNewHandle(t *testing.T) {
}
// TestRollbackOtherHandle tries to create 2 handles
// at the begining, then rollback one of them
// at the beginning, then rollback one of them
// and ensure it affects the other
func TestRollbackOtherHandle(t *testing.T) {
defer os.RemoveAll("test")
@ -234,7 +234,7 @@ func TestRollbackOtherHandle(t *testing.T) {
t.Fatal(err)
}
// create 2 docs a and b
// create 2 docs, a and b
err = writer.Set([]byte("a"), []byte("val-a"))
if err != nil {
t.Error(err)
@ -344,7 +344,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) {
t.Fatalf("valid false, expected true")
}
if string(key) != "b" {
t.Fatalf("exepcted key b, got %s", key)
t.Fatalf("expected key b, got %s", key)
}
if string(val) != "val-b" {
t.Fatalf("expected value val-b, got %s", val)
@ -356,7 +356,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) {
t.Fatalf("valid false, expected true")
}
if string(key) != "c" {
t.Fatalf("exepcted key c, got %s", key)
t.Fatalf("expected key c, got %s", key)
}
if string(val) != "val-c" {
t.Fatalf("expected value val-c, got %s", val)
@ -368,7 +368,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) {
t.Fatalf("valid false, expected true")
}
if string(key) != "i" {
t.Fatalf("exepcted key i, got %s", key)
t.Fatalf("expected key i, got %s", key)
}
if string(val) != "val-i" {
t.Fatalf("expected value val-i, got %s", val)
@ -389,7 +389,7 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) {
}
writer.Close()
// create an isoalted reader
// create an isolated reader
reader, err := s.Reader()
if err != nil {
t.Error(err)
@ -442,7 +442,7 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) {
t.Errorf("expected val-b, got nil")
}
// ensure director iterator sees it
// ensure that the director iterator sees it
count = 0
it = newReader.Iterator([]byte{0})
defer it.Close()

View File

@ -67,7 +67,7 @@ func (i *Batch) Execute() error {
defer i.store.writer.Unlock()
}
// first processed the merges
// first process the merges
for k, mc := range i.merges {
val, err := i.store.get([]byte(k))
if err != nil {

View File

@ -73,7 +73,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) {
t.Fatalf("valid false, expected true")
}
if string(key) != "b" {
t.Fatalf("exepcted key b, got %s", key)
t.Fatalf("expected key b, got %s", key)
}
if string(val) != "val-b" {
t.Fatalf("expected value val-b, got %s", val)
@ -85,7 +85,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) {
t.Fatalf("valid false, expected true")
}
if string(key) != "c" {
t.Fatalf("exepcted key c, got %s", key)
t.Fatalf("expected key c, got %s", key)
}
if string(val) != "val-c" {
t.Fatalf("expected value val-c, got %s", val)
@ -97,7 +97,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) {
t.Fatalf("valid false, expected true")
}
if string(key) != "i" {
t.Fatalf("exepcted key i, got %s", key)
t.Fatalf("expected key i, got %s", key)
}
if string(val) != "val-i" {
t.Fatalf("expected value val-i, got %s", val)
@ -118,14 +118,14 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) {
}
writer.Close()
// create an isoalted reader
// create an isolated reader
reader, err := s.Reader()
if err != nil {
t.Error(err)
}
defer reader.Close()
// verify we see the value already inserted
// verify that we see the value already inserted
val, err := reader.Get([]byte("a"))
if err != nil {
t.Error(err)
@ -171,7 +171,7 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) {
t.Errorf("expected val-b, got nil")
}
// ensure director iterator sees it
// ensure that the director iterator sees it
count = 0
it = newReader.Iterator([]byte{0})
defer it.Close()

View File

@ -73,7 +73,7 @@ func (ldb *Batch) Execute() error {
batch := levigo.NewWriteBatch()
defer batch.Close()
// first processed the merges
// first process the merges
for k, mc := range ldb.merges {
val, err := ldb.store.get([]byte(k))
if err != nil {

View File

@ -90,7 +90,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) {
t.Fatalf("valid false, expected true")
}
if string(key) != "b" {
t.Fatalf("exepcted key b, got %s", key)
t.Fatalf("expected key b, got %s", key)
}
if string(val) != "val-b" {
t.Fatalf("expected value val-b, got %s", val)
@ -102,7 +102,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) {
t.Fatalf("valid false, expected true")
}
if string(key) != "c" {
t.Fatalf("exepcted key c, got %s", key)
t.Fatalf("expected key c, got %s", key)
}
if string(val) != "val-c" {
t.Fatalf("expected value val-c, got %s", val)
@ -114,7 +114,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) {
t.Fatalf("valid false, expected true")
}
if string(key) != "i" {
t.Fatalf("exepcted key i, got %s", key)
t.Fatalf("expected key i, got %s", key)
}
if string(val) != "val-i" {
t.Fatalf("expected value val-i, got %s", val)
@ -135,14 +135,14 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) {
}
writer.Close()
// create an isoalted reader
// create an isolated reader
reader, err := s.Reader()
if err != nil {
t.Error(err)
}
defer reader.Close()
// verify we see the value already inserted
// verify that we see the value already inserted
val, err := reader.Get([]byte("a"))
if err != nil {
t.Error(err)
@ -188,7 +188,7 @@ func CommonTestReaderIsolation(t *testing.T, s store.KVStore) {
t.Errorf("expected val-b, got nil")
}
// ensure director iterator sees it
// ensure that the director iterator sees it
count = 0
it = newReader.Iterator([]byte{0})
defer it.Close()

View File

@ -64,7 +64,7 @@ func CommonTestKVStore(t *testing.T, s store.KVStore) {
t.Fatalf("valid true, expected false")
}
if key != nil {
t.Fatalf("exepcted key nil, got %s", key)
t.Fatalf("expected key nil, got %s", key)
}
if val != nil {
t.Fatalf("expected value nil, got %s", val)

View File

@ -18,7 +18,7 @@ import (
// the functions in this file are only intended to be used by
// the bleve_dump utility and the debug http handlers
// if your application relies on the, you're doing something wrong
// if your application relies on them, you're doing something wrong
// they may change or be removed at any time
func (udc *UpsideDownCouch) dumpPrefix(kvreader store.KVReader, rv chan interface{}, prefix []byte) {

View File

@ -80,7 +80,7 @@ func incrementBytes(in []byte) []byte {
for i := len(rv) - 1; i >= 0; i-- {
rv[i] = rv[i] + 1
if rv[i] != 0 {
// didnt' overflow, so stop
// didn't overflow, so stop
break
}
}

View File

@ -55,7 +55,7 @@ func TestIndexReader(t *testing.T) {
}
defer indexReader.Close()
// first look for a term that doesnt exist
// first look for a term that doesn't exist
reader, err := indexReader.TermFieldReader([]byte("nope"), "name")
if err != nil {
t.Errorf("Error accessing term field reader: %v", err)

View File

@ -154,7 +154,7 @@ func TestInvalidRows(t *testing.T) {
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator},
[]byte{},
},
// type t, invalid val (misisng freq)
// type t, invalid val (missing freq)
{
[]byte{'t', 0, 0, 'b', 'e', 'e', 'r', ByteSeparator, 'b', 'u', 'd', 'w', 'e', 'i', 's', 'e', 'r'},
[]byte{},

View File

@ -571,7 +571,7 @@ func (udc *UpsideDownCouch) Batch(batch *index.Batch) error {
for docID, doc := range batch.IndexOps {
backIndexRow := backIndexRows[docID]
if doc == nil && backIndexRow != nil {
//delete
// delete
deleteRows = udc.deleteSingle(docID, backIndexRow, deleteRows)
docsDeleted++
} else if doc != nil {

View File

@ -47,7 +47,7 @@ func TestIndexOpenReopen(t *testing.T) {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
// opening database should have inserted version
// opening the database should have inserted a version
expectedLength := uint64(1)
rowCount := idx.rowCount()
if rowCount != expectedLength {
@ -108,7 +108,7 @@ func TestIndexInsert(t *testing.T) {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
// should have 4 rows (1 for version, 1 for schema field, and 1 for single term, and 1 for the term count, and 1 for the back index entry)
// should have 4 rows (1 for version, 1 for schema field, and 1 for single term, and 1 for the term count, and 1 for the back index entry)
expectedLength := uint64(1 + 1 + 1 + 1 + 1)
rowCount := idx.rowCount()
if rowCount != expectedLength {
@ -189,7 +189,7 @@ func TestIndexInsertThenDelete(t *testing.T) {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
// should have 2 row (1 for version, 1 for schema field)
// should have 2 rows (1 for version, 1 for schema field)
expectedLength := uint64(1 + 1)
rowCount := idx.rowCount()
if rowCount != expectedLength {
@ -224,14 +224,14 @@ func TestIndexInsertThenUpdate(t *testing.T) {
t.Errorf("Error deleting entry from index: %v", err)
}
// should have 2 row (1 for version, 1 for schema field, and 2 for the two term, and 2 for the term counts, and 1 for the back index entry)
// should have 2 rows (1 for version, 1 for schema field, and 2 for the two term, and 2 for the term counts, and 1 for the back index entry)
expectedLength := uint64(1 + 1 + 2 + 2 + 1)
rowCount := idx.rowCount()
if rowCount != expectedLength {
t.Errorf("expected %d rows, got: %d", expectedLength, rowCount)
}
// now do another update that should remove one of term
// now do another update that should remove one of the terms
doc = document.NewDocument("1")
doc.AddField(document.NewTextField("name", []uint64{}, []byte("fail")))
err = idx.Update(doc)
@ -239,7 +239,7 @@ func TestIndexInsertThenUpdate(t *testing.T) {
t.Errorf("Error deleting entry from index: %v", err)
}
// should have 2 row (1 for version, 1 for schema field, and 1 for the remaining term, and 1 for the term count, and 1 for the back index entry)
// should have 2 rows (1 for version, 1 for schema field, and 1 for the remaining term, and 1 for the term count, and 1 for the back index entry)
expectedLength = uint64(1 + 1 + 1 + 1 + 1)
rowCount = idx.rowCount()
if rowCount != expectedLength {
@ -276,14 +276,14 @@ func TestIndexInsertMultiple(t *testing.T) {
}
expectedCount++
// should have 4 rows (1 for version, 1 for schema field, and 2 for single term, and 1 for the term count, and 2 for the back index entries)
// should have 4 rows (1 for version, 1 for schema field, and 2 for single term, and 1 for the term count, and 2 for the back index entries)
expectedLength := uint64(1 + 1 + 2 + 1 + 2)
rowCount := idx.rowCount()
if rowCount != expectedLength {
t.Errorf("expected %d rows, got: %d", expectedLength, rowCount)
}
// close and reopen and and one more to testing counting works correctly
// close, reopen and add one more to test that counting works correctly
idx.Close()
store, err = boltdb.Open("test", "bleve")
idx = NewUpsideDownCouch(store, analysisQueue)
@ -350,7 +350,7 @@ func TestIndexInsertWithStore(t *testing.T) {
t.Errorf("Expected document count to be %d got %d", expectedCount, docCount)
}
// should have 6 rows (1 for version, 1 for schema field, and 1 for single term, and 1 for the stored field and 1 for the term count, and 1 for the back index entry)
// should have 6 rows (1 for version, 1 for schema field, and 1 for single term, and 1 for the stored field and 1 for the term count, and 1 for the back index entry)
expectedLength := uint64(1 + 1 + 1 + 1 + 1 + 1)
rowCount := idx.rowCount()
if rowCount != expectedLength {
@ -398,7 +398,7 @@ func TestIndexInternalCRUD(t *testing.T) {
}
defer indexReader.Close()
// get something that doesnt exist yet
// get something that doesn't exist yet
val, err := indexReader.GetInternal([]byte("key"))
if err != nil {
t.Error(err)

View File

@ -15,7 +15,7 @@ package bleve
// 1. When it points to a single index, ALL index
// operations are valid and will be passed through
// to the underlying index.
// 2. When it points to more than index, the only
// 2. When it points to more than one index, the only
// valid operation is Search. In this case the
// search will be performed across all the
// underlying indexes and the results merged.

View File

@ -65,7 +65,7 @@ func newMemIndex(mapping *IndexMapping) (*indexImpl, error) {
return nil, err
}
// open open the index
// open the index
rv.i = upside_down.NewUpsideDownCouch(rv.s, Config.analysisQueue)
err = rv.i.Open()
if err != nil {
@ -111,7 +111,7 @@ func newIndex(path string, mapping *IndexMapping) (*indexImpl, error) {
if storeConstructor == nil {
return nil, ErrorUnknownStorageType
}
// at this point there hope we can be successful, so save index meta
// at this point there is hope that we can be successful, so save index meta
err = rv.meta.Save(path)
if err != nil {
return nil, err
@ -128,7 +128,7 @@ func newIndex(path string, mapping *IndexMapping) (*indexImpl, error) {
return nil, err
}
// open open the index
// open the index
rv.i = upside_down.NewUpsideDownCouch(rv.s, Config.analysisQueue)
err = rv.i.Open()
if err != nil {
@ -182,7 +182,7 @@ func openIndex(path string) (*indexImpl, error) {
return nil, err
}
// open open the index
// open the index
rv.i = upside_down.NewUpsideDownCouch(rv.s, Config.analysisQueue)
err = rv.i.Open()
if err != nil {
@ -501,7 +501,7 @@ func (i *indexImpl) Fields() ([]string, error) {
// DumpAll writes all index rows to a channel.
// INTERNAL: do not rely on this function, it is
// only intended to be used by the debug utilties
// only intended to be used by the debug utilities
func (i *indexImpl) DumpAll() chan interface{} {
i.mutex.RLock()
defer i.mutex.RUnlock()
@ -516,7 +516,7 @@ func (i *indexImpl) DumpAll() chan interface{} {
// DumpFields writes all field rows in the index
// to a channel.
// INTERNAL: do not rely on this function, it is
// only intended to be used by the debug utilties
// only intended to be used by the debug utilities
func (i *indexImpl) DumpFields() chan interface{} {
i.mutex.RLock()
defer i.mutex.RUnlock()
@ -530,7 +530,7 @@ func (i *indexImpl) DumpFields() chan interface{} {
// DumpDoc writes all rows in the index associated
// with the specified identifier to a channel.
// INTERNAL: do not rely on this function, it is
// only intended to be used by the debug utilties
// only intended to be used by the debug utilities
func (i *indexImpl) DumpDoc(id string) chan interface{} {
i.mutex.RLock()
defer i.mutex.RUnlock()

View File

@ -18,7 +18,7 @@ func TestIndexMeta(t *testing.T) {
var testIndexPath = "doesnotexit.bleve"
defer os.RemoveAll(testIndexPath)
// open non-existant meta should error
// open non-existent meta should give an error
_, err := openIndexMeta(testIndexPath)
if err == nil {
t.Errorf("expected error, got nil")

View File

@ -218,7 +218,7 @@ func TestIndexOpenMetaMissingOrCorrupt(t *testing.T) {
t.Fatalf("expected error index metadata corrupted, got %v", err)
}
// no intentionally remove the metadata
// now intentionally remove the metadata
os.Remove("testidx/index_meta.json")
index, err = Open("testidx")

View File

@ -23,7 +23,7 @@ import (
// As documents can be hierarchical, named sub-sections
// of documents are mapped using the same structure in
// the Properties field.
// Each value inside a document can be index 0 or more
// Each value inside a document can be indexed 0 or more
// ways. These index entries are called fields and
// are stored in the Fields field.
// Entire sections of a document can be ignored or
@ -261,7 +261,7 @@ func (dm *DocumentMapping) processProperty(property interface{}, path []string,
// look to see if there is a mapping for this field
subDocMapping := dm.documentMappingForPath(pathString)
// check tos see if we even need to do further processing
// check to see if we even need to do further processing
if subDocMapping != nil && !subDocMapping.Enabled {
return
}

View File

@ -86,12 +86,12 @@ func newCustomAnalysis() *customAnalysis {
return &rv
}
// An IndexMapping controls how objects are place
// An IndexMapping controls how objects are placed
// into an index.
// First the type of the object is deteremined.
// Once the type is know, the appropriate/
// First the type of the object is determined.
// Once the type is know, the appropriate
// DocumentMapping is selected by the type.
// If no mapping was described for that type,
// If no mapping was determined for that type,
// a DefaultMapping will be used.
type IndexMapping struct {
TypeMapping map[string]*DocumentMapping `json:"types,omitempty"`
@ -106,7 +106,7 @@ type IndexMapping struct {
cache *registry.Cache
}
// AddCustomCharFilter defines a custom char fitler for use in this mapping
// AddCustomCharFilter defines a custom char filter for use in this mapping
func (im *IndexMapping) AddCustomCharFilter(name string, config map[string]interface{}) error {
_, err := im.cache.DefineCharFilter(name, config)
if err != nil {
@ -184,8 +184,6 @@ func NewIndexMapping() *IndexMapping {
// Validate will walk the entire structure ensuring the following
// explicitly named and default analyzers can be built
// explicitly named and default date parsers can be built
// field type names are valid
func (im *IndexMapping) validate() error {
_, err := im.cache.AnalyzerNamed(im.DefaultAnalyzer)
if err != nil {
@ -318,7 +316,7 @@ func (im *IndexMapping) determineType(data interface{}) string {
return classifier.Type()
}
// now see if we can find type using the mapping
// now see if we can find a type using the mapping
typ, ok := mustString(lookupPropertyPath(data, im.TypeField))
if ok {
return typ
@ -328,7 +326,7 @@ func (im *IndexMapping) determineType(data interface{}) string {
}
func (im *IndexMapping) mapDocument(doc *document.Document, data interface{}) error {
// see if the top level object is a byte array, and possibly run through conveter
// see if the top level object is a byte array, and possibly run through a converter
byteArrayData, ok := data.([]byte)
if ok {
byteArrayConverterConstructor := registry.ByteArrayConverterByName(im.ByteArrayConverter)

View File

@ -14,7 +14,7 @@ import (
)
// test that the float/sortable int operations work both ways
// and that the the corresponding integers sort the same as
// and that the corresponding integers sort the same as
// the original floats would have
func TestSortabledFloat64ToInt64(t *testing.T) {
tests := []struct {

View File

@ -15,7 +15,7 @@ import (
)
// these array encoding values have been verified manually
// against the lucene imlementation
// against the lucene implementation
func TestPrefixCoded(t *testing.T) {
tests := []struct {
input int64

View File

@ -27,9 +27,9 @@ type booleanQuery struct {
// NewBooleanQuery creates a compound Query composed
// of several other Query objects.
// Result documents must satisify ALL of the
// Result documents must satisfy ALL of the
// must Queries.
// Result documents must satsify NONE of the must not
// Result documents must satisfy NONE of the must not
// Queries.
// If there are any should queries, result documents
// must satisfy at least one of them.

View File

@ -31,7 +31,7 @@ type dateRangeQuery struct {
// NewDateRangeQuery creates a new Query for ranges
// of date values.
// A DateTimeParser is chosed based on the field.
// A DateTimeParser is chosen based on the field.
// Either, but not both endpoints can be nil.
func NewDateRangeQuery(start, end *string) *dateRangeQuery {
return NewDateRangeInclusiveQuery(start, end, nil, nil)
@ -39,7 +39,7 @@ func NewDateRangeQuery(start, end *string) *dateRangeQuery {
// NewDateRangeInclusiveQuery creates a new Query for ranges
// of date values.
// A DateTimeParser is chosed based on the field.
// A DateTimeParser is chosen based on the field.
// Either, but not both endpoints can be nil.
// startInclusive and endInclusive control inclusion of the endpoints.
func NewDateRangeInclusiveQuery(start, end *string, startInclusive, endInclusive *bool) *dateRangeQuery {

View File

@ -27,7 +27,7 @@ type matchQuery struct {
}
// NewMatchQuery creates a Query for matching text.
// An Analyzer is chosed based on the field.
// An Analyzer is chosen based on the field.
// Input text is analyzed using this analyzer.
// Token terms resulting from this analysis are
// used to perform term searches. Result documents

View File

@ -26,7 +26,7 @@ type matchPhraseQuery struct {
// NewMatchPhraseQuery creates a new Query object
// for matching phrases in the index.
// An Analyzer is chosed based on the field.
// An Analyzer is chosen based on the field.
// Input text is analyzed using this analyzer.
// Token terms resulting from this analysis are
// used to build a search phrase. Result documents

View File

@ -70,7 +70,7 @@ func (dr *dateTimeRange) UnmarshalJSON(input []byte) error {
return nil
}
// A FacetRequest describes an facet or aggregation
// A FacetRequest describes a facet or aggregation
// of the result document set you would like to be
// built.
type FacetRequest struct {
@ -151,7 +151,7 @@ func (h *HighlightRequest) AddField(field string) {
// result set to return.
// Highlight describes optional search result
// highlighting.
// Fields desribed a list of field values whcih
// Fields describes a list of field values which
// should be retrieved for result documents.
// Facets describe the set of facets to be computed.
// Explain triggers inclusion of additional search

View File

@ -23,7 +23,7 @@ type Explanation struct {
func (expl *Explanation) String() string {
js, err := json.MarshalIndent(expl, "", " ")
if err != nil {
return fmt.Sprintf("error serializing explation to json: %v", err)
return fmt.Sprintf("error serializing explanation to json: %v", err)
}
return string(js)
}

View File

@ -33,7 +33,7 @@ func (s *Fragmenter) Fragment(orig []byte, ot highlight.TermLocations) []*highli
maxbegin := 0
for currTermIndex, termLocation := range ot {
// start with with this
// start with this
// it should be the highest scoring fragment with this term first
start := termLocation.Start
end := start + s.fragmentSize

View File

@ -146,7 +146,7 @@ type FragmentQueue []*highlight.Fragment
func (fq FragmentQueue) Len() int { return len(fq) }
func (fq FragmentQueue) Less(i, j int) bool {
// We want Pop to give us the highest, not lowest, priority so we use greater than here.
// We want Pop to give us the highest, not lowest, priority so we use greater-than here.
return fq[i].Score > fq[j].Score
}

View File

@ -57,7 +57,7 @@ func (s *BooleanSearcher) computeQueryNorm() {
// now compute query norm from this
s.queryNorm = 1.0 / math.Sqrt(sumOfSquaredWeights)
// finally tell all the downsteam searchers the norm
// finally tell all the downstream searchers the norm
if s.mustSearcher != nil {
s.mustSearcher.SetQueryNorm(s.queryNorm)
}

View File

@ -30,7 +30,7 @@ type ConjunctionSearcher struct {
}
func NewConjunctionSearcher(indexReader index.IndexReader, qsearchers []search.Searcher, explain bool) (*ConjunctionSearcher, error) {
// build the downstream searchres
// build the downstream searchers
searchers := make(OrderedSearcherList, len(qsearchers))
for i, searcher := range qsearchers {
searchers[i] = searcher
@ -57,7 +57,7 @@ func (s *ConjunctionSearcher) computeQueryNorm() {
}
// now compute query norm from this
s.queryNorm = 1.0 / math.Sqrt(sumOfSquaredWeights)
// finally tell all the downsteam searchers the norm
// finally tell all the downstream searchers the norm
for _, termSearcher := range s.searchers {
termSearcher.SetQueryNorm(s.queryNorm)
}
@ -145,7 +145,7 @@ OUTER:
} else {
s.currentID = s.currs[0].ID
}
// don't continue now, wait for next call the Next()
// don't continue now, wait for the next call to Next()
break
}
return rv, nil

View File

@ -30,7 +30,7 @@ type DisjunctionSearcher struct {
}
func NewDisjunctionSearcher(indexReader index.IndexReader, qsearchers []search.Searcher, min float64, explain bool) (*DisjunctionSearcher, error) {
// build the downstream searchres
// build the downstream searchers
searchers := make(OrderedSearcherList, len(qsearchers))
for i, searcher := range qsearchers {
searchers[i] = searcher
@ -57,7 +57,7 @@ func (s *DisjunctionSearcher) computeQueryNorm() {
}
// now compute query norm from this
s.queryNorm = 1.0 / math.Sqrt(sumOfSquaredWeights)
// finally tell all the downsteam searchers the norm
// finally tell all the downstream searchers the norm
for _, termSearcher := range s.searchers {
termSearcher.SetQueryNorm(s.queryNorm)
}

View File

@ -54,7 +54,7 @@ func NewNumericRangeSearcher(indexReader index.IndexReader, min *float64, max *f
if !*inclusiveMax && maxInt64 != math.MinInt64 {
maxInt64--
}
// FIXME hard-coded precion, should match field declaration
// FIXME hard-coded precision, should match field declaration
termRanges := splitInt64Range(minInt64, maxInt64, 4)
terms := termRanges.Enumerate()
// enumerate all the terms in the range

View File

@ -46,7 +46,7 @@ func (s *PhraseSearcher) computeQueryNorm() {
// now compute query norm from this
s.queryNorm = 1.0 / math.Sqrt(sumOfSquaredWeights)
// finally tell all the downsteam searchers the norm
// finally tell all the downstream searchers the norm
if s.mustSearcher != nil {
s.mustSearcher.SetQueryNorm(s.queryNorm)
}
@ -114,7 +114,7 @@ func (s *PhraseSearcher) Next() (*search.DocumentMatch, error) {
for i := 0; i < len(s.terms); i++ {
nextTerm := s.terms[i]
if nextTerm != "" {
// look through all this terms locations
// look through all these term locations
// to try and find the correct offsets
nextLocations, ok := termLocMap[nextTerm]
if ok {
@ -125,7 +125,7 @@ func (s *PhraseSearcher) Next() (*search.DocumentMatch, error) {
continue INNER
}
}
// if we got here we didnt find location match for this term
// if we got here we didn't find a location match for this term
continue OUTER
} else {
continue OUTER

View File

@ -30,7 +30,7 @@ func MergeLocations(locations []FieldTermLocationMap) FieldTermLocationMap {
func MergeTermLocationMaps(rv, other TermLocationMap) TermLocationMap {
for term, locationMap := range other {
// for a given term/document there cannot be different locations
// if they can back from different clauses, overwrite is ok
// if they came back from different clauses, overwrite is ok
rv[term] = locationMap
}
return rv

View File

@ -80,7 +80,7 @@ func runTestDir(t *testing.T, dir string) {
}
defer index.Close()
//index data
// index data
fis, err := ioutil.ReadDir(dir + string(filepath.Separator) + "data")
if err != nil {
t.Errorf("error reading data dir: %v", err)

View File

@ -29,7 +29,7 @@ func main() {
flag.Parse()
if *indexPath == "" {
log.Fatal("specify index to query")
log.Fatal("Specify index to query")
}
if flag.NArg() < 1 {