0
0
Fork 0

change "simple" analyzer to use "letter" tokenizer

This change improves compatibility with the simple analyzer
defined by Lucene. It also has important implications for
some perf tests, as they often use the simple
analyzer.
This commit is contained in:
Marty Schoch 2016-03-31 15:13:17 -04:00
parent d774f980d3
commit 0b171c85da
4 changed files with 180 additions and 2 deletions

View File

@ -12,14 +12,14 @@ package simple_analyzer
import (
"github.com/blevesearch/bleve/analysis"
"github.com/blevesearch/bleve/analysis/token_filters/lower_case_filter"
"github.com/blevesearch/bleve/analysis/tokenizers/unicode"
"github.com/blevesearch/bleve/analysis/tokenizers/letter"
"github.com/blevesearch/bleve/registry"
)
const Name = "simple"
func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (*analysis.Analyzer, error) {
tokenizer, err := cache.TokenizerNamed(unicode.Name)
tokenizer, err := cache.TokenizerNamed(letter.Name)
if err != nil {
return nil, err
}

View File

@ -0,0 +1,71 @@
// Copyright (c) 2016 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package character
import (
"unicode/utf8"
"github.com/blevesearch/bleve/analysis"
)
type IsTokenRune func(r rune) bool
// CharacterTokenizer splits input into tokens made of maximal runs of
// consecutive runes accepted by its predicate; all other runes are
// treated as separators and discarded.
type CharacterTokenizer struct {
	// predicate deciding which runes belong to a token
	// NOTE(review): name drops the trailing 'e' of IsTokenRune — likely a
	// typo (isTokenRune intended); confirm before renaming, Tokenize reads it.
	isTokenRun IsTokenRune
}
// NewCharacterTokenizer returns a CharacterTokenizer that uses f to
// decide which runes are part of a token.
func NewCharacterTokenizer(f IsTokenRune) *CharacterTokenizer {
	return &CharacterTokenizer{isTokenRun: f}
}
// Tokenize splits input into a stream of tokens. Each token is a maximal
// run of consecutive runes for which the tokenizer's predicate returns
// true; every other rune acts as a separator. Start/End record the
// token's byte offsets into input and Position is 1-based. Token Terms
// alias the input slice (no copy is made).
func (c *CharacterTokenizer) Tokenize(input []byte) analysis.TokenStream {
	rv := make(analysis.TokenStream, 0, 1024)

	offset := 0
	start := 0
	end := 0
	count := 0
	// Iterate by byte offset rather than stopping on utf8.RuneError: the
	// previous loop condition (currRune != utf8.RuneError) terminated at
	// the first invalid UTF-8 byte AND at any legitimate U+FFFD in valid
	// input (DecodeRune reports RuneError for both), silently dropping
	// the remainder of the text. With offset < len(input), size is always
	// at least 1, so the loop cannot stall; invalid bytes simply reach
	// the predicate (which rejects them for e.g. unicode.IsLetter) and
	// behave as separators.
	for offset < len(input) {
		currRune, size := utf8.DecodeRune(input[offset:])
		if c.isTokenRun(currRune) {
			// extend the current token
			end = offset + size
		} else {
			if end-start > 0 {
				// build token
				rv = append(rv, &analysis.Token{
					Term:     input[start:end],
					Start:    start,
					End:      end,
					Position: count + 1,
					Type:     analysis.AlphaNumeric,
				})
				count++
			}
			// next token (if any) starts after this separator
			start = offset + size
			end = start
		}
		offset += size
	}
	// if we ended in the middle of a token, finish it
	if end-start > 0 {
		// build token
		rv = append(rv, &analysis.Token{
			Term:     input[start:end],
			Start:    start,
			End:      end,
			Position: count + 1,
			Type:     analysis.AlphaNumeric,
		})
	}
	return rv
}

View File

@ -0,0 +1,79 @@
// Copyright (c) 2016 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package character
import (
"reflect"
"testing"
"unicode"
"github.com/blevesearch/bleve/analysis"
)
// TestCharacterTokenizer exercises letter-run tokenization: non-letter
// runes (space, '.', '@') separate tokens, and each token's byte
// offsets and 1-based position must be reported correctly.
func TestCharacterTokenizer(t *testing.T) {
	cases := []struct {
		input []byte
		want  analysis.TokenStream
	}{
		{
			input: []byte("Hello World."),
			want: analysis.TokenStream{
				&analysis.Token{Term: []byte("Hello"), Start: 0, End: 5, Position: 1, Type: analysis.AlphaNumeric},
				&analysis.Token{Term: []byte("World"), Start: 6, End: 11, Position: 2, Type: analysis.AlphaNumeric},
			},
		},
		{
			input: []byte("dominique@mcdiabetes.com"),
			want: analysis.TokenStream{
				&analysis.Token{Term: []byte("dominique"), Start: 0, End: 9, Position: 1, Type: analysis.AlphaNumeric},
				&analysis.Token{Term: []byte("mcdiabetes"), Start: 10, End: 20, Position: 2, Type: analysis.AlphaNumeric},
				&analysis.Token{Term: []byte("com"), Start: 21, End: 24, Position: 3, Type: analysis.AlphaNumeric},
			},
		},
	}

	tokenizer := NewCharacterTokenizer(unicode.IsLetter)
	for _, tc := range cases {
		got := tokenizer.Tokenize(tc.input)
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("Expected %v, got %v for %s", tc.want, got, string(tc.input))
		}
	}
}

View File

@ -0,0 +1,28 @@
// Copyright (c) 2016 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package letter
import (
"unicode"
"github.com/blevesearch/bleve/analysis"
"github.com/blevesearch/bleve/analysis/tokenizers/character"
"github.com/blevesearch/bleve/registry"
)
const Name = "letter"
// TokenizerConstructor builds the "letter" tokenizer: a character
// tokenizer whose tokens are maximal runs of Unicode letters
// (unicode.IsLetter). The config map and registry cache are unused.
func TokenizerConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.Tokenizer, error) {
	tokenizer := character.NewCharacterTokenizer(unicode.IsLetter)
	return tokenizer, nil
}
// Register this tokenizer under Name ("letter") at package load so it is
// available by name through the bleve registry.
func init() {
	registry.RegisterTokenizer(Name, TokenizerConstructor)
}