init. simple camel case parser.
This commit is contained in:
parent
92cf2a8974
commit
3f2701a97c
|
@ -0,0 +1,62 @@
|
|||
package camelcase_filter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/blevesearch/bleve/analysis"
|
||||
"github.com/blevesearch/bleve/registry"
|
||||
)
|
||||
|
||||
// Name is the registry name under which this token filter is registered.
const Name = "camelCase"
|
||||
|
||||
// CamelCaseFilter splits a given token into a set of tokens where each resulting token
// falls into one of the following classes:
// 1) Upper case followed by lower case letters.
// Terminated by a number, an upper case letter, and a non alpha-numeric symbol.
// 2) Upper case followed by upper case letters.
// Terminated by a number, an upper case followed by a lower case letter, and a non alpha-numeric symbol.
// 3) Lower case followed by lower case letters.
// Terminated by a number, an upper case letter, and a non alpha-numeric symbol.
// 4) Number followed by numbers.
// Terminated by a letter, and a non alpha-numeric symbol.
// 5) Non alpha-numeric symbol followed by non alpha-numeric symbols.
// Terminated by a number, and a letter.
//
// It does a one-time sequential pass over an input token, from left to right.
// The scan is greedy and generates the longest substring that fits into one of the classes.
//
// See the test file for examples of classes and their parsings.
type CamelCaseFilter struct{}
|
||||
|
||||
func NewCamelCaseFilter() *CamelCaseFilter {
|
||||
return &CamelCaseFilter{}
|
||||
}
|
||||
|
||||
func (f *CamelCaseFilter) Filter(input analysis.TokenStream) analysis.TokenStream {
|
||||
rv := make(analysis.TokenStream, 0, len(input))
|
||||
|
||||
for _, token := range input {
|
||||
runeCount := utf8.RuneCount(token.Term)
|
||||
runes := bytes.Runes(token.Term)
|
||||
|
||||
p := NewParser(runeCount)
|
||||
for i := 0; i < runeCount; i++ {
|
||||
if i+1 >= runeCount {
|
||||
p.Push(runes[i], nil)
|
||||
} else {
|
||||
p.Push(runes[i], &runes[i+1])
|
||||
}
|
||||
}
|
||||
rv = append(rv, p.FlushTokens()...)
|
||||
}
|
||||
return rv
|
||||
}
|
||||
|
||||
func CamelCaseFilterConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenFilter, error) {
|
||||
return NewCamelCaseFilter(), nil
|
||||
}
|
||||
|
||||
// register the filter constructor under Name so analyzers can reference it
// by name in their configuration.
func init() {
	registry.RegisterTokenFilter(Name, CamelCaseFilterConstructor)
}
|
|
@ -0,0 +1,197 @@
|
|||
package camelcase_filter
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/blevesearch/bleve/analysis"
|
||||
)
|
||||
|
||||
func TestCamelCaseFilter(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
input analysis.TokenStream
|
||||
output analysis.TokenStream
|
||||
}{
|
||||
{
|
||||
input: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte(""),
|
||||
},
|
||||
},
|
||||
output: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte(""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("a"),
|
||||
},
|
||||
},
|
||||
output: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("a"),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
input: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("...aMACMac123macILoveGolang"),
|
||||
},
|
||||
},
|
||||
output: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("..."),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("a"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("MAC"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("Mac"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("123"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("mac"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("I"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("Love"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("Golang"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("Lang"),
|
||||
},
|
||||
},
|
||||
output: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("Lang"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("GLang"),
|
||||
},
|
||||
},
|
||||
output: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("G"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("Lang"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("GOLang"),
|
||||
},
|
||||
},
|
||||
output: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("GO"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("Lang"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("GOOLang"),
|
||||
},
|
||||
},
|
||||
output: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("GOO"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("Lang"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("1234"),
|
||||
},
|
||||
},
|
||||
output: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("1234"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("starbucks"),
|
||||
},
|
||||
},
|
||||
output: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("starbucks"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("Starbucks TVSamsungIsGREAT000"),
|
||||
},
|
||||
},
|
||||
output: analysis.TokenStream{
|
||||
&analysis.Token{
|
||||
Term: []byte("Starbucks"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte(" "),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("TV"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("Samsung"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("Is"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("GREAT"),
|
||||
},
|
||||
&analysis.Token{
|
||||
Term: []byte("000"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
ccFilter := NewCamelCaseFilter()
|
||||
actual := ccFilter.Filter(test.input)
|
||||
if !reflect.DeepEqual(actual, test.output) {
|
||||
t.Errorf("expected %s \n\n got %s", test.output, actual)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,93 @@
|
|||
package camelcase_filter
|
||||
|
||||
import (
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/blevesearch/bleve/analysis"
|
||||
)
|
||||
|
||||
func buildTokenFromTerm(buffer []rune) *analysis.Token {
|
||||
return &analysis.Token{
|
||||
Term: buildTermFromRunes(buffer),
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Lifted from ngram_filter. Expose as public and re-use?

// buildTermFromRunes UTF-8 encodes the runes into a byte slice.
// It encodes through a single stack scratch array instead of allocating a
// fresh slice per rune, which the original did on every loop iteration.
func buildTermFromRunes(runes []rune) []byte {
	rv := make([]byte, 0, len(runes)*utf8.UTFMax)
	var scratch [utf8.UTFMax]byte
	for _, r := range runes {
		n := utf8.EncodeRune(scratch[:], r)
		rv = append(rv, scratch[:n]...)
	}
	return rv
}
|
||||
|
||||
// Parser accepts a symbol and passes it to the current state (representing a class).
// The state can accept it (and accumulate it). Otherwise, the parser creates a new state that
// starts with the pushed symbol.
//
// Parser accumulates a new resulting token every time it switches state.
// Use FlushTokens() to get the results after the last symbol was pushed.
type Parser struct {
	bufferLen int               // capacity used when a fresh rune buffer is allocated
	buffer    []rune            // runes collected for the token currently being built
	current   State             // class being accumulated; nil before the first Push
	tokens    []*analysis.Token // tokens completed so far
}
|
||||
|
||||
func NewParser(len int) *Parser {
|
||||
return &Parser{
|
||||
bufferLen: len,
|
||||
buffer: make([]rune, 0, len),
|
||||
tokens: make([]*analysis.Token, 0, len),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Parser) Push(sym rune, peek *rune) {
|
||||
if p.current == nil {
|
||||
// the start of parsing
|
||||
p.current = p.NewState(sym)
|
||||
p.buffer = append(p.buffer, sym)
|
||||
|
||||
} else if p.current.Member(sym, peek) {
|
||||
// same state, just accumulate
|
||||
p.buffer = append(p.buffer, sym)
|
||||
|
||||
} else {
|
||||
// the old state is no more, thus convert the buffer
|
||||
p.tokens = append(p.tokens, buildTokenFromTerm(p.buffer))
|
||||
|
||||
// let the new state begin
|
||||
p.current = p.NewState(sym)
|
||||
p.buffer = make([]rune, 0, p.bufferLen)
|
||||
p.buffer = append(p.buffer, sym)
|
||||
}
|
||||
}
|
||||
|
||||
// Note. States have to have different starting symbols.
|
||||
func (p *Parser) NewState(sym rune) State {
|
||||
var found State
|
||||
|
||||
found = &LowerCaseState{}
|
||||
if found.StartSym(sym) {
|
||||
return found
|
||||
}
|
||||
|
||||
found = &UpperCaseState{}
|
||||
if found.StartSym(sym) {
|
||||
return found
|
||||
}
|
||||
|
||||
found = &NumberCaseState{}
|
||||
if found.StartSym(sym) {
|
||||
return found
|
||||
}
|
||||
|
||||
return &NonAlphaNumericCaseState{}
|
||||
}
|
||||
|
||||
// FlushTokens converts whatever remains in the buffer into a final token and
// returns all accumulated tokens. Call it once, after the last Push.
func (p *Parser) FlushTokens() []*analysis.Token {
	p.tokens = append(p.tokens, buildTokenFromTerm(p.buffer))
	return p.tokens
}
|
|
@ -0,0 +1,73 @@
|
|||
package camelcase_filter
|
||||
|
||||
import (
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// States codify the classes that the parser recognizes. The parser stays in
// one state while symbols belong to its class and switches state (emitting a
// token) at the first symbol that does not.
type State interface {
	// is _sym_ the start character
	StartSym(sym rune) bool

	// is _sym_ a member of a class.
	// peek, the next sym on the tape (nil at end of input), can also be used to determine a class.
	Member(sym rune, peek *rune) bool
}
|
||||
|
||||
// LowerCaseState is the class of lower-case letters.
type LowerCaseState struct{}

// Member reports whether sym is a lower-case letter; peek is ignored.
func (s *LowerCaseState) Member(sym rune, peek *rune) bool {
	return unicode.IsLower(sym)
}

// StartSym reports whether sym can begin a lower-case run.
func (s *LowerCaseState) StartSym(sym rune) bool {
	return unicode.IsLower(sym)
}
|
||||
|
||||
type UpperCaseState struct {
|
||||
startedCollecting bool // denotes that the start character has been read
|
||||
collectingUpper bool // denotes if this is a class of all upper case letters
|
||||
}
|
||||
|
||||
func (s *UpperCaseState) Member(sym rune, peek *rune) bool {
|
||||
if !(unicode.IsLower(sym) || unicode.IsUpper(sym)) {
|
||||
return false
|
||||
}
|
||||
|
||||
if peek != nil && unicode.IsUpper(sym) && unicode.IsLower(*peek) {
|
||||
return false
|
||||
}
|
||||
|
||||
if !s.startedCollecting {
|
||||
// now we have to determine if upper-case letters are collected.
|
||||
s.startedCollecting = true
|
||||
s.collectingUpper = unicode.IsUpper(sym)
|
||||
return true
|
||||
}
|
||||
|
||||
return s.collectingUpper == unicode.IsUpper(sym)
|
||||
}
|
||||
|
||||
func (s *UpperCaseState) StartSym(sym rune) bool {
|
||||
return unicode.IsUpper(sym)
|
||||
}
|
||||
|
||||
// NumberCaseState is the class of numeric runes (Unicode category N).
type NumberCaseState struct{}

// Member reports whether sym is numeric; peek is ignored.
func (s *NumberCaseState) Member(sym rune, peek *rune) bool {
	return unicode.IsNumber(sym)
}

// StartSym reports whether sym can begin a numeric run.
func (s *NumberCaseState) StartSym(sym rune) bool {
	return unicode.IsNumber(sym)
}
|
||||
|
||||
// NonAlphaNumericCaseState is the catch-all class: anything that is neither
// a letter nor a number (punctuation, spaces, symbols).
type NonAlphaNumericCaseState struct{}

// Member reports whether sym is non-alphanumeric; peek is ignored.
func (s *NonAlphaNumericCaseState) Member(sym rune, peek *rune) bool {
	return !(unicode.IsLower(sym) || unicode.IsUpper(sym) || unicode.IsNumber(sym))
}

// StartSym reports whether sym can begin a non-alphanumeric run.
func (s *NonAlphaNumericCaseState) StartSym(sym rune) bool {
	return s.Member(sym, nil)
}
|
Loading…
Reference in New Issue