// bleve/analysis/token_filters/unicode_normalize/unicode_normalize_test.go
// Copyright (c) 2014 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package unicode_normalize

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/blevesearch/bleve/analysis"
)
// the following tests come from the Lucene
// test cases for the CJK width filter,
// which is our basis for using this
// as a substitute for that
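// A quick reference for the four normalization forms exercised below
// (all examples appear in the test table):
//
//	NFD  - canonical decomposition:
//	       U+212B (angstrom sign) -> U+0041 U+030A (A + combining ring)
//	NFC  - canonical decomposition, then canonical composition:
//	       U+212B -> U+00C5 (precomposed A with ring)
//	NFKD - compatibility decomposition:
//	       U+FB01 (the "fi" ligature) -> "fi"; fullwidth and halfwidth
//	       forms fold to their ordinary equivalents
//	NFKC - compatibility decomposition, then canonical composition:
//	       halfwidth katakana recompose into fullwidth katakana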
func TestUnicodeNormalization(t *testing.T) {
	tests := []struct {
		formName string
		input    analysis.TokenStream
		output   analysis.TokenStream
	}{
		{
			formName: NFKD,
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("Ｔｅｓｔ"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("Test"),
				},
			},
		},
		{
			formName: NFKD,
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("１２３４"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("1234"),
				},
			},
		},
		{
			formName: NFKD,
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("ｶﾀｶﾅ"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("カタカナ"),
				},
			},
		},
		{
			formName: NFKC,
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("ｳﾞｨｯﾂ"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("ヴィッツ"),
				},
			},
		},
		{
			formName: NFKC,
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("ﾊﾟﾅｿﾆｯｸ"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("パナソニック"),
				},
			},
		},
		{
			formName: NFD,
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("\u212B"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("\u0041\u030A"),
				},
			},
		},
		{
			formName: NFC,
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("\u212B"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("\u00C5"),
				},
			},
		},
		{
			formName: NFKD,
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("\uFB01"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("\u0066\u0069"),
				},
			},
		},
		{
			formName: NFKC,
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("\uFB01"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("\u0066\u0069"),
				},
			},
		},
	}
	for _, test := range tests {
		filter := MustNewUnicodeNormalizeFilter(test.formName)
		actual := filter.Filter(test.input)
		if !reflect.DeepEqual(actual, test.output) {
			// report both the readable and the byte-level form, since
			// differently normalized strings can render identically
			t.Errorf("expected %s, got %s", test.output[0].Term, actual[0].Term)
			t.Errorf("expected %#v, got %#v", test.output[0].Term, actual[0].Term)
		}
	}
}
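
// A minimal usage sketch, not part of the original test: it relies only
// on identifiers already exercised above (MustNewUnicodeNormalizeFilter,
// Filter, and the NFKC form name) and on the expected result from the
// test table, where NFKC folds the ligature U+FB01 into the two ASCII
// letters "fi".
func ExampleMustNewUnicodeNormalizeFilter() {
	filter := MustNewUnicodeNormalizeFilter(NFKC)
	stream := filter.Filter(analysis.TokenStream{
		&analysis.Token{Term: []byte("\uFB01")},
	})
	fmt.Printf("%s\n", stream[0].Term)
	// Output: fi
}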