0
0
Fork 0

Added Hindi normalizer

closes #64
This commit is contained in:
Marty Schoch 2014-08-11 19:51:47 -04:00
parent cd0e3fd85b
commit c65f7415ff
3 changed files with 372 additions and 0 deletions

View File

@ -0,0 +1,125 @@
// Copyright (c) 2014 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package hindi_normalize
import (
"bytes"
"github.com/couchbaselabs/bleve/analysis"
)
// HindiNormalizeFilter is a token filter that applies Hindi (Devanagari)
// orthographic normalization to the term of each token in a stream.
// It is stateless; the zero value is ready to use.
type HindiNormalizeFilter struct {
}
// NewHindiNormalizeFilter returns a new HindiNormalizeFilter.
func NewHindiNormalizeFilter() *HindiNormalizeFilter {
	return new(HindiNormalizeFilter)
}
// Filter normalizes the term of every token in the input stream using
// the Devanagari rules in normalize, rewriting each token's Term in
// place and returning a stream of the same length.
func (s *HindiNormalizeFilter) Filter(input analysis.TokenStream) analysis.TokenStream {
	// Pre-size the result: the output always has exactly one token per
	// input token, so this avoids repeated slice growth on append.
	rv := make(analysis.TokenStream, 0, len(input))
	for _, token := range input {
		token.Term = normalize(token.Term)
		rv = append(rv, token)
	}
	return rv
}
// normalize applies Hindi (Devanagari) orthographic normalization to the
// input term, rune by rune (a port of Lucene's HindiNormalizer):
//   - dead consonant n (na + virama) is folded to anusvara (bindu)
//   - candrabindu is folded to bindu
//   - the combining nukta is deleted, and precomposed nukta consonants
//     are folded to their base consonants
//   - zero-width joiner/non-joiner and virama are removed
//   - chandra and short vowel forms are folded to their standard forms
//   - long vowels (independent and dependent) are shortened
// Returns the normalized term as a new byte slice.
func normalize(input []byte) []byte {
	runes := bytes.Runes(input)
	for i := 0; i < len(runes); i++ {
		switch runes[i] {
		// dead n (na followed by virama) -> bindu
		case '\u0928':
			if i+1 < len(runes) && runes[i+1] == '\u094D' {
				runes[i] = '\u0902'
				runes = analysis.DeleteRune(runes, i+1)
			}
		// candrabindu -> bindu
		case '\u0901':
			runes[i] = '\u0902'
		// nukta deletions: drop the combining nukta mark itself ...
		case '\u093C':
			runes = analysis.DeleteRune(runes, i)
			// re-examine the rune shifted into position i
			i--
		// ... and fold each precomposed nukta consonant to its base form
		case '\u0929':
			runes[i] = '\u0928'
		case '\u0931':
			runes[i] = '\u0930'
		case '\u0934':
			runes[i] = '\u0933'
		case '\u0958':
			runes[i] = '\u0915'
		case '\u0959':
			runes[i] = '\u0916'
		case '\u095A':
			runes[i] = '\u0917'
		case '\u095B':
			runes[i] = '\u091C'
		case '\u095C':
			runes[i] = '\u0921'
		case '\u095D':
			runes[i] = '\u0922'
		case '\u095E':
			runes[i] = '\u092B'
		case '\u095F':
			runes[i] = '\u092F'
		// zwj/zwnj -> delete
		case '\u200D', '\u200C':
			runes = analysis.DeleteRune(runes, i)
			// re-examine the rune shifted into position i
			i--
		// virama -> delete
		case '\u094D':
			runes = analysis.DeleteRune(runes, i)
			// re-examine the rune shifted into position i
			i--
		// chandra/short vowel signs -> standard forms
		case '\u0945', '\u0946':
			runes[i] = '\u0947'
		case '\u0949', '\u094A':
			runes[i] = '\u094B'
		case '\u090D', '\u090E':
			runes[i] = '\u090F'
		case '\u0911', '\u0912':
			runes[i] = '\u0913'
		case '\u0972':
			runes[i] = '\u0905'
		// long -> short independent vowels
		case '\u0906':
			runes[i] = '\u0905'
		case '\u0908':
			runes[i] = '\u0907'
		case '\u090A':
			runes[i] = '\u0909'
		case '\u0960':
			runes[i] = '\u090B'
		case '\u0961':
			runes[i] = '\u090C'
		case '\u0910':
			runes[i] = '\u090F'
		case '\u0914':
			runes[i] = '\u0913'
		// long -> short dependent vowels
		case '\u0940':
			runes[i] = '\u093F'
		case '\u0942':
			runes[i] = '\u0941'
		case '\u0944':
			runes[i] = '\u0943'
		case '\u0963':
			runes[i] = '\u0962'
		case '\u0948':
			runes[i] = '\u0947'
		case '\u094C':
			runes[i] = '\u094B'
		}
	}
	return analysis.BuildTermFromRunes(runes)
}

View File

@ -0,0 +1,245 @@
// Copyright (c) 2014 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package hindi_normalize
import (
"reflect"
"testing"
"github.com/couchbaselabs/bleve/analysis"
)
// TestHindiNormalizeFilter exercises the Hindi normalizer with a
// table of input/expected-output token streams: spelling variants of
// the same word, nukta removal, deletion of zero-width (ZWJ/ZWNJ)
// format characters, chandra folding, vowel shortening, and the
// empty-term edge case. NOTE(review): several input terms below
// intentionally contain invisible characters (nukta, ZWJ/ZWNJ,
// virama); do not "clean up" these string literals.
func TestHindiNormalizeFilter(t *testing.T) {
	tests := []struct {
		input  analysis.TokenStream
		output analysis.TokenStream
	}{
		// basics
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अँगरेज़ी"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अंगरेजि"),
				},
			},
		},
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अँगरेजी"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अंगरेजि"),
				},
			},
		},
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अँग्रेज़ी"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अंगरेजि"),
				},
			},
		},
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अँग्रेजी"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अंगरेजि"),
				},
			},
		},
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अंगरेज़ी"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अंगरेजि"),
				},
			},
		},
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अंगरेजी"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अंगरेजि"),
				},
			},
		},
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अंग्रेज़ी"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अंगरेजि"),
				},
			},
		},
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अंग्रेजी"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अंगरेजि"),
				},
			},
		},
		// test decompositions
		// removing nukta dot
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("क़िताब"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("किताब"),
				},
			},
		},
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("फ़र्ज़"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("फरज"),
				},
			},
		},
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("क़र्ज़"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("करज"),
				},
			},
		},
		// some other composed nukta forms
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("ऱऴख़ग़ड़ढ़य़"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("रळखगडढय"),
				},
			},
		},
		// removal of format (ZWJ/ZWNJ)
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("शार्‍मा"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("शारमा"),
				},
			},
		},
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("शार्‌मा"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("शारमा"),
				},
			},
		},
		// removal of chandra
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("ॅॆॉॊऍऎऑऒ\u0972"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("ेेोोएएओओअ"),
				},
			},
		},
		// vowel shortening
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("आईऊॠॡऐऔीूॄॣैौ"),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte("अइउऋऌएओिुृॢेो"),
				},
			},
		},
		// empty
		{
			input: analysis.TokenStream{
				&analysis.Token{
					Term: []byte(""),
				},
			},
			output: analysis.TokenStream{
				&analysis.Token{
					Term: []byte(""),
				},
			},
		},
	}
	hindiNormalizeFilter := NewHindiNormalizeFilter()
	for _, test := range tests {
		actual := hindiNormalizeFilter.Filter(test.input)
		if !reflect.DeepEqual(actual, test.output) {
			t.Errorf("expected %#v, got %#v", test.output, actual)
			// also dump the raw bytes, since many of these runes render
			// identically (or are invisible) in terminal output
			t.Errorf("expected % x, got % x", test.output[0].Term, actual[0].Term)
		}
	}
}

View File

@ -28,6 +28,7 @@ import (
"github.com/couchbaselabs/bleve/analysis/token_filters/cld2"
"github.com/couchbaselabs/bleve/analysis/token_filters/elision_filter"
"github.com/couchbaselabs/bleve/analysis/token_filters/german_normalize"
"github.com/couchbaselabs/bleve/analysis/token_filters/hindi_normalize"
"github.com/couchbaselabs/bleve/analysis/token_filters/length_filter"
"github.com/couchbaselabs/bleve/analysis/token_filters/lower_case_filter"
"github.com/couchbaselabs/bleve/analysis/token_filters/persian_normalize"
@ -298,6 +299,7 @@ func init() {
Config.Analysis.TokenFilters["normalize_fa"] = persian_normalize.NewPersianNormalizeFilter()
Config.Analysis.TokenFilters["normalize_ar"] = arabic_normalize.NewArabicNormalizeFilter()
Config.Analysis.TokenFilters["normalize_de"] = german_normalize.NewGermanNormalizeFilter()
Config.Analysis.TokenFilters["normalize_hi"] = hindi_normalize.NewHindiNormalizeFilter()
// register analyzers
keywordAnalyzer := Config.MustBuildNewAnalyzer([]string{}, "single", []string{})