2014-08-12 00:35:35 +02:00
|
|
|
// Copyright (c) 2014 Couchbase, Inc.
|
2016-10-02 16:13:14 +02:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
2014-09-02 16:54:50 +02:00
|
|
|
|
2014-08-14 03:14:47 +02:00
|
|
|
package ar
|
2014-08-12 00:35:35 +02:00
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
|
2014-08-28 21:38:57 +02:00
|
|
|
"github.com/blevesearch/bleve/analysis"
|
|
|
|
"github.com/blevesearch/bleve/registry"
|
2014-08-12 00:35:35 +02:00
|
|
|
)
|
|
|
|
|
2014-08-14 03:14:47 +02:00
|
|
|
// NormalizeName is the name under which the Arabic normalization
// token filter is registered with the bleve registry.
const NormalizeName = "normalize_ar"
|
|
|
|
|
2014-08-12 00:35:35 +02:00
|
|
|
const (
	// Alef and its madda/hamza variants; the variants are folded to bare Alef.
	Alef           = '\u0627'
	AlefMadda      = '\u0622'
	AlefHamzaAbove = '\u0623'
	AlefHamzaBelow = '\u0625'
	// Yeh and dotless Yeh (Alef Maksura); the latter is folded to Yeh.
	Yeh        = '\u064A'
	DotlessYeh = '\u0649'
	// Teh Marbuta is folded to Heh.
	TehMarbuta = '\u0629'
	Heh        = '\u0647'
	// Tatweel (kashida, a purely typographic elongation mark) is removed.
	Tatweel = '\u0640'
	// Harakat (short-vowel and related diacritics); all are removed.
	Fathatan = '\u064B'
	Dammatan = '\u064C'
	Kasratan = '\u064D'
	Fatha    = '\u064E'
	Damma    = '\u064F'
	Kasra    = '\u0650'
	Shadda   = '\u0651'
	Sukun    = '\u0652'
)
|
|
|
|
|
|
|
|
// ArabicNormalizeFilter is a token filter that applies Arabic
// orthographic normalization to each token's term (see normalize).
// It holds no state, so a single instance is safe to share.
type ArabicNormalizeFilter struct {
}
|
|
|
|
|
|
|
|
func NewArabicNormalizeFilter() *ArabicNormalizeFilter {
|
|
|
|
return &ArabicNormalizeFilter{}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *ArabicNormalizeFilter) Filter(input analysis.TokenStream) analysis.TokenStream {
|
|
|
|
for _, token := range input {
|
|
|
|
term := normalize(token.Term)
|
|
|
|
token.Term = term
|
|
|
|
}
|
2014-09-24 00:41:32 +02:00
|
|
|
return input
|
2014-08-12 00:35:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
func normalize(input []byte) []byte {
|
|
|
|
runes := bytes.Runes(input)
|
|
|
|
for i := 0; i < len(runes); i++ {
|
|
|
|
switch runes[i] {
|
2014-09-03 23:48:40 +02:00
|
|
|
case AlefMadda, AlefHamzaAbove, AlefHamzaBelow:
|
|
|
|
runes[i] = Alef
|
|
|
|
case DotlessYeh:
|
|
|
|
runes[i] = Yeh
|
|
|
|
case TehMarbuta:
|
|
|
|
runes[i] = Heh
|
|
|
|
case Tatweel, Kasratan, Dammatan, Fathatan, Fatha, Damma, Kasra, Shadda, Sukun:
|
2014-08-12 00:35:35 +02:00
|
|
|
runes = analysis.DeleteRune(runes, i)
|
|
|
|
i--
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return analysis.BuildTermFromRunes(runes)
|
|
|
|
}
|
2014-08-14 03:14:47 +02:00
|
|
|
|
|
|
|
func NormalizerFilterConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenFilter, error) {
|
|
|
|
return NewArabicNormalizeFilter(), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func init() {
	// Register the filter so it can be looked up by name ("normalize_ar").
	registry.RegisterTokenFilter(NormalizeName, NormalizerFilterConstructor)
}
|