Partial search does not work on Elasticsearch + Haystack, even though the index is built with Ngram and EdgeNgram fields



I am building my index like this:

from haystack import indexes


class BookIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.EdgeNgramField(document=True, use_template=True)
    content_auto = indexes.EdgeNgramField(model_attr='title')
    isbn_13 = indexes.CharField(model_attr='isbn_13')
    validate = indexes.IntegerField(model_attr='validate')
    price = indexes.IntegerField(model_attr='price')
    authors = indexes.EdgeNgramField()
    reviews = indexes.CharField()
    publishers = indexes.EdgeNgramField()
    institutes = indexes.EdgeNgramField()
    sellers = indexes.CharField()
    category = indexes.CharField()
    sub_category = indexes.CharField()

I even tried using Ngram, but partial search still does not work.

I am querying it with SearchQuerySet().all().filter(content=query). I have also tried SearchQuerySet().filter(content__contains=query), but even then it does not return results for partial matches.
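
For reference, the two attempts above as plain code (query holds the raw search string typed by the user):

from haystack.query import SearchQuerySet

# First attempt: plain filter on the document field
results = SearchQuerySet().all().filter(content=query)

# Second attempt: the __contains lookup, which still returns no partial matches
results = SearchQuerySet().filter(content__contains=query)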

Can someone help me?

Haystack is not very good with Elasticsearch here: you cannot apply the proper index settings to your fields out of the box, so you have to provide a custom Elasticsearch backend to enable them:

#in a search_backends.py file
from django.conf import settings
from haystack.backends.elasticsearch_backend import (
    ElasticsearchSearchBackend,
    ElasticsearchSearchEngine
)
from haystack.fields import EdgeNgramField as BaseEdgeNgramField, NgramField as BaseNgramField
from haystack.indexes import CharField
#just an example of which degree of configuration could be possible
CUSTOM_FIELD_TYPE = {
    'completion': {
        'type': 'completion',
        'payloads': True,
        'analyzer': 'suggest_analyzer',
        'preserve_separators': True,
        'preserve_position_increments': False
    },
}
# Custom Backend
class CustomElasticBackend(ElasticsearchSearchBackend):
    DEFAULT_ANALYZER = None
    def __init__(self, connection_alias, **connection_options):
        super(CustomElasticBackend, self).__init__(
                                connection_alias, **connection_options)
        user_settings = getattr(settings, 'ELASTICSEARCH_INDEX_SETTINGS', None)
        self.DEFAULT_ANALYZER = getattr(settings, 'ELASTICSEARCH_DEFAULT_ANALYZER', "snowball")
        if user_settings:
            setattr(self, 'DEFAULT_SETTINGS', user_settings)
    def build_schema(self, fields):
        content_field_name, mapping = super(CustomElasticBackend,
                                              self).build_schema(fields)
        for field_name, field_class in fields.items():
            field_mapping = mapping[field_class.index_fieldname]
            index_analyzer = getattr(field_class, 'index_analyzer', None)
            search_analyzer = getattr(field_class, 'search_analyzer', None)
            field_analyzer = getattr(field_class, 'analyzer', self.DEFAULT_ANALYZER)
            if field_mapping['type'] == 'string' and field_class.indexed:
                field_mapping["term_vector"] = "with_positions_offsets"
                if not hasattr(field_class, 'facet_for') and field_class.field_type not in ('ngram', 'edge_ngram'):
                    field_mapping['analyzer'] = field_analyzer
            if field_class.field_type in CUSTOM_FIELD_TYPE:
                field_mapping = CUSTOM_FIELD_TYPE.get(field_class.field_type).copy()
            if index_analyzer and search_analyzer:
                field_mapping['index_analyzer'] = index_analyzer
                field_mapping['search_analyzer'] = search_analyzer
                if 'analyzer' in field_mapping:
                    del field_mapping['analyzer']
            mapping.update({field_class.index_fieldname: field_mapping})
        return (content_field_name, mapping)

class CustomElasticSearchEngine(ElasticsearchSearchEngine):
    backend = CustomElasticBackend

# Custom fields, just use the ones you need or create yours
class CustomFieldMixin(object):
    def __init__(self, **kwargs):
        self.analyzer = kwargs.pop('analyzer', None)
        self.index_analyzer = kwargs.pop('index_analyzer', None)
        self.search_analyzer = kwargs.pop('search_analyzer', None)
        super(CustomFieldMixin, self).__init__(**kwargs)
class CustomCharField(CustomFieldMixin, CharField):
    pass

class CustomCompletionField(CustomFieldMixin, CharField):
    field_type = 'completion'

class CustomEdgeNgramField(CustomFieldMixin, BaseEdgeNgramField):
    pass

class CustomNgramField(CustomFieldMixin, BaseNgramField):
    pass
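
With this backend in place, index fields can opt into specific analyzers. A minimal sketch of how the original BookIndex could use these custom fields (the analyzer names refer to the ones defined in the settings below; the import path assumes the search_backends.py module above lives in the same app, and the field choice is just an illustration):

# in search_indexes.py -- a sketch, not the full BookIndex
from haystack import indexes
from .search_backends import CustomCharField, CustomEdgeNgramField

class BookIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    # Partial matching on the title: ngram analyzer at index time,
    # plain standard analyzer at search time.
    content_auto = CustomEdgeNgramField(
        model_attr='title',
        index_analyzer='str_index_analyzer',
        search_analyzer='str_search_analyzer',
    )
    # A simple lowercase/asciifolding analyzer for near-exact matching.
    isbn_13 = CustomCharField(model_attr='isbn_13', analyzer='custom_analyzer')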


#settings.py
ELASTICSEARCH_INDEX_SETTINGS = {
    'settings': {
        "analysis": {
            "analyzer": {
                "custom_analyzer": {
                    "type": "custom",
                    "tokenizer": "standard",
                    "filter":  [ "lowercase", "asciifolding" ]
                },
                "str_index_analyzer" : {
                    "type": "custom",
                    "tokenizer" : "haystack_ngram_tokenizer",
                    "filter" : ["stopwords", "asciifolding", "lowercase", "snowball", "elision", "worddelimiter"]
                },
                "str_search_analyzer" : {
                    "type": "custom",
                    "tokenizer" : "standard",
                    "filter" : ["stopwords", "asciifolding", "lowercase", "snowball", "elision", "worddelimiter"]
                },
                "suggest_analyzer": {
                    "type":"custom",
                    "tokenizer":"standard",
                    "filter":[
                        "stopwords",
                        "standard",
                        "lowercase",
                        "asciifolding"
                    ]
                },
            },
            "tokenizer": {
                "haystack_ngram_tokenizer": {
                    "type": "nGram",
                    "min_gram": 2,
                    "max_gram": 20,
                },
            },
            "filter": {
                "elision": {
                    "type": "elision",
                    "articles": ["l", "m", "t", "qu", "n", "s", "j", "d"]
                },
                "stopwords": {
                    "type": "stop",
                    "stopwords": ["_french_", "_english_"],
                    "ignore_case": True
                },
                "worddelimiter": {
                    "type": "word_delimiter"
                }
            }
        }
    }
}
#Haystack settings
HAYSTACK_CONNECTIONS = {
    'default': {
        ...
        'ENGINE': 'path.to.search_backends.CustomElasticSearchEngine',
        ...
    },
}
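
Whenever the backend or the analyzers change, the Elasticsearch mapping has to be recreated before the new settings take effect. A minimal sketch of doing that from Python, using Haystack's standard rebuild_index management command (running ./manage.py rebuild_index --noinput from a shell is equivalent):

# Rebuild the index so the new mapping (with the analyzers) is pushed to Elasticsearch.
from django.core.management import call_command

call_command('rebuild_index', interactive=False)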

Using elasticsearch-2.x together with a django-haystack version < 2.5 causes this problem. Check whether your versions match that combination.

From elasticsearch-2.x onwards, boost is no longer supported metadata that Haystack can pass to it (see the answer at https://stackoverflow.com/a/36847352/5108155).
This issue was fixed in Haystack 2.5.
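
A quick way to confirm which versions are actually installed, since the fix only landed in django-haystack 2.5 (a sketch using pkg_resources; the distribution names are the usual ones on PyPI):

import pkg_resources

# A 2.x elasticsearch client here means you need django-haystack >= 2.5.
print(pkg_resources.get_distribution('elasticsearch').version)
print(pkg_resources.get_distribution('django-haystack').version)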

When you built (or updated) your index, Elasticsearch never received the ngram analyzer that should be applied to the field. You can verify this by manually running curl 'http://<elasticsearch_address>/<index_name>/?pretty', which will show only the type of each field and no analyzer property.
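
The same check can be done with the elasticsearch-py client instead of curl (a sketch; the index name 'haystack' and the local URL are assumptions, so use whatever your HAYSTACK_CONNECTIONS points at):

from pprint import pprint
from elasticsearch import Elasticsearch

es = Elasticsearch(['http://127.0.0.1:9200'])
mapping = es.indices.get_mapping(index='haystack')

# With the bug present, edge-ngram fields such as content_auto show only a
# "type" entry here and no "analyzer" / "index_analyzer" keys.
pprint(mapping)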

Interestingly, Haystack would not raise this exception because of the internal silently_fail attribute on the ElasticsearchSearchBackend class.
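
If you want such errors to surface during indexing instead of being swallowed, Haystack's SILENTLY_FAIL connection option can be turned off (a sketch; URL and INDEX_NAME are placeholders for your own values):

# settings.py
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': 'http://127.0.0.1:9200/',
        'INDEX_NAME': 'haystack',
        # Raise indexing/mapping errors instead of failing silently.
        'SILENTLY_FAIL': False,
    },
}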
