Python Spacy KeyError: "[E018] Can't retrieve string for hash"



I am trying to get my code running on a Raspberry Pi 4 and have been stuck on this error for hours. This code snippet throws the error on the Pi, but runs perfectly on Windows with the same project:

import os
import gensim
import spacy
from gensim.utils import simple_preprocess

# stop_words is defined elsewhere in the project (e.g. a stopword list such as NLTK's)

def create_lem_texts(data):  # data comes in as a list
    def sent_to_words(sentences):
        for sentence in sentences:
            yield gensim.utils.simple_preprocess(str(sentence), deacc=True)  # deacc=True removes punctuation

    data_words = list(sent_to_words(data))
    bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)  # higher threshold fewer phrases.
    bigram_mod = gensim.models.phrases.Phraser(bigram)

    def remove_stopwords(texts):
        return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]

    def make_bigrams(texts):
        return [bigram_mod[doc] for doc in texts]

    def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
        """https://spacy.io/api/annotation"""
        texts_out = []
        print(os.getcwd())
        for sent in texts:
            doc = nlp(" ".join(sent))
            texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
        return texts_out

    data_words_nostops = remove_stopwords(data_words)
    data_words_bigrams = make_bigrams(data_words_nostops)
    print(os.getcwd())
    nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
    # Do lemmatization keeping only noun, adj, vb, adv
    data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
    return data_lemmatized

This code is in turn called by this function:

from gensim.models import LdaModel
from gensim.corpora import Dictionary

def assign_topics_tweet(tweets):
    owd = os.getcwd()
    print(owd)
    os.chdir('/home/pi/Documents/pycharm_project_twitter/topic_model/')
    print(os.getcwd())
    lda = LdaModel.load("LDA26")
    print(lda)
    id2word = Dictionary.load('Id2Word')
    print(id2word)
    os.chdir(owd)
    data = create_lem_texts(tweets)
    corpus = [id2word.doc2bow(text) for text in data]
    topics = []
    for tweet in corpus:
        topics_dist = lda.get_document_topics(tweet)
        topics.append(topics_dist)
    return topics
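For reference, calling this end to end looks roughly like the sketch below; the tweet texts are made up for illustration:

tweets = ["Just watched the game, what a finish", "My phone battery dies way too fast"]
topics = assign_topics_tweet(tweets)
# get_document_topics returns (topic_id, probability) pairs, one list per tweet
print(topics)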

Here is the error message:

Traceback (most recent call last):
  File "/home/pi/Documents/pycharm_project_twitter/Twitter_Import.py", line 193, in <module>
    main()
  File "/home/pi/Documents/pycharm_project_twitter/Twitter_Import.py", line 169, in main
    topics = assign_topics_tweet(data)
  File "/home/pi/Documents/pycharm_project_twitter/TopicModel.py", line 238, in assign_topics_tweet
    data = create_lem_texts(tweets)
  File "/home/pi/Documents/pycharm_project_twitter/TopicModel.py", line 76, in create_lem_texts
    data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
  File "/home/pi/Documents/pycharm_project_twitter/TopicModel.py", line 67, in lemmatization
    texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
  File "/home/pi/Documents/pycharm_project_twitter/TopicModel.py", line 67, in <listcomp>
    texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
  File "token.pyx", line 871, in spacy.tokens.token.Token.lemma_.__get__
  File "strings.pyx", line 136, in spacy.strings.StringStore.__getitem__
KeyError: "[E018] Can't retrieve string for hash '18446744073541552667'. This usually refers to an issue with the `Vocab` or `StringStore`."

Process finished with exit code 1
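For context on what the error means: token.lemma_ is a convenience property that resolves the integer hash stored on the token (token.lemma) through the vocabulary's StringStore, and E018 is raised when that hash has no entry in the store. This usually points to a mismatch between the installed spaCy version and the model's data. A minimal sketch of the lookup, on a working install:

import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("The cats were running")

token = doc[1]  # "cats"
print(token.lemma)                     # integer hash of the lemma
print(nlp.vocab.strings[token.lemma])  # hash -> string; this is the lookup that raises E018
print(token.lemma_)                    # the same lookup, done for you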

I tried reinstalling spaCy and the en model, and running it directly on the Pi; the spaCy versions are the same on my Windows machine and the Pi. There is basically no information about this error online.
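A quick way to compare the two environments is to print both the library version and the model's own metadata, since the model package records which spaCy versions it was built for (spacy.__version__, nlp.meta, and the spacy validate CLI are all standard spaCy interfaces):

import spacy

print("spaCy version:", spacy.__version__)

nlp = spacy.load("en_core_web_sm")
# The model carries its own version info; a model built for a different
# spaCy release is a common cause of Vocab/StringStore errors like E018.
print("model version:", nlp.meta.get("version"))
print("built for spaCy:", nlp.meta.get("spacy_version"))

Running python -m spacy validate on both machines performs the same compatibility check from the shell.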

After three days of testing, the fix was simply to install an older version of spaCy, 2.0.1.
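In concrete terms, assuming a pip-based install on the Pi, pinning the version and re-downloading a model that matches it looks like this:

pip install spacy==2.0.1
python -m spacy download en_core_web_sm

Re-downloading the model after the downgrade matters: it ensures the model data on disk was built for the spaCy version that is actually loading it, which is exactly the mismatch the E018 hash lookup complains about.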
