Input-Output Hidden Markov Model (IO-HMM) implementation in Python



I am trying to implement a hidden Markov model with an input-output architecture, but I cannot find any good Python implementation.

Can anyone share a Python package they would recommend that provides an HMM implementation with the following features?

Allows continuous emissions.
Supports covariates (i.e., the independent input variables of an I/O HMM).
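For context, in an input-output HMM (the IO-HMM architecture of Bengio and Frasconi), both the transitions and the emissions are conditioned on an exogenous input sequence u_t: the model learns P(q_t = j | q_{t-1} = i, u_t) for the transitions and P(y_t | q_t = j, u_t) for the emissions, instead of the input-free distributions of a standard HMM.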

So far I have been struggling to find a Python implementation that does exactly this.

I could not find a relevant example in hmmlearn.

Here are some of the libraries I have tested:

hmmlearn: allows multiple features to be passed for the emissions/observations, but provides no support for including covariates (i.e., independent input variables); see the sketch after this list.

hmms: supports neither continuous emissions nor independent input variables.

IOHMM: I was able to train an HMM with this library, but could not find documentation on how to make predictions once the model is trained.
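To illustrate the hmmlearn limitation above: it happily fits multivariate continuous emissions, but neither fit() nor predict() has any notion of input covariates. A minimal sketch with made-up data:

import numpy as np
from hmmlearn import hmm

# Two emission features, three hidden states, three sequences of length 100.
X = np.random.RandomState(0).randn(300, 2)
lengths = [100, 100, 100]

model = hmm.GaussianHMM(n_components=3, covariance_type="full", n_iter=100)
model.fit(X, lengths)               # observations only -- no covariate argument
states = model.predict(X, lengths)  # decoded hidden states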

So I am looking for a package that serves this purpose. Here is how I trained the model with IOHMM:

from IOHMM import UnSupervisedIOHMM
from IOHMM import OLS, CrossEntropyMNL

# 3 hidden states, EM capped at 200 iterations.
SHMM = UnSupervisedIOHMM(num_states=3, max_EM_iter=200, EM_tol=1e-6)

# Gaussian (OLS) emissions; multinomial-logistic initial and transition models.
SHMM.set_models(model_emissions=[OLS(est_stderr=True)],
                model_transition=CrossEntropyMNL(solver='lbfgs'),
                model_initial=CrossEntropyMNL(solver='lbfgs'))

# 'Insulin' is the input covariate of the emission model; the initial and
# transition models take no covariates here.
SHMM.set_inputs(covariates_initial=[], covariates_transition=[],
                covariates_emissions=[['Insulin']])
SHMM.set_outputs([['Glucose']])  # 'Glucose' is the observed output
SHMM.set_data([data])            # data: pandas DataFrame with 'Insulin' and 'Glucose' columns
SHMM.train()
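After training, the fitted parameters can at least be inspected on the component models. The attribute names below (coef, dispersion) follow the examples in the IOHMM README, so treat this as a sketch rather than guaranteed API:

import numpy as np

# Per-state emission model: regression coefficients and residual variance.
for k in range(3):
    ols = SHMM.model_emissions[k][0]
    print('state', k, 'coef:', ols.coef, 'std:', np.sqrt(ols.dispersion))

# Multinomial-logit transition model out of each state.
for k in range(3):
    print('state', k, 'transition coef:', SHMM.model_transition[k].coef)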

After training as above, I could not figure out how to obtain the emission probabilities and the sequence of hidden states.

LATEST UPDATE

Referring to Rabiner's tutorial (https://web.ece.ucsb.edu/Faculty/Rabiner/ece259/Reprints/tutorial%20on%20hmm%20and%20applications.pdf) and the hmmlearn library (https://hmmlearn.readthedocs.io/en/latest/), I found the following solution:

1- Via log_gamma (the posterior state distributions computed by the forward-backward pass):

import numpy as np

# Most likely state at each time step, from the posterior (gamma) distribution.
state_sequences = []
for i in range(100):                 # number of training sequences
    for j in range(lengths[i]):      # length of sequence i
        state_sequences.append(np.argmax(np.exp(SHMM.log_gammas[i])[j]))

# Split the flat list back into one state sequence per unit.
pred_state_seq = [state_sequences[df[df['unit'] == i].index[0]:
                                  df[df['unit'] == i].index[-1] + 1]
                  for i in range(1, df['unit'].max() + 1)]
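Since the gammas are posterior probabilities, a quick sanity check is that they sum to one over the states at every time step:

for lg in SHMM.log_gammas:
    assert np.allclose(np.exp(lg).sum(axis=1), 1.0)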

2- The Viterbi algorithm (which, unlike the per-timestep argmax above, finds the jointly most likely state path):

import numpy as np
from hmmlearn import _hmmc

num_states = 3  # same value used when constructing SHMM above

# Because covariates_transition and covariates_initial are empty, the
# transition and initial models take no input, so the transition matrix is
# constant over time and an ordinary Viterbi pass applies.
transmat = np.vstack([np.exp(SHMM.model_transition[i].predict_log_proba(np.array([[]])))
                      for i in range(num_states)])
startprob = np.exp(SHMM.model_initial.predict_log_proba(np.array([[]]))).squeeze()

def log_mask_zero(a):
    """
    Compute the log of input probabilities, masking divide-by-zero in log.

    Notes
    -----
    During the M-step of the EM algorithm, very small intermediate start
    or transition probabilities can be normalized to zero, causing a
    *RuntimeWarning: divide by zero encountered in log*.
    This function masks this harmless warning.
    """
    a = np.asarray(a)
    with np.errstate(divide="ignore"):
        return np.log(a)

def _do_viterbi_pass(framelogprob):
    # The private _hmmc helper's signature below matches older hmmlearn
    # (~0.2.x) releases; it has changed in more recent versions.
    n_samples, n_components = framelogprob.shape
    state_sequence, logprob = _hmmc._viterbi(n_samples, n_components,
                                             log_mask_zero(startprob),
                                             log_mask_zero(transmat),
                                             framelogprob)
    return logprob, state_sequence

def _decode_viterbi(X):
    # SHMM.log_Eys[X]: per-timestep emission log-likelihoods of sequence X,
    # as computed by IOHMM during training.
    framelogprob = SHMM.log_Eys[X]
    return _do_viterbi_pass(framelogprob)

def decode():
    logprob = 0
    sub_state_sequences = []
    for sub_X in range(100):  # one Viterbi pass per training sequence
        # The decoder works on a single sequence at a time.
        sub_logprob, sub_state_sequence = _decode_viterbi(sub_X)
        logprob += sub_logprob
        sub_state_sequences.append(sub_state_sequence)
    return logprob, np.concatenate(sub_state_sequences)

def predict():
    """
    Find the most likely state sequence for every training sequence.

    Returns
    -------
    logprob : float
        Total log probability of the decoded paths.
    state_sequence : array, shape (n_samples, )
        Most likely state for each sample, with all sequences concatenated.
    """
    logprob, state_sequence = decode()
    return logprob, state_sequence

_, state_seq = predict()
# Split the concatenated state sequence back into one sequence per unit.
pred_state_seq = [state_seq[df[df['unit'] == i].index[0]:
                            df[df['unit'] == i].index[-1] + 1]
                  for i in range(1, df['unit'].max() + 1)]
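To recover the emission probabilities themselves, one option is to rebuild each state's Gaussian density from its fitted OLS model. This is only a sketch: treating predict() as returning the conditional mean and dispersion as holding the residual variance are assumptions based on the examples in the IOHMM README, not documented API:

from scipy.stats import norm

def emission_logprob(insulin, glucose):
    """Log-density of a single observation under each state's emission model."""
    logp = np.empty(num_states)
    for k in range(num_states):
        ols = SHMM.model_emissions[k][0]
        # Assumed API: predict() gives the conditional mean of Glucose
        # given Insulin; dispersion holds the residual variance.
        mean = np.ravel(ols.predict(np.array([[insulin]])))[0]
        std = np.sqrt(ols.dispersion)
        logp[k] = norm.logpdf(glucose, loc=mean, scale=std)
    return logp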
