How to find the parameters of a Weibull distribution in Python by minimizing the Kullback-Leibler divergence



I want to find the parameters of a Weibull distribution by minimizing the Kullback-Leibler divergence. I found code here that does the same thing, and I replaced the normal distribution in the original code with a Weibull distribution. I don't know why I get "nan" parameters and a "nan" Kullback-Leibler divergence value. Can anyone help?
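For reference, the quantity the code below tries to minimize is the (discretized) Kullback-Leibler divergence between the target density p and a Weibull density q with scale alpha and shape beta, which is what the `kl_divergence` and `weibull` expressions in the code implement:

$$
D_{\mathrm{KL}}(P \parallel Q) = \sum_i p_i \log \frac{p_i}{q_i},
\qquad
q(x;\alpha,\beta) = \frac{\beta}{\alpha}\left(\frac{x}{\alpha}\right)^{\beta-1} e^{-(x/\alpha)^{\beta}} .
$$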

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import seaborn as sns
sns.set()
from scipy.stats import weibull_min

learning_rate = 0.001
epochs = 100

# Target distribution: Weibull with shape 1.055, loc 0, scale 468
x = np.arange(0, 2000, 0.001)
p_pdf = weibull_min.pdf(x, 1.055, 0, 468).reshape(1, -1)

p = tf.placeholder(tf.float64, shape=p_pdf.shape)
alpha = tf.Variable(np.zeros(1))
beta = tf.Variable(np.eye(1))

# Candidate Weibull PDF q(x; alpha, beta)
weibull = (beta / alpha) * ((x / alpha)**(beta - 1)) * tf.exp(-((x / alpha)**beta))
q = weibull

# KL(P || Q) = sum p * log(p / q), with the terms where p == 0 set to zero
kl_divergence = tf.reduce_sum(
    tf.where(p == 0, tf.zeros(p_pdf.shape, tf.float64), p * tf.log(p / q)))

optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(kl_divergence)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    history = []
    alphas = []
    betas = []

    for i in range(epochs):
        sess.run(optimizer, { p: p_pdf })

        if i % 10 == 0:
            history.append(sess.run(kl_divergence, { p: p_pdf }))
            alphas.append(sess.run(alpha)[0])
            betas.append(sess.run(beta)[0][0])

    for a, b in zip(alphas, betas):
        q_pdf = weibull_min.pdf(x, b, 0, a)
        plt.plot(x, q_pdf.reshape(-1, 1), c='red')

    plt.title('KL(P||Q) = %1.3f' % history[-1])
    plt.plot(x, p_pdf.reshape(-1, 1), linewidth=3)
    plt.show()

    plt.plot(history)
    plt.show()

    sess.close()

Try initializing alpha to something other than 0. Maybe initialize it to np.ones(1) instead.

If you use an alpha of 0, you get a nan with scipy:

from scipy.stats import weibull_min
weibull_min.pdf(100, 0, 0, 2.), weibull_min.pdf(100, 1, 0, 2.)
# (nan, 9.643749239819589e-23)
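A minimal sketch of the suggested fix, applied to the variable initialization in the question's code (assuming the rest of the script is unchanged): with alpha starting at zero, beta / alpha and x / alpha divide by zero, so the loss and gradients are nan from the first step.

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Start the scale parameter alpha at a strictly positive value so that
# beta / alpha and x / alpha in the Weibull PDF stay finite.
alpha = tf.Variable(np.ones(1))   # instead of tf.Variable(np.zeros(1))
beta = tf.Variable(np.eye(1))     # 1.0 is already a safe starting point

With a non-zero starting value for alpha, the gradient-descent loop in the question should produce finite KL values instead of nan.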
