AIC and BIC of a PyMC mixture model

I'm using PyMC to fit some data to a straight line. The data have outliers, so I adapted some code written by Jake Vanderplas for his textbook (the third example at the link). The method uses a vector variable qi to encode whether each individual data point belongs to the foreground model (the straight line we are fitting) or to the background model, which we don't care about.
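
As I read the code below, the log-likelihood it uses is (in the notation of the code, with mu_i = slope * x_i + intercept):

    ln L = sum_i [ q_i * ln N(y_i | mu_i, dy_i^2)
                   + (1 - q_i) * ln N(y_i | Y_b, dy_i^2 + sigma_b^2) ]

so q_i = 1 marks a point as belonging to the straight line, and q_i = 0 assigns it to a broad Gaussian background centered on Y_b.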

# imports assumed by the class below; corner() could equally come from the
# older triangle.py package
import numpy as np
import matplotlib.pyplot as plt
import pymc  # requires PyMC 2.2 for the Bernoulli syntax used below
from corner import corner


class lin_fit_ol(object):
    '''
    fit a straight line to one independent variable
        (`xi`, with zero errors) and one dependent variable
        (`yi`, with possibly heteroscedastic errors `dyi`)
    Outliers in `yi` are permitted
    Intended to be a complement to a straight-line fit, for model
        testing purposes
    Modified from Vanderplas's code
        (found at http://www.astroml.org/book_figures/chapter8/fig_outlier_rejection.html)
    '''
    def __init__(self, xi, yi, dyi, value):
        self.xi, self.yi, self.dyi, self.value = xi, yi, dyi, value
        @pymc.stochastic
        def beta(value=np.array([0.5, 1.0])):
            """Slope and intercept parameters for a straight line.
            The likelihood corresponds to the prior probability of the parameters."""
            slope, intercept = value
            prob_intercept = 1 + 0 * intercept
            # uniform prior on theta = arctan(slope)
            # d[arctan(x)]/dx = 1 / (1 + x^2)
            prob_slope = np.log(1. / (1. + slope ** 2))
            return prob_intercept + prob_slope
        @pymc.deterministic
        def model(xi=xi, beta=beta):
            slope, intercept = beta
            return slope * xi + intercept
        # uniform prior on Pb, the fraction of bad points
        Pb = pymc.Uniform('Pb', 0, 1.0, value=0.1)
        # uniform prior on Yb, the centroid of the outlier distribution
        Yb = pymc.Uniform('Yb', -10000, 10000, value=0)
        # uniform prior on log(sigmab), the spread of the outlier distribution
        log_sigmab = pymc.Uniform('log_sigmab', -10, 10, value=5)
        # qi is bernoulli distributed
        # Note: this syntax requires pymc version 2.2
        qi = pymc.Bernoulli('qi', p=1 - Pb, value=np.ones(len(xi)))
        @pymc.deterministic
        def sigmab(log_sigmab=log_sigmab):
            return np.exp(log_sigmab)
        def outlier_likelihood(yi, mu, dyi, qi, Yb, sigmab):
            """likelihood for full outlier posterior"""
            Vi = dyi ** 2
            Vb = sigmab ** 2
            root2pi = np.sqrt(2 * np.pi)
            logL_in = -0.5 * np.sum(
                qi * (np.log(2 * np.pi * Vi) + (yi - mu) ** 2 / Vi))
            logL_out = -0.5 * np.sum(
                (1 - qi) * (np.log(2 * np.pi * (Vi + Vb)) +
                            (yi - Yb) ** 2 / (Vi + Vb)))
            return logL_out + logL_in
        OutlierNormal = pymc.stochastic_from_dist(
            'outliernormal', logp=outlier_likelihood, dtype=float,
            mv=True)
        y_outlier = OutlierNormal(
            'y_outlier', mu=model, dyi=dyi, Yb=Yb, sigmab=sigmab, qi=qi,
            observed=True, value=yi)
        self.M = dict(y_outlier=y_outlier, beta=beta, model=model,
                      qi=qi, Pb=Pb, Yb=Yb, log_sigmab=log_sigmab,
                      sigmab=sigmab)
        self.sample_invoked = False
    def sample(self, iter, burn, calc_deviance=True):
        self.S0 = pymc.MCMC(self.M)
        self.S0.sample(iter=iter, burn=burn)
        # pull the raw sample array out of the trace so it can be sliced by column
        self.trace = self.S0.trace('beta')[:]
        self.mtrace = self.trace[:, 0]  # slope samples
        self.btrace = self.trace[:, 1]  # intercept samples
        self.sample_invoked = True
    def triangle(self):
        assert self.sample_invoked, \
            'Must sample first! Use sample(iter, burn)'
        corner(self.trace[:], labels=['$m$', '$b$'])
    def plot(self, xlab='$x$', ylab='$y$'):
        # plot the data points
        plt.errorbar(self.xi, self.yi, yerr=self.dyi, fmt='.k')
        # do some shimmying to get quantile bounds
        xa = np.linspace(self.xi.min(), self.xi.max(), 100)
        A = np.vander(xa, 2)
        # generate all possible lines
        lines = np.dot(self.trace[:], A.T)
        quantiles = np.percentile(lines, [16, 84], axis=0)
        plt.fill_between(xa, quantiles[0], quantiles[1],
                         color="#8d44ad", alpha=0.5)
        # plot circles around points identified as outliers
        qi = self.S0.trace('qi')[:]
        Pi = qi.astype(float).mean(0)
        outlier_x = self.xi[Pi < 0.32]
        outlier_y = self.yi[Pi < 0.32]
        plt.scatter(outlier_x, outlier_y, lw=1, s=400, alpha=0.5,
                    facecolors='none', edgecolors='red')
        plt.xlabel(xlab)
        plt.ylabel(ylab)
    def ICs(self):
        self.MAP = pymc.MAP(self.M)
        self.MAP.fit()
        self.BIC = self.MAP.BIC
        self.AIC = self.MAP.AIC
        self.logp = self.MAP.logp
        self.logp_at_max = self.MAP.logp_at_max
        return self.AIC, self.BIC

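For completeness, this is roughly how I drive the class (a sketch with made-up synthetic data; the names x, y, dy and the sampler settings are purely illustrative, and the `value` argument just gets stored on the object as far as I can tell):

import numpy as np

np.random.seed(0)
x = np.linspace(0, 10, 50)
dy = 0.5 * np.ones_like(x)                   # homoscedastic errors for simplicity
y = 2.0 * x + 1.0 + np.random.normal(0, dy)  # true line: slope 2, intercept 1
y[::10] += 15.0                              # corrupt every tenth point

fit = lin_fit_ol(x, y, dy, value=y)
fit.sample(iter=20000, burn=5000)
fit.triangle()        # corner plot of the slope/intercept posterior
fit.plot()            # data, 16-84% credible band, circled outliers
print(fit.ICs())      # (AIC, BIC) from the MAP fit
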
So, when we compute the BIC and AIC with this model, we get very large values (since there are lots of points). That makes perfect sense. However, it penalizes having many data points, which irks me. On top of that, the large AIC and BIC would lead a casual observer to believe that the other model (which fits poorly because of the outliers) is actually the better model.
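
For reference (and assuming pymc.MAP uses the standard, unnormalized definitions here; I haven't checked its internals), with maximized log-likelihood ln L_max, k free parameters, and n data points:

    AIC = 2k - 2 ln L_max
    BIC = k ln(n) - 2 ln L_max

In this model k includes not only the five global parameters (slope, intercept, Pb, Yb, log_sigmab) but also one Bernoulli qi per data point, so the penalty term itself grows linearly with n on top of the usual growth of -2 ln L_max.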

Am I missing some subtlety of BIC and AIC here, or is it just the harsh reality of using mixture models that you always have to carry around a bunch of extra binary parameters encoding the membership of each data point?

I recommend the book An Introduction to Statistical Learning.

On page 212 you can find the formulas for AIC and BIC. In each formula the sample size is in the denominator, so the result should not be affected by the number of samples. At least not in such an obvious way.
