Example 1
    def test_mvt_pdf(self):
        cov3 = self.cov3
        mu3 = self.mu3

        mvt = MVT((0, 0), 1, 5)
        assert_almost_equal(mvt.logpdf(np.array([0., 0.])),
                            -1.837877066409345,
                            decimal=15)
        assert_almost_equal(mvt.pdf(np.array([0., 0.])),
                            0.1591549430918953,
                            decimal=15)

        assert_almost_equal(mvt.logpdf(np.array([1., 1.])),
                            -3.01552989458359, decimal=14)

        mvt1 = MVT((0, 0), 1, 1)
        assert_almost_equal(mvt1.logpdf(np.array([1., 1.])),
                            -3.48579549941151, decimal=14)

        rvs = mvt.rvs(100000)
        assert_almost_equal(np.cov(rvs, rowvar=0), mvt.cov, decimal=1)

        mvt31 = MVT(mu3, cov3, 1)
        assert_almost_equal(mvt31.pdf(cov3), [
            0.0007276818698165781, 0.0009980625182293658, 0.0027661422056214652
        ],
                            decimal=17)

        mvt = MVT(mu3, cov3, 3)
        assert_almost_equal(
            mvt.pdf(cov3),
            [0.000863777424247410, 0.001277510788307594, 0.004156314279452241],
            decimal=17)
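A quick way to sanity-check the constants in this test: for a bivariate t with identity scale, the density at the origin is 1/(2π) ≈ 0.1591549 for any df, which is exactly the value asserted above. `MVT` here comes from statsmodels' sandbox; SciPy's `multivariate_t` (available in SciPy >= 1.6) computes the same density, so a minimal independent cross-check looks like this:

import numpy as np
from scipy import stats

# For d = 2 with identity scale, the multivariate-t density at the origin is
# Gamma((df + 2)/2) / (Gamma(df/2) * df * pi) = 1/(2*pi), independent of df.
rv = stats.multivariate_t(loc=[0.0, 0.0], shape=np.eye(2), df=5)
print(rv.pdf([0.0, 0.0]))     # 0.15915494309189535  (= 1 / (2 * np.pi))
print(rv.logpdf([0.0, 0.0]))  # -1.8378770664093453  (= -log(2 * np.pi))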
Example 2
    def __init__(self, mean, cov, df=None, random_state=1):
        self.mean = mean
        self.cov = cov
        self.sd = sd = np.sqrt(np.diag(cov))
        if df is None:
            self.dist = stats.multivariate_normal(mean=mean, cov=cov)
            self.udist = stats.norm(loc=mean, scale=sd)
            self.std_udist = stats.norm(loc=0., scale=1.)
        else:
            # scale matrix chosen so the t distribution's covariance is `cov`
            # (Cov = sigma * df / (df - 2) for df > 2)
            sigma = cov * (df - 2) / df
            self.dist = MVT(mean=mean, sigma=sigma, df=df)
            self.udist = stats.t(loc=mean, scale=sd, df=df)
            self.std_udist = stats.t(loc=0., scale=1., df=df)
        self.dist.random_state = random_state
        self.udist.random_state = random_state
        self.std_udist.random_state = random_state

        # alternative factorizations ("square roots") of the covariance
        self._chol = cholesky(self.cov)
        self._pchol = pivoted_cholesky(self.cov)

        e, v = np.linalg.eigh(self.cov)
        # To match Bastos and O'Hagan definition
        # i.e., eigenvalues ordered from largest to smallest
        e, v = e[::-1], v[:, ::-1]
        ee = np.diag(np.sqrt(e))
        self._eig = (v @ ee)
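The factors stored at the end are alternative matrix square roots L of the covariance, each satisfying L @ L.T == cov. A minimal numpy/scipy sketch verifying that property for the Cholesky and eigendecomposition variants (`pivoted_cholesky` is assumed to come from the surrounding package and is omitted here):

import numpy as np
from scipy.linalg import cholesky

cov = np.array([[2.0, 0.5],
                [0.5, 1.0]])

# Lower-triangular Cholesky factor: L @ L.T == cov
L = cholesky(cov, lower=True)
assert np.allclose(L @ L.T, cov)

# Eigendecomposition square root, eigenvalues ordered largest to smallest
# as in the __init__ above
e, v = np.linalg.eigh(cov)
e, v = e[::-1], v[:, ::-1]
L_eig = v @ np.diag(np.sqrt(e))
assert np.allclose(L_eig @ L_eig.T, cov)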
Example 3
    def train_analytic(self):
        """ Calculating the analytic distribution posteriors given on page 15 of Lori's 
        Optimal Classification eq 34. """
        self.nustar = self.nu + self.n

        samplemean = self.data.mean(axis=0)
        samplecov = np.cov(self.data.T)

        self.mustar = (self.nu * self.priormu + self.n * samplemean) \
                / (self.nu + self.n)
        self.kappastar = self.kappa + self.n
        self.Sstar = self.S + (self.n-1)*samplecov + self.nu*self.n/(self.nu+self.n)\
                * np.outer((samplemean - self.priormu), (samplemean - self.priormu))
                
        # Now calculate effective class conditional densities from eq 55 page 21
        self.fx = MVT(
                self.mustar, 
                (self.nustar+1)/(self.kappastar-self.D+1)/self.nustar * self.Sstar, 
                self.kappastar - self.D + 1)
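These are the standard conjugate normal-inverse-Wishart updates. A self-contained sketch of the same formulas as a pure function, for reference (the function name and signature are illustrative, not part of the original class):

import numpy as np

def niw_posterior(data, priormu, nu, kappa, S):
    """Conjugate normal-inverse-Wishart update mirroring train_analytic."""
    n = data.shape[0]
    samplemean = data.mean(axis=0)
    samplecov = np.cov(data.T)
    diff = samplemean - priormu

    nustar = nu + n
    mustar = (nu * priormu + n * samplemean) / (nu + n)
    kappastar = kappa + n
    Sstar = S + (n - 1) * samplecov + nu * n / (nu + n) * np.outer(diff, diff)
    return nustar, mustar, kappastar, Sstar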
Example 4
    def __init__(self):
        np.random.seed(1234)

        self.n = 4  # Data points

        self.true_mu = 0.0
        self.true_sigma = 1  #di.invgamma.rvs(3)

        # For G function calculation and averaging
        self.grid_n = 100
        low, high = -4, 4
        self.gextent = (low, high)
        self.grid = np.linspace(low, high, self.grid_n)
        self.gavg = np.zeros(self.grid_n)
        self.numgavg = 0

        #self.data = di.norm.rvs(size=self.n)
        self.data = np.array([0.0, -0.0, 0.5, -0.5])
        assert self.data.size == self.n

        ######## Starting point of MCMC Run #######
        self.mu = 0.0
        self.sigma = 2.0

        ###### Bookkeeping ######
        self.oldmu = None
        self.oldsigma = None

        ##### Prior Values and Confidences ######
        self.priorsigma = 2
        self.kappa = 1
        self.priormu = 0
        self.nu = 8.0
        #### Calculating the Analytic solution given on page 15 of Lori's
        #### Optimal Classification eq 34.
        self.nustar = self.nu + self.n

        samplemean = self.data.mean()
        samplevar = np.cov(self.data)

        self.mustar = (self.nu*self.priormu + self.n * samplemean) \
                / (self.nu + self.n)
        self.kappastar = self.kappa + self.n
        self.Sstar = self.priorsigma + (self.n-1)*samplevar + self.nu*self.n/(self.nu+self.n)\
                * (samplemean - self.priormu)**2

        #### Now calculate effective class conditional densities from eq 55
        #### page 21

        #self.fx = MVT(
        #self.mu0star,
        #(self.nu0star+1)/(self.kappa0star-self.D+1)/self.nu0star * self.S0star,
        #self.kappa0star - self.D + 1)
        # So I'm pretty sure this is incorrect below, off by some scaling
        # parameters
        self.fx = MVT([self.mustar],
                      [(self.nustar + 1) /
                       (self.kappastar) / self.nustar * self.Sstar / 2],
                      self.kappastar / 2)

        self.analyticfx = self.fx.logpdf(self.grid.reshape(-1, 1))
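Since the data here are one-dimensional, the final grid evaluation can also be written with `scipy.stats.t`: a 1-D multivariate t with shape s is a univariate Student t with scale sqrt(s). The sketch below mirrors the MVT call exactly as written (the author's own comment above warns the scaling may be off, so this reproduces the code, not a verified formula), using the posterior values the inputs above produce:

import numpy as np
from scipy import stats

# Posterior quantities implied by the data and priors above
mustar, nustar, kappastar, Sstar = 0.0, 12.0, 5.0, 2.5

grid = np.linspace(-4, 4, 100)

# MVT([m], [s], df) in one dimension equals a Student t with loc=m,
# scale=sqrt(s), df=df
s = (nustar + 1) / kappastar / nustar * Sstar / 2
analyticfx = stats.t.logpdf(grid, df=kappastar / 2,
                            loc=mustar, scale=np.sqrt(s))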