def covariance_pdf(density, x_cond, n_samples=10**6):
    """Conditional covariance Cov[y|x], estimated by MC integration."""
    covs = np.zeros((x_cond.shape[0], density.ndim_y, density.ndim_y))
    mean = density.mean_(x_cond)
    for i in range(x_cond.shape[0]):
        x = np.tile(x_cond[i].reshape((1, x_cond[i].shape[0])),
                    (n_samples, 1))

        def cov(y):
            a = (y - mean[i])

            # compute the outer products (y - mean)(y - mean)^T for the sampled
            # points and weight them with the conditional density p(y|x)
            c = np.empty((a.shape[0], a.shape[1]**2))
            for j in range(a.shape[0]):
                c[j, :] = np.outer(a[j], a[j]).flatten()

            p = np.tile(np.expand_dims(density.pdf(x, y), axis=1),
                        (1, density.ndim_y**2))
            res = c * p
            return res

        integral = mc_integration_student_t(cov,
                                            ndim=density.ndim_y,
                                            n_samples=n_samples)
        covs[i] = integral.reshape((density.ndim_y, density.ndim_y))
    return covs
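
# A minimal, self-contained sketch of the importance-sampling estimate that
# covariance_pdf performs: y is drawn from a Student-t proposal and the integrand
# (y - mean)(y - mean)^T * p(y|x) is divided by the proposal density. The Gaussian
# "conditional" below is only a stand-in for a fitted density object; all names and
# parameters in this sketch are illustrative assumptions.
import numpy as np
from scipy import stats


def _covariance_importance_sampling_sketch(n_samples=10**5):
    mu = np.array([1.0, -0.5])                      # assumed conditional mean E[y|x]
    true_cov = np.array([[1.0, 0.3], [0.3, 0.5]])   # assumed conditional covariance

    proposal = stats.multivariate_t(loc=np.zeros(2), shape=4.0 * np.eye(2), df=5)
    y = proposal.rvs(size=n_samples)

    # importance weights: conditional density over proposal density
    w = stats.multivariate_normal.pdf(y, mean=mu, cov=true_cov) / proposal.pdf(y)

    a = y - mu
    outer = a[:, :, None] * a[:, None, :]            # per-sample outer products, shape (n, 2, 2)
    return np.mean(outer * w[:, None, None], axis=0)  # should be close to true_cov
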
def _divergence_mc(p, q, x_cond, divergence_fun, n_samples=10**5, n_measures=1):
    """Monte Carlo estimate of a divergence between the conditional
    densities p and q for each row of x_cond."""
    assert x_cond.ndim == 2 and x_cond.shape[1] == q.ndim_x

    P = p.pdf
    Q = q.pdf

    def _div(x_tiled, y_samples):
        p = P(x_tiled, y_samples).flatten()
        q = Q(x_tiled, y_samples).flatten()
        # mask (near-)zero densities to avoid division by zero or log(0) in the divergence
        q = np.ma.masked_where(q < 10**-64, q)
        p = np.ma.masked_where(p < 10**-64, p)

        r = divergence_fun(p, q)
        return r.filled(0)  # masked (invalid) entries contribute zero to the integral

    if n_measures == 1:
        distances = np.zeros(x_cond.shape[0])
    else:
        distances = np.zeros((x_cond.shape[0], n_measures))
    mu_proposal, std_proposal = p._determine_mc_proposal_dist()
    for i in range(x_cond.shape[0]):
        x = np.tile(x_cond[i].reshape((1, x_cond[i].shape[0])), (n_samples, 1))
        func = lambda y: _make_2d(_div(x, y))
        distances[i] = mc_integration_student_t(func,
                                                q.ndim_y,
                                                n_samples=n_samples,
                                                loc_proposal=mu_proposal,
                                                scale_proposal=std_proposal)
    assert distances.shape[0] == x_cond.shape[0]
    return distances
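
# Hedged examples of callables that could be passed as divergence_fun: each acts
# elementwise on the (masked) density values p(y|x) and q(y|x), so integrating the
# result over y yields the corresponding divergence. The names below are
# illustrative, not taken from the surrounding code.
import numpy as np

kl_integrand = lambda p, q: p * np.log(p / q)                           # KL(p || q)
hellinger_integrand = lambda p, q: 0.5 * (np.sqrt(p) - np.sqrt(q))**2   # squared Hellinger distance

# e.g. distances = _divergence_mc(p_model, q_model, x_cond, kl_integrand)
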
 def test_mc_integration_t_1(self):
     func = lambda y: np.expand_dims(stats.multivariate_normal.pdf(
         y, mean=[0, 0], cov=np.diag([2, 2])),
                                     axis=1)
     integral = mc_integration_student_t(func,
                                         ndim=2,
                                         n_samples=10**7,
                                         batch_size=10**6)
     self.assertAlmostEqual(1.0, integral[0], places=2)
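
# A minimal sketch of what an importance-sampling integrator such as
# mc_integration_student_t is assumed to do: draw y from a multivariate Student-t
# proposal and average func(y) / proposal_pdf(y). The function name, signature and
# default proposal parameters below are illustrative, not the library's actual API.
import numpy as np
from scipy import stats


def mc_integration_student_t_sketch(func, ndim, n_samples=10**5,
                                     loc_proposal=0.0, scale_proposal=2.0, df=5):
    proposal = stats.multivariate_t(loc=np.full(ndim, loc_proposal),
                                    shape=scale_proposal**2 * np.eye(ndim), df=df)
    y = proposal.rvs(size=n_samples).reshape(n_samples, ndim)
    values = np.asarray(func(y)).reshape(n_samples, -1)
    weights = 1.0 / proposal.pdf(y)                     # importance weights w.r.t. the proposal
    return np.mean(values * weights[:, None], axis=0)   # estimate of the integral over R^ndim

# e.g. integrating a normalised 2-D Gaussian pdf should give roughly [1.0]:
# mc_integration_student_t_sketch(
#     lambda y: stats.multivariate_normal.pdf(y, mean=[0, 0], cov=np.diag([2, 2]))[:, None],
#     ndim=2)
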
def mean_pdf(density, x_cond, n_samples=10**6):
    """Conditional mean E[y|x], estimated by MC integration."""
    means = np.zeros((x_cond.shape[0], density.ndim_y))
    for i in range(x_cond.shape[0]):
        x = np.tile(x_cond[i].reshape((1, x_cond[i].shape[0])),
                    (n_samples, 1))
        func = lambda y: y * np.tile(np.expand_dims(density.pdf(x, y), axis=1),
                                     (1, density.ndim_y))
        integral = mc_integration_student_t(func, ndim=density.ndim_y,
                                             n_samples=n_samples)
        means[i] = integral
    return means
 def _mean_pdf(self, x_cond, n_samples=10**6):
     """Conditional mean E[y|x] via 1-D quadrature or MC integration."""
     means = np.zeros((x_cond.shape[0], self.ndim_y))
     for i in range(x_cond.shape[0]):
         mean_fun = lambda y: y
         if self.ndim_y == 1:
             n_samples_int, lower, upper = self._determine_integration_bounds(
             )
             func_to_integrate = lambda y: mean_fun(y) * np.squeeze(
                 self._tiled_pdf(y, x_cond[i], n_samples_int))
             integral = numeric_integation(func_to_integrate, n_samples_int,
                                           lower, upper)
         else:
             loc_proposal, scale_proposal = self._determine_mc_proposal_dist(
             )
             func_to_integrate = lambda y: mean_fun(y) * self._tiled_pdf(
                 y, x_cond[i], n_samples)
             integral = mc_integration_student_t(
                 func_to_integrate,
                 ndim=self.ndim_y,
                 n_samples=n_samples,
                 loc_proposal=loc_proposal,
                 scale_proposal=scale_proposal)
         means[i] = integral
     return means
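
# A minimal sketch of the 1-D quadrature fallback used above, assuming
# numeric_integation amounts to a simple grid-based estimate of the integral over a
# bounded interval [lower, upper]. The function name and defaults are illustrative.
import numpy as np


def numeric_integration_sketch(func, n_samples=10**4, lower=-10.0, upper=10.0):
    y = np.linspace(lower, upper, n_samples).reshape(-1, 1)  # evaluation grid, shape (n, 1)
    return (upper - lower) * np.mean(func(y), axis=0)        # Riemann-style integral estimate
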
    def _covariance_pdf(self, x_cond, n_samples=10**6, mean=None):
        """Conditional covariance Cov[y|x], estimated by MC integration."""
        assert hasattr(self, "mean_")
        assert hasattr(self, "pdf")
        assert mean is None or mean.shape == (x_cond.shape[0], self.ndim_y)

        loc_proposal, scale_proposal = self._determine_mc_proposal_dist()

        if mean is None:
            mean = self.mean_(x_cond, n_samples=n_samples)

        covs = np.zeros((x_cond.shape[0], self.ndim_y, self.ndim_y))
        for i in range(x_cond.shape[0]):
            x = np.tile(x_cond[i].reshape((1, x_cond[i].shape[0])),
                        (n_samples, 1))

            def cov(y):
                a = (y - mean[i])

                # compute cov matrices c for sampled instances and weight them with the probability p from the pdf
                c = np.empty((a.shape[0], a.shape[1]**2))
                for j in range(a.shape[0]):
                    c[j, :] = np.reshape(np.outer(a[j], a[j]),
                                         (a.shape[1]**2, ))

                p = np.tile(np.expand_dims(self.pdf(x, y), axis=1),
                            (1, self.ndim_y**2))
                res = c * p
                return res

            integral = mc_integration_student_t(cov,
                                                ndim=self.ndim_y,
                                                n_samples=n_samples,
                                                loc_proposal=loc_proposal,
                                                scale_proposal=scale_proposal)
            covs[i] = integral.reshape((self.ndim_y, self.ndim_y))
        return covs
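
# Hedged usage sketch: if the conditional mean has already been estimated, it can be
# passed in to avoid a second Monte Carlo run. `model` stands for any fitted
# estimator exposing mean_, pdf and ndim_y; the names are illustrative.
#
#     mean = model.mean_(x_cond, n_samples=10**5)
#     covs = model._covariance_pdf(x_cond, n_samples=10**5, mean=mean)
#     # covs has shape (x_cond.shape[0], model.ndim_y, model.ndim_y)
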