def martingale(Yn, n, prior, data, nsamples=1):
    """
    Returns the normal martingale distribution of Y_{n+m} given Y_n,

    where m = nsamples is the number of future samples (default: 1).
    """
    # Shrinkage factor; equals data.sigma**2 / prior.sigma**2 when tau = 1/sigma.
    gamma = (prior.tau / data.tau)**2

    return d.Gaussian(
        Yn,
        data.sigma / (n + nsamples + gamma) *
        math.sqrt(nsamples**2 / (n + gamma) + nsamples))
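
The sequential-test snippets rely on a small distributions module `d` that is not shown. A minimal stand-in for `d.Gaussian`, assuming `.tau` is the inverse scale 1/sigma (which makes the `gamma` above equal to `data.sigma**2 / prior.sigma**2`, matching the other functions), could look like this; the real module may differ:

import math
import random
from statistics import NormalDist

class Gaussian:
    """Hypothetical stand-in for d.Gaussian; illustrative only."""
    def __init__(self, mu, sigma):
        self.mu = mu
        self.sigma = sigma
        self.tau = 1 / sigma  # assumed: inverse scale, not precision

    def ppf(self, q):
        # Quantile function via the standard library's NormalDist.
        return NormalDist(self.mu, self.sigma).inv_cdf(q)

    def sample(self):
        return random.gauss(self.mu, self.sigma)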
Example 2. File: models.py, Project: yyht/VLAE
    def write_summary(self, x, writer, epoch):
        """Log reconstructions, errors, KL divergence and samples to TensorBoard."""
        with torch.no_grad():
            q_z_x, _ = self.encoder.forward(x)
            mu = q_z_x.mu

            # Iteratively refine the posterior mean, logging each step.
            for i in range(self.n_update):
                p_x_z, W_dec = self.decoder.forward(mu, compute_jacobian=True)
                writer.add_image(
                    f'reconstruction_mu/{i}',
                    vutils.make_grid(
                        self.dataset.unpreprocess(p_x_z.mu).clamp(0, 1)),
                    epoch)
                writer.add_scalar(f'recon_error/{i}',
                                  -torch.mean(p_x_z.log_probability(x)).item(),
                                  epoch)

                mu_new, precision = self.solve_mu(x, mu, p_x_z, W_dec)
                lr = self.update_rate(i)
                mu = (1 - lr) * mu + lr * mu_new

            p_x_z, W_dec = self.decoder.forward(mu, compute_jacobian=True)
            _, precision = self.solve_mu(x, mu, p_x_z, W_dec)
            writer.add_image(
                'reconstruction_mu',
                vutils.make_grid(
                    self.dataset.unpreprocess(p_x_z.mu).clamp(0, 1)), epoch)

            # Gaussian posterior from the final update step.
            q_z_x = distribution.Gaussian(mu, precision)
            z = q_z_x.sample()
            p_x_z = self.decoder.forward(z)

            writer.add_scalar(
                'kl_div',
                torch.mean(-self.prior.log_probability(z) +
                           q_z_x.log_probability(z)).item(), epoch)
            writer.add_scalar('recon_error',
                              -torch.mean(p_x_z.log_probability(x)).item(),
                              epoch)
            writer.add_image('data',
                             vutils.make_grid(self.dataset.unpreprocess(x)),
                             epoch)
            writer.add_image(
                'reconstruction_z',
                vutils.make_grid(
                    self.dataset.unpreprocess(p_x_z.mu).clamp(0, 1)), epoch)

            # Decode samples drawn from the standard-normal prior.
            sample = torch.randn(len(x), z.shape[1]).cuda()
            sample = self.decoder(sample).mu
            writer.add_image(
                'generated',
                vutils.make_grid(
                    self.dataset.unpreprocess(sample).clamp(0, 1)), epoch)
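
These VLAE methods lean on a `distribution.Gaussian` parameterized by a mean and a full precision matrix, whose definition is not shown. A minimal sketch of the interface they use (mu, sample, log_probability), assuming batched (B, K) means and (B, K, K) precisions, is below; the real class may differ:

import math
import torch

class Gaussian:
    """Hypothetical sketch of distribution.Gaussian; illustrative only."""
    def __init__(self, mu, precision):
        self.mu = mu                    # (B, K) posterior means
        self.precision = precision      # (B, K, K) posterior precisions
        # For sampling: L L^T = covariance = inverse precision.
        self.scale_tril = torch.linalg.cholesky(torch.inverse(precision))

    def sample(self):
        # Reparameterized draw: mu + L eps, with eps ~ N(0, I).
        eps = torch.randn_like(self.mu)
        return self.mu + torch.einsum('bij,bj->bi', self.scale_tril, eps)

    def log_probability(self, x):
        diff = (x - self.mu).unsqueeze(-1)                       # (B, K, 1)
        maha = (diff.transpose(1, 2) @ self.precision @ diff).reshape(-1)
        k = self.mu.shape[-1]
        return 0.5 * (torch.logdet(self.precision)
                      - maha - k * math.log(2 * math.pi))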
def normal_msprt_test(p, s, prior, alpha=0.05, Nmax=5000, steps=1):
    """Sequential mSPRT test of a Bernoulli(p) stream against threshold s.

    Returns (1, n, Sn) when the rate is judged above s, (-1, n, Sn) when
    below, and (0, n, Sn) if no decision is reached within Nmax draws.
    """
    Sn = 0
    X = d.Binomial(steps, p)
    for n in range(steps, Nmax + 1, steps):
        Sn += X.sample()
        # Plug-in estimate of p * (1 - p) with add-one smoothing.
        data = d.Gaussian(0, (Sn + p) / (n + 1) * (1 - (Sn + p) / (n + 1)))
        pval = normal_msprt_pval(normal_msprt(Sn / n, n, s, prior, data))
        if pval < alpha:
            if Sn / n > s:
                return 1, n, Sn
            return -1, n, Sn
    # no outcome after Nmax steps
    return 0, n, Sn
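
A hypothetical run (`normal_msprt` and `normal_msprt_pval` are helpers defined elsewhere in the source file): stream Bernoulli(0.55) draws in batches of 10 and test against s = 0.5:

prior = d.Gaussian(0, 1)
decision, n_used, successes = normal_msprt_test(
    0.55, 0.5, prior, alpha=0.05, Nmax=5000, steps=10)
# decision is 1 (rate above s), -1 (rate below s), or 0 (no decision by Nmax)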
def normal_heuristic(s, prior, data, N, T, alpha=0.05, beta=0.3):
    """Heuristic rejection boundaries: stop early when the chance of
    reaching the acceptance boundary T steps ahead drops below beta."""
    gamma = data.sigma**2 / prior.sigma**2

    boundaries = []
    for n in range(1, N):
        accept = normal_acceptance(n, s, prior, data, alpha=alpha)

        target = normal_acceptance(n+T, s, prior, data, alpha=alpha)
        at_current = d.Gaussian(n/(n+gamma) * target,
                                math.sqrt(n) * data.sigma / (n+gamma))
        reject = at_current.ppf(beta)

        boundaries.append(boundary(n, accept, reject, {}))

    return boundaries
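
The `boundary` record constructed above is not defined in these snippets; judging from the call site, a namedtuple along these lines (the field names are guesses) would fit:

from collections import namedtuple

boundary = namedtuple('boundary', ['n', 'accept', 'reject', 'extra'])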
Example 5. File: models.py, Project: yyht/VLAE
    def forward(self, x):
        """Encode x, iteratively refine the posterior mean, and return the loss."""
        q_z_x, _ = self.encoder.forward(x)
        mu = q_z_x.mu

        for i in range(self.n_update):
            p_x_z, W_dec = self.decoder.forward(mu, compute_jacobian=True)
            mu_new, precision = self.solve_mu(x, mu, p_x_z, W_dec)
            lr = self.update_rate(i)
            mu = (1 - lr) * mu + lr * mu_new

        p_x_z, W_dec = self.decoder.forward(mu, compute_jacobian=True)
        var_inv = torch.exp(-self.decoder.logvar).unsqueeze(1)
        # Gauss-Newton posterior precision: W^T Sigma_x^-1 W plus the identity
        # contributed by the standard-normal prior.
        precision = torch.matmul(W_dec.transpose(1, 2) * var_inv, W_dec)
        precision += torch.eye(precision.shape[1]).unsqueeze(0).cuda()

        # Update with analytically calculated mean and covariance.
        q_z_x = distribution.Gaussian(mu, precision)
        z = q_z_x.sample()  # reparam trick
        p_x_z = self.decoder.forward(z)

        return self.loss(x, z, p_x_z, self.prior, q_z_x)
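
A standalone shape check of the precision construction above, with dummy tensors on CPU; the (1, D) shape of `decoder.logvar` is an assumption inferred from the broadcasting:

import torch

B, D, K = 2, 5, 3                      # batch, observed dim, latent dim (dummy)
W_dec = torch.randn(B, D, K)           # decoder Jacobian d mu_x / d z
logvar = torch.zeros(1, D)             # assumed shape of decoder.logvar

var_inv = torch.exp(-logvar).unsqueeze(1)                          # (1, 1, D)
precision = torch.matmul(W_dec.transpose(1, 2) * var_inv, W_dec)   # W^T Sigma^-1 W
precision = precision + torch.eye(K).unsqueeze(0)                  # + I from N(0, I) prior
assert precision.shape == (B, K, K)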
Example 6. File: models.py, Project: yyht/VLAE
    def importance_sample(self, x):
        """Importance-weighted estimate of log p(x) using the refined posterior."""
        q_z_x, _ = self.encoder.forward(x)
        mu = q_z_x.mu

        for i in range(self.n_update):
            p_x_z, W_dec = self.decoder.forward(mu, compute_jacobian=True)
            mu_new, precision = self.solve_mu(x, mu, p_x_z, W_dec)
            lr = self.update_rate(i)
            mu = (1 - lr) * mu + lr * mu_new

        p_x_z, W_dec = self.decoder.forward(mu, compute_jacobian=True)
        var_inv = torch.exp(-self.decoder.logvar).unsqueeze(1)
        precision = torch.matmul(W_dec.transpose(1, 2) * var_inv, W_dec)
        precision += torch.eye(precision.shape[1]).unsqueeze(0).cuda()

        # Update with analytically calculated mean and covariance.
        q_z_x = distribution.Gaussian(mu, precision)
        # n_importance_sample is presumably a module-level constant.
        q_z_x = q_z_x.repeat(n_importance_sample)
        z = q_z_x.sample()
        p_x_z = self.decoder(z)
        x = x.view(-1, self.image_size).unsqueeze(1).repeat(
            1, n_importance_sample, 1).view(-1, self.image_size)
        return self.importance_weighting(x, z, p_x_z, self.prior, q_z_x)
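
`importance_weighting` itself is not shown. The call above is consistent with an IWAE-style bound, so a hypothetical standalone version (the body and the explicit n_samples argument are assumptions, not the author's code) might be:

import math
import torch

def importance_weighting(x, z, p_x_z, prior, q_z_x, n_samples):
    # Log importance weight per sample: log p(x|z) + log p(z) - log q(z|x).
    log_w = (p_x_z.log_probability(x) + prior.log_probability(z)
             - q_z_x.log_probability(z)).view(-1, n_samples)
    # IWAE estimate of log p(x): log-mean-exp over the importance samples.
    return torch.logsumexp(log_w, dim=1) - math.log(n_samples)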

def binomial_kldiv(p, q):
    """KL divergence between two Binomial distributions with the same n."""
    assert p.n == q.n
    return p.n * (plogpq(p.p, q.p) + plogpq(1 - p.p, 1 - q.p))
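
Both divergence helpers call `plogpq`, which is not shown; a minimal version with the usual 0 * log 0 = 0 convention:

import math

def plogpq(p, q):
    # Contribution p * log(p / q), taking 0 * log(0) = 0.
    return 0.0 if p == 0 else p * math.log(p / q)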


def normal_kldiv(p, q):
    """KL divergence KL(p || q) between two univariate Gaussians."""
    return (math.log(q.sigma / p.sigma)
            + (p.sigma**2 + (p.mu - q.mu)**2) / (2 * q.sigma**2)
            - 1 / 2)
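
Two quick sanity checks, assuming the `d.Gaussian` interface sketched earlier: the divergence vanishes between identical normals and is positive otherwise.

assert abs(normal_kldiv(d.Gaussian(0, 1), d.Gaussian(0, 1))) < 1e-12
assert normal_kldiv(d.Gaussian(0, 1), d.Gaussian(1, 2)) > 0  # log 2 + 2/8 - 1/2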


# Normal-test setup: threshold s = 1.5, standard-normal prior, data sigma = 3.
N = 500
n_s = 1.5
n_prior = d.Gaussian(0, 1)
n_data = d.Gaussian(0, 3)

normal_boundaries, normal_ET = solve_normal(n_s,
                                            n_prior,
                                            n_data,
                                            N,
                                            ET=307,
                                            lb=300,
                                            ub=310,
                                            verbose=True)
n_heuristic = normal_heuristic(n_s, n_prior, n_data, N, normal_ET, beta=0.4)

bin_s = 0.4
bin_prior = d.Beta(3, 10)


def normal_acceptance(n, s, prior, data, alpha=0.05):
    """One-sided acceptance boundary at step n for the normal sequential test."""
    za = d.Gaussian(0, 1).ppf(alpha)
    gamma = data.sigma**2 / prior.sigma**2

    return s - za * data.sigma / math.sqrt(n + gamma)
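
For the script's values (standard-normal prior, data sigma 3, so gamma = 9), the boundary at n = 100 works out to about 1.5 + 1.645 * 3 / sqrt(109), roughly 1.97, since z_0.05 is about -1.645:

print(normal_acceptance(100, n_s, n_prior, n_data))  # ~1.97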