Example #1
        # Poisson GLMM log-likelihood: the linear predictor combines fixed
        # effects fe(x) and random effects z'u, clipped for numerical stability;
        # log p(y | eta) = y*eta - exp(eta) - log Gamma(y + 1)
        linear = self.fe(x).squeeze() + F.sum(z * u[0], axis=1)
        linear = F.clip(linear, -10.0, 10.0)
        log_pdf = y * linear - F.exp(linear) - F.gammaln(y + 1.0)
        return log_pdf
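
# A quick numerical check (a sketch using only NumPy/SciPy; not part of the
# source): with mu = exp(eta), the quantity y*eta - exp(eta) - gammaln(y + 1)
# above is exactly the Poisson log-pmf, scipy.stats.poisson.logpmf(y, mu).
import numpy as np
from scipy.special import gammaln
from scipy.stats import poisson

eta_chk = np.clip(np.array([-12.0, 0.5, 3.0]), -10.0, 10.0)
y_chk = np.array([0.0, 1.0, 4.0])
assert np.allclose(y_chk * eta_chk - np.exp(eta_chk) - gammaln(y_chk + 1.0),
                   poisson.logpmf(y_chk, np.exp(eta_chk)))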


# Model
np.random.seed(123)
mx.random.seed(123)
dim_all = px + pz + 1
model = LatentModel(GLMMLogLik(px, pz),
                    encoder=VAEEncoder([dim_all, 256, 128],
                                       latent_dim=pz,
                                       act="softrelu"),
                    decoder=VAEDecoder([128, 256, pz],
                                       latent_dim=pz,
                                       npar=1,
                                       act="softrelu"),
                    sim_z=10,
                    nchain=30,
                    ctx=ctx)
model.init(lr=0.0001, lr_bc=0.0001)   # learning rates for the VAE and bias-correction steps

# Model fitting
batch_size = 1000
est_nsamp = 100000
bhat = model.module.log_cond_pdf.fe.weight   # fixed-effect coefficient parameter
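
# The training matrix `xzy` used below is elided from this snippet. A purely
# hypothetical assembly, assuming the px fixed-effect columns `x`, the pz
# random-effect columns `z`, and the response `y` are stacked column-wise
# (matching dim_all = px + pz + 1):
# xzy = mx.nd.array(np.hstack([x, z, y.reshape(-1, 1)]))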

# VAE
model.fit(xzy, epochs=2000, batch_size=batch_size)
mu_est_vae = model.simulate_prior(est_nsamp)[0]
b_est_vae = bhat.data(ctx=ctx[0]).asnumpy()
Example #2
    t1 = time.time()
    mu0, _ = gen_mu_mixture_prior(est_nsamp, **mix_par)   # reference draws from the true prior

    # Data
    mu, x = gen_mu_mixture_prior(n, **mix_par)
    xt = mx.nd.array(x).reshape(-1, 1)

    # Empirical Bayes estimation: with x = mu + eps and eps ~ N(0, 1),
    # moment matching gives E[mu] = E[x] and Var(mu) = Var(x) - 1
    eb_mu = np.mean(x)
    eb_var = np.var(x) - 1.0
    mu_est_eb = np.random.normal(eb_mu, math.sqrt(eb_var), est_nsamp)
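
    # Defensive note (a sketch, not in the source): in small samples
    # np.var(x) - 1.0 can be negative, so a guarded variant would clip it:
    # mu_est_eb = np.random.normal(eb_mu, math.sqrt(max(eb_var, 1e-8)), est_nsamp)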

    # Model
    model = LatentModel(ConditionalNormal(dimu=1),
                        encoder=VAEEncoder([1, 50, 100, 50], latent_dim=1),
                        decoder=VAEDecoder([50, 100, 50, 1], latent_dim=1, npar=1),
                        sim_z=100, nchain=nchain, ctx=ctx)
    model.init(lr=0.01, lr_bc=0.01)

    # Model fitting
    logger.log("     => VAE")

    model.fit(xt, epochs=1000, batch_size=batch_size, eval_nll=False, verbose=False)
    mu_est_vae = model.simulate_prior(est_nsamp)[0].squeeze()
    ks = stats.kstest(mu_est_vae, true_dist)
    w = stats.wasserstein_distance(mu0, mu_est_vae)

    logger.log("        => KS = {}, p-val = {}".format(ks.statistic, ks.pvalue))
    logger.log("        => W = {}\n".format(w))
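
    # The `true_dist` passed to kstest above is elided from this snippet;
    # scipy.stats.kstest accepts a callable CDF, so a hypothetical version for
    # a two-component normal mixture (parameters illustrative only) would be:
    # true_dist = lambda u: 0.5 * stats.norm.cdf(u, -2.0, 1.0) \
    #                     + 0.5 * stats.norm.cdf(u, 2.0, 1.0)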

    logger.log("     => Bias correction")
Example #3
File: 02_exp.py  Project: yixuan/almond
def gen_mu_exp_prior(n, scale):   # reconstructed header: mu ~ Exp(scale)
    mu = np.random.exponential(scale, n)
    x = mu + np.random.randn(n)
    return mu, x

# Data
np.random.seed(123)
mx.random.seed(123)
n = 1000
exp_scale = 2.0

mu, x = gen_mu_exp_prior(n, exp_scale)
xt = mx.nd.array(x).reshape(-1, 1)

# Model
model = LatentModel(ConditionalNormal(dimu=1),
                    encoder=VAEEncoder([1, 10], latent_dim=1),
                    decoder=VAEDecoder([10, 1], latent_dim=1, npar=1),
                    sim_z=10, nchain=100, ctx=ctx)
model.init(lr=0.01, lr_bc=0.01)

# Model fitting
batch_size = n         # mini-batch size (full batch here)
est_nsamp = 10000      # size of Monte Carlo sample to approximate the density
epochs_vae = 1000      # epochs for VAE pre-training
epochs_bc = 1000       # epochs for the bias-correction step

# VAE
model.fit(xt, epochs=epochs_vae, batch_size=batch_size)
mu_est_vae = model.simulate_prior(est_nsamp)[0].squeeze()

# ALMOND bias correction
particles = model.fit_bc(xt, epochs=epochs_bc, warmups=100, batch_size=batch_size, burnin=10, step_size=0.01)
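
# A possible follow-up (a sketch; it assumes `fit_bc` returns array-like
# particle draws of mu): smooth the bias-corrected particles with a Gaussian
# KDE and compare against the true Exp(scale = 2) prior density.
import numpy as np
from scipy import stats

mu_bc = np.asarray(particles).ravel()
kde = stats.gaussian_kde(mu_bc)
print("KDE vs true prior density at mu = 2.0:",
      kde(2.0)[0], stats.expon(scale=exp_scale).pdf(2.0))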