Example #1
def get_sampler(args):
    data_dim = np.prod(args.input_size)
    if args.input_type == "binary":
        if args.sampler == "gibbs":
            sampler = samplers.PerDimGibbsSampler(data_dim, rand=False)
        elif args.sampler == "rand_gibbs":
            sampler = samplers.PerDimGibbsSampler(data_dim, rand=True)
        elif args.sampler.startswith("bg-"):
            block_size = int(args.sampler.split('-')[1])
            sampler = block_samplers.BlockGibbsSampler(data_dim, block_size)
        elif args.sampler.startswith("hb-"):
            block_size, hamming_dist = [int(v) for v in args.sampler.split('-')[1:]]
            sampler = block_samplers.HammingBallSampler(data_dim, block_size, hamming_dist)
        elif args.sampler == "gwg":
            sampler = samplers.DiffSampler(data_dim, 1,
                                           fixed_proposal=False, approx=True, multi_hop=False, temp=2.)
        elif args.sampler.startswith("gwg-"):
            n_hops = int(args.sampler.split('-')[1])
            sampler = samplers.MultiDiffSampler(data_dim, 1, approx=True, temp=2., n_samples=n_hops)
        else:
            raise ValueError("Invalid sampler...")
    else:
        if args.sampler == "gibbs":
            sampler = samplers.PerDimMetropolisSampler(data_dim, int(args.n_out), rand=False)
        elif args.sampler == "rand_gibbs":
            sampler = samplers.PerDimMetropolisSampler(data_dim, int(args.n_out), rand=True)
        elif args.sampler == "gwg":
            sampler = samplers.DiffSamplerMultiDim(data_dim, 1, approx=True, temp=2.)
        else:
            raise ValueError("invalid sampler")
    return sampler
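# A minimal usage sketch (hypothetical argparse fields mirroring the ones read
# above; `samplers`, `block_samplers`, and `np` come from the surrounding
# module's imports):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--input_size', type=int, nargs='+', default=[28, 28])
parser.add_argument('--input_type', type=str, default='binary')
parser.add_argument('--sampler', type=str, default='gwg')
parser.add_argument('--n_out', type=int, default=256)
args = parser.parse_args()
sampler = get_sampler(args)  # e.g. --sampler hb-10-1 -> HammingBallSampler(784, 10, 1)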
Example #2
def main(args):
    makedirs("{}/sources".format(args.save_dir))

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    W = args.W_init_sigma * torch.randn((args.K,))
    W0 = args.W_init_sigma * torch.randn((1,))
    p = args.X_keep_prob * torch.ones((args.K,))
    v = args.X0_mean * torch.ones((args.K,))

    model = fhmm.FHMM(args.N, args.K, W, W0, args.obs_sigma, p, v, alt_logpx=args.alt)
    model.to(device)
    print("device is", device)

    # generate data
    Xgt = model.sample_X(1)
    p_y_given_Xgt = model.p_y_given_x(Xgt)

    mu = p_y_given_Xgt.loc
    mu_true = mu[0]
    plt.clf()
    plt.plot(mu_true.detach().cpu().numpy(), label="mean")
    ygt = p_y_given_Xgt.sample()[0]
    plt.plot(ygt.detach().cpu().numpy(), label='sample')
    plt.legend()
    plt.savefig("{}/data.png".format(args.save_dir))
    ygt = ygt.to(device)

    for k in range(args.K):
        plt.clf()
        plt.plot(Xgt[0, :, k].detach().cpu().numpy())
        plt.savefig("{}/sources/x_{}.png".format(args.save_dir, k))


    logp_joint_real = model.log_p_joint(ygt, Xgt).item()
    print("joint likelihood of real data is {}".format(logp_joint_real))

    log_joints = {}
    diffs = {}
    times = {}
    recons = {}
    ars = {}
    hops = {}
    phops = {}
    mus = {}

    dim = args.K * args.N
    x_init = model.sample_X(args.n_test_samples).to(device)
    samp_model = lambda _x: model.log_p_joint(ygt, _x)

    temps = ['bg-1', 'bg-2', 'hb-10-1', 'gwg', 'gwg-3', 'gwg-5']
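    # Sampler keys: 'bg-k' = block Gibbs with blocks of size k, 'hb-B-H' =
    # Hamming ball sampler with block size B and radius H, 'gwg' =
    # Gibbs-with-gradients, 'gwg-k' = its multi-hop variant drawing k
    # proposed moves per step.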
    for temp in temps:
        makedirs("{}/{}".format(args.save_dir, temp))
        if temp == 'dim-gibbs':
            sampler = samplers.PerDimGibbsSampler(dim)
        elif temp == "rand-gibbs":
            sampler = samplers.PerDimGibbsSampler(dim, rand=True)
        elif "bg-" in temp:
            block_size = int(temp.split('-')[1])
            sampler = block_samplers.BlockGibbsSampler(dim, block_size)
        elif "hb-" in temp:
            block_size, hamming_dist = [int(v) for v in temp.split('-')[1:]]
            sampler = block_samplers.HammingBallSampler(dim, block_size, hamming_dist)
        elif temp == "gwg":
            sampler = samplers.DiffSampler(dim, 1,
                                           fixed_proposal=False, approx=True, multi_hop=False, temp=2.)
        elif "gwg-" in temp:
            n_hops = int(temp.split('-')[1])
            sampler = samplers.MultiDiffSampler(dim, 1,
                                                approx=True, temp=2., n_samples=n_hops)
        else:
            raise ValueError("Invalid sampler...")
        
        x = x_init.clone().view(x_init.size(0), -1)

        diffs[temp] = []

        log_joints[temp] = []
        ars[temp] = []
        hops[temp] = []
        phops[temp] = []
        recons[temp] = []
        start_time = time.time()
        for i in range(args.n_steps + 1):
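            # If annealing is enabled, interpolate the observation noise from
            # args.anneal to args.obs_sigma over the run before evaluating
            # the joint.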
            if args.anneal is None:
                sm = samp_model
            else:
                s = np.linspace(args.anneal, args.obs_sigma, args.n_steps + 1)[i]
                sm = lambda _x: model.log_p_joint(ygt, _x, sigma=s)
            xhat = sampler.step(x.detach(), sm).detach()

            # compute hamming dist
            cur_hops = (x != xhat).float().sum(-1).mean().item()
            # update trajectory
            x = xhat

            if i % 1000 == 0:
                p_y_given_x = model.p_y_given_x(x)
                mu = p_y_given_x.loc
                plt.clf()
                plt.plot(mu_true.detach().cpu().numpy(), label="true")
                plt.plot(mu[0].detach().cpu().numpy() + .01, label='mu0')
                plt.plot(mu[1].detach().cpu().numpy() - .01, label='mu1')
                plt.legend()
                plt.savefig("{}/{}/mean_{}.png".format(args.save_dir, temp, i))
                mus[temp] = mu[0].detach().cpu().numpy()

            if i % 10 == 0:
                p_y_given_x = model.p_y_given_x(x)
                mu = p_y_given_x.loc
                err = ((mu - ygt[None]) ** 2).sum(1).mean()
                recons[temp].append(err.item())

                log_j = model.log_p_joint(ygt, x)
                diff = (x.view(x.size(0), args.N, args.K) != Xgt).float().view(x.size(0), -1).mean(1)
                log_joints[temp].append(log_j.mean().item())
                diffs[temp].append(diff.mean().item())
                hops[temp].append(cur_hops)
                print("temp {}, itr = {}, log-joint = {:.4f}, "
                      "hop-dist = {:.4f}, recons = {:.4f}".format(temp, i, log_j.mean().item(), cur_hops, err.item()))

        for k in range(args.K):
            plt.clf()
            xr = x.view(x.size(0), args.N, args.K)
            plt.plot(xr[0, :, k].detach().cpu().numpy())
            plt.savefig("{}/{}/source_{}.png".format(args.save_dir, temp, k))

        times[temp] = time.time() - start_time


    plt.clf()
    for temp in temps:
        plt.plot(log_joints[temp], label=temp)
    plt.plot([logp_joint_real for _ in log_joints[temp]], label="true")
    plt.legend()
    plt.savefig("{}/joints.png".format(args.save_dir))

    plt.clf()
    for temp in temps:
        plt.plot(recons[temp], label=temp)
    plt.legend()
    plt.savefig("{}/recons.png".format(args.save_dir))

    plt.clf()
    for temp in temps:
        plt.plot(diffs[temp], label=temp)
    plt.legend()
    plt.savefig("{}/errs.png".format(args.save_dir))

    plt.clf()
    for i, temp in enumerate(temps):
        plt.plot(mus[temp] + float(i) * .01, label=temp)
    plt.plot(mu_true.detach().cpu().numpy(), label="true")
    plt.legend()
    plt.savefig("{}/mean.png".format(args.save_dir))

    plt.clf()
    for temp in temps:
        plt.plot(hops[temp], label="{}".format(temp))

    plt.legend()
    plt.savefig("{}/hops.png".format(args.save_dir))

    with open("{}/results.pkl".format(args.save_dir), 'wb') as f:
        results = {
            'hops': hops,
            'recons': recons,
            'joints': log_joints,
        }
        pickle.dump(results, f)
Example #3
def main(args):
    makedirs(args.save_dir)
    logger = open("{}/log.txt".format(args.save_dir), 'w')

    def my_print(s):
        print(s)
        logger.write(str(s) + '\n')

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # load existing data
    if args.data == "synthetic":
        train_loader, test_loader, data, ground_truth_J, ground_truth_h, ground_truth_C = utils.load_synthetic(
            args.data_file, args.batch_size)
        dim, n_out = data.size()[1:]
        ground_truth_J_norm = norm_J(ground_truth_J).to(device)
        matsave(ground_truth_J.abs().transpose(2, 1).reshape(dim * n_out, dim * n_out),
                "{}/ground_truth_J.png".format(args.save_dir))
        matsave(ground_truth_C, "{}/ground_truth_C.png".format(args.save_dir))
        matsave(ground_truth_J_norm, "{}/ground_truth_J_norm.png".format(args.save_dir))
        num_ecs = 120
        dm_indices = torch.arange(ground_truth_J_norm.size(0)).long()
    # generate the dataset
    elif args.data == "PF00018":
        train_loader, test_loader, data, num_ecs, ground_truth_J_norm, ground_truth_C = utils.load_ingraham(args)
        dim, n_out = data.size()[1:]
        ground_truth_J_norm = ground_truth_J_norm.to(device)
        matsave(ground_truth_C, "{}/ground_truth_C.png".format(args.save_dir))
        matsave(ground_truth_J_norm, "{}/ground_truth_dists.png".format(args.save_dir))
        dm_indices = torch.arange(ground_truth_J_norm.size(0)).long()

    else:
        train_loader, test_loader, data, num_ecs, ground_truth_J_norm, ground_truth_C, dm_indices = utils.load_real_protein(args)
        dim, n_out = data.size()[1:]
        ground_truth_J_norm = ground_truth_J_norm.to(device)
        matsave(ground_truth_C, "{}/ground_truth_C.png".format(args.save_dir))
        matsave(ground_truth_J_norm, "{}/ground_truth_dists.png".format(args.save_dir))

    if args.model == "lattice_potts":
        model = rbm.LatticePottsModel(int(args.dim), int(n_out), 0., 0., learn_sigma=True)
        buffer = model.init_sample(args.buffer_size)
    if args.model == "dense_potts":
        model = rbm.DensePottsModel(dim, n_out, learn_J=True, learn_bias=True)
        buffer = model.init_sample(args.buffer_size)
    elif args.model == "dense_ising":
        raise ValueError
    elif args.model == "mlp":
        raise ValueError

    model.to(device)

    # symmetrize the coupling tensor: average J[i, j, a, b] with J[j, i, b, a]
    def get_J():
        j = model.J
        jt = j.transpose(0, 1).transpose(2, 3)
        return (j + jt) / 2

    def get_J_sub():
        j = get_J()
        j_sub = j[dm_indices, :][:, dm_indices]
        return j_sub

    if args.sampler == "gibbs":
        if "potts" in args.model:
            sampler = samplers.PerDimMetropolisSampler(dim, int(n_out), rand=False)
        else:
            sampler = samplers.PerDimGibbsSampler(dim, rand=False)
    elif args.sampler == "plm":
        sampler = samplers.PerDimMetropolisSampler(dim, int(n_out), rand=False)
    elif args.sampler == "rand_gibbs":
        if "potts" in args.model:
            sampler = samplers.PerDimMetropolisSampler(dim, int(n_out), rand=True)
        else:
            sampler = samplers.PerDimGibbsSampler(dim, rand=True)
    elif args.sampler == "gwg":
        if "potts" in args.model:
            sampler = samplers.DiffSamplerMultiDim(dim, 1, approx=True, temp=2.)
        else:
            sampler = samplers.DiffSampler(dim, 1, approx=True, fixed_proposal=False, temp=2.)
    else:
        assert "gwg-" in args.sampler
        n_hop = int(args.sampler.split('-')[1])
        if "potts" in args.model:
            raise ValueError
        else:
            sampler = samplers.MultiDiffSampler(model.data_dim, 1, approx=True, temp=2., n_samples=n_hop)

    my_print(device)
    my_print(model)
    my_print(buffer.size())
    my_print(sampler)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # load ckpt
    if args.ckpt_path is not None:
        d = torch.load(args.ckpt_path)
        model.load_state_dict(d['model'])
        optimizer.load_state_dict(d['optimizer'])
        sampler.load_state_dict(d['sampler'])


    # mask matrix for PLM
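    # The D x D diagonal blocks are zeroed so each site's logits (below) are
    # computed from the other sites only -- the standard pseudolikelihood
    # conditioning.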
    L, D = model.J.size(0), model.J.size(2)
    num_node = L * D
    J_mask = torch.ones((num_node, num_node)).to(device)
    for i in range(L):
        J_mask[D * i:D * i + D, D * i:D * i + D] = 0


    itr = 0
    sq_errs = []
    rmses = []
    all_inds = list(range(args.buffer_size))
    while itr < args.n_iters:
        for x in train_loader:
            if args.data == "synthetic":
                x = x[0].to(device)
                weights = torch.ones((x.size(0),)).to(device)
            else:
                weights = x[1].to(device)
                if args.unweighted:
                    weights = torch.ones_like(weights)
                x = x[0].to(device)

            if args.sampler == "plm":
                plm_J = model.J.transpose(2, 1).reshape(dim * n_out, dim * n_out)
                logits = torch.matmul(x.view(x.size(0), -1), plm_J * J_mask) + model.bias.view(-1)[None]
                x_inds = (torch.arange(x.size(-1))[None, None].to(x.device) * x).sum(-1)
                cross_entropy = nn.functional.cross_entropy(
                    input=logits.reshape((-1, D)),
                    target=x_inds.view(-1).long(),
                    reduction='none')
                cross_entropy = torch.sum(cross_entropy.reshape((-1, L)), -1)
                loss = (cross_entropy * weights).mean()

            else:
                buffer_inds = np.random.choice(all_inds, args.batch_size, replace=False)
                x_fake = buffer[buffer_inds].to(device)
                for k in range(args.sampling_steps):
                    x_fake = sampler.step(x_fake.detach(), model).detach()

                buffer[buffer_inds] = x_fake.detach().cpu()

                logp_real = (model(x).squeeze() * weights).mean()
                logp_fake = model(x_fake).squeeze().mean()

                obj = logp_real - logp_fake
                loss = -obj

            # add l1 reg
            loss += args.l1 * norm_J(get_J()).sum()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if itr % args.print_every == 0:
                if args.sampler == "plm":
                    my_print("({}) loss = {:.4f}".format(itr, loss.item()))
                else:
                    my_print("({}) log p(real) = {:.4f}, log p(fake) = {:.4f}, diff = {:.4f}, hops = {:.4f}".format(itr,
                                                                                                  logp_real.item(),
                                                                                                  logp_fake.item(),
                                                                                                  obj.item(),
                                                                                                  sampler._hops))

                sq_err = ((ground_truth_J_norm - norm_J(get_J_sub())) ** 2).sum()
                rmse = ((ground_truth_J_norm - norm_J(get_J_sub())) ** 2).mean().sqrt()
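                # Precision-at-k: rank coupling norms in descending order and
                # take the running mean of the ground-truth contact
                # indicators; acc_at[k] is the fraction of true contacts
                # among the top k + 1 predictions.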
                inds = torch.triu_indices(ground_truth_C.size(0), ground_truth_C.size(1), 1)
                C_inds = ground_truth_C[inds[0], inds[1]]
                J_inds = norm_J(get_J_sub())[inds[0], inds[1]]
                J_inds_sorted = torch.sort(J_inds, descending=True).indices
                C_inds_sorted = C_inds[J_inds_sorted]
                C_cumsum = C_inds_sorted.cumsum(0)
                arange = torch.arange(C_cumsum.size(0)) + 1
                acc_at = C_cumsum.float() / arange.float()
                my_print("\t err^2 = {:.4f}, rmse = {:.4f}, acc @ 50 = {:.4f}, acc @ 75 = {:.4f}, acc @ 100 = {:.4f}".format(sq_err, rmse,
                                                                                                         acc_at[50],
                                                                                                         acc_at[75],
                                                                                                         acc_at[100]))
                logger.flush()


            if itr % args.viz_every == 0:
                sq_err = ((ground_truth_J_norm - norm_J(get_J_sub())) ** 2).sum()
                rmse = ((ground_truth_J_norm - norm_J(get_J_sub())) ** 2).mean().sqrt()

                sq_errs.append(sq_err.item())
                plt.clf()
                plt.plot(sq_errs, label="sq_err")
                plt.legend()
                plt.savefig("{}/sq_err.png".format(args.save_dir))

                rmses.append(rmse.item())
                plt.clf()
                plt.plot(rmses, label="rmse")
                plt.legend()
                plt.savefig("{}/rmse.png".format(args.save_dir))


                matsave(get_J_sub().abs().transpose(2, 1).reshape(dm_indices.size(0) * n_out,
                                                                  dm_indices.size(0) * n_out),
                        "{}/model_J_{}_sub.png".format(args.save_dir, itr))
                matsave(norm_J(get_J_sub()), "{}/model_J_norm_{}_sub.png".format(args.save_dir, itr))

                matsave(get_J().abs().transpose(2, 1).reshape(dim * n_out, dim * n_out),
                        "{}/model_J_{}.png".format(args.save_dir, itr))
                matsave(norm_J(get_J()), "{}/model_J_norm_{}.png".format(args.save_dir, itr))

                inds = torch.triu_indices(ground_truth_C.size(0), ground_truth_C.size(1), 1)
                C_inds = ground_truth_C[inds[0], inds[1]]
                J_inds = norm_J(get_J_sub())[inds[0], inds[1]]
                J_inds_sorted = torch.sort(J_inds, descending=True).indices
                C_inds_sorted = C_inds[J_inds_sorted]
                C_cumsum = C_inds_sorted.cumsum(0)
                arange = torch.arange(C_cumsum.size(0)) + 1
                acc_at = C_cumsum.float() / arange.float()

                plt.clf()
                plt.plot(acc_at[:num_ecs].detach().cpu().numpy())
                plt.savefig("{}/acc_at_{}.png".format(args.save_dir, itr))

            if itr % args.ckpt_every == 0:
                my_print("Saving checkpoint to {}/ckpt.pt".format(args.save_dir))
                torch.save({
                    "model": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "sampler": sampler.state_dict()
                }, "{}/ckpt.pt".format(args.save_dir))


            itr += 1

            if itr > args.n_iters:
                sq_err = ((ground_truth_J_norm - norm_J(get_J_sub())) ** 2).sum()
                rmse = ((ground_truth_J_norm - norm_J(get_J_sub())) ** 2).mean().sqrt()
                with open("{}/sq_err.txt".format(args.save_dir), 'w') as f:
                    f.write(str(sq_err))
                with open("{}/rmse.txt".format(args.save_dir), 'w') as f:
                    f.write(str(rmse))

                torch.save({
                    "model": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "sampler": sampler.state_dict()
                }, "{}/ckpt.pt".format(args.save_dir))

                quit()
Example #4
def main(args):
    makedirs(args.save_dir)

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    model = rbm.BernoulliRBM(args.n_visible, args.n_hidden)
    model.to(device)
    print(device)

    if args.data == "mnist":
        assert args.n_visible == 784
        train_loader, test_loader, plot, viz = utils.get_data(args)

        init_data = []
        for x, _ in train_loader:
            init_data.append(x)
        init_data = torch.cat(init_data, 0)
        init_mean = init_data.mean(0).clamp(.01, .99)

        model = rbm.BernoulliRBM(args.n_visible,
                                 args.n_hidden,
                                 data_mean=init_mean)
        model.to(device)

        optimizer = torch.optim.Adam(model.parameters(), lr=args.rbm_lr)

        # train!
        itr = 0
        for x, _ in train_loader:
            x = x.to(device)
            xhat = model.gibbs_sample(v=x, n_steps=args.cd)

            d = model.logp_v_unnorm(x)
            m = model.logp_v_unnorm(xhat)

            obj = d - m
            loss = -obj.mean()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if itr % args.print_every == 0:
                print(
                    "{} | log p(data) = {:.4f}, log p(model) = {:.4f}, diff = {:.4f}"
                    .format(itr, d.mean(), m.mean(), (d - m).mean()))
            itr += 1

    else:
        model.W.data = torch.randn_like(model.W.data) * (.05**.5)
        model.b_v.data = torch.randn_like(model.b_v.data) * 1.0
        model.b_h.data = torch.randn_like(model.b_h.data) * 1.0
        viz = plot = None

    gt_samples = model.gibbs_sample(n_steps=args.mcmc_steps,
                                    n_samples=args.n_samples +
                                    args.n_test_samples,
                                    plot=True)
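    # MMD with an exp-average-Hamming kernel (mmd.exp_avg_hamming) measures
    # how far each sampler's output is from long-run Gibbs "ground truth"
    # samples; lower log-MMD is better.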
    kmmd = mmd.MMD(mmd.exp_avg_hamming, False)
    gt_samples, gt_samples2 = gt_samples[:args.n_samples], gt_samples[
        args.n_samples:]
    if plot is not None:
        plot("{}/ground_truth.png".format(args.save_dir), gt_samples2)
    opt_stat = kmmd.compute_mmd(gt_samples2, gt_samples)
    print("gt <--> gt log-mmd", opt_stat, opt_stat.log10())

    new_samples = model.gibbs_sample(n_steps=0, n_samples=args.n_test_samples)

    log_mmds = {}
    log_mmds['gibbs'] = []
    for i in range(args.n_steps):
        if i % 10 == 0:
            stat = kmmd.compute_mmd(new_samples, gt_samples)
            log_stat = stat.log10().item()
            log_mmds['gibbs'].append(log_stat)
            print("gibbs", i, stat, stat.log10())
        new_samples = model.gibbs_sample(new_samples, 1)

    r_model = samplers_old.BinaryRelaxedModel(args.n_visible, model)
    r_model.to(device)

    temps = [2.]
    for temp in temps:
        log_mmds['svgd'] = []
        target = lambda x: r_model.logp_surrogate(x, temp)
        x = model.init_dist.sample((args.n_test_samples, )).to(device)
        x = nn.Parameter(r_model.init_from_data(x))
        #x = nn.Parameter(r_model.base_dist.sample((args.n_test_samples, args.n_visible)).to(device))
        optim = torch.optim.Adam(params=[x], lr=args.lr)
        svgd = samplers_old.SVGD(optim)
        for i in range(args.n_steps):
            #svgd.step(x, target)
            svgd.discrete_step(x, r_model.logp_target, target)

            if i % 100 == 0 and plot is not None:
                if args.data == "mnist":
                    hx = samplers_old.threshold(x)
                else:
                    hx = x
                plot(
                    "{}/samples_temp_{}_{}.png".format(args.save_dir, temp, i),
                    hx)

            if i % 10 == 0:
                hard_samples = samplers_old.threshold(x)
                stat = kmmd.compute_mmd(hard_samples, gt_samples)
                log_stat = stat.log10().item()
                log_mmds['svgd'].append(log_stat)
                print("temp = {}, itr = {}, log-mmd = {:.4f}, ess = {:.4f}".
                      format(temp, i, log_stat, svgd._ess))

    sampler = samplers.DiffSampler(args.n_visible,
                                   1,
                                   fixed_proposal=False,
                                   approx=True,
                                   multi_hop=False,
                                   temp=2.)
    x = model.init_dist.sample((args.n_test_samples, )).to(device)

    log_mmds['gwg'] = []
    for i in range(args.n_steps):
        # do sampling and time it
        xhat = sampler.step(x.detach(), model).detach()

        # compute hamming dist
        cur_hops = (x != xhat).float().sum(-1).mean().item()

        # update trajectory
        x = xhat

        if i % 100 == 0 and plot is not None:
            plot("{}/samples_gwg_{}.png".format(args.save_dir, i), x)

        if i % 10 == 0:
            hard_samples = x
            stat = kmmd.compute_mmd(hard_samples, gt_samples)
            log_stat = stat.log10().item()
            log_mmds['gwg'].append(log_stat)
            print("gwg, itr = {}, log-mmd = {:.4f}, hop-dist = {:.4f}".format(
                i, log_stat, cur_hops))

    temps = [.1]
    for sampler in ["hmc", "mala"]:
        for temp in temps:
            for ss in [.001]:  # step sizes tried: [.001, .01, .1, 1.]
                name = "{}-{}-{}".format(sampler, temp, ss)
                log_mmds[name] = []

                log_temp = nn.Parameter(torch.tensor([temp]).log().to(device))
                #mala_samples = r_model.init(args.n_test_samples).to(device)
                x = model.init_dist.sample((args.n_test_samples, )).to(device)
                mala_samples = r_model.init_from_data(x)
                print("Burn in")
                for i in range(args.n_steps):
                    if sampler == "hmc":
                        mala_samples, ar, _ = r_model.hmc_step(
                            mala_samples, ss, 1,
                            log_temp.exp().detach())
                        ar = ar.mean().item()
                    else:
                        mala_samples, ar = r_model.step(mala_samples,
                                                        ss,
                                                        log_temp.exp(),
                                                        accept_dist="target",
                                                        tt=args.tt)

                    if i % 10 == 0:
                        hard_samples = samplers_old.threshold(mala_samples)
                        stat = kmmd.compute_mmd(hard_samples, gt_samples)
                        print(sampler, temp, i,
                              log_temp.mean().exp().item(), ss, ar, stat,
                              stat.log10())
                        log_mmds[name].append(stat.log10().item())

                    if i % 100 == 0 and plot is not None:
                        hx = samplers_old.threshold(mala_samples)
                        plot(
                            "{}/samples_{}_{}.png".format(
                                args.save_dir, name, i), hx)

    plt.clf()
    for temp in log_mmds.keys():
        plt.plot(log_mmds[temp], label="{}".format(temp))

    plt.legend()
    plt.savefig("{}/results.png".format(args.save_dir))

    with open("{}/results.pkl".format(args.save_dir), 'wb') as f:
        pickle.dump(log_mmds, f)
Example #5
def main(args):
    makedirs(args.save_dir)

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    model = rbm.LatticeIsingModel(args.dim, args.sigma, args.bias)
    model.to(device)
    print(device)

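    # Render flat spin vectors as dim x dim lattice images on a square grid.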
    plot = lambda p, x: torchvision.utils.save_image(x.view(
        x.size(0), 1, args.dim, args.dim),
                                                     p,
                                                     normalize=False,
                                                     nrow=int(x.size(0)**.5))
    ess_samples = model.init_sample(args.n_samples).to(device)

    hops = {}
    ess = {}
    times = {}
    chains = {}
    means = {}

    temps = ['bg-1', 'bg-2', 'hb-10-1', 'gwg', 'gwg-3', 'gwg-5']
    for temp in temps:
        if temp == 'dim-gibbs':
            sampler = samplers.PerDimGibbsSampler(model.data_dim)
        elif temp == "rand-gibbs":
            sampler = samplers.PerDimGibbsSampler(model.data_dim, rand=True)
        elif "bg-" in temp:
            block_size = int(temp.split('-')[1])
            sampler = block_samplers.BlockGibbsSampler(model.data_dim,
                                                       block_size)
        elif "hb-" in temp:
            block_size, hamming_dist = [int(v) for v in temp.split('-')[1:]]
            sampler = block_samplers.HammingBallSampler(
                model.data_dim, block_size, hamming_dist)
        elif temp == "gwg":
            sampler = samplers.DiffSampler(model.data_dim,
                                           1,
                                           fixed_proposal=False,
                                           approx=True,
                                           multi_hop=False,
                                           temp=2.)
        elif "gwg-" in temp:
            n_hops = int(temp.split('-')[1])
            sampler = samplers.MultiDiffSampler(model.data_dim,
                                                1,
                                                approx=True,
                                                temp=2.,
                                                n_samples=n_hops)
        else:
            raise ValueError("Invalid sampler...")

        x = model.init_dist.sample((args.n_test_samples, )).to(device)

        times[temp] = []
        hops[temp] = []
        chain = []
        cur_time = 0.
        mean = torch.zeros_like(x)
        for i in range(args.n_steps):
            # do sampling and time it
            st = time.time()
            xhat = sampler.step(x.detach(), model).detach()
            cur_time += time.time() - st

            # compute hamming dist
            cur_hops = (x != xhat).float().sum(-1).mean().item()

            # update trajectory
            x = xhat

            mean = mean + x
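            # Record a scalar chain summary for ESS: either the dims of one
            # chain directly, or the Hamming distance to a fixed reference
            # sample.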
            if i % args.subsample == 0:
                if args.ess_statistic == "dims":
                    chain.append(x.cpu().numpy()[0][None])
                else:
                    xc = x  #[0][None]
                    h = (xc != ess_samples[0][None]).float().sum(-1)
                    chain.append(h.detach().cpu().numpy()[None])

            if i % args.viz_every == 0 and plot is not None:
                plot(
                    "{}/temp_{}_samples_{}.png".format(
                        args.save_dir, temp, i), x)

            if i % args.print_every == 0:
                times[temp].append(cur_time)
                hops[temp].append(cur_hops)
                print("temp {}, itr = {}, hop-dist = {:.4f}".format(
                    temp, i, cur_hops))

        means[temp] = mean / args.n_steps
        chain = np.concatenate(chain, 0)
        chains[temp] = chain
        if not args.no_ess:
            ess[temp] = get_ess(chain, args.burn_in)
            print("ess = {} +/- {}".format(ess[temp].mean(), ess[temp].std()))

    ess_temps = temps
    plt.clf()
    plt.boxplot([get_log_rmse(means[temp]) for temp in ess_temps],
                labels=ess_temps,
                showfliers=False)
    plt.savefig("{}/log_rmse.png".format(args.save_dir))

    if not args.no_ess:
        ess_temps = temps
        plt.clf()
        plt.boxplot([ess[temp] for temp in ess_temps],
                    labels=ess_temps,
                    showfliers=False)
        plt.savefig("{}/ess.png".format(args.save_dir))

        plt.clf()
        plt.boxplot([
            ess[temp] / times[temp][-1] / (1. - args.burn_in)
            for temp in ess_temps
        ],
                    labels=ess_temps,
                    showfliers=False)
        plt.savefig("{}/ess_per_sec.png".format(args.save_dir))

    plt.clf()
    for temp in temps:
        plt.plot(hops[temp], label="{}".format(temp))

    plt.legend()
    plt.savefig("{}/hops.png".format(args.save_dir))

    for temp in temps:
        plt.clf()
        plt.plot(chains[temp][:, 0])
        plt.savefig("{}/trace_{}.png".format(args.save_dir, temp))

    with open("{}/results.pkl".format(args.save_dir), 'wb') as f:
        results = {'ess': ess, 'hops': hops, 'chains': chains, 'means': means}
        pickle.dump(results, f)
Example #6
def main(args):
    makedirs(args.save_dir)

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    model = rbm.BernoulliRBM(args.n_visible, args.n_hidden)
    model.to(device)
    print(device)

    if args.data == "mnist":
        assert args.n_visible == 784
        train_loader, test_loader, plot, viz = utils.get_data(args)

        init_data = []
        for x, _ in train_loader:
            init_data.append(x)
        init_data = torch.cat(init_data, 0)
        init_mean = init_data.mean(0).clamp(.01, .99)

        model = rbm.BernoulliRBM(args.n_visible,
                                 args.n_hidden,
                                 data_mean=init_mean)
        model.to(device)

        optimizer = torch.optim.Adam(model.parameters(), lr=args.rbm_lr)

        # train!
        itr = 0
        for x, _ in train_loader:
            x = x.to(device)
            xhat = model.gibbs_sample(v=x, n_steps=args.cd)

            d = model.logp_v_unnorm(x)
            m = model.logp_v_unnorm(xhat)

            obj = d - m
            loss = -obj.mean()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if itr % args.print_every == 0:
                print(
                    "{} | log p(data) = {:.4f}, log p(model) = {:.4f}, diff = {:.4f}"
                    .format(itr, d.mean(), m.mean(), (d - m).mean()))
            itr += 1

    else:
        model.W.data = torch.randn_like(model.W.data) * (.05**.5)
        model.b_v.data = torch.randn_like(model.b_v.data) * 1.0
        model.b_h.data = torch.randn_like(model.b_h.data) * 1.0
        viz = plot = None

    gt_samples = model.gibbs_sample(n_steps=args.gt_steps,
                                    n_samples=args.n_samples +
                                    args.n_test_samples,
                                    plot=True)
    kmmd = mmd.MMD(mmd.exp_avg_hamming, False)
    gt_samples, gt_samples2 = gt_samples[:args.n_samples], gt_samples[
        args.n_samples:]
    if plot is not None:
        plot("{}/ground_truth.png".format(args.save_dir), gt_samples2)
    opt_stat = kmmd.compute_mmd(gt_samples2, gt_samples)
    print("gt <--> gt log-mmd", opt_stat, opt_stat.log10())

    new_samples = model.gibbs_sample(n_steps=0, n_samples=args.n_test_samples)

    log_mmds = {}
    log_mmds['gibbs'] = []
    ars = {}
    hops = {}
    ess = {}
    times = {}
    chains = {}
    chain = []

    times['gibbs'] = []
    start_time = time.time()
    for i in range(args.n_steps):
        if i % args.print_every == 0:
            stat = kmmd.compute_mmd(new_samples, gt_samples)
            log_stat = stat.log10().item()
            log_mmds['gibbs'].append(log_stat)
            print("gibbs", i, stat, stat.log10())
            times['gibbs'].append(time.time() - start_time)
        new_samples = model.gibbs_sample(new_samples, 1)
        if i % args.subsample == 0:
            if args.ess_statistic == "dims":
                chain.append(new_samples.cpu().numpy()[0][None])
            else:
                xc = new_samples[0][None]
                h = (xc != gt_samples).float().sum(-1)
                chain.append(h.detach().cpu().numpy()[None])

    chain = np.concatenate(chain, 0)
    chains['gibbs'] = chain
    ess['gibbs'] = get_ess(chain, args.burn_in)
    print("ess = {} +/- {}".format(ess['gibbs'].mean(), ess['gibbs'].std()))

    temps = ['bg-1', 'bg-2', 'hb-10-1', 'gwg', 'gwg-3', 'gwg-5']
    for temp in temps:
        if temp == 'dim-gibbs':
            sampler = samplers.PerDimGibbsSampler(args.n_visible)
        elif temp == "rand-gibbs":
            sampler = samplers.PerDimGibbsSampler(args.n_visible, rand=True)
        elif "bg-" in temp:
            block_size = int(temp.split('-')[1])
            sampler = block_samplers.BlockGibbsSampler(args.n_visible,
                                                       block_size)
        elif "hb-" in temp:
            block_size, hamming_dist = [int(v) for v in temp.split('-')[1:]]
            sampler = block_samplers.HammingBallSampler(
                args.n_visible, block_size, hamming_dist)
        elif temp == "gwg":
            sampler = samplers.DiffSampler(args.n_visible,
                                           1,
                                           fixed_proposal=False,
                                           approx=True,
                                           multi_hop=False,
                                           temp=2.)
        elif "gwg-" in temp:
            n_hops = int(temp.split('-')[1])
            sampler = samplers.MultiDiffSampler(args.n_visible,
                                                1,
                                                approx=True,
                                                temp=2.,
                                                n_samples=n_hops)
        else:
            raise ValueError("Invalid sampler...")

        x = model.init_dist.sample((args.n_test_samples, )).to(device)

        log_mmds[temp] = []
        ars[temp] = []
        hops[temp] = []
        times[temp] = []
        chain = []
        cur_time = 0.
        for i in range(args.n_steps):
            # do sampling and time it
            st = time.time()
            xhat = sampler.step(x.detach(), model).detach()
            cur_time += time.time() - st

            # compute hamming dist
            cur_hops = (x != xhat).float().sum(-1).mean().item()

            # update trajectory
            x = xhat

            if i % args.subsample == 0:
                if args.ess_statistic == "dims":
                    chain.append(x.cpu().numpy()[0][None])
                else:
                    xc = x[0][None]
                    h = (xc != gt_samples).float().sum(-1)
                    chain.append(h.detach().cpu().numpy()[None])

            if i % args.viz_every == 0 and plot is not None:
                plot(
                    "{}/temp_{}_samples_{}.png".format(
                        args.save_dir, temp, i), x)

            if i % args.print_every == 0:
                hard_samples = x
                stat = kmmd.compute_mmd(hard_samples, gt_samples)
                log_stat = stat.log10().item()
                log_mmds[temp].append(log_stat)
                times[temp].append(cur_time)
                hops[temp].append(cur_hops)
                print("temp {}, itr = {}, log-mmd = {:.4f}, hop-dist = {:.4f}".
                      format(temp, i, log_stat, cur_hops))
        chain = np.concatenate(chain, 0)
        ess[temp] = get_ess(chain, args.burn_in)
        chains[temp] = chain
        print("ess = {} +/- {}".format(ess[temp].mean(), ess[temp].std()))

    ess_temps = temps
    plt.clf()
    plt.boxplot([ess[temp] for temp in ess_temps],
                labels=ess_temps,
                showfliers=False)
    plt.savefig("{}/ess.png".format(args.save_dir))

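    # ESS per second: only the post-burn-in fraction of wall-clock time is
    # credited as producing effective samples.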
    plt.clf()
    plt.boxplot([
        ess[temp] / times[temp][-1] / (1. - args.burn_in) for temp in ess_temps
    ],
                labels=ess_temps,
                showfliers=False)
    plt.savefig("{}/ess_per_sec.png".format(args.save_dir))

    plt.clf()
    for temp in temps + ['gibbs']:
        plt.plot(log_mmds[temp], label="{}".format(temp))

    plt.legend()
    plt.savefig("{}/results.png".format(args.save_dir))

    plt.clf()
    for temp in temps:
        plt.plot(ars[temp], label="{}".format(temp))

    plt.legend()
    plt.savefig("{}/ars.png".format(args.save_dir))

    plt.clf()
    for temp in temps:
        plt.plot(hops[temp], label="{}".format(temp))

    plt.legend()
    plt.savefig("{}/hops.png".format(args.save_dir))

    for temp in temps:
        plt.clf()
        plt.plot(chains[temp][:, 0])
        plt.savefig("{}/trace_{}.png".format(args.save_dir, temp))

    with open("{}/results.pkl".format(args.save_dir), 'wb') as f:
        results = {
            'ess': ess,
            'hops': hops,
            'log_mmds': log_mmds,
            'chains': chains,
            'times': times
        }
        pickle.dump(results, f)
Example #7
def main(args):
    makedirs(args.save_dir)
    logger = open("{}/log.txt".format(args.save_dir), 'w')

    def my_print(s):
        print(s)
        logger.write(str(s) + '\n')

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # load existing data
    if args.data == "mnist" or args.data_file is not None:
        train_loader, test_loader, plot, viz = utils.get_data(args)
    # generate the dataset
    else:
        data, data_model = utils.generate_data(args)
        my_print(
            "we have created your data, but what have you done for me lately?????"
        )
        with open("{}/data.pkl".format(args.save_dir), 'wb') as f:
            pickle.dump(data, f)
        if args.data_model == "er_ising":
            ground_truth_J = data_model.J.detach().cpu()
            with open("{}/J.pkl".format(args.save_dir), 'wb') as f:
                pickle.dump(ground_truth_J, f)
        quit()

    if args.model == "lattice_potts":
        model = rbm.LatticePottsModel(int(args.dim),
                                      int(args.n_state),
                                      0.,
                                      0.,
                                      learn_sigma=True)
        buffer = model.init_sample(args.buffer_size)
    elif args.model == "lattice_ising":
        model = rbm.LatticeIsingModel(int(args.dim), 0., 0., learn_sigma=True)
        buffer = model.init_sample(args.buffer_size)
    elif args.model == "lattice_ising_3d":
        model = rbm.LatticeIsingModel(int(args.dim),
                                      .2,
                                      learn_G=True,
                                      lattice_dim=3)
        ground_truth_J = model.J.clone().to(device)
        model.G.data = torch.randn_like(model.G.data) * .01
        model.sigma.data = torch.ones_like(model.sigma.data)
        buffer = model.init_sample(args.buffer_size)
        plt.clf()
        plt.matshow(ground_truth_J.detach().cpu().numpy())
        plt.savefig("{}/ground_truth.png".format(args.save_dir))
    elif args.model == "lattice_ising_2d":
        model = rbm.LatticeIsingModel(int(args.dim),
                                      args.sigma,
                                      learn_G=True,
                                      lattice_dim=2)
        ground_truth_J = model.J.clone().to(device)
        model.G.data = torch.randn_like(model.G.data) * .01
        model.sigma.data = torch.ones_like(model.sigma.data)
        buffer = model.init_sample(args.buffer_size)
        plt.clf()
        plt.matshow(ground_truth_J.detach().cpu().numpy())
        plt.savefig("{}/ground_truth.png".format(args.save_dir))
    elif args.model == "er_ising":
        model = rbm.ERIsingModel(int(args.dim), 2, learn_G=True)
        model.G.data = torch.randn_like(model.G.data) * .01
        buffer = model.init_sample(args.buffer_size)
        with open(args.graph_file, 'rb') as f:
            ground_truth_J = pickle.load(f)
            plt.clf()
            plt.matshow(ground_truth_J.detach().cpu().numpy())
            plt.savefig("{}/ground_truth.png".format(args.save_dir))
        ground_truth_J = ground_truth_J.to(device)
    elif args.model == "rbm":
        model = rbm.BernoulliRBM(args.dim, args.n_hidden)
        buffer = model.init_dist.sample((args.buffer_size, ))
    elif args.model == "dense_potts":
        raise ValueError
    elif args.model == "dense_ising":
        raise ValueError
    elif args.model == "mlp":
        raise ValueError

    model.to(device)
    buffer = buffer.to(device)

    # symmetrize the coupling matrix: average J with its transpose
    def get_J():
        j = model.J
        return (j + j.t()) / 2

    if args.sampler == "gibbs":
        if "potts" in args.model:
            sampler = samplers.PerDimMetropolisSampler(model.data_dim,
                                                       int(args.n_state),
                                                       rand=False)
        else:
            sampler = samplers.PerDimGibbsSampler(model.data_dim, rand=False)
    elif args.sampler == "rand_gibbs":
        if "potts" in args.model:
            sampler = samplers.PerDimMetropolisSampler(model.data_dim,
                                                       int(args.n_state),
                                                       rand=True)
        else:
            sampler = samplers.PerDimGibbsSampler(model.data_dim, rand=True)
    elif args.sampler == "gwg":
        if "potts" in args.model:
            sampler = samplers.DiffSamplerMultiDim(model.data_dim,
                                                   1,
                                                   approx=True,
                                                   temp=2.)
        else:
            sampler = samplers.DiffSampler(model.data_dim,
                                           1,
                                           approx=True,
                                           fixed_proposal=False,
                                           temp=2.)
    else:
        assert "gwg-" in args.sampler
        n_hop = int(args.sampler.split('-')[1])
        if "potts" in args.model:
            raise ValueError
        else:
            sampler = samplers.MultiDiffSampler(model.data_dim,
                                                1,
                                                approx=True,
                                                temp=2.,
                                                n_samples=n_hop)

    my_print(device)
    my_print(model)
    my_print(buffer.size())
    my_print(sampler)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    itr = 0
    sigmas = []
    sq_errs = []
    rmses = []
    while itr < args.n_iters:
        for x in train_loader:
            x = x[0].to(device)

            for k in range(args.sampling_steps):
                buffer = sampler.step(buffer.detach(), model).detach()

            logp_real = model(x).squeeze().mean()
            logp_fake = model(buffer).squeeze().mean()

            obj = logp_real - logp_fake
            loss = -obj
            loss += args.l1 * get_J().abs().sum()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
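            # Zero the diagonal of G after each step: the model has no
            # self-interactions, and the gradient update can reintroduce them.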
            model.G.data *= (1. - torch.eye(model.G.data.size(0))).to(model.G)

            if itr % args.print_every == 0:
                my_print(
                    "({}) log p(real) = {:.4f}, log p(fake) = {:.4f}, diff = {:.4f}, hops = {:.4f}"
                    .format(itr, logp_real.item(), logp_fake.item(),
                            obj.item(), sampler._hops))
                if args.model in ("lattice_potts", "lattice_ising"):
                    my_print(
                        "\tsigma true = {:.4f}, current sigma = {:.4f}".format(
                            args.sigma, model.sigma.data.item()))
                else:
                    sq_err = ((ground_truth_J - get_J())**2).sum()
                    rmse = ((ground_truth_J - get_J())**2).mean().sqrt()
                    my_print("\t err^2 = {:.4f}, rmse = {:.4f}".format(
                        sq_err, rmse))
                    print(ground_truth_J)
                    print(get_J())

            if itr % args.viz_every == 0:
                if args.model in ("lattice_potts", "lattice_ising"):
                    sigmas.append(model.sigma.data.item())
                    plt.clf()
                    plt.plot(sigmas, label="model")
                    plt.plot([args.sigma for s in sigmas], label="gt")
                    plt.legend()
                    plt.savefig("{}/sigma.png".format(args.save_dir))
                else:
                    sq_err = ((ground_truth_J - get_J())**2).sum()
                    sq_errs.append(sq_err.item())
                    plt.clf()
                    plt.plot(sq_errs, label="sq_err")
                    plt.legend()
                    plt.savefig("{}/sq_err.png".format(args.save_dir))

                    rmse = ((ground_truth_J - get_J())**2).mean().sqrt()
                    rmses.append(rmse.item())
                    plt.clf()
                    plt.plot(rmses, label="rmse")
                    plt.legend()
                    plt.savefig("{}/rmse.png".format(args.save_dir))

                    plt.clf()
                    plt.matshow(get_J().detach().cpu().numpy())
                    plt.savefig("{}/model_{}.png".format(args.save_dir, itr))

                plot("{}/data_{}.png".format(args.save_dir, itr),
                     x.detach().cpu())
                plot("{}/buffer_{}.png".format(args.save_dir, itr),
                     buffer[:args.batch_size].detach().cpu())

            itr += 1

            if itr > args.n_iters:
                if args.model in ("lattice_potts", "lattice_ising"):
                    final_sigma = model.sigma.data.item()
                    with open("{}/sigma.txt".format(args.save_dir), 'w') as f:
                        f.write(str(final_sigma))
                else:
                    sq_err = ((ground_truth_J - get_J())**2).sum().item()
                    rmse = ((ground_truth_J - get_J())**2).mean().sqrt().item()
                    with open("{}/sq_err.txt".format(args.save_dir), 'w') as f:
                        f.write(str(sq_err))
                    with open("{}/rmse.txt".format(args.save_dir), 'w') as f:
                        f.write(str(rmse))

                quit()