def generate_field_map(model):
    """Plot the true and model-estimated score fields of a 2-D toy mixture.

    Produces two figures on a 20x20 grid over [-8, 8]^2:
      - figures/fig2a.svg: log-density heatmap + true score quiver.
      - figures/fig2b.svg: log-density heatmap + `model`-predicted score quiver.

    Args:
        model: callable taking a float tensor of shape (N, 2) and returning
            estimated scores of the same shape.
    """
    # NOTE: np.float was removed in NumPy 1.24; np.float64 matches the old
    # alias's behavior exactly.
    probabilities = np.array([1.0 / 5, 4.0 / 5], dtype=np.float64)
    means = np.array([[-5, -5], [5, 5]], dtype=np.float64)
    toy_ds = toy_experiment_dataset.ToyExperimentDataset(
        probabilities, means, 1)

    # Build a flat (n*n, 2) array of grid coordinates.
    n = 20
    xs = np.linspace(-8, 8, n)
    ys = np.linspace(-8, 8, n)
    xx, yy = np.meshgrid(xs, ys)
    xx = xx.flatten()
    yy = yy.flatten()
    grid_values = np.stack((xx, yy), axis=1)

    log_density = toy_ds.compute_log_p(grid_values).detach().numpy()
    density_color_grid = log_density.reshape(n, n)

    # Figure 2a: ground-truth scores over the density heatmap.
    true_gradient = toy_ds.compute_p_gradient(grid_values).numpy()
    plt.pcolormesh(xs, ys, density_color_grid)
    plt.quiver(grid_values[:, 0], grid_values[:, 1], true_gradient[:, 0],
               true_gradient[:, 1])
    plt.title('Data scores')
    plt.savefig('figures/fig2a.svg')
    plt.show()

    # Figure 2b: model-estimated scores; no_grad since we only evaluate.
    grid_values_torch = torch.tensor(grid_values).float()
    with torch.no_grad():
        estimated_gradient = model(grid_values_torch).numpy()

    plt.pcolormesh(xs, ys, density_color_grid)
    plt.quiver(grid_values[:, 0], grid_values[:, 1], estimated_gradient[:, 0],
               estimated_gradient[:, 1])
    plt.title('Estimated scores')
    plt.savefig('figures/fig2b.svg')
    plt.show()
def langevin(probs, means, num, input, lr=0.01, step=1000):
    """Run unadjusted Langevin dynamics on `input` (updated in place).

    Each iteration takes a half-step along the score of the toy mixture and
    adds Gaussian noise scaled by sqrt(lr). Returns the same tensor object.
    NOTE(review): `num` is accepted but unused — kept for interface parity.
    """
    ds = toy_experiment_dataset.ToyExperimentDataset(probs, means, 1)
    noise_scale = np.sqrt(lr)
    for _ in trange(step):
        score = ds.compute_p_gradient(input).float().detach()
        input += lr * score / 2
        input += torch.randn_like(input) * noise_scale
    return input
# Exemple #3
# 0
    def test_can_generate_on_many_samples_many_modes(self):
        """Gradient of a 2-mode mixture keeps the (batch, dim) input shape."""
        # np.float was removed in NumPy 1.24; np.float64 is the old alias.
        probs = np.array([1.0 / 5, 4.0 / 5], dtype=np.float64)
        means = np.array([[-5, -5], [5, 5]], dtype=np.float64)
        num = 1
        ds = toy_experiment_dataset.ToyExperimentDataset(probs, means, num)

        test_dataset = np.ones((100, 2)) * np.array([-5, 5])
        gradient = ds.compute_p_gradient(test_dataset)
        self.assertTupleEqual(gradient.shape, (100, 2))
# Exemple #4
# 0
    def test_can_compute_gradients(self):
        """Score at the mean of a single standard mode is exactly zero."""
        # np.float was removed in NumPy 1.24; np.float64 is the old alias.
        probs = np.array([1.0], dtype=np.float64)
        means = np.array([[0, 0]], dtype=np.float64)
        num = 1
        ds = toy_experiment_dataset.ToyExperimentDataset(probs, means, num)

        test_dataset = np.array([[0.0, 0.0]])
        gradient = ds.compute_p_gradient(test_dataset)
        self.assertTupleEqual(gradient.shape, (1, 2))
        np.testing.assert_allclose(gradient.cpu().numpy(), np.array([[0, 0]]))
# Exemple #5
# 0
    def test_generated_data_has_expected_mean(self):
        """Sample mean matches the mixture mean 0.2*(-5,-5) + 0.8*(5,5)."""
        # np.float was removed in NumPy 1.24; np.float64 is the old alias.
        probs = np.array([1.0 / 5, 4.0 / 5], dtype=np.float64)
        means = np.array([[-5, -5], [5, 5]], dtype=np.float64)
        num = 10**4

        ds = toy_experiment_dataset.ToyExperimentDataset(probs, means, num)
        ds_mean = ds.tensors[0].mean(axis=0).numpy()

        expected_mean = np.array([3.0, 3.0])
        np.testing.assert_allclose(ds_mean, expected_mean, atol=0.1)
def anneal_langevin(probs, means, num, input, sigmas, lr=0.01, step=1000):
    """Run annealed Langevin dynamics on `input` (updated in place).

    For each noise level `s` in `sigmas` (largest first), builds a
    sigma-smoothed dataset and runs `step` Langevin iterations with the
    per-level step size lr * (s / sigmas[-1])**2.

    Returns the same tensor object that was passed in.
    NOTE(review): `num` is accepted but unused — kept for interface parity.
    """
    for s in sigmas:
        ds = toy_experiment_dataset.ToyExperimentDataset(probs,
                                                         means,
                                                         1,
                                                         sigma=s)
        # Hoisted out of the inner loop: depends only on s, not on i.
        lr_new = lr * np.power(s / sigmas[-1], 2)
        noise_scale = np.sqrt(lr_new)
        for i in trange(step):
            input += lr_new * ds.compute_p_gradient(input).float().detach() / 2
            input += torch.randn_like(input) * noise_scale
    return input
def train(model, optimizer):
    """Train a score model on the toy 2-mode mixture with sliced score matching.

    Streams one pass over a freshly generated dataset in mini-batches,
    minimizing the variance-reduced sliced score estimation loss.

    Args:
        model: score network mapping (batch, 2) float tensors to (batch, 2).
        optimizer: torch optimizer over `model`'s parameters.

    Returns:
        The trained model (same object, updated in place).
    """
    # np.float was removed in NumPy 1.24; np.float64 matches the old alias.
    probs = np.array([1.0 / 5, 4.0 / 5], dtype=np.float64)
    means = np.array([[-5, -5], [5, 5]], dtype=np.float64)
    num = 128 * 10**4
    batch_size = 128
    toy_ds = toy_experiment_dataset.ToyExperimentDataset(probs, means, num)
    toy_dl = torch.utils.data.DataLoader(toy_ds, batch_size=batch_size)
    for points in tqdm(toy_dl, desc='Training model', total=num // batch_size):
        # points is a 1-tuple of tensors (TensorDataset convention).
        loss, _, _ = sliced_score_estimation_vr(model, points[0].float())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return model
def main():
    """Generate toy samples and plot the (normalized) true gradient field.

    Saves 'generated_toy_data.png' (sample scatter) and 'gradient_plot.png'
    (quiver of the density gradient on a 20x20 grid over [-8, 8]^2).
    """
    # np.float was removed in NumPy 1.24; np.float64 matches the old alias.
    probabilities = np.array([1.0 / 5, 4.0 / 5], dtype=np.float64)
    means = np.array([[-5, -5], [5, 5]], dtype=np.float64)
    num_generate = 1280
    toy_ds = toy_experiment_dataset.ToyExperimentDataset(
        probabilities, means, num_generate)
    plot_2d_tensor(toy_ds.tensors[0])
    plt.savefig('generated_toy_data.png')

    # Flat (20*20, 2) array of grid coordinates.
    xs = np.linspace(-8, 8, 20)
    ys = np.linspace(-8, 8, 20)
    xx, yy = np.meshgrid(xs, ys)
    xx = xx.flatten()
    yy = yy.flatten()
    grid_values = np.stack((xx, yy), axis=1)
    evaluated_gradient = toy_ds.compute_p_gradient(grid_values).numpy()
    # Scale so the longest arrow has unit length, for a readable quiver.
    evaluated_gradient /= np.sqrt(np.max(np.sum(evaluated_gradient**2,
                                                axis=1)))
    plt.quiver(grid_values[:, 0], grid_values[:, 1], evaluated_gradient[:, 0],
               evaluated_gradient[:, 1])
    plt.savefig('gradient_plot.png')
def _plot_mode_split(points, title, path):
    """Scatter `points` split by the sign of x, labeling each half's fraction.

    Args:
        points: (N, 2) numpy array of 2-D samples.
        title: plot title.
        path: output file path for plt.savefig.
    """
    # One mask per cluster, applied to BOTH coordinates. The original code
    # masked x with the x-sign mask but y with the y-sign mask, which pairs
    # coordinates from different points and crashes when the counts differ.
    left = points[:, 0] < 0
    right = points[:, 0] > 0
    d1 = left.sum() / 1280
    d2 = right.sum() / 1280
    plt.scatter(points[left, 0], points[left, 1], s=1, label='Доля = ' + str(d1))
    plt.scatter(points[right, 0], points[right, 1], s=1, label='Доля = ' + str(d2))
    plt.title(title)
    plt.legend()
    plt.savefig(path)
    plt.show()


def toy_generate():
    """Reproduce figures 3a-3c: raw samples, Langevin, and annealed Langevin.

    Each figure scatters 1280 points colored by the sign of x, with the
    fraction of points in each half reported in the legend.
    """
    # np.float was removed in NumPy 1.24; np.float64 matches the old alias.
    probs = np.array([1.0 / 5, 4.0 / 5], dtype=np.float64)
    means = np.array([[-5, -5], [5, 5]], dtype=np.float64)
    num = 1280

    ds = toy_experiment_dataset.ToyExperimentDataset(probs, means, num)

    # Fig3a: samples drawn directly from the mixture.
    dataset = ds.tensors[0].numpy()
    _plot_mode_split(dataset, 'Samples', "figures/fig3a.svg")

    # Fig3b: plain Langevin dynamics from a uniform start over [-8, 8]^2.
    start_point = torch.rand(1280, 2) * 16 - 8
    after_lan = langevin(probs, means, num, start_point, lr=0.1,
                         step=1000).detach().numpy()
    _plot_mode_split(after_lan, 'Langevin', "figures/fig3b.svg")

    # Fig3c: annealed Langevin over a geometric noise schedule 20 -> 0.7.
    sigmas = np.geomspace(20, 0.7, 10)
    start_point = torch.rand(1280, 2) * 16 - 8
    after_lan = anneal_langevin(probs,
                                means,
                                num,
                                start_point,
                                sigmas,
                                lr=0.1,
                                step=100).detach().numpy()
    _plot_mode_split(after_lan, 'Annealed Langevin', "figures/fig3c.svg")
# Exemple #10
# 0
 def test_generated_data_has_expected_shape(self):
     """Generated dataset tensor has shape (num_samples, 2)."""
     # np.float was removed in NumPy 1.24; np.float64 is the old alias.
     probs = np.array([1.0 / 5, 4.0 / 5], dtype=np.float64)
     means = np.array([[-5, -5], [5, 5]], dtype=np.float64)
     num = 10**2
     ds = toy_experiment_dataset.ToyExperimentDataset(probs, means, num)
     self.assertTupleEqual(ds.tensors[0].shape, (10**2, 2))