Example #1
0
def positive_wavefunction_data(gpu, num_hidden):
    """Assemble the fixture bundle used by positive-wavefunction gradient tests.

    Loads the pickled TFIM-1D reference data, seeds the RNGs, builds a
    ``PositiveWavefunction`` sized to the training samples, and returns
    everything packed in a named tuple.
    """
    pkl_path = os.path.join(__tests_location__, "data", "test_grad_data.pkl")
    with open(pkl_path, "rb") as f:
        test_data = pickle.load(f)

    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)

    samples = torch.tensor(test_data["tfim1d"]["train_samples"], dtype=torch.double)
    psi = torch.tensor(test_data["tfim1d"]["target_psi"], dtype=torch.double)

    # Number of visible units is dictated by the sample width.
    n_visible = samples.shape[-1]

    nn_state = PositiveWavefunction(n_visible, num_hidden, gpu=gpu)
    grad_utils = PosGradsUtils(nn_state)

    # Move fixture tensors onto whatever device the network landed on.
    samples = samples.to(device=nn_state.device)
    space = nn_state.generate_hilbert_space(n_visible)
    psi = psi.to(device=nn_state.device)

    PositiveWavefunctionFixture = namedtuple(
        "PositiveWavefunctionFixture",
        ["data", "target_psi", "grad_utils", "nn_state", "vis"],
    )

    return PositiveWavefunctionFixture(
        data=samples,
        target_psi=psi,
        grad_utils=grad_utils,
        nn_state=nn_state,
        vis=space,
    )
Example #2
0
def test_single_positive_sample():
    """A single (1-D) visible sample must stay 1-D through a Gibbs half-step.

    Draws one sample, samples the hidden layer from it, and checks that the
    resulting visible conditional probability is a vector, not a batch.
    """
    nn_state = PositiveWavefunction(10, 7, gpu=False)

    sample = nn_state.sample(k=10).squeeze()
    h_sample = nn_state.sample_h_given_v(sample)
    v_prob = nn_state.prob_v_given_h(h_sample)

    # BUG FIX: the assertion message was truncated mid-sentence
    # ("should give a "), which told the reader nothing on failure.
    msg = "Single hidden sample should give a single visible sample!"
    assert v_prob.dim() == 1, msg
Example #3
0
def test_positive_wavefunction_psi():
    """The imaginary part of psi must be exactly zero for a positive wavefunction."""
    nn_state = PositiveWavefunction(10, gpu=False)

    v = torch.ones(10, dtype=torch.double)
    # psi() returns (real, imag); index 1 is the imaginary component.
    imag_part = nn_state.psi(v)[1].to(v)

    msg = "PositiveWavefunction is giving a non-zero imaginary part!"
    assert torch.equal(imag_part, torch.zeros(1).to(v)), msg
Example #4
0
def test_sampling_with_overwrite():
    """sample(..., overwrite=True) must mutate the given initial state in place."""
    nn_state = PositiveWavefunction(10, gpu=False)

    chain = torch.empty(100, 10).bernoulli_().to(dtype=torch.double)
    snapshot = chain.clone()  # keep a copy to prove the chain actually moved

    result = nn_state.sample(k=10, initial_state=chain, overwrite=True)

    assert torch.equal(result, chain), "initial_state did not get overwritten!"
    assert not torch.equal(result, snapshot), "Markov Chain did not get updated!"
Example #5
0
def test_positive_wavefunction(gpu):
    """One epoch of fitting must change the RBM's parameters."""
    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)

    nn_state = PositiveWavefunction(10, gpu=gpu)

    params_before = parameters_to_vector(nn_state.rbm_am.parameters())

    nn_state.fit(
        torch.ones(100, 10), epochs=1, pos_batch_size=10, neg_batch_size=10
    )

    params_after = parameters_to_vector(nn_state.rbm_am.parameters())

    msg = "PositiveWavefunction's parameters did not change!"
    assert not torch.equal(params_before, params_after), msg
Example #6
0
def test_positive_wavefunction_phase():
    """Phase must be identically zero, both for one state and for a batch."""
    nn_state = PositiveWavefunction(10, gpu=False)

    cases = [
        (
            torch.ones(10),
            torch.zeros(1),
            "PositiveWavefunction is giving a non-zero phase for single visible state!",
        ),
        (
            torch.ones(10, 10),
            torch.zeros(10),
            "PositiveWavefunction is giving a non-zero phase for batch of visible states!",
        ),
    ]

    for vis, expected, msg in cases:
        vis = vis.to(dtype=torch.double)
        phase = nn_state.phase(vis).to(vis)
        assert torch.equal(phase, expected.to(vis)), msg
Example #7
0
def test_stop_training(gpu):
    """Setting stop_training before fit() must leave all parameters untouched."""
    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)
    np.random.seed(SEED)

    nn_state = PositiveWavefunction(10, gpu=gpu)

    params_before = parameters_to_vector(nn_state.rbm_am.parameters())

    # Flip the flag first; fit() should then bail out without training.
    nn_state.stop_training = True
    nn_state.fit(torch.ones(100, 10))

    params_after = parameters_to_vector(nn_state.rbm_am.parameters())

    assert torch.equal(params_before, params_after), "stop_training didn't work!"
Example #8
0
def test_stop_training_in_epoch(gpu):
    """An epoch-end callback must be able to flip the stop_training flag."""
    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)
    np.random.seed(SEED)

    nn_state = PositiveWavefunction(10, gpu=gpu)

    # Callback fires after each epoch and sets nn_state.stop_training.
    stopper = LambdaCallback(
        on_epoch_end=lambda nn_state, ep: set_stop_training(nn_state)
    )

    nn_state.fit(torch.ones(100, 10), callbacks=[stopper])

    assert nn_state.stop_training, "stop_training wasn't set!"
Example #9
0
def test_bad_stop_training_val():
    """Assigning a non-boolean to stop_training must raise ValueError."""
    nn_state = PositiveWavefunction(10, gpu=False)

    # BUG FIX: pytest.raises() no longer accepts a `message=` keyword — it
    # was deprecated in pytest 4.0 and removed in 5.0, so the old call
    # raised TypeError instead of running the test. pytest.raises itself
    # fails the test (with a clear message) if no ValueError is raised,
    # so the custom message is no longer needed.
    with pytest.raises(ValueError):
        nn_state.stop_training = "foobar"
Example #10
0
    def test_trainingpositive(self):
        """Integration test: train a positive wavefunction on TFIM-1D data
        10 times and check that the averaged fidelity and KL divergence
        (and their standard errors) land inside the expected windows.
        """
        print("Positive Wavefunction")
        print("---------------------")

        # Both data files live in the Tutorial1 example directory.
        tutorial_dir = os.path.join(
            __tests_location__,
            "..",
            "examples",
            "Tutorial1_TrainPosRealWavefunction",
        )
        train_samples, target_psi = data.load_data(
            os.path.join(tutorial_dir, "tfim1d_data.txt"),
            os.path.join(tutorial_dir, "tfim1d_psi.txt"),
        )

        # Hidden layer sized to match the visible layer.
        num_units = train_samples.shape[-1]

        fidelities = []
        KLs = []

        # Training hyperparameters.
        epochs = 5
        batch_size = 100
        num_chains = 200
        CD = 10
        lr = 0.1
        log_every = 5

        print(
            "Training 10 times and checking fidelity and KL at 5 epochs...\n")
        for trial in range(10):
            print("Iteration: ", trial + 1)

            nn_state = PositiveWavefunction(
                num_visible=num_units, num_hidden=num_units, gpu=False
            )

            space = nn_state.generate_hilbert_space(num_units)
            evaluator = MetricEvaluator(
                log_every,
                {"Fidelity": ts.fidelity, "KL": ts.KL},
                target_psi=target_psi,
                space=space,
                verbose=True,
            )

            # Start every trial from the same deterministic parameters.
            self.initialize_posreal_params(nn_state)

            nn_state.fit(
                data=train_samples,
                epochs=epochs,
                pos_batch_size=batch_size,
                neg_batch_size=num_chains,
                k=CD,
                lr=lr,
                time=True,
                progbar=False,
                callbacks=[evaluator],
            )

            fidelities.append(ts.fidelity(nn_state, target_psi, space).item())
            KLs.append(ts.KL(nn_state, target_psi, space).item())

        # Standard error of the mean for both metrics.
        fid_err = np.std(fidelities) / np.sqrt(len(fidelities))
        kl_err = np.std(KLs) / np.sqrt(len(KLs))

        print("\nStatistics")
        print("----------")
        print("Fidelity: ", np.average(fidelities), "+/-", fid_err, "\n")
        print("KL: ", np.average(KLs), "+/-", kl_err, "\n")

        self.assertTrue(abs(np.average(fidelities) - 0.85) < 0.02)
        self.assertTrue(abs(np.average(KLs) - 0.29) < 0.05)
        self.assertTrue(fid_err < 0.01)
        self.assertTrue(kl_err < 0.01)