Example #1
0
def test_stop_training_in_epoch(gpu):
    """Check that a callback raising ``stop_training`` actually halts ``fit``."""
    # Seed both qucumber and numpy so the run is reproducible.
    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)
    np.random.seed(SEED)

    wavefn = PositiveWaveFunction(10, gpu=gpu)

    # Trivial all-ones training set: the data content is irrelevant here,
    # only the training loop's reaction to the stop flag is under test.
    samples = torch.ones(100, 10)

    # Raise the stop flag at the end of every epoch via a LambdaCallback.
    stopper = LambdaCallback(
        on_epoch_end=lambda state, ep: set_stop_training(state)
    )

    wavefn.fit(samples, callbacks=[stopper])

    assert wavefn.stop_training, "stop_training wasn't set!"
Example #2
0
def test_trainingpositive():
    """Train a PositiveWaveFunction on TFIM data ten times and verify that
    the averaged fidelity and KL divergence land in the expected windows."""
    print("Positive WaveFunction")
    print("---------------------")

    # Both data files live in the same tutorial example directory.
    example_dir = os.path.join(
        __tests_location__,
        "..",
        "examples",
        "Tutorial1_TrainPosRealWaveFunction",
    )
    train_samples_path = os.path.join(example_dir, "tfim1d_data.txt")
    psi_path = os.path.join(example_dir, "tfim1d_psi.txt")

    train_samples, target_psi = data.load_data(train_samples_path, psi_path)

    # Hidden layer sized to match the number of visible units.
    nv = nh = train_samples.shape[-1]

    fidelities, KLs = [], []

    # Training hyperparameters.
    epochs = 5
    batch_size = 100
    num_chains = 200
    CD = 10
    lr = 0.1
    log_every = 5

    print("Training 10 times and checking fidelity and KL at 5 epochs...\n")
    for trial in range(1, 11):
        print("Iteration: ", trial)

        nn_state = PositiveWaveFunction(
            num_visible=nv, num_hidden=nh, gpu=False
        )

        space = nn_state.generate_hilbert_space(nv)
        # Log fidelity and KL every `log_every` epochs during training.
        evaluator = MetricEvaluator(
            log_every,
            {"Fidelity": ts.fidelity, "KL": ts.KL},
            target_psi=target_psi,
            space=space,
            verbose=True,
        )

        # Deterministic parameter initialization for reproducible trials.
        initialize_posreal_params(nn_state)

        nn_state.fit(
            data=train_samples,
            epochs=epochs,
            pos_batch_size=batch_size,
            neg_batch_size=num_chains,
            k=CD,
            lr=lr,
            time=True,
            progbar=False,
            callbacks=[evaluator],
        )

        fidelities.append(ts.fidelity(nn_state, target_psi, space))
        KLs.append(ts.KL(nn_state, target_psi, space))

    print("\nStatistics")
    print("----------")
    # Standard error of the mean over the ten trials.
    fid_err = np.std(fidelities) / np.sqrt(len(fidelities))
    kl_err = np.std(KLs) / np.sqrt(len(KLs))
    print("Fidelity: ", np.average(fidelities), "+/-", fid_err, "\n")
    print("KL: ", np.average(KLs), "+/-", kl_err, "\n")

    # Averages must sit inside the expected windows, and the spread across
    # trials must be tight.
    assert abs(np.average(fidelities) - 0.85) < 0.02
    assert abs(np.average(KLs) - 0.29) < 0.05
    assert fid_err < 0.01
    assert kl_err < 0.01
            "Fidelity": ts.fidelity,
            "KL": ts.KL,
            "A_Ψrbm_0": psi_coefficient
        },
        target=true_psi,
        verbose=True,
        space=space,
        A=1.0,
    )
]

# Train the RBM-based state on the prepared samples; hyperparameters
# (epochs, pbs, nbs, lr, k) and `callbacks` are defined earlier in the script.
nn_state.fit(
    train_data,
    epochs=epochs,
    pos_batch_size=pbs,
    neg_batch_size=nbs,
    lr=lr,
    k=k,
    callbacks=callbacks,
    time=True,
)

# Normalize the amplitudes over the full Hilbert space.
# NOTE(review): `sqrt_()` is the in-place torch variant — it mutates the
# tensor returned by compute_normalization; presumably that tensor is a
# throwaway, but confirm nothing else holds a reference to it.
final_state_vector = np.array(
    nn_state.psi(space)[0] / nn_state.compute_normalization(space).sqrt_())
# Persist the normalized state vector to disk with 5-decimal precision.
np.savetxt("phi_fourier_RBM.txt", final_state_vector, fmt='%1.5f')
print("Estado cuántico final:", final_state_vector)

# Bar chart of |psi|^2 over all 2**nv basis states (real part only).
plt.close()
fig1 = plt.bar(np.arange(np.power(2, nv)), np.power(final_state_vector.real,
                                                    2))
# NOTE(review): the x-axis indexes basis states, not probabilities —
# this label looks like it belongs on the y-axis; confirm intent.
plt.xlabel("Probabilities")
plt.ylim(0, 0.01)