Example #1
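# Train a deep-neural-network trial wavefunction for quantum dots, recording
# energy, symmetry, and parameter histories via callbacks.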
plot_samples = 1_000_000
iters = 30000
samples = 1000
gamma = 0.0
evaluation_points = 2**23

psi_energies = EnergyCallback(samples=plot_samples, verbose=True)
psi_symmetries = SymmetryCallback(samples=plot_samples)
psi_parameters = ParameterCallback()

train(
    psi,
    H,
    psi_sampler,
    iters=iters,
    samples=samples,
    gamma=gamma,
    optimizer=AdamOptimizer(len(psi.parameters)),
    call_backs=(psi_energies, psi_symmetries, psi_parameters),
)
mpiprint("Training regular dnn complete")

np.savetxt("QD-parameters-dnn-regular.txt", psi.parameters)

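# Repeat the training for the input-sorted variant of the same network.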
psi_sorted_energies = EnergyCallback(samples=plot_samples, verbose=True)
psi_sorted_parameters = ParameterCallback()

train(
    psi_sorted,
    H,
    psi_sorted_sampler,
    iters=iters,
    samples=samples,
    gamma=gamma,
    optimizer=AdamOptimizer(len(psi.parameters)),
    call_backs=(psi_sorted_energies, psi_sorted_parameters),
)
Example #2
optimizers = [
    AdamOptimizer(len(psi.parameters), 0.05, 0.9),
    AdamOptimizer(len(psi.parameters), 0.05, 0.7),
]
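# Train an identical SimpleGaussian from scratch with each optimizer and
# record its energy history for comparison.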
E = []
for opt in optimizers:
    # psi.parameters = org_params
    psi = SimpleGaussian(0.8)
    sampler = ImportanceSampler(system, psi, 0.1)
    sampler.thermalize(10000)
    E_training = EnergyCallback(samples=1000000, verbose=True)
    train(
        psi,
        H,
        sampler,
        iters=150,
        samples=1000,
        gamma=0.0,
        optimizer=opt,
        call_backs=[E_training],
        call_back_resolution=50,
    )
    E.append(np.asarray(E_training))

if master_rank():
    fig, ax = plt.subplots()
    ax.set_xlabel(r"% of training")
    ax.set_ylabel(r"Energy error [a.u.]")
    for e, label in zip(E, labels):
        ax.semilogy(np.abs(e / N - D / 2), label=label)
    ax.legend()
    matplotlib2tikz.save(
Example #3
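# Train a Gaussian-Jastrow product wavefunction for Coulomb-interacting
# particles in a harmonic trap.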
system = np.empty((P, D))
H = CoulombHarmonicOscillator()
simple_gaussian = SimpleGaussian(alpha=0.5)
jastrow = JastrowPade(alpha=1, beta=1)
psi = WavefunctionProduct(simple_gaussian, jastrow)
psi_sampler = ImportanceSampler(system, psi, step_size=0.1)
psi_simple_sampler = ImportanceSampler(system, simple_gaussian, step_size=0.1)

psi_energies = EnergyCallback(samples=100000)
psi_parameters = ParameterCallback()

train(
    psi,
    H,
    psi_sampler,
    iters=2000,
    samples=1000,
    gamma=0,
    optimizer=AdamOptimizer(len(psi.parameters)),
    call_backs=(psi_energies, psi_parameters),
)

mpiprint("Training complete")

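# Compare blocking statistics for the bare Gaussian and the full product wavefunction.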
stats = [
    compute_statistics_for_series(
        H.local_energy_array(psi_simple_sampler, simple_gaussian, 2**22),
        method="blocking",
    ),
    compute_statistics_for_series(
        H.local_energy_array(psi_sampler, psi, 2**22),
        method="blocking",
    ),
Example #4
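# Train a McMillan-Jastrow trial wavefunction for liquid helium with a
# Lennard-Jones Hamiltonian.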
H = LennardJones(L)
psi = JastrowMcMillian(5, 2.85, L)

sampler = HeliumSampler(system, psi, 0.5, L)
sampler.thermalize(20000)
mpiprint("Acceptance rate after thermalization:", sampler.acceptance_rate)

psi_energies = EnergyCallback(samples=5_000_000, verbose=True)
psi_parameters = ParameterCallback()

train(
    psi,
    H,
    sampler,
    iters=8000,
    samples=5000,
    gamma=0,
    optimizer=AdamOptimizer(len(psi.parameters), 0.0001),
    call_backs=(psi_energies, psi_parameters),
)

mpiprint("Training complete")
mpiprint(psi.parameters)

if master_rank():
    plot_training(np.asarray(psi_energies) / P, psi_parameters)

stats, labels = [], []

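# Repeat for several particle numbers; the box length L is set by the number density rho.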
for P, step in zip([32, 64, 256], [0.5, 0.6, 0.8]):
    L = (P / rho)**(1 / 3)
Example #5
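# End-to-end workflow: define a Gaussian-Jastrow trial wavefunction, train it,
# and evaluate the energy with blocking statistics.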
# Define trial wave function:
gaussian = SimpleGaussian(alpha=0.8)
jastrow = JastrowPade(alpha=1, beta=1)
psi = WavefunctionProduct(gaussian, jastrow)

# Set up sampling strategy:
sampler = ImportanceSampler(np.empty((P, D)), psi, step_size=0.1)

# Train wave function:
training_energies = EnergyCallback(samples=100000)
training_params = ParameterCallback()
train(
    psi,
    H,
    sampler,
    iters=150,     # Optimization steps.
    samples=1000,  # MC cycles per optimization step.
    gamma=0,       # Regularization parameter (disabled here).
    optimizer=SgdOptimizer(0.1),
    call_backs=(training_energies, training_params),
)

# With a trained model, time to evaluate!
energy = H.local_energy_array(sampler, psi, 2 ** 21)
stats = compute_statistics_for_series(energy, method="blocking")
mpiprint(stats, pretty=True)


if master_rank():
    fig, (eax, pax) = plt.subplots(ncols=2, sharex=True)
    eax.plot(training_energies)
    eax.set_title(r"$\langle E_L\rangle$ [a.u]")
Example #6
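# Train a SimpleGaussian wavefunction for non-interacting particles in a
# harmonic oscillator using plain SGD.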
N, D = 10, 3
system = np.empty((N, D))
H = HarmonicOscillator(omega_ho=1)
psi_G = SimpleGaussian(alpha=0.3)
isampler_G = ImportanceSampler(system, psi_G, 0.1)

isampler_G.thermalize(10000)

E_training = EnergyCallback(samples=50000, verbose=True)
G_training = ParameterCallback()
train(
    psi_G,
    H,
    isampler_G,
    iters=100,
    samples=1000,
    gamma=0,
    optimizer=SgdOptimizer(0.001),
    call_backs=[E_training, G_training],
)
E_training = np.asarray(E_training) / N

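# Plot the per-particle training energy against the exact ground-state value D / 2.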
fig, (eax, pax) = plt.subplots(ncols=2)
eax.plot(E_training, label=r"$\psi_G$")
eax.plot(
    np.ones_like(E_training) * D / 2,
    label=r"$\Phi$",
    linestyle="--",
    alpha=0.5,
    color="k",
)
Example #7
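# Train an RBM wavefunction with regularization (gamma=0.001), tracking energy,
# symmetry, and parameters; an input-sorted variant is set up for comparison.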
system = np.empty((P, D))
H = CoulombHarmonicOscillator()
psi = RBMWavefunction(P * D, N)
psi_sorted = InputSorter(psi)
psi_sampler = ImportanceSampler(system, psi, step_size=0.1)
psi_sorted_sampler = ImportanceSampler(system, psi_sorted, step_size=0.1)

psi_energies = EnergyCallback(samples=1_000_000, verbose=True)
psi_symmetries = SymmetryCallback(samples=1_000_000)
psi_parameters = ParameterCallback()

train(
    psi,
    H,
    psi_sampler,
    iters=40000,
    samples=1000,
    gamma=0.001,
    optimizer=AdamOptimizer(len(psi.parameters), 0.005),
    call_backs=(psi_energies, psi_symmetries, psi_parameters),
)

mpiprint("Training complete")

psi_sorted_sampler.thermalize(100_000)
mpiprint(
    f"Sorted sampler acceptance rate: {psi_sorted_sampler.acceptance_rate}")

stats = [
    compute_statistics_for_series(
        H.local_energy_array(psi_sampler, psi, 2**23),
        method="blocking",
    ),