# Example 1
def test_monte_carlo_update_unbalanced_local_energy():
    """Check that the unbalanced and balanced local-energy kernels agree.

    Builds a fixed batch of three 7-spin configurations, a random stack of
    local connections whose first entry is the batch itself, and Hamiltonian
    coefficients that are zero wherever the connection mask is False — so the
    mask-aware ("unbalanced") path and the dense ("balanced") path must yield
    the same mean local energy.
    """
    with DEFAULT_TF_GRAPH.as_default():
        model = complex_values_linear_1d_model()

        batch = np.array([[1, 1, 1, -1, -1, -1, -1],
                          [1, 1, 1, -1, 1, -1, -1],
                          [1, -1, 1, 1, -1, -1, -1]])
        connections = np.random.choice([-1, 1], size=(5, 3, 7))
        # First connection of every configuration is the configuration itself.
        connections[0, ...] = batch
        coefficients = np.array([[2.0, 7j + 8, 0.0, 0.0, 3],
                                 [0.0, 0.0, 0.0, 0.0, -1.0],
                                 [5.0, 3j, 0.0, -2, 9]]).T
        conn_mask = np.array([[True, True, False, False, True],
                              [True, False, False, False, True],
                              [True, True, False, True, True]]).T

        class FixedBatchSampler(Sampler):
            # Deterministic sampler: every draw is the same pre-built batch.
            def __init__(self):
                super(FixedBatchSampler, self).__init__((7, ), 3)

            def __next__(self):
                return batch

        vmc = VariationalMonteCarlo(
            model, Heisenberg(hilbert_state_shape=(7, )), FixedBatchSampler())
        observable = vmc.energy_observable
        unbalanced_local_energy = np.mean(
            observable.local_values_optimized_for_unbalanced_local_connections(
                vmc.wave_function, connections, coefficients, conn_mask))
        balanced_local_energy = np.mean(
            observable.local_values_optimized_for_balanced_local_connections(
                vmc.wave_function, connections, coefficients))
        assert np.allclose(balanced_local_energy, unbalanced_local_energy)
# Example 2
def run_pyket(args):
    """Build and train a flowket convolutional wave-function model, timing it.

    Constructs a 1D complex-valued CNN over a (input_size, 1) spin lattice,
    compiles it with either stochastic reconfiguration or plain SGD, runs a
    short untimed fit (presumably to exclude graph construction from the
    measurement — confirm), then times a fit of ``args.num_of_iterations``
    steps.

    Returns the elapsed wall-clock seconds of the timed fit.
    """
    hilbert_state_shape = (args.input_size, 1)
    # Pad only on the right so the periodic convolution covers the full ring.
    padding = ((0, args.kernel_size - 1), )

    inputs = Input(shape=hilbert_state_shape, dtype='int8')
    layer = ToComplex128()(inputs)
    for _ in range(args.depth):
        layer = PeriodicPadding(padding)(layer)
        layer = ComplexConv1D(args.width,
                              args.kernel_size,
                              use_bias=False,
                              dtype=tf.complex128)(layer)
        layer = Activation(lncosh)(layer)
    layer = Flatten()(layer)
    # Log-amplitude of each configuration = sum over all remaining features.
    predictions = Lambda(
        lambda y: tf.reduce_sum(y, axis=1, keepdims=True))(layer)
    model = Model(inputs=inputs, outputs=predictions)

    if args.fast_jacobian:
        def predictions_jacobian(x):
            return get_predictions_jacobian(keras_model=model)
    else:
        def predictions_jacobian(x):
            return gradients.jacobian(tf.real(model.output), x,
                                      use_pfor=not args.no_pfor)

    if args.use_stochastic_reconfiguration:
        optimizer = ComplexValuesStochasticReconfiguration(
            model,
            predictions_jacobian,
            lr=args.learning_rate,
            diag_shift=10.0,
            iterative_solver=args.use_iterative,
            use_cholesky=args.use_cholesky,
            iterative_solver_max_iterations=None)
        model.compile(optimizer=optimizer,
                      loss=loss_for_energy_minimization,
                      metrics=optimizer.metrics)
    else:
        model.compile(optimizer=SGD(lr=args.learning_rate),
                      loss=loss_for_energy_minimization)
    model.summary()

    operator = Heisenberg(hilbert_state_shape=hilbert_state_shape, pbc=True)
    sampler = MetropolisHastingsHamiltonian(
        model,
        args.batch_size,
        operator,
        num_of_chains=args.pyket_num_of_chains,
        unused_sampels=numpy.prod(hilbert_state_shape))
    variational_monte_carlo = VariationalMonteCarlo(model, operator, sampler)

    def fit(steps):
        # Single-threaded, unbuffered training driven by the VMC generator.
        model.fit_generator(variational_monte_carlo.to_generator(),
                            steps_per_epoch=steps,
                            epochs=1,
                            max_queue_size=0,
                            workers=0)

    fit(5)  # untimed preliminary run
    start_time = time.time()
    fit(args.num_of_iterations)
    return time.time() - start_time
# Example 3
def main():
    """CLI entry point for the Heisenberg NAQS driver.

    Sets up 'train' and 'eval' sub-commands, parses the arguments, builds
    the Heisenberg operator, attaches a known reference ground-state energy
    for the lattice sizes where one is available, and dispatches to the
    sub-command's handler.
    """
    parser = argparse.ArgumentParser(prog='Heisenberg NAQS')
    subparsers = parser.add_subparsers()
    train_parser = subparsers.add_parser('train',
                                         help='train Heisenberg model')
    eval_parser = subparsers.add_parser('eval', help='eval Heisenberg model')
    train_parser.set_defaults(func=train)
    eval_parser.set_defaults(func=run)
    create_evaluation_config_parser(parser=eval_parser,
                                    depth=20,
                                    mini_batch_size=2**7,
                                    hilbert_state_shape=[10, 10])
    create_training_config_parser(parser=train_parser,
                                  depth=20,
                                  mini_batch_size=2**11,
                                  num_epoch=[80, 370],
                                  batch_size=[2**10, 2**11],
                                  hilbert_state_shape=[10, 10],
                                  learning_rate=[1e-3, 1e-3])
    parser.add_argument('--use_pbc', dest='pbc', action='store_true')
    parser.add_argument('--no_pbc', dest='pbc', action='store_false')
    parser.set_defaults(pbc=False)
    config = parser.parse_args()

    operator = Heisenberg(hilbert_state_shape=config.hilbert_state_shape,
                          pbc=config.pbc)

    # Reference energies are known only for these specific lattices.
    lattice = tuple(config.hilbert_state_shape)
    true_ground_state_energy = None
    if lattice == (10, 10):
        true_ground_state_energy = -268.61976 if config.pbc else -251.4624
    elif lattice == (16, 16) and not config.pbc:
        true_ground_state_energy = -658.9759488
    config.func(operator, config, true_ground_state_energy)
# Example 4
# Wrap the RBM's output tensor in a Keras model so it can be compiled and
# trained (``rbm`` and ``inputs`` are defined earlier in this file).
predictions = rbm.predictions
model = Model(inputs=inputs, outputs=predictions)

# batch_size is reused below for both the sampler and the TensorBoard callback.
batch_size = 1000
steps_per_epoch = 300

# Stochastic-reconfiguration optimizer over the model's complex-valued
# predictions. iterative_solver=False — presumably a direct (non-iterative)
# solve of the SR linear system; confirm against flowket's documentation.
optimizer = ComplexValuesStochasticReconfiguration(model,
                                                   rbm.predictions_jacobian,
                                                   lr=0.05,
                                                   diag_shift=0.1,
                                                   iterative_solver=False)
model.compile(optimizer=optimizer,
              loss=loss_for_energy_minimization,
              metrics=optimizer.metrics)
model.summary()
# Heisenberg Hamiltonian with periodic boundary conditions on the lattice
# described by ``hilbert_state_shape`` (defined earlier in this file).
operator = Heisenberg(hilbert_state_shape=hilbert_state_shape, pbc=True)
# NOTE(review): 'unused_sampels' (sic) is the library's own keyword name.
sampler = MetropolisHastingsHamiltonian(
    model,
    batch_size,
    operator,
    num_of_chains=20,
    unused_sampels=numpy.prod(hilbert_state_shape))
variational_monte_carlo = VariationalMonteCarlo(model, operator, sampler)

# TensorBoard callback fed by the VMC generator; logging every step
# (update_freq=1) with histograms enabled, output writing disabled.
tensorboard = TensorBoardWithGeneratorValidationData(
    log_dir='tensorboard_logs/rbm_with_sr_run_6',
    generator=variational_monte_carlo,
    update_freq=1,
    histogram_freq=1,
    batch_size=batch_size,
    write_output=False)
# Example 5
import pytest
import numpy as np
import tensorflow as tf
from tensorflow.keras.optimizers import Adam

from flowket.callbacks.exact import ExactLocalEnergy
from flowket.callbacks.monte_carlo import LocalEnergyStats
from flowket.evaluation import evaluate, exact_evaluate
from flowket.operators import Heisenberg, NetketOperatorWrapper
from flowket.optimization import ExactVariational, VariationalMonteCarlo, loss_for_energy_minimization
from flowket.samplers import ExactSampler, Sampler
from .simple_models import complex_values_linear_1d_model, real_values_1d_model

# Shared TF1-style graph: tests enter this graph explicitly so all of them
# build ops in the same place.
DEFAULT_TF_GRAPH = tf.get_default_graph()
# 7-site one-dimensional Heisenberg operator with periodic boundary conditions.
ONE_DIM_OPERATOR = Heisenberg(hilbert_state_shape=[7], pbc=True)


def test_monte_carlo_update_unbalanced_local_energy():
    with DEFAULT_TF_GRAPH.as_default():
        model = complex_values_linear_1d_model()

        sample = np.array([[1, 1, 1, -1, -1, -1, -1], [1, 1, 1, -1, 1, -1, -1],
                           [1, -1, 1, 1, -1, -1, -1]])
        local_connections = np.random.choice([-1, 1], size=(5, 3, 7))
        local_connections[0, ...] = sample
        hamiltonian_values = np.array([[2.0, 7j + 8, 0.0, 0.0, 3],
                                       [0.0, 0.0, 0.0, 0.0, -1.0],
                                       [5.0, 3j, 0.0, -2, 9]]).T
        all_use_conn = np.array([[True, True, False, False, True],
                                 [True, False, False, False, True],
                                 [True, True, False, True, True]]).T