# --- Example 1 ---
def test_monte_carlo_update_unbalanced_local_energy():
    """The unbalanced-connections local-energy path must agree with the balanced one."""
    with DEFAULT_TF_GRAPH.as_default():
        model = complex_values_linear_1d_model()

        configurations = np.array([[1, 1, 1, -1, -1, -1, -1],
                                   [1, 1, 1, -1, 1, -1, -1],
                                   [1, -1, 1, 1, -1, -1, -1]])
        connections = np.random.choice([-1, 1], size=(5, 3, 7))
        # First connection of every sample is the sample itself (diagonal term).
        connections[0, ...] = configurations
        hamiltonian_values = np.array([[2.0, 7j + 8, 0.0, 0.0, 3],
                                       [0.0, 0.0, 0.0, 0.0, -1.0],
                                       [5.0, 3j, 0.0, -2, 9]]).T
        # Mask marking which connections actually contribute per sample.
        use_conn_mask = np.array([[True, True, False, False, True],
                                  [True, False, False, False, True],
                                  [True, True, False, True, True]]).T

        class SimpleSampler(Sampler):
            # Deterministic sampler: always yields the fixed configurations.
            def __init__(self):
                super(SimpleSampler, self).__init__((7, ), 3)

            def __next__(self):
                return configurations

        vmc = VariationalMonteCarlo(
            model, Heisenberg(hilbert_state_shape=(7, )), SimpleSampler())
        observable = vmc.energy_observable
        unbalanced_values = observable.local_values_optimized_for_unbalanced_local_connections(
            vmc.wave_function, connections, hamiltonian_values, use_conn_mask)
        balanced_values = observable.local_values_optimized_for_balanced_local_connections(
            vmc.wave_function, connections, hamiltonian_values)
        assert np.allclose(np.mean(balanced_values), np.mean(unbalanced_values))
# --- Example 2 ---
def run_pyket(args):
    """Build and train a pyket ComplexConv1D model on the 1D Heisenberg chain.

    Runs a short warm-up fit (so graph construction is excluded from timing),
    then times `args.num_of_iterations` training steps and returns the elapsed
    wall-clock seconds.
    """
    hilbert_state_shape = (args.input_size, 1)
    padding = ((0, args.kernel_size - 1), )
    inputs = Input(shape=hilbert_state_shape, dtype='int8')
    layer = ToComplex128()(inputs)
    for _ in range(args.depth):
        layer = PeriodicPadding(padding)(layer)
        layer = ComplexConv1D(args.width,
                              args.kernel_size,
                              use_bias=False,
                              dtype=tf.complex128)(layer)
        layer = Activation(lncosh)(layer)
    layer = Flatten()(layer)
    # Log-amplitude of each configuration is the sum over features.
    predictions = Lambda(lambda y: tf.reduce_sum(y, axis=1, keepdims=True))(layer)
    model = Model(inputs=inputs, outputs=predictions)

    if args.fast_jacobian:
        def predictions_jacobian(x):
            return get_predictions_jacobian(keras_model=model)
    else:
        def predictions_jacobian(x):
            return gradients.jacobian(tf.real(model.output), x,
                                      use_pfor=not args.no_pfor)

    if args.use_stochastic_reconfiguration:
        optimizer = ComplexValuesStochasticReconfiguration(
            model,
            predictions_jacobian,
            lr=args.learning_rate,
            diag_shift=10.0,
            iterative_solver=args.use_iterative,
            use_cholesky=args.use_cholesky,
            iterative_solver_max_iterations=None)
        model.compile(optimizer=optimizer,
                      loss=loss_for_energy_minimization,
                      metrics=optimizer.metrics)
    else:
        model.compile(optimizer=SGD(lr=args.learning_rate),
                      loss=loss_for_energy_minimization)
    model.summary()

    operator = Heisenberg(hilbert_state_shape=hilbert_state_shape, pbc=True)
    sampler = MetropolisHastingsHamiltonian(
        model,
        args.batch_size,
        operator,
        num_of_chains=args.pyket_num_of_chains,
        unused_sampels=numpy.prod(hilbert_state_shape))
    vmc = VariationalMonteCarlo(model, operator, sampler)

    # Warm-up: a few steps so one-time setup cost is not part of the timing.
    model.fit_generator(vmc.to_generator(),
                        steps_per_epoch=5,
                        epochs=1,
                        max_queue_size=0,
                        workers=0)
    start_time = time.time()
    model.fit_generator(vmc.to_generator(),
                        steps_per_epoch=args.num_of_iterations,
                        epochs=1,
                        max_queue_size=0,
                        workers=0)
    return time.time() - start_time
# --- Example 3 ---
def run(operator, config, true_ground_state_energy=None):
    """Load trained weights, wrap the model with symmetry invariances, and evaluate.

    Prints the evaluation results (energy statistics) to stdout.
    """
    model, sampler = build_model(operator, config)
    model.load_weights(config.weights_path)

    evaluation_inputs = Input(shape=config.hilbert_state_shape, dtype='int8')
    obc_input = Input(shape=config.hilbert_state_shape,
                      dtype=evaluation_inputs.dtype)
    # Symmetrize: first over 2D open-boundary transformations, then spin flip.
    invariant_model = make_2d_obc_invariants(obc_input, model)
    invariant_model = make_up_down_invariant(evaluation_inputs, invariant_model)

    mini_batch_size = config.mini_batch_size
    sampler = sampler.copy_with_new_batch_size(mini_batch_size)
    vmc = VariationalMonteCarlo(invariant_model, operator, sampler,
                                mini_batch_size=mini_batch_size)
    callbacks = default_wave_function_stats_callbacks_factory(
        vmc,
        log_in_batch_or_epoch=False,
        true_ground_state_energy=true_ground_state_energy)

    progress_bar_keys = {'energy/energy': 'energy',
                         'energy/relative_error': 'relative_error',
                         'energy/local_energy_variance': 'variance'}
    results = evaluate(vmc,
                       steps=config.num_of_samples // mini_batch_size,
                       callbacks=callbacks[:4],
                       keys_to_progress_bar_mapping=progress_bar_keys)
    print(results)
# --- Example 4 ---
def test_exact_and_monte_carlo_agree(model_builder, operator, batch_size,
                                     num_of_mc_iterations):
    """Monte-Carlo energy must match the exact expectation within one std error."""
    with DEFAULT_TF_GRAPH.as_default():
        model = model_builder()
        exact_variational = ExactVariational(model, operator, batch_size)
        reduce_variance(exact_variational, model)
        vmc = VariationalMonteCarlo(
            model, operator, ExactSampler(exact_variational, batch_size))
        exact_logs = exact_evaluate(exact_variational,
                                    [ExactLocalEnergy(exact_variational)])
        mc_logs = evaluate(vmc, num_of_mc_iterations, [LocalEnergyStats(vmc)])
        # Standard error of the MC mean from the exact local-energy variance.
        total_samples = batch_size * num_of_mc_iterations
        mc_std = np.sqrt(
            exact_logs['energy/local_energy_variance'] / total_samples)
        assert mc_logs['energy/energy'] == pytest.approx(
            exact_logs['energy/energy'], mc_std)
# --- Example 5 ---
                                  num_of_channels=32,
                                  weights_normalization=False)
# Autoregressive network: `predictions` are the model outputs used for energy
# minimization, while the conditional log-probabilities feed the sampler below.
predictions, conditional_log_probs = convnet.predictions, convnet.conditional_log_probs
model = Model(inputs=inputs, outputs=predictions)
conditional_log_probs_model = Model(inputs=inputs,
                                    outputs=conditional_log_probs)

batch_size = 128
steps_per_epoch = 500

optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
model.compile(optimizer=optimizer, loss=loss_for_energy_minimization)
model.summary()
# Transverse-field Ising Hamiltonian (h=3.0) with open boundary conditions.
operator = Ising(h=3.0, hilbert_state_shape=hilbert_state_shape, pbc=False)
sampler = AutoregressiveSampler(conditional_log_probs_model, batch_size)
monte_carlo_generator = VariationalMonteCarlo(model, operator, sampler)

# NOTE(review): the reference energy presumably is the known ground-state
# energy for this Hamiltonian/lattice size — confirm against the system setup.
callbacks = default_wave_function_stats_callbacks_factory(
    monte_carlo_generator, true_ground_state_energy=-50.18662388277671)
# max_queue_size=0 / workers=0 keep the generator on the main thread so
# sampling stays in lockstep with the training updates.
model.fit_generator(monte_carlo_generator.to_generator(),
                    steps_per_epoch=steps_per_epoch,
                    epochs=2,
                    callbacks=callbacks,
                    max_queue_size=0,
                    workers=0)

print('evaluate normal model')
print(
    evaluate(monte_carlo_generator.to_generator(),
             steps=200,
             callbacks=callbacks,
# --- Example 6 ---
# NetKet reference setup for the same operator, kept commented out for
# cross-checking (requires the `netket` package):
# import netket as nk
# g = nk.graph.Hypercube(length=12, n_dim=2, pbc=False)
# hi = nk.hilbert.Spin(s=0.5, graph=g)
# netket_operator = nk.operator.Ising(h=3.0, hilbert=hi, J=1.0)
# operator = NetketOperator(netket_operator=netket_operator, hilbert_state_shape=hilbert_state_shape, max_num_of_local_connections=200)
# Cache wave-function values; the cache is reset every
# `logical_actual_ratio` steps.
wave_function_cache = WaveFunctionValuesCache(
    reset_cache_interval=logical_actual_ratio)
# Active sampler is Metropolis-Hastings with local spin flips; the
# autoregressive alternatives below are kept commented for reference.
sampler = MetropolisHastingsLocal(model,
                                  batch_size,
                                  num_of_chains=10,
                                  unused_sampels=100)
# sampler = FastAutoregressiveSampler(fast_sampling, buffer_size=5000)
# sampler = FastAutoregressiveSampler(conditional_log_probs)
generator = VariationalMonteCarlo(model,
                                  operator,
                                  sampler,
                                  cache=wave_function_cache)
#### Exact Grads ####
# generator = ExactVariational(model, operator, batch_size, cache=wave_function_cache)
# Keep only the best weights (lowest monitored energy) seen during training.
checkpoint = ModelCheckpoint('ising_fcnn.h5',
                             monitor='energy',
                             save_best_only=True,
                             save_weights_only=True)
# tensorboard = TensorBoard(update_freq=1)
tensorboard = TensorBoardWithGeneratorValidationData(
    monte_carlo_iterator=generator,
    update_freq=1,
    histogram_freq=1,
    batch_size=batch_size)
# Stop training once the relative energy error improves by less than 1e-5.
early_stopping = EarlyStopping(monitor='relative_energy_error', min_delta=1e-5)
callbacks = [