Code example #1
def test_sampler_by_l1(sampler_class, machine_input, machine_class,
                       machine_args):
    # this test is based on https://arxiv.org/pdf/1308.3946.pdf
    with GRAPH.as_default():
        machine = machine_class(machine_input, **machine_args)
        model = Model(inputs=machine_input, outputs=machine.predictions)
        operator = IdentityOperator(list(K.int_shape(machine_input)[1:]))
        exact_variational = ExactVariational(model, operator, BATCH_SIZE)
        exact_variational._update_wave_function_arrays()
        exact_sampler = ExactSampler(exact_variational, BATCH_SIZE)
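        # draw enough samples to resolve the full distribution over Hilbert-space basis states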
        num_of_samples = max(2**16, 16 * exact_variational.num_of_states)
        exact_sampler._set_batch_size(num_of_samples,
                                      mini_batch_size=BATCH_SIZE)
        batch_from_exact_sampler = next(exact_sampler)
        sampler = sampler_factory(sampler_class, machine, machine_input,
                                  num_of_samples)
        batch_from_sampler = next(sampler)
        sampler_chosen_idx = binary_array_to_decimal_array(
            batch_from_sampler.reshape((num_of_samples, -1)))
        exact_sampler_chosen_idx = binary_array_to_decimal_array(
            batch_from_exact_sampler.reshape((num_of_samples, -1)))
        x = numpy.bincount(sampler_chosen_idx.astype(int),
                           minlength=exact_variational.num_of_states)
        y = numpy.bincount(exact_sampler_chosen_idx.astype(int),
                           minlength=exact_variational.num_of_states)
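        # two-sample statistic from the paper referenced above; it stays small
        # when both samplers draw from the same distribution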
        z = ((numpy.square(x - y) - x - y) / (x + y + 1e-20)).sum()
        assert z <= numpy.sqrt(num_of_samples)
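The test above relies on the helper binary_array_to_decimal_array to map each sampled configuration to the decimal index of its basis state before the two histograms are compared. The sketch below illustrates what such a conversion does; it assumes a 0/1 encoding and most-significant-bit-first ordering and is not the library's implementation.

import numpy

def binary_array_to_decimal_array_sketch(samples):
    # samples: (num_of_samples, num_of_spins) array of 0/1 entries.
    # Interpret each row as a binary number, most significant bit first
    # (the bit ordering and 0/1 encoding are assumptions for illustration).
    num_of_spins = samples.shape[-1]
    weights = 2 ** numpy.arange(num_of_spins - 1, -1, -1)
    return samples.astype(numpy.int64) @ weights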
Code example #2
def test_monte_carlo_and_netket_agree(netket):
    input_size = 7
    batch_size = 1000
    num_of_mc_iterations = 1000
    g = netket.graph.Hypercube(length=input_size, n_dim=1)
    hi = netket.hilbert.Spin(s=0.5, graph=g)
    ha = netket.operator.Heisenberg(hilbert=hi)
    layers = (netket.layer.FullyConnected(input_size=input_size,
                                          output_size=1), )
    ma = netket.machine.FFNN(hi, layers)
    sa = netket.sampler.ExactSampler(machine=ma)
    op = netket.optimizer.Sgd(learning_rate=0.00)
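    # zero learning rate: the Vmc driver below only samples and measures the
    # energy, it never updates the parameters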

    flowket_model = complex_values_linear_1d_model()
    exact_variational = ExactVariational(
        flowket_model, NetketOperatorWrapper(ha, (input_size, )), batch_size)
    exact_logs = exact_evaluate(exact_variational,
                                [ExactLocalEnergy(exact_variational)])
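    # set the NetKet machine parameters from the flowket model weights so both
    # describe the same wave function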
    real_weights, imag_weights = flowket_model.get_weights()
    ma.parameters = (real_weights + imag_weights * -1j).flatten()
    gs = netket.variational.Vmc(hamiltonian=ha,
                                sampler=sa,
                                optimizer=op,
                                method='Gd',
                                n_samples=batch_size,
                                diag_shift=0.01)
    netket_energy = np.zeros((num_of_mc_iterations, ))
    for i in range(num_of_mc_iterations):
        gs.advance(1)
        netket_energy[i] = gs.get_observable_stats()['Energy']['Mean']
    netket_energy_mean = np.mean(netket_energy)
    exact_energy = exact_logs['energy/energy']
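    # standard error of the energy estimate pooled over all Monte Carlo iterations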
    monte_carlo_std = np.sqrt(exact_logs['energy/local_energy_variance'] /
                              (batch_size * num_of_mc_iterations))
    assert netket_energy_mean == pytest.approx(exact_energy, monte_carlo_std)
Code example #3
def test_exact_and_monte_carlo_agree(model_builder, operator, batch_size,
                                     num_of_mc_iterations):
    with DEFAULT_TF_GRAPH.as_default():
        model = model_builder()
        exact_variational = ExactVariational(model, operator, batch_size)
        reduce_variance(exact_variational, model)
        sampler = ExactSampler(exact_variational, batch_size)
        variational_monte_carlo = VariationalMonteCarlo(
            model, operator, sampler)
        exact_logs = exact_evaluate(exact_variational,
                                    [ExactLocalEnergy(exact_variational)])
        exact_energy = exact_logs['energy/energy']
        monte_carlo_energy = evaluate(
            variational_monte_carlo, num_of_mc_iterations,
            [LocalEnergyStats(variational_monte_carlo)])['energy/energy']
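        # standard error of the Monte Carlo energy estimate over all samples drawn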
        monte_carlo_std = np.sqrt(exact_logs['energy/local_energy_variance'] /
                                  (batch_size * num_of_mc_iterations))
        assert monte_carlo_energy == pytest.approx(exact_energy,
                                                   monte_carlo_std)
Code example #4
hilbert_state_shape = [
    16,
]
inputs = Input(shape=hilbert_state_shape, dtype='int8')
convnet = SimpleConvNetAutoregressive1D(inputs,
                                        depth=7,
                                        num_of_channels=32,
                                        weights_normalization=False)
model = Model(inputs=inputs, outputs=convnet.predictions)

batch_size = 2**12
steps_per_epoch = 500 * (2**4)

operator = Ising(h=3.0, hilbert_state_shape=hilbert_state_shape, pbc=False)
exact_variational = ExactVariational(model, operator, batch_size)

optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
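# accumulate gradients until a full cycle over all Hilbert-space states is
# complete, then apply a single parameter update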
convert_to_accumulate_gradient_optimizer(
    optimizer,
    update_params_frequency=exact_variational.num_of_batch_until_full_cycle,
    accumulate_sum_or_mean=True)
model.compile(optimizer=optimizer, loss=loss_for_energy_minimization)
model.summary()

# still can use monte carlo generator for estimating histograms
# tensorboard = TensorBoardWithGeneratorValidationData(
#     log_dir='tensorboard_logs/exact_run_0', generator=monte_carlo_generator,
#     update_freq=1, histogram_freq=1, batch_size=batch_size, write_output=False)
tensorboard = TensorBoard(log_dir='tensorboard_logs/exact_run_single_gpu',
                          update_freq=1)
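The snippet stops after constructing the TensorBoard callback. In FlowKet's exact-optimization examples the remaining step is an ordinary Keras generator fit; the lines below are a sketch of that step, assuming ExactVariational exposes a to_generator() method yielding Keras-compatible batches (check the FlowKet examples for the exact call).

# Hypothetical final training step, not part of the original snippet:
# feed the exact-variational generator to Keras together with the callback above.
# to_generator() is an assumption here, not verified against the library.
callbacks = [tensorboard]
model.fit_generator(exact_variational.to_generator(),
                    steps_per_epoch=steps_per_epoch,
                    epochs=1,
                    callbacks=callbacks,
                    max_queue_size=0,
                    workers=0)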