def get_simple_linear_model(orig_optimizer,
                            update_params_frequency,
                            accumulate_sum_or_mean,
                            ema_decay=0):
    inputs = Input(shape=(1, ), dtype='float32')
    outputs = Dense(1, use_bias=False, kernel_initializer='ones')(inputs)
    model = Model(inputs=inputs, outputs=outputs)
    convert_to_accumulate_gradient_optimizer(
        orig_optimizer,
        update_params_frequency=update_params_frequency,
        accumulate_sum_or_mean=accumulate_sum_or_mean,
        ema_decay=ema_decay)

    def y_loss(y_true, y_pred):
        return K.mean(y_pred)

    def get_w():
        return model.get_weights()[0][0][0].item()

    def get_sgd_iteration():
        return orig_optimizer.get_weights()[orig_optimizer.weights.index(
            orig_optimizer.iterations)].item()

    def set_weights_ema():
        orig_optimizer.set_weights_ema()

    model.compile(optimizer=orig_optimizer, loss=y_loss)
    return model, get_w, get_sgd_iteration, set_weights_ema
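A minimal usage sketch (not part of the original listing; the SGD optimizer, toy data, and the accumulation window of 5 below are illustrative assumptions):

import numpy as np
from keras.optimizers import SGD

# Hypothetical check: with update_params_frequency=5, the converted optimizer
# should apply a single parameter update after five single-sample batches.
model, get_w, get_sgd_iteration, set_weights_ema = get_simple_linear_model(
    SGD(lr=1.0), update_params_frequency=5, accumulate_sum_or_mean=True)
x = np.ones((5, 1), dtype='float32')
model.fit(x, x, batch_size=1, epochs=1, verbose=0)
print(get_sgd_iteration(), get_w())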
Example #2
def compile_model(model, initial_learning_rate, use_horovod):
    optimizer = Adam(lr=initial_learning_rate, beta_1=0.9, beta_2=0.9)
    convert_to_accumulate_gradient_optimizer(optimizer,
                                             update_params_frequency=1,
                                             accumulate_sum_or_mean=True,
                                             use_horovod=use_horovod)
    model.compile(optimizer=optimizer, loss=loss_for_energy_minimization)
    return optimizer
Example #3
def build_model(hilbert_state_shape, depth, width, weights_normalization,
                learning_rate):
    inputs = Input(shape=hilbert_state_shape, dtype='int8')
    convnet = ConvNetAutoregressive2D(
        inputs,
        depth=depth,
        num_of_channels=width,
        weights_normalization=weights_normalization)
    predictions, conditional_log_probs = convnet.predictions, convnet.conditional_log_probs
    model = Model(inputs=inputs, outputs=predictions)
    conditional_log_probs_model = Model(inputs=inputs,
                                        outputs=conditional_log_probs)

    optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.9)
    convert_to_accumulate_gradient_optimizer(optimizer,
                                             update_params_frequency=1,
                                             accumulate_sum_or_mean=True)
    model.compile(optimizer=optimizer, loss=loss_for_energy_minimization)
    model.summary()
    sampler = FastAutoregressiveSampler(conditional_log_probs_model, 16)
    return model, sampler
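A hypothetical call for reference (all argument values are illustrative, not from the original):

# model, sampler = build_model(hilbert_state_shape=(4, 4), depth=10, width=32,
#                              weights_normalization=True, learning_rate=1e-3)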
Example #4
inputs = Input(shape=hilbert_state_shape, dtype='int8')
convnet = SimpleConvNetAutoregressive1D(inputs,
                                        depth=7,
                                        num_of_channels=32,
                                        weights_normalization=False)
model = Model(inputs=inputs, outputs=convnet.predictions)

batch_size = 2**12
steps_per_epoch = 500 * (2**4)

operator = Ising(h=3.0, hilbert_state_shape=hilbert_state_shape, pbc=False)
exact_variational = ExactVariational(model, operator, batch_size)

optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
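# Presumably, setting update_params_frequency to num_of_batch_until_full_cycle
# makes each applied update accumulate gradients from one complete sweep over
# the exact Hilbert space (an assumption based on the attribute name).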
convert_to_accumulate_gradient_optimizer(
    optimizer,
    update_params_frequency=exact_variational.num_of_batch_until_full_cycle,
    accumulate_sum_or_mean=True)
model.compile(optimizer=optimizer, loss=loss_for_energy_minimization)
model.summary()

# We can still use the Monte Carlo generator for estimating histograms:
# tensorboard = TensorBoardWithGeneratorValidationData(
#     log_dir='tensorboard_logs/exact_run_0', generator=monte_carlo_generator,
#     update_freq=1, histogram_freq=1, batch_size=batch_size, write_output=False)
tensorboard = TensorBoard(log_dir='tensorboard_logs/exact_run_single_gpu',
                          update_freq=1)

callbacks = default_wave_function_callbacks_factory(
    exact_variational,
    true_ground_state_energy=-49.257706531889006) + [tensorboard]
model.fit_generator(exact_variational.to_generator(),
                    steps_per_epoch=steps_per_epoch,
                    callbacks=callbacks)
Example #5
# convnet = ConvNetAutoregressive2D(inputs, depth=10, num_of_channels=32, kernel_size=(3,3))
# predictions, conditional_log_probs, fast_sampling = convnet.predictions, convnet.conditional_log_probs, convnet.samples

# This creates a model that maps the Input layer to the
# autoregressive network's predictions
model = Model(inputs=inputs, outputs=predictions)
logical_batch_size = 1024
batch_size = 256
logical_steps_per_epoch = 500
logical_actual_ratio = int(logical_batch_size / batch_size)
steps_per_epoch = logical_steps_per_epoch * logical_actual_ratio
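# Assumed intent: updates are applied only every logical_actual_ratio
# (= 1024 / 256 = 4) physical batches, so the effective batch size is
# logical_batch_size while only batch_size samples are processed per step,
# and steps_per_epoch (= 500 * 4) keeps 500 logical steps per epoch.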

# optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
convert_to_accumulate_gradient_optimizer(
    optimizer,
    update_params_frequency=logical_actual_ratio,
    accumulate_sum_or_mean=False)
model.compile(optimizer=optimizer, loss=loss_for_energy_minimization)
hilbert_state_shape = cube_shape(number_of_spins_in_each_dimention=12,
                                 cube_dimention=2)
operator = Ising(h=3.0, hilbert_state_shape=hilbert_state_shape, pbc=False)
# import netket as nk
# g = nk.graph.Hypercube(length=12, n_dim=2, pbc=False)
# hi = nk.hilbert.Spin(s=0.5, graph=g)
# netket_operator = nk.operator.Ising(h=3.0, hilbert=hi, J=1.0)
# operator = NetketOperator(netket_operator=netket_operator, hilbert_state_shape=hilbert_state_shape, max_num_of_local_connections=200)
wave_function_cache = WaveFunctionValuesCache(
    reset_cache_interval=logical_actual_ratio)
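# Assumed intent: the cache is reset every logical_actual_ratio batches, i.e.
# whenever the accumulated update is applied, since cached wave-function values
# go stale once the weights change.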
# VariationalMonteCarlo
sampler = MetropolisHastingsLocal(model,
                                  batch_size,