def run(operator, config, true_ground_state_energy=None):
    """Evaluate a trained wave function with symmetry-averaged predictions.

    Restores the weights stored at ``config.weights_path``, wraps the model so
    that its amplitudes are invariant under the 2D open-boundary lattice
    symmetries and the global spin flip, and then estimates the energy by
    Monte Carlo sampling, printing the aggregated statistics.

    Args:
        operator: the Hamiltonian whose local energies are estimated.
        config: run configuration (weights path, Hilbert-space shape,
            mini-batch size, number of samples).
        true_ground_state_energy: optional reference energy used by the
            callbacks to report a relative error.
    """
    model, sampler = build_model(operator, config)
    model.load_weights(config.weights_path)

    # Build the symmetry-averaged wrapper around the trained network.
    spin_input = Input(shape=config.hilbert_state_shape, dtype='int8')
    lattice_input = Input(shape=config.hilbert_state_shape, dtype=spin_input.dtype)
    invariant_model = make_2d_obc_invariants(lattice_input, model)
    invariant_model = make_up_down_invariant(spin_input, invariant_model)

    # Sample in chunks of mini_batch_size; the invariant model is heavier to
    # evaluate than the raw network.
    sampler = sampler.copy_with_new_batch_size(config.mini_batch_size)
    vmc = VariationalMonteCarlo(invariant_model, operator, sampler,
                                mini_batch_size=config.mini_batch_size)
    callbacks = default_wave_function_stats_callbacks_factory(
        vmc,
        log_in_batch_or_epoch=False,
        true_ground_state_energy=true_ground_state_energy)
    progress_keys = {'energy/energy': 'energy',
                     'energy/relative_error': 'relative_error',
                     'energy/local_energy_variance': 'variance'}
    results = evaluate(vmc,
                       steps=config.num_of_samples // config.mini_batch_size,
                       callbacks=callbacks[:4],
                       keys_to_progress_bar_mapping=progress_keys)
    print(results)
def test_exact_and_monte_carlo_agree(model_builder, operator, batch_size, num_of_mc_iterations):
    """Check that Monte Carlo energy estimation matches exact summation.

    Computes the energy exactly over the full Hilbert space, then estimates it
    with ``num_of_mc_iterations`` Monte Carlo batches, and asserts the two
    agree within one standard error of the Monte Carlo estimator.
    """
    with DEFAULT_TF_GRAPH.as_default():
        model = model_builder()
        exact_variational = ExactVariational(model, operator, batch_size)
        reduce_variance(exact_variational, model)
        sampler = ExactSampler(exact_variational, batch_size)
        variational_monte_carlo = VariationalMonteCarlo(
            model, operator, sampler)
        exact_logs = exact_evaluate(exact_variational,
                                    [ExactLocalEnergy(exact_variational)])
        exact_energy = exact_logs['energy/energy']
        monte_carlo_energy = evaluate(
            variational_monte_carlo, num_of_mc_iterations,
            [LocalEnergyStats(variational_monte_carlo)])['energy/energy']
        # Standard error of the MC mean over batch_size * num_of_mc_iterations
        # samples, derived from the exact local-energy variance.
        monte_carlo_std = np.sqrt(exact_logs['energy/local_energy_variance']
                                  / (batch_size * num_of_mc_iterations))
        # BUG FIX: the second positional argument of pytest.approx is the
        # *relative* tolerance; the standard error is an *absolute* tolerance,
        # so it must be passed as abs=... (previously the effective tolerance
        # was monte_carlo_std * |exact_energy|, far too loose).
        assert monte_carlo_energy == pytest.approx(exact_energy,
                                                   abs=monte_carlo_std)
# NOTE(review): this line is a fragment — it begins mid-argument-list of a
# `default_wave_function_stats_callbacks_factory(...)` call whose opening
# (and the enclosing function header) lies outside the visible chunk.
# Code is left byte-identical; it appears to finish building the callback
# list, run fit_generator for 2 epochs, save weights, and then evaluate both
# the raw model and a 2D-OBC-invariant wrapper for 200 steps each.
monte_carlo_generator, validation_generator=validation_generator, true_ground_state_energy=-50.18662388277671) + [tensorboard] model.fit_generator(monte_carlo_generator, steps_per_epoch=steps_per_epoch, epochs=2, callbacks=callbacks, max_queue_size=0, workers=0) model.save_weights('final_2d_ising_fcnn.h5') print('evaluate normal model') evaluate(monte_carlo_generator, steps=200, callbacks=callbacks[:4], keys_to_progress_bar_mapping={ 'energy/energy': 'energy', 'energy/relative_error': 'relative_error' }) print('evaluate invariant model') evaluation_inputs = Input(shape=hilbert_state_shape, dtype='int8') invariant_model = make_2d_obc_invariants(evaluation_inputs, model) generator = VariationalMonteCarlo(invariant_model, operator, sampler) evaluate(generator, steps=200, callbacks=callbacks[:4], keys_to_progress_bar_mapping={ 'energy/energy': 'energy', 'energy/relative_error': 'relative_error' })
def run(params, batch_size_list, epochs_list):
    """Train on the 12x12 open-boundary transverse-field Ising model in stages.

    Each (batch_size, epochs) pair from the two lists defines one training
    stage; stages already covered by a restored checkpoint are skipped.
    After all stages, the final model is wrapped with the 2D OBC and
    spin-flip symmetry invariants and its energy is evaluated.

    Args:
        params: hyper-parameters (depth, width, learning rate, gamma, ...).
        batch_size_list: per-stage sample batch sizes.
        epochs_list: per-stage epoch counts (same length as batch_size_list).
    """
    run_name = 'depth_%s_width_%s_weights_normalization_%s_adam_lr_%s_gamma_%s_run_%s' % (
        params.depth, params.width, params.no_weights_normalization,
        params.learning_rate, params.gamma, params.run_index)
    hilbert_state_shape = (12, 12)
    model, sampler = build_model(hilbert_state_shape, params.depth,
                                 params.width,
                                 not params.no_weights_normalization,
                                 params.learning_rate)
    operator = Ising(hilbert_state_shape=hilbert_state_shape, pbc=False,
                     h=params.gamma)
    checkpoint_path = '%s.h5' % run_name
    # Resume from a previous run of the same configuration, if any.
    initial_epoch = restore_run_state(model, checkpoint_path)
    mini_batch_size = depth_to_max_mini_batch(params.depth)
    true_ground_state_energy = true_ground_state_energy_mapping[params.gamma]
    total_epochs = 0
    for stage, (batch_size, stage_epochs) in enumerate(zip(batch_size_list,
                                                           epochs_list)):
        total_epochs += stage_epochs
        # Skip stages that the restored checkpoint already completed.
        if total_epochs <= initial_epoch:
            continue
        # Validation draws more samples (capped at 2**15) for a tighter
        # energy estimate; must be built before sampler is re-sized below.
        validation_sampler = sampler.copy_with_new_batch_size(
            min(batch_size * 8, 2**15), mini_batch_size=mini_batch_size)
        assert batch_size < mini_batch_size or batch_size % mini_batch_size == 0
        sampler = sampler.copy_with_new_batch_size(batch_size, mini_batch_size)
        vmc = VariationalMonteCarlo(model, operator, sampler,
                                    mini_batch_size=mini_batch_size)
        validation_generator = VariationalMonteCarlo(
            model, operator, validation_sampler,
            wave_function_evaluation_batch_size=mini_batch_size)
        # Gradients are accumulated over several mini-batches per update.
        model.optimizer.set_update_params_frequency(vmc.update_params_frequency)
        tensorboard = TensorBoardWithGeneratorValidationData(
            log_dir='tensorboard_logs/%s' % run_name, generator=vmc,
            update_freq='epoch', histogram_freq=0, batch_size=batch_size,
            write_output=False)
        callbacks = default_wave_function_stats_callbacks_factory(
            vmc, validation_generator=validation_generator,
            log_in_batch_or_epoch=False,
            true_ground_state_energy=true_ground_state_energy,
            validation_period=3)
        callbacks += [tensorboard,
                      CheckpointByTime(checkpoint_path, save_weights_only=True)]
        model.fit_generator(vmc.to_generator(),
                            steps_per_epoch=100 * vmc.update_params_frequency,
                            epochs=total_epochs, callbacks=callbacks,
                            max_queue_size=0, workers=0,
                            initial_epoch=initial_epoch)
        model.save_weights('%s_stage_%s.h5' % (run_name, stage))
        initial_epoch = total_epochs
    # Final evaluation with the symmetry-averaged model.
    spin_input = Input(shape=hilbert_state_shape, dtype='int8')
    lattice_input = Input(shape=hilbert_state_shape, dtype=spin_input.dtype)
    invariant_model = make_2d_obc_invariants(lattice_input, model)
    invariant_model = make_up_down_invariant(spin_input, invariant_model)
    # The invariant wrapper evaluates many symmetry copies per sample, so
    # shrink the mini-batch accordingly.
    mini_batch_size = mini_batch_size // 16
    sampler = sampler.copy_with_new_batch_size(mini_batch_size)
    vmc = VariationalMonteCarlo(invariant_model, operator, sampler,
                                mini_batch_size=mini_batch_size)
    callbacks = default_wave_function_stats_callbacks_factory(
        vmc, log_in_batch_or_epoch=False,
        true_ground_state_energy=true_ground_state_energy)
    progress_keys = {'energy/energy': 'energy',
                     'energy/relative_error': 'relative_error',
                     'energy/local_energy_variance': 'variance'}
    results = evaluate(vmc, steps=(2**15) // mini_batch_size,
                       callbacks=callbacks[:4],
                       keys_to_progress_bar_mapping=progress_keys)
    print(results)
def train(operator, config, true_ground_state_energy=None):
    """Train a wave-function model in stages, optionally distributed via Horovod.

    Iterates over (batch_size, num_epoch, learning_rate) stages from the
    config, resuming from an existing checkpoint when possible. Only rank 0
    writes config, TensorBoard logs, checkpoints and stage weights. After all
    stages, wraps the model with the 2D OBC and spin-flip symmetry invariants
    and evaluates the energy over ~2**15 samples.

    Args:
        operator: the Hamiltonian being minimized.
        config: run configuration (stages, mini-batch size, output path,
            Horovod flag, ...).
        true_ground_state_energy: optional reference energy for the
            relative-error metric.
    """
    if config.use_horovod:
        init_horovod()
    to_valid_stages_config(config)
    is_rank_0 = (not config.use_horovod) or hvd.rank() == 0
    if is_rank_0:
        save_config(config)
    model, sampler = build_model(operator, config)
    optimizer = compile_model(model, config.learning_rate[0],
                              config.use_horovod)
    checkpoint_path = os.path.join(config.output_path, 'model.h5')
    initial_epoch = load_weights_if_exist(model, checkpoint_path)
    total_epochs = 0
    for idx, (batch_size, num_epoch, learning_rate) in enumerate(
            zip(config.batch_size, config.num_epoch, config.learning_rate)):
        total_epochs += num_epoch
        # Skip stages already completed by the restored checkpoint.
        if total_epochs <= initial_epoch:
            continue
        vmc_cls = VariationalMonteCarlo
        if config.use_horovod:
            # Split the global batch across workers.
            batch_size = int(math.ceil(batch_size / hvd.size()))
            vmc_cls = HorovodVariationalMonteCarlo
        # Validation uses more samples (capped at 2**15) for a tighter
        # estimate; built before sampler is re-sized below.
        validation_sampler = sampler.copy_with_new_batch_size(
            min(batch_size * 8, 2**15),
            mini_batch_size=config.mini_batch_size)
        assert batch_size < config.mini_batch_size or batch_size % config.mini_batch_size == 0
        sampler = sampler.copy_with_new_batch_size(batch_size,
                                                   config.mini_batch_size)
        variational_monte_carlo = vmc_cls(
            model, operator, sampler,
            mini_batch_size=config.mini_batch_size)
        validation_generator = vmc_cls(
            model, operator, validation_sampler,
            wave_function_evaluation_batch_size=config.mini_batch_size)
        # Gradients are accumulated over several mini-batches per update.
        optimizer.set_update_params_frequency(
            variational_monte_carlo.update_params_frequency)
        K.set_value(optimizer.lr, learning_rate)
        callbacks = default_wave_function_stats_callbacks_factory(
            variational_monte_carlo,
            validation_generator=validation_generator,
            log_in_batch_or_epoch=False,
            true_ground_state_energy=true_ground_state_energy,
            validation_period=config.validation_period)
        if config.use_horovod:
            # Broadcast initial weights first; average metrics last.
            callbacks = [
                hvd.callbacks.BroadcastGlobalVariablesCallback(0),
            ] + callbacks + [hvd.callbacks.MetricAverageCallback()]
        if is_rank_0:
            tensorboard = TensorBoardWithGeneratorValidationData(
                log_dir=config.output_path,
                generator=variational_monte_carlo, update_freq='epoch',
                histogram_freq=0, batch_size=batch_size, write_output=False,
                write_graph=False)
            callbacks += [
                tensorboard,
                CheckpointByTime(checkpoint_path, save_weights_only=True)
            ]
            verbose = 1
        else:
            verbose = 0
        model.fit_generator(
            variational_monte_carlo.to_generator(),
            steps_per_epoch=config.steps_per_epoch
            * variational_monte_carlo.update_params_frequency,
            epochs=total_epochs, callbacks=callbacks, max_queue_size=0,
            workers=0, initial_epoch=initial_epoch, verbose=verbose)
        if is_rank_0:
            model.save_weights(
                os.path.join(config.output_path, 'stage_%s.h5' % (idx + 1)))
        initial_epoch = total_epochs
    # Final evaluation with the symmetry-averaged model.
    evaluation_inputs = Input(shape=config.hilbert_state_shape, dtype='int8')
    obc_input = Input(shape=config.hilbert_state_shape,
                      dtype=evaluation_inputs.dtype)
    invariant_model = make_2d_obc_invariants(obc_input, model)
    invariant_model = make_up_down_invariant(evaluation_inputs,
                                             invariant_model)
    # The invariant wrapper evaluates 16 symmetry copies per sample
    # (8 OBC symmetries x 2 spin-flip), so shrink the mini-batch by 16.
    mini_batch_size = config.mini_batch_size // 16
    # BUG FIX: the reduced mini_batch_size was computed but the unreduced
    # config.mini_batch_size was passed to the sampler and the VMC, so the
    # reduction only affected the step count (16x too many samples and a
    # 16x oversized evaluation batch). Use the reduced value consistently,
    # matching the sibling single-process training script.
    sampler = sampler.copy_with_new_batch_size(mini_batch_size)
    vmc_cls = VariationalMonteCarlo
    if config.use_horovod:
        vmc_cls = HorovodVariationalMonteCarlo
    variational_monte_carlo = vmc_cls(invariant_model, operator, sampler,
                                      mini_batch_size=mini_batch_size)
    callbacks = default_wave_function_stats_callbacks_factory(
        variational_monte_carlo, log_in_batch_or_epoch=False,
        true_ground_state_energy=true_ground_state_energy)
    if config.use_horovod:
        callbacks = callbacks + [hvd.callbacks.MetricAverageCallback()]
    results = evaluate(variational_monte_carlo,
                       steps=(2**15) // mini_batch_size,
                       callbacks=callbacks[:4],
                       keys_to_progress_bar_mapping={
                           'energy/energy': 'energy',
                           'energy/relative_error': 'relative_error',
                           'energy/local_energy_variance': 'variance'
                       },
                       verbose=is_rank_0)
    if is_rank_0:
        print(results)