    ylim=flags['ylim'],
    delta=flags['delta_lp'],
    output_dir=flags['latent_points_dir'],
    fig_ext=flags['lp_plotter_fig_ext'])

ls_plotter = LatentSamplePlotter(
    image_shape=flags['img_shape'],
    xlim=flags['xlim'],
    ylim=flags['ylim'],
    delta=flags['delta_ls'],
    num_samples_to_average=flags['num_samples_to_average'],
    output_dir=flags['latent_samples_dir'])

# Load the training data and draw the batch the GP-LVM will be fitted to.
training_data = Data(flags['training_data'],
                     shuffle_first=flags['shuffle_data'],
                     batch_size=flags['training_batch_size'],
                     log_epochs=flags['data_log_epochs'],
                     name='TrainingData')
Y = training_data.next_batch()

# Let TensorFlow grow GPU memory on demand instead of claiming it all upfront.
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
session = tf.Session(config=config)

# Squared-exponential kernel over the Q-dimensional latent space.
kern = gplvm.SEKernel(session=session,
                      alpha=flags['kernel_alpha'],
                      gamma=flags['kernel_gamma'],
                      ARD=flags['kernel_ard'],
                      Q=flags['q'])
layer = gplvm.GPLVM(Y=Y,
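
# --- Illustrative aside --------------------------------------------------
# A minimal NumPy sketch of the squared-exponential kernel that the flags
# above presumably configure: k(x, x') = alpha * exp(-0.5 * sum_q gamma_q
# * (x_q - x'_q)^2), with a single gamma when ARD is off and one inverse
# squared lengthscale per latent dimension when it is on. This is an
# assumption about the parameterisation, not the repo's gplvm.SEKernel.
import numpy as np

def se_kernel(X1, X2, alpha=1.0, gamma=1.0, ard=False):
    """X1: (N1, Q), X2: (N2, Q); returns the (N1, N2) kernel matrix."""
    Q = X1.shape[1]
    # With ARD, gamma is a length-Q vector; otherwise broadcast the scalar.
    gamma = np.asarray(gamma) if ard else np.full(Q, gamma)
    diff = X1[:, None, :] - X2[None, :, :]        # (N1, N2, Q) pairwise differences
    sq_dist = np.sum(gamma * diff ** 2, axis=-1)  # weighted squared distances
    return alpha * np.exp(-0.5 * sq_dist)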
import importlib.util

import tensorflow as tf

# Load the experiment configuration from flags.py at runtime.
module_spec = importlib.util.spec_from_file_location('flags', 'flags.py')
module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(module)
flags = module.flags

# Note: this script should be run after the model is trained (run train.py
# first). The generalisation experiment consists of trying to "reconstruct"
# the test data as closely as possible. The output of this script will be two
# NumPy arrays: one containing the test data, the other the corresponding
# data generated by the model.
init_logging(flags['test_log_file'])

test_data = Data(flags['test_data'],
                 shuffle_first=False,
                 batch_size=flags['test_generalisation_batch_size'],
                 log_epochs=flags['data_log_epochs'],
                 name='TestData')
test_data_batch = test_data.next_batch()

config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
session = tf.Session(config=config)

# Restore the trained lower SBM layer from its checkpoint.
layer1 = SBM_Lower(session=session,
                   side=flags['img_shape'][0],
                   side_overlap=flags['layer_1_side_overlap'],
                   num_h=flags['layer_1_num_h'],
                   name=flags['layer_1_name'])
layer1.restore(flags['layer_1_ckpt'])
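
# --- Illustrative aside --------------------------------------------------
# A minimal sketch of the output step the note above describes: evaluate the
# test batch and the model's reconstruction of it, then save both as NumPy
# arrays. `reconstruction_op` is a hypothetical tensor standing in for
# however the restored layers map the test batch back to data space; it is
# not part of the repo's API.
import os
import numpy as np

def save_generalisation_arrays(session, test_batch, reconstruction_op, out_dir='.'):
    # Fetch both tensors in a single run so they refer to the same batch.
    originals, reconstructions = session.run([test_batch, reconstruction_op])
    np.save(os.path.join(out_dir, 'test_data.npy'), originals)
    np.save(os.path.join(out_dir, 'reconstructions.npy'), reconstructions)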