Example #1
0
    def setUp(self) -> None:
        """Build a small RBM plus the sampling and free-energy ops under test.

        Creates a fresh TF1 session, constructs an RBM with NUM_V visible and
        NUM_H hidden units, draws fixed Bernoulli(0.5) visible/hidden batches,
        and caches the graph nodes the individual tests run.
        """
        # Layer sizes, batch size, and test tolerances.
        self.NUM_V = 6
        self.NUM_H = 5
        self.BATCH_SIZE = 20
        # NOTE(review): the 6 and 5 below duplicate NUM_V / NUM_H literally —
        # keep in sync if the sizes above ever change.
        self.V_BATCH_SHAPE = (self.BATCH_SIZE, 6)
        self.H_BATCH_SHAPE = (self.BATCH_SIZE, 5)
        self.W_STD = 0.01          # stddev for weight initialisation
        self.GRAD_ERROR = 1e-3     # tolerance used by gradient-check tests

        self.sess = tf.Session()

        # System under test: a project-defined RBM bound to this session.
        self.rbm = RBM(num_v=self.NUM_V,
                       num_h=self.NUM_H,
                       W_std=self.W_STD,
                       session=self.sess)

        # Fixed random binary batches (values in {0, 1}) used as test inputs.
        self.h_batch = tf.constant(np.random.binomial(1,
                                                      0.5,
                                                      size=self.H_BATCH_SHAPE),
                                   dtype=c.float_type)

        self.v_batch = tf.constant(np.random.binomial(1,
                                                      0.5,
                                                      size=self.V_BATCH_SHAPE),
                                   dtype=c.float_type)

        # Graph nodes exercised by the tests: hidden/visible sampling and the
        # free energy of the visible batch.
        h_probs = self.rbm.h_probs(self.v_batch)
        self.sample_h = self.rbm.sample_h(h_probs)

        v_probs = self.rbm.v_probs(self.h_batch)
        self.sample_v = self.rbm.sample_v(v_probs)

        self.free_energy = self.rbm.free_energy(self.v_batch)

        self.sess.run(tf.global_variables_initializer())
Example #2
0
# Evaluate generalisation of a two-layer model (SBM_Lower -> RBM) restored
# from checkpoints, writing a results table to disk.
# One evaluation batch from the held-out data (`test_data` is built earlier
# in the file, outside this view).
test_data_batch = test_data.next_batch()

# Grow GPU memory on demand instead of reserving it all up front.
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
session = tf.Session(config=config)

# Bottom layer: project-defined SBM operating on image patches.
layer1 = SBM_Lower(session=session,
                   side=flags['img_shape'][0],
                   side_overlap=flags['layer_1_side_overlap'],
                   num_h=flags['layer_1_num_h'],
                   name=flags['layer_1_name'])

layer1.restore(flags['layer_1_ckpt'])

# Top layer: RBM stacked on layer1 (its visible units are layer1's hiddens).
layer2 = RBM(session=session,
             num_v=flags['layer_1_num_h'],
             num_h=flags['layer_2_num_h'],
             bottom=layer1,
             name=flags['layer_2_name'])

layer2.restore(flags['layer_2_ckpt'])

# Results table is written under the plots directory.
table_path = os.path.join(flags['test_generalisation_plots_dir'], 'table')

layer2.test_generalisation(
    test_data=test_data_batch,
    output_table_path=table_path,
    num_iterations=flags['test_generalisation_num_iterations'],
    num_runs=flags['test_generalisation_num_runs'],
    num_samples_to_average=flags['test_generalisation_num_samples_to_average'])

with open(table_path, 'rb') as f:
Example #3
0
                 batch_size=flags['test_generalisation_batch_size'],
                 log_epochs=flags['data_log_epochs'],
                 name='TestData')

# Restore a two-layer RBM stack from checkpoints for downstream GPLVM work.
# One batch from the test data stream defined just above.
test_data_batch = test_data.next_batch()

# Trainable latent point in the GPLVM's q-dimensional latent space,
# initialised from a standard normal draw.
x_test = tf.get_variable(name='x_test',
                         initializer=tf.random_normal(shape=(1, flags['q']),
                                                      dtype=c.float_type))

# Grow GPU memory on demand instead of reserving it all up front.
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
session = tf.Session(config=config)

# Bottom RBM: visible layer is the flattened image.
layer1 = RBM(session=session,
             num_v=flags['img_shape'][0] * flags['img_shape'][1],
             num_h=flags['layer_1_num_h'],
             temperature=flags['temperature'],
             name=flags['layer_1_name'])

layer1.restore(flags['layer_1_ckpt'])

# Top RBM stacked on layer1 (its visible units are layer1's hiddens).
layer2 = RBM(session=session,
             num_v=flags['layer_1_num_h'],
             num_h=flags['layer_2_num_h'],
             temperature=flags['temperature'],
             bottom=layer1,
             name=flags['layer_2_name'])

layer2.restore(flags['layer_2_ckpt'])

kern = gplvm.SEKernel(session=session,
Example #4
0
                     name='TrainingData')
# Held-out evaluation stream used during training.
test_data = Data(flags['test_data'],
                 shuffle_first=flags['shuffle_data'],
                 batch_size=flags['test_batch_size'],
                 log_epochs=flags['data_log_epochs'],
                 name='TestData')

# Single Adam optimizer shared by the layer-wise training calls.
optimizer = tf.train.AdamOptimizer(learning_rate=flags['learning_rate'])

# Grow GPU memory on demand instead of reserving it all up front.
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
session = tf.Session(config=config)

# Bottom RBM: visible layer is the flattened image.
layer1 = RBM(session=session,
             num_v=flags['img_shape'][0] * flags['img_shape'][1],
             num_h=flags['layer_1_num_h'],
             lr=flags['learning_rate'],
             W_std=flags['layer_1_W_std'],
             loss_plotter=loss_plotter,
             distr_plotter=distr_plotter,
             name=flags['layer_1_name'])

# Train the first layer with (P)CD Gibbs sampling, checkpointing periodically.
layer1.train(optimizer=optimizer,
             training_data=training_data,
             test_data=test_data,
             num_gibbs_steps=flags['num_gibbs_steps'],
             pcd=flags['pcd'],
             max_iterations=flags['num_iterations'],
             eval_interval=flags['eval_interval'],
             ckpt_interval=flags['ckpt_interval'],
             ckpt_dir=flags['ckpt_dir'])

layer2 = RBM(session=session,
Example #5
0
# Train the bottom layer, then stack and train a second RBM on top of it.
layer1.train(optimizer=optimizer,
             training_data=training_data,
             test_data=test_data,
             num_gibbs_steps=flags['num_gibbs_steps'],
             pcd=flags['pcd'],
             max_iterations=flags['num_iterations'],
             eval_interval=flags['eval_interval'],
             ckpt_interval=flags['ckpt_interval'],
             ckpt_dir=flags['ckpt_dir'])

# Top RBM stacked on layer1 (its visible units are layer1's hiddens).
# NOTE(review): W_std reuses flags['layer_1_W_std'] here — likely a
# copy-paste of the layer-1 setup; confirm whether a dedicated
# 'layer_2_W_std' flag was intended.
layer2 = RBM(session=session,
             num_v=flags['layer_1_num_h'],
             num_h=flags['layer_2_num_h'],
             lr=flags['learning_rate'],
             W_std=flags['layer_1_W_std'],
             bottom=layer1,
             loss_plotter=loss_plotter,
             distr_plotter=distr_plotter,
             name=flags['layer_2_name'])

# Train the stacked layer with the same schedule and optimizer as layer1.
layer2.train(optimizer=optimizer,
             training_data=training_data,
             test_data=test_data,
             num_gibbs_steps=flags['num_gibbs_steps'],
             pcd=flags['pcd'],
             max_iterations=flags['num_iterations'],
             eval_interval=flags['eval_interval'],
             ckpt_interval=flags['ckpt_interval'],
             ckpt_dir=flags['ckpt_dir'])