Example #1
def test_optimizer_step():
    batch_size = 64
    width = 14
    height = 28
    hyper = {
        'width_height': (width, height, 1),
        'model_type': 'GS',
        'batch_size': batch_size,
        'learning_rate': 0.0003,
        'units_per_layer': 240,
        'temp': tf.constant(0.1)
    }
    shape = (batch_size, width, height, 1)
    x_upper, x_lower = create_upper_and_lower_dummy_data(shape=shape)
    sop = SOP(hyper=hyper)
    optimizer = tf.keras.optimizers.Adam(learning_rate=hyper['learning_rate'],
                                         beta_1=0.9,
                                         beta_2=0.999,
                                         decay=1.e-3)
    sop_opt = SOPOptimizer(model=sop, optimizer=optimizer)
    gradients, loss = sop_opt.compute_gradients_and_loss(x_upper=x_upper,
                                                         x_lower=x_lower)
    sop_opt.apply_gradients(gradients=gradients)
    print('\nTEST: Gradient step from optimizer')
    assert gradients is not None
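
The helper create_upper_and_lower_dummy_data is not shown on this page. A minimal sketch of what it could look like, assuming it only needs to return two tensors of the requested shape (stand-ins for the upper and lower 14x28 halves of a 28x28 MNIST digit):

import tensorflow as tf

def create_upper_and_lower_dummy_data(shape):
    # Hypothetical stand-in: random binary images of the requested shape,
    # one tensor for the upper half and one for the lower half.
    x_upper = tf.cast(tf.random.uniform(shape) > 0.5, tf.float32)
    x_lower = tf.cast(tf.random.uniform(shape) > 0.5, tf.float32)
    return x_upper, x_lower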
Example #2
def setup_sop_optimizer(hyper):
    optimizer = tf.keras.optimizers.Adam(learning_rate=hyper['learning_rate'],
                                         beta_1=0.9,
                                         beta_2=0.999)
    model = SOP(hyper=hyper)
    sop_optimizer = SOPOptimizer(model=model, optimizer=optimizer)
    return sop_optimizer
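
A usage sketch for setup_sop_optimizer, assuming the same hyperparameter keys used in the other examples on this page:

import tensorflow as tf

hyper = {
    'width_height': (14, 28, 1),
    'model_type': 'GS',
    'batch_size': 64,
    'learning_rate': 0.0003,
    'units_per_layer': 240,
    'temp': tf.constant(0.1)
}
sop_optimizer = setup_sop_optimizer(hyper=hyper)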
Example #3
def test_count_of_network_parameters():
    batch_size, width, height, rgb = 64, 14, 28, 1
    units_per_layer = 240
    hyper = {
        'width_height': (width, height, rgb),
        'model_type': 'GS',
        'batch_size': batch_size,
        'units_per_layer': units_per_layer,
        'temp': tf.constant(0.1)
    }
    shape = (batch_size, width, height, rgb)
    sop = SOP(hyper=hyper)
    sop.build(input_shape=shape)
    print('\nTEST: Number of parameters in the network')
    assert sop.h1_dense.count_params() == (392 + 1) * units_per_layer
    assert sop.h2_dense.count_params() == (units_per_layer +
                                           1) * units_per_layer
    assert sop.out_dense.count_params() == (units_per_layer + 1) * 392
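
The expected values follow the usual count for a fully connected layer, (input_dim + 1) * units, where the +1 accounts for the bias term and 392 = 14 * 28 is the flattened upper half of the image. A quick check of the arithmetic:

units_per_layer = 240
input_dim = 14 * 28                            # 392 flattened pixels
h1 = (input_dim + 1) * units_per_layer         # 94,320
h2 = (units_per_layer + 1) * units_per_layer   # 57,840
out = (units_per_layer + 1) * input_dim        # 94,472
print(h1, h2, out)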
Example #4
def test_fwd_pass_connections_and_gradient(self):
    batch_size = 64
    width = 14
    height = 28
    hyper = {
        'width_height': (width, height, 1),
        'model_type': 'GS',
        'batch_size': batch_size,
        'temp': tf.constant(0.1)
    }
    shape = (batch_size, width, height, 1)
    x_upper, x_lower = create_upper_and_lower(shape=shape)
    sop = SOP(hyper=hyper)
    with tf.GradientTape() as tape:
        logits = sop.call(x_upper=x_upper)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=x_lower,
                                                       logits=logits)
    grad = tape.gradient(sources=sop.trainable_variables, target=loss)
    self.assertTrue(grad is not None)
Example #5
def test_fwd_pass_connections_and_gradient():
    batch_size, width, height, rgb, sample_size = 64, 14, 28, 1, 1
    hyper = {
        'width_height': (width, height, 1),
        'model_type': 'GS',
        'batch_size': batch_size,
        'units_per_layer': 240,
        'temp': tf.constant(0.1)
    }
    shape = (batch_size, width, height, rgb)
    x_upper, x_lower = create_upper_and_lower_dummy_data(shape=shape)
    sop = SOP(hyper=hyper)
    with tf.GradientTape() as tape:
        logits = sop.call(x_upper=x_upper)
        x_lower = tf.reshape(x_lower, x_lower.shape + (sample_size, ))
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=x_lower,
                                                       logits=logits)
    grad = tape.gradient(sources=sop.trainable_variables, target=loss)
    print('\nTEST: Forward pass and gradient computation')
    assert grad is not None
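
The only difference from Example #4 is the reshape of x_lower, which appends a sample dimension (sample_size = 1) so the labels line up with logits that presumably carry that extra axis; here x_lower goes from (64, 14, 28, 1) to (64, 14, 28, 1, 1).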
Example #6
def test_optimizer_step(self):
    batch_size = 64
    width = 14
    height = 28
    hyper = {
        'width_height': (width, height, 1),
        'model_type': 'IGR',
        'batch_size': batch_size,
        'learning_rate': 0.001,
        'temp': tf.constant(0.1)
    }
    shape = (batch_size, width, height, 1)
    x_upper, x_lower = create_upper_and_lower(shape=shape)
    sop = SOP(hyper=hyper)
    optimizer = tf.keras.optimizers.Adam(
        learning_rate=hyper['learning_rate'])
    sop_opt = SOPOptimizer(model=sop, optimizer=optimizer)
    gradients, loss = sop_opt.compute_gradients_and_loss(x_upper=x_upper,
                                                         x_lower=x_lower)
    sop_opt.apply_gradients(gradients=gradients)
    self.assertTrue(gradients is not None)
Example #7
from Utils.load_data import load_mnist_sop_data
from Models.SOP import SOP
from Models.SOPOptimizer import viz_reconstruction
import tensorflow as tf

model_type = 'GS'
hyper = {
    'width_height': (14, 28, 1),
    'model_type': model_type,
    'batch_size': 64,
    'learning_rate': 0.0003,
    'epochs': 100,
    'iter_per_epoch': 937,
    'temp': tf.constant(0.67)
}
data = load_mnist_sop_data(batch_n=hyper['batch_size'], epochs=hyper['epochs'])
train, test = data
model = SOP(hyper=hyper)
results_file = './Log/model_weights_GS.h5'
shape = (hyper['batch_size'], ) + hyper['width_height']
model.build(input_shape=shape)
model.load_weights(filepath=results_file)
for x_test in test.take(10):
    images = x_test  # keep the last of the first 10 test batches for visualization

viz_reconstruction(test_image=images, model=model)
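
To train from scratch instead of loading saved weights, a minimal training-loop sketch based on the SOPOptimizer methods shown in Examples #1 and #6 could look like the following. The import path for SOPOptimizer is assumed to match the viz_reconstruction import above, and the split of each batch into upper and lower halves is an assumption about the data pipeline; the project's actual run script may differ.

from Models.SOPOptimizer import SOPOptimizer

optimizer = tf.keras.optimizers.Adam(learning_rate=hyper['learning_rate'])
sop_opt = SOPOptimizer(model=model, optimizer=optimizer)
for epoch in range(hyper['epochs']):
    for x in train:
        # Assumption: each batch holds full 28x28 images that get split into
        # an upper and a lower half of 14 rows each.
        x_upper, x_lower = x[:, :14, :, :], x[:, 14:, :, :]
        gradients, loss = sop_opt.compute_gradients_and_loss(x_upper=x_upper,
                                                             x_lower=x_lower)
        sop_opt.apply_gradients(gradients=gradients)
    print('Epoch', epoch, 'mean loss', float(tf.reduce_mean(loss)))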