Example #1
def show_maps():
    layer = model.h_conv  # switch to e.g. model.h_conv5 to inspect a deeper layer
    data = Data(1)
    # print(data._y.shape)
    # print(data._X.shape)
    batch_size = 40  # index of the image to visualize, taken from test_data/X.npy
    data.next_batch(batch_size)           # skip the first batch_size images
    batch = data.next_batch(batch_size)   # then use the first image of this batch
    feature_maps = sess.run(layer,
                            feed_dict={
                                model.x: batch[0],
                                model.y_: batch[1],
                                model.keep_prob: 1.0  # disable dropout for inference
                            })
    plotFeatureMaps(feature_maps)
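
plotFeatureMaps is not defined in this snippet. A minimal sketch of what such a helper might look like, assuming feature_maps is a numpy array of shape [batch, height, width, channels] and using matplotlib; the grid layout and the choice to plot only the first image in the batch are assumptions, not the original implementation:

import math
import matplotlib.pyplot as plt

def plotFeatureMaps(feature_maps):
    # Plot every channel of the first image in the batch as a grayscale map.
    maps = feature_maps[0]        # assumed shape: [height, width, channels]
    num_channels = maps.shape[-1]
    cols = 8
    rows = math.ceil(num_channels / cols)
    _, axes = plt.subplots(rows, cols, figsize=(2 * cols, 2 * rows))
    for idx, ax in enumerate(axes.flat):
        ax.axis('off')
        if idx < num_channels:
            ax.imshow(maps[:, :, idx], cmap='gray')
    plt.show()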
Example #2
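Neither train_vars nor L2NormConst is defined in this excerpt, and the bare train_step.run(...) / loss.eval(...) calls below require a default session (e.g. one created with tf.InteractiveSession()). Typical definitions consistent with how the names are used here; the 0.001 value is an assumption, not taken from the source:

train_vars = tf.trainable_variables()  # regularize every trainable variable
L2NormConst = 0.001                    # weight of the L2 penalty term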
# Mean squared error between targets and predictions, plus L2 weight decay
loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y))) \
    + tf.add_n([tf.nn.l2_loss(v) for v in train_vars]) * L2NormConst
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

sess.run(tf.global_variables_initializer())

# Training loop variables
epochs = 100
batch_size = 50
num_samples = data.num_examples
step_size = int(num_samples / batch_size)

for epoch in range(epochs):
    for i in range(step_size):
        batch = data.next_batch(batch_size)

        train_step.run(feed_dict={
            model.x: batch[0],
            model.y_: batch[1],
            model.keep_prob: 0.8  # dropout keeps 80% of activations during training
        })

        if i % 10 == 0:
            loss_value = loss.eval(feed_dict={
                model.x: batch[0],
                model.y_: batch[1],
                model.keep_prob: 1.0
            })
            print("epoch: %d step: %d loss: %g" %
                  (epoch, epoch * batch_size + i, loss_value))
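
All three examples assume a data loader exposing num_examples and a next_batch(batch_size) method that returns an (images, labels) tuple. A minimal numpy sketch of that interface; the wrap-around behavior is an assumption, and the constructor differs from the Data(1) call in Example #1, so this only mirrors the interface:

import numpy as np

class Data(object):
    def __init__(self, X, y):
        self._X = np.asarray(X)
        self._y = np.asarray(y)
        self.num_examples = len(self._X)
        self._index = 0

    def next_batch(self, batch_size):
        # Gather the next batch_size samples, wrapping at the end of the data.
        idx = [(self._index + i) % self.num_examples for i in range(batch_size)]
        self._index = (self._index + batch_size) % self.num_examples
        return self._X[idx], self._y[idx]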
Example #3
sess.run(tf.global_variables_initializer())

# Training loop variables
epochs = 30
batch_size = 64
num_train_samples = train_data.num_examples  # roughly 6500 train / 520 test samples
num_train_batches = int(num_train_samples / batch_size)  # batches per epoch
step_size = int(num_train_samples / batch_size)  # identical to num_train_batches

train_loss_values = []
test_loss_values = []

for epoch in range(epochs):
    for i in range(num_train_batches):
        count = epoch * num_train_batches + i  # batches seen
        train_batch = train_data.next_batch(batch_size)
        test_batch = test_data.next_batch(batch_size)
        train_step.run(
            feed_dict={
                model.x: train_batch[0],
                model.y_: train_batch[1],
                model.keep_prob: 0.7
            })
        # test_loss is not defined in this excerpt; presumably a loss tensor
        # defined analogously to loss in Example #2
        test_loss_value = test_loss.eval(feed_dict={
            model.x: test_batch[0],
            model.y_: test_batch[1],
            model.keep_prob: 1.0
        })
        if count % 100 == 0:
            train_loss_value = loss.eval(
                feed_dict={