Example #1
import tensorflow as tf
import problem_unittests as tests


def test_implementation():
    # Reset the default graph so repeated test runs start from a clean state
    tf.reset_default_graph()
    tests.test_nn_image_inputs(neural_net_image_input)
    tests.test_nn_label_inputs(neural_net_label_input)
    tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
    tests.test_con_pool(conv2d_maxpool)
    tests.test_flatten(flatten)
    tests.test_fully_conn(fully_conn)
    tests.test_output(output)

    build_cnn()

    tests.test_conv_net(conv_net)
    tests.test_train_nn(train_neural_network)
Example #2

def run_tests():

    import problem_unittests as t

    t.test_folder_path(cifar10_dataset_folder_path)
    t.test_normalize(normalize)
    t.test_one_hot_encode(one_hot_encode)
    t.test_nn_image_inputs(neural_net_image_input)
    t.test_nn_label_inputs(neural_net_label_input)
    t.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
    t.test_con_pool(conv2d_maxpool)
    t.test_flatten(flatten)
    t.test_fully_conn(fully_conn)
    t.test_output(output)
    t.test_conv_net(conv_net)
    t.test_train_nn(train_neural_network)
Example #3
    """
    Optimize the session on a batch of images and labels
    : session: Current TensorFlow session
    : optimizer: TensorFlow optimizer function
    : keep_probability: keep probability
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    """
    # Run a single optimization step; x, y and keep_prob are the global
    # placeholders created earlier in the notebook
    session.run(optimizer, feed_dict={x: feature_batch, y: label_batch, keep_prob: keep_probability})

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_train_nn(train_neural_network)


# ### Show Stats
# Implement the function `print_stats` to print loss and validation accuracy.  Use the global variables `valid_features` and `valid_labels` to calculate validation accuracy.  Use a keep probability of `1.0` to calculate the loss and validation accuracy.

# In[ ]:


def print_stats(session, feature_batch, label_batch, cost, accuracy):
    """
    Print information about loss and validation accuracy
    : session: Current TensorFlow session
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    : cost: TensorFlow cost function
    """
    Optimize the session on a batch of images and labels
    : session: Current TensorFlow session
    : optimizer: TensorFlow optimizer function
    : keep_probability: keep probability
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    """
    # TODO: Implement Function
    return session.run(optimizer, feed_dict={x:feature_batch,y:label_batch,keep_prob:keep_probability})


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_train_nn(train_neural_network)


# ### Show Stats
# Implement the function `print_stats` to print loss and validation accuracy.  Use the global variables `valid_features` and `valid_labels` to calculate validation accuracy.  Use a keep probability of `1.0` to calculate the loss and validation accuracy.

# In[14]:


def print_stats(session, feature_batch, label_batch, cost, accuracy):
    """
    Print information about loss and validation accuracy
    : session: Current TensorFlow session
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    : cost: TensorFlow cost function
def runTrainingTests():
    # Re-run the training unit test and report success
    tests.test_train_nn(train_neural_network)
    print("Model training tests ran successfully")

logits = conv_net(x, keep_prob, train_phase)

# Name logits Tensor, so that it can be loaded from disk after training
logits = tf.identity(logits, name='logits')

# Loss and Optimizer
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)

# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')

tests.test_conv_net(conv_net)
tests.test_train_nn(train_neural_network)
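
# Because logits was wrapped in tf.identity(name='logits') above, it can be
# pulled back out of a restored graph by name. A minimal sketch, assuming a
# checkpoint was previously saved to './savemodel' (that path is an
# assumption, not part of the original code):
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # import_meta_graph recreates the saved graph structure and returns a Saver
    loader = tf.train.import_meta_graph('./savemodel.meta')
    loader.restore(sess, './savemodel')
    loaded_logits = loaded_graph.get_tensor_by_name('logits:0')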

import os
directory = 'training_progress'
if not os.path.exists(directory):
    os.makedirs(directory)

#%% Tune Parameters and run on a single batch of the data
epochs = 1
batch_size = 64 * 4
keep_probability = 0.5

print('Checking the Training on a Single Batch...')
n_batches = 1
shuffle_data = True
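
# A minimal sketch of the single-batch training run the settings above feed
# into. helper.load_preprocess_training_batch and the checkpoint name are
# assumptions based on the surrounding project layout, not definitive code:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(epochs):
        for batch_i in range(1, n_batches + 1):
            for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
                train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
            print('Epoch {:>2}, Batch {}:  '.format(epoch + 1, batch_i), end='')
            print_stats(sess, batch_features, batch_labels, cost, accuracy)
    # Persist the trained model into the training_progress directory
    saver = tf.train.Saver()
    saver.save(sess, os.path.join(directory, 'image_classification'))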