def runOutputLayerTests(self):
    tests.test_output(output)
    tests.test_conv_net(conv_net)
    print("Model OutputLayerTests Ran Successfully")
def test_implementation():
    tf.reset_default_graph()
    tests.test_nn_image_inputs(neural_net_image_input)
    tests.test_nn_label_inputs(neural_net_label_input)
    tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
    tests.test_con_pool(conv2d_maxpool)
    tests.test_flatten(flatten)
    tests.test_fully_conn(fully_conn)
    tests.test_output(output)

    build_cnn()

    tests.test_conv_net(conv_net)
    tests.test_train_nn(train_neural_network)
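
# `build_cnn` is not defined in this excerpt. A minimal sketch of what it
# presumably assembles, assuming the CIFAR-10 shapes used in this project
# (32x32x3 images, 10 classes) and the placeholder helpers tested above:
def build_cnn():
    # Expose the graph tensors to the surrounding module (assumed convention)
    global x, y, keep_prob, logits, cost, optimizer, accuracy
    x = neural_net_image_input((32, 32, 3))
    y = neural_net_label_input(10)
    keep_prob = neural_net_keep_prob_input()
    # Same graph as the "Model" cell below: forward pass, loss, optimizer, accuracy
    logits = tf.identity(conv_net(x, keep_prob), name='logits')
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
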
def run_tests():

    import problem_unittests as t

    t.test_folder_path(cifar10_dataset_folder_path)
    t.test_normalize(normalize)
    t.test_one_hot_encode(one_hot_encode)
    t.test_nn_image_inputs(neural_net_image_input)
    t.test_nn_label_inputs(neural_net_label_input)
    t.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
    t.test_con_pool(conv2d_maxpool)
    t.test_flatten(flatten)
    t.test_fully_conn(fully_conn)
    t.test_output(output)
    t.test_conv_net(conv_net)
    t.test_train_nn(train_neural_network)
# Model
logits = conv_net(x, keep_prob)

# Name logits Tensor, so that it can be loaded from disk after training
logits = tf.identity(logits, name='logits')

# Loss and Optimizer
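# Note: softmax_cross_entropy_with_logits applies softmax internally,
# so `logits` must be the raw, unscaled class scores.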
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)

# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')

tests.test_conv_net(conv_net)


# ## Train the Neural Network
# ### Single Optimization
# Implement the function `train_neural_network` to do a single optimization.  The optimization should use `optimizer` to optimize in `session` with a `feed_dict` of the following:
# * `x` for image input
# * `y` for labels
# * `keep_prob` for keep probability for dropout
#
# This function will be called for each batch, so `tf.global_variables_initializer()` has already been called.
#
# Note: Nothing needs to be returned. This function is only optimizing the neural network.

# In[13]:
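
# A minimal sketch of `train_neural_network` following the spec above,
# assuming the project's usual argument order (session, optimizer,
# keep probability, feature batch, label batch). The single optimization
# step is one `session.run` call feeding x, y, and keep_prob:
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
    """Run one optimization step over a batch of features and labels."""
    session.run(optimizer, feed_dict={
        x: feature_batch,
        y: label_batch,
        keep_prob: keep_probability})
    # Nothing is returned; this only applies gradients for one batch.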