def evaluate_with_evaluation_data_set(session, accuracy, x_placeholder, y_placeholder):
    """Load the held-out evaluation split and score the model on it.

    Reads the evaluation dataset via the project loader, then delegates the
    actual accuracy computation to evaluate().

    Args:
        session: an open TF session with the model's variables restored.
        accuracy: the accuracy tensor/op to run.
        x_placeholder: placeholder fed with image batches.
        y_placeholder: placeholder fed with label batches.

    Returns:
        Whatever evaluate() returns (the aggregate accuracy value).
    """
    eval_cfg = cl.LoaderDataSetConfig()
    eval_cfg.load_evaluate_dataset = True

    print("Reading evaluation dataset")
    labels, images = cl.load_data_set(config=eval_cfg)
    num_images, num_labels = len(images), len(labels)
    print("Read [%s] images and [%s] labels." % (num_images, num_labels))

    return evaluate(session, accuracy, x_placeholder, y_placeholder, images, labels)
    def test_model_static_multiple_batches(self):
        """Run one optimizer step on each of two consecutive mini-batches.

        Loads exactly two batches worth of records, builds the forward /
        cost / optimizer graph once, then feeds each batch in turn.

        Fix: the original duplicated the run-and-report code verbatim for
        the second batch; both steps are now driven by one loop over slice
        bounds. The hard-coded class count 10 is replaced by cm.NUM_CLASSES
        for consistency with the rest of the file.
        """
        print("========================= test_model_static_batches")
        tf.reset_default_graph()

        config = cl.LoaderDataSetConfig()
        config.max_records = cm.BATCH_SIZE * 2
        labels, images = cl.load_data_set(config=config)
        print("Read [%s] images and [%s] labels." % (len(images), len(labels)))
        input_placeholder, labels_placeholder = cm.create_placeholder()

        model = cm.forward_propagation(input=input_placeholder,
                                       parameters=cm.initialize_parameters())
        # cm.NUM_CLASSES (== 10 for CIFAR-10) instead of a magic number.
        cost = cm.compute_cost(model, tf.one_hot(labels_placeholder, cm.NUM_CLASSES))
        optimizer = cm.backward_propagation(cost)

        with tf.Session() as session:
            assert (model is not None)
            session.run(tf.global_variables_initializer())

            # One optimizer step per mini-batch, batches taken back-to-back.
            for batch_index in range(2):
                start = batch_index * cm.BATCH_SIZE
                end = start + cm.BATCH_SIZE
                _, minibatch_cost = session.run(
                    [optimizer, cost],
                    feed_dict={
                        input_placeholder: images[start:end],
                        labels_placeholder: labels[start:end]
                    })

                print("Convolution result: ", minibatch_cost)
                print("labels: ", np.array(labels).shape)
                print(labels)
                print("========================= test_model_static")
    def test_model_static(self):
        """Forward-propagate a single record and print the raw model output.

        Loads one record, runs forward propagation on it directly (no
        placeholders), and dumps the result and its shape.

        Fix: the original created tf.Session() without ever closing it,
        leaking the session; it is now managed with a ``with`` block,
        matching the style of the other tests in this file.
        """
        print("========================= test_model_static")
        tf.reset_default_graph()
        config = cl.LoaderDataSetConfig()
        config.max_records = 1
        labels, images = cl.load_data_set(config=config)
        print("Read [%s] images and [%s] labels." % (len(images), len(labels)))

        model = cm.forward_propagation(input=images,
                                       parameters=cm.initialize_parameters())

        assert (model is not None)

        # Session is closed automatically when the block exits.
        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            model_result = session.run(model)

        print("Convolution result: ", model_result)
        print("Result with shape: ", model_result.shape)
        print("labels: ", np.array(labels).shape)
        print(labels)
        print("========================= test_model_static")
def cifar10_train_main():
    """Build the CIFAR-10 graph, train it, and plot cost/accuracy curves.

    Loads the full training set, wires up forward propagation, a softmax
    cross-entropy cost, a gradient-descent optimizer and an accuracy op,
    then hands everything to train() and plots the recorded curves.
    """
    print("========================= train")
    tf.reset_default_graph()
    tf.set_random_seed(1)

    labels, images = cl.load_data_set()
    print("Read [%s] images and [%s] labels." % (len(images), len(labels)))

    x_placeholder, y_placeholder = cm.create_placeholder()
    parameters = cm.initialize_parameters()
    one_hot_labels = tf.one_hot(y_placeholder, cm.NUM_CLASSES)

    logits = cm.forward_propagation(input=x_placeholder, parameters=parameters)

    # Mean softmax cross-entropy over the batch, minimized by plain SGD.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_labels)
    cost_fnc = tf.reduce_mean(cross_entropy)
    train_fnc = tf.train.GradientDescentOptimizer(learning_rate=LEARNING_RATE).minimize(cost_fnc)

    # Accuracy: fraction of argmax predictions matching the integer labels.
    predicted_class = tf.cast(tf.argmax(logits, 1), tf.int32)
    accuracy_fnc = tf.reduce_mean(tf.cast(tf.equal(predicted_class, y_placeholder), tf.float32))

    costs_values, accuracy_values = train(images, labels, x_placeholder, y_placeholder, cost_fnc, train_fnc, accuracy_fnc)

    plot(costs_values, 'values', 'cost', '==== cost ===')
    plot(accuracy_values, 'values', 'accuracy', '==== accuracy ===')
# Example #5
def cifar10_evaluate_main():
    """Restore the trained model from disk and report evaluation accuracy.

    Loads the evaluation split, rebuilds the inference graph, restores the
    latest checkpoint, and prints the accuracy computed by evaluate().
    """
    config = cl.LoaderDataSetConfig()
    config.load_evaluate_dataset = True

    print("Reading evaluation dataset")
    labels, images = cl.load_data_set(config=config)
    print("Read [%s] images and [%s] labels." % (len(images), len(labels)))

    x_placeholder, y_placeholder = cm.create_placeholder()
    params = cm.initialize_parameters()
    logits = cm.forward_propagation(input=x_placeholder, parameters=params)

    # Accuracy: fraction of argmax predictions matching the integer labels.
    predicted_class = tf.cast(tf.argmax(logits, 1), tf.int32)
    accuracy_fnc = tf.reduce_mean(tf.cast(tf.equal(predicted_class, y_placeholder), tf.float32))

    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Restore variables from disk.
        checkpoint_path = os.path.join(project_constants.MODEL_DIR_PATH, project_constants.MODEL_PREFIX)
        saver.restore(sess, checkpoint_path)

        accuracy_eval = evaluate(sess, accuracy_fnc, x_placeholder, y_placeholder, images, labels)

        print("Accuracy (evaluation): %f" % accuracy_eval)
# Example #6
 def test_load_cifar10(self):
     cifar10_record_list = cl.load_data_set()