Example #1
File: eval.py Project: dhanushgowda/CNN_Dr
def run_prediction(data):
    ret = []
    with tf.Graph().as_default():
        images_pl = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 512, 512, 3])
        labels_pl = tf.placeholder(tf.int32, shape=[BATCH_SIZE])

        logits = graph.inference(images_pl)

        saver = tf.train.Saver(tf.trainable_variables())

        init = tf.initialize_all_variables()
        sess = tf.Session()

        # Initialize variables first, then restore the trained weights;
        # running init after restore would overwrite the restored values.
        sess.run(init)
        saver.restore(sess, "model.ckpt")
        print("Model restored.")

        images, labels = (data.images, data.labels)

        for example in range(data.num_examples):
            # images, labels = data.next_batch(BATCH_SIZE)

            # A single example is fed, which matches the placeholder shape
            # only when BATCH_SIZE == 1.
            feed_dict = {
                images_pl: [images[example]],
                labels_pl: [labels[example]]
            }

            logits_op, lab = sess.run([logits, labels_pl], feed_dict=feed_dict)

            # print logits_op[0], logits_op[0].argmax(), lab, data.labels[example]
            ret.append([logits_op[0].argmax(), lab])

        return ret
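
For reference, a minimal usage sketch (not part of the original file): it reduces the [predicted_class, label] pairs returned by run_prediction above to an accuracy. The label comes back as a one-element array because it is round-tripped through labels_pl.

results = run_prediction(data)
correct = sum(1 for predicted, label in results
              if int(predicted) == int(label[0]))
print('accuracy: %.3f' % (correct / float(len(results))))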
Example #2
def run_training(data):
    with tf.Graph().as_default():
        global_step = tf.Variable(0, name='global_step', trainable=False)
        images_pl = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 32, 32, 3])
        labels_pl = tf.placeholder(tf.int32, shape=[BATCH_SIZE])

        logits = graph.inference(images_pl)
        loss = graph.loss(logits, labels_pl)
        train_op = graph.train(loss, global_step)
        eval_correct = graph.evaluate(logits, labels_pl)

        summary_op = tf.merge_all_summaries()
        saver = tf.train.Saver(tf.all_variables())

        init = tf.initialize_all_variables()
        sess = tf.Session()

        sess.run(init)

        summary_writer = tf.train.SummaryWriter(SUMMARY_DIR, sess.graph)

        for step in range(N_EPOCH * (DS_SIZE // BATCH_SIZE)):
            start_time = time.time()
            feed_dict = fill_feed_dict(data.train, images_pl, labels_pl)
            _, loss_val = sess.run([train_op, loss], feed_dict=feed_dict)
            duration = time.time() - start_time

            assert not np.isnan(loss_val), 'Model diverged with loss = NaN'

            if step % 10 == 0 or step == N_EPOCH * (DS_SIZE // BATCH_SIZE) - 1:
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_val, duration))
                if step > 0:
                    summary_str = sess.run(summary_op, feed_dict)
                    summary_writer.add_summary(summary_str, step)
                    summary_writer.flush()

            if step > 0:
                # Evaluate more often early in training, then every 1000 steps
                # and at the final step.
                if ((step < 1000 and step % 200 == 0) or step % 1000 == 0 or
                        step == N_EPOCH * (DS_SIZE // BATCH_SIZE) - 1):
                    print('Training Data Eval:')
                    do_eval(sess, eval_correct, images_pl, labels_pl, data.train)

                    print('Validation Data Eval:')
                    do_eval(sess, eval_correct, images_pl, labels_pl, data.validation)

            if step == N_EPOCH * (DS_SIZE // BATCH_SIZE) - 1:
                print('Test Data Eval:')
                do_eval(sess, eval_correct, images_pl, labels_pl, data.test)

            # Save the model checkpoint periodically. Using the directory as
            # the save path prefix produces paths such as "checkpoints/-4500",
            # which the restore calls in the later examples expect.
            if step % 1000 == 0 or step == N_EPOCH * (DS_SIZE // BATCH_SIZE) - 1:
                saver.save(sess, CHECKPOINT_DIR, global_step=step)
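
The training loop above relies on a fill_feed_dict helper that is not shown in this excerpt. A plausible sketch, assuming the data sets follow the usual next_batch(batch_size) interface (the commented-out debug line in Example #3 suggests they do) and the same module-level BATCH_SIZE constant:

def fill_feed_dict(data_set, images_pl, labels_pl):
    # Pull the next batch and map it onto the two placeholders.
    images_feed, labels_feed = data_set.next_batch(BATCH_SIZE)
    return {images_pl: images_feed, labels_pl: labels_feed}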
Example #3
def run_training(data):
    with tf.Graph().as_default():
        images_pl = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 512, 512, 3])
        labels_pl = tf.placeholder(tf.int32, shape=[BATCH_SIZE])

        logits = graph.inference(images_pl)

        loss = graph.loss(logits, labels_pl)
        train_op = graph.train(loss, 0.0001)
        eval_correct = graph.evaluate(logits, labels_pl)
        saver = tf.train.Saver(tf.trainable_variables())
        summary_op = tf.merge_all_summaries()

        init = tf.initialize_all_variables()
        sess = tf.Session()

        sess.run(init)
        # c,d = data.train.next_batch(BATCH_SIZE)
        # a = sess.run(logits,feed_dict={images_pl: c})
        # print a
        summary_writer = tf.train.SummaryWriter("summary", sess.graph)

        for step in range(N_EPOCH * (DS_SIZE // BATCH_SIZE)):
            start_time = time.time()
            feed_dict = fill_feed_dict(data.train, images_pl, labels_pl)
            _, loss_val = sess.run([train_op, loss], feed_dict=feed_dict)
            duration = time.time() - start_time

            assert not np.isnan(loss_val), 'Model diverged with loss = NaN'

            if step % 10 == 0 or step == N_EPOCH * (DS_SIZE // BATCH_SIZE) - 1:
                print('Step %d: loss = %.2f (%.3f sec)' %
                      (step, loss_val, duration))
                if step > 0:
                    summary_str = sess.run(summary_op, feed_dict)
                    summary_writer.add_summary(summary_str, step)
                    summary_writer.flush()

            if step % 100 == 0 or step == N_EPOCH * (DS_SIZE //
                                                     BATCH_SIZE) - 1:
                save_path = saver.save(sess, "model.ckpt")
                print("Model saved in file: %s" % save_path)
                print('Training Data Eval:')
                do_eval(sess, eval_correct, images_pl, labels_pl, data.train)
                print('Validation Data Eval:')
                do_eval(sess, eval_correct, images_pl, labels_pl,
                        data.validation)
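
do_eval is likewise not shown. A sketch in the style of the classic TF 0.x tutorials, assuming eval_correct returns the number of correct predictions in a batch and that the data set exposes num_examples; the project's actual helper may differ:

def do_eval(sess, eval_correct, images_pl, labels_pl, data_set):
    true_count = 0
    steps_per_epoch = data_set.num_examples // BATCH_SIZE
    num_examples = steps_per_epoch * BATCH_SIZE
    for _ in range(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set, images_pl, labels_pl)
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    print('  Num examples: %d  Num correct: %d  Precision: %.4f' %
          (num_examples, true_count, true_count / float(num_examples)))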
Example #4
def run_evaluation(data):
    with tf.Graph().as_default():
        images_pl = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 32, 32, 3])
        labels_pl = tf.placeholder(tf.int32, shape=[BATCH_SIZE])

        logits = graph.inference(images_pl)
        eval_correct = graph.evaluate(logits, labels_pl)

        saver = tf.train.Saver(tf.all_variables())

        # init = tf.initialize_all_variables()
        sess = tf.Session()

        saver.restore(sess, "checkpoints/-4500")
        print("Model restored.")

        # sess.run(init)
        do_eval(sess, eval_correct, images_pl, labels_pl, data.test)
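
How graph.evaluate builds eval_correct is also not part of the excerpt. One common TF 0.x construction, offered here as an assumption rather than the project's verified code, counts how many examples have the true label as the top-1 prediction:

def evaluate(logits, labels):
    correct = tf.nn.in_top_k(logits, labels, 1)        # bool per example
    return tf.reduce_sum(tf.cast(correct, tf.int32))   # correct count in the batch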
Example #5
def run_prediction(data, file_names):
    ret = []
    with tf.Graph().as_default():
        images_pl = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 32, 32, 3])
        # labels_pl = tf.placeholder(tf.int32, shape=[BATCH_SIZE])

        logits = graph.inference(images_pl)

        saver = tf.train.Saver(tf.all_variables())

        # init = tf.initialize_all_variables()
        sess = tf.Session()

        saver.restore(sess, "checkpoints/-4500")
        print("Model restored.")

        # sess.run(init)

        images, labels = (data.images, data.labels)

        for example in range(data.num_examples):
            feed_dict = {
                images_pl: [images[example]],
            }

            logits_op = sess.run(logits, feed_dict=feed_dict)

            predicted = logits_op[0].argmax()

            # if int(predicted) == 0:
            #     predicted = 10

            if int(labels[example]) == 10:
                labels[example] = 0

            print(logits_op[0], predicted, labels[example])
            ret.append([predicted, labels[example], file_names[example]])

        return ret
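
Because this variant also returns the file name for each example, a small, purely illustrative helper (not in the original project) can persist the triples for later inspection:

import csv

def write_predictions(rows, path='predictions.csv'):
    # rows are the [predicted, label, file_name] triples from run_prediction.
    with open(path, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['predicted', 'label', 'file_name'])
        writer.writerows(rows)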
Example #6
def run_training():

    training_dataset, test_dataset = input_data.get_datasets()
    training_dataqueue = input_data.make_dataqueue(training_dataset)

    with tf.Graph().as_default():

        optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)

        images_pl, labels_pl, keep_prob_pl = placeholder_inputs(training_dataset.batch_size)
        logits      = graph.inference(images_pl, keep_prob_pl)
        loss        = graph.loss(logits, labels_pl)
        train_op    = graph.training(optimizer, loss)
        evaluation  = graph.evaluation(logits, labels_pl)

        saver = tf.train.Saver()

        sess = tf.Session()
        init = tf.initialize_all_variables()
        sess.run(init)

        def save_model(step, eval_value):
            saver.save(
                sess,
                '/home/hannah/data/mcam-artifacts/saved-model-v%s-s%d-e%f'  \
                    % (VERSION, step, eval_value)
            )

        step = 0
        eval_value = 0.0  # defined up front so save_model works even if the queue is empty immediately
        trace = []
        t0 = time.time()
        last_checkpoint = t0
        last_print      = t0
        last_plot       = t0
        while True:
            step += 1
            start_time = time.time()

            if training_dataqueue.empty():
                print('Training queue empty! Waiting...')
            batch = training_dataqueue.get()
            if batch is None:
                print('Training queue exhausted')
                save_model(step, eval_value)
                break

            images_feed, labels_feed = batch
            feed_dict = {
                images_pl: images_feed,
                labels_pl: labels_feed,
                keep_prob_pl: DROPOUT
            }
            _, loss_value, eval_value = sess.run(
                [train_op, loss, evaluation],
                feed_dict=feed_dict
            )

            end_time = time.time()
            trace.append((step, loss_value, eval_value))

            if end_time - last_print > 2:
                print('Step %d:  cross-entropy = %.2f  accuracy = %.2f  time = %.2f'
                      % (step, loss_value, eval_value, end_time - start_time))
                last_print = time.time()

            #### DEBUG
            if end_time - last_plot > 10:
                plt.figure('trace_loss')
                plt.clf()
                plt.plot([s for s,l,e in trace], [l for s,l,e in trace])
                plt.title('Cross-entropy')
                plt.savefig('debug/loss.png')

                plt.figure('trace_eval')
                plt.clf()
                plt.plot([s for s,l,e in trace], [e for s,l,e in trace])
                plt.title('Accuracy')
                plt.savefig('debug/eval.png')

                last_plot = time.time()
            ####

            if end_time - last_checkpoint > 5*60:
                print('\n ==== Saving checkpoint ==== \n')
                save_model(step, eval_value)
                last_checkpoint = time.time()
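
placeholder_inputs is not shown in this excerpt. A sketch of what the loop expects it to return, assuming 32x32 RGB inputs as in the earlier examples (the real helper may use a different image size):

def placeholder_inputs(batch_size):
    images_pl = tf.placeholder(tf.float32, shape=[batch_size, 32, 32, 3])
    labels_pl = tf.placeholder(tf.int32, shape=[batch_size])
    keep_prob_pl = tf.placeholder(tf.float32)  # dropout keep probability
    return images_pl, labels_pl, keep_prob_pl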
Example #7
def run_training():

    training_dataset, test_dataset = input_data.get_datasets()
    training_dataqueue = input_data.make_dataqueue(training_dataset)

    with tf.Graph().as_default():

        optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)

        images_pl, labels_pl, keep_prob_pl = placeholder_inputs(
            training_dataset.batch_size)
        logits = graph.inference(images_pl, keep_prob_pl)
        loss = graph.loss(logits, labels_pl)
        train_op = graph.training(optimizer, loss)
        evaluation = graph.evaluation(logits, labels_pl)

        saver = tf.train.Saver()

        sess = tf.Session()
        init = tf.initialize_all_variables()
        sess.run(init)

        def save_model(step, eval_value):
            saver.save(
                sess,
                '/home/hannah/data/mcam-artifacts/saved-model-v%s-s%d-e%f'  \
                    % (VERSION, step, eval_value)
            )

        step = 0
        eval_value = 0.0  # defined up front so save_model works even if the queue is empty immediately
        trace = []
        t0 = time.time()
        last_checkpoint = t0
        last_print = t0
        last_plot = t0
        while True:
            step += 1
            start_time = time.time()

            if training_dataqueue.empty():
                print('Training queue empty! Waiting...')
            batch = training_dataqueue.get()
            if batch is None:
                print('Training queue exhausted')
                save_model(step, eval_value)
                break

            images_feed, labels_feed = batch
            feed_dict = {
                images_pl: images_feed,
                labels_pl: labels_feed,
                keep_prob_pl: DROPOUT
            }
            _, loss_value, eval_value = sess.run([train_op, loss, evaluation],
                                                 feed_dict=feed_dict)

            end_time = time.time()
            trace.append((step, loss_value, eval_value))

            if end_time - last_print > 2:
                print('Step %d:  cross-entropy = %.2f  accuracy = %.2f  time = %.2f'
                      % (step, loss_value, eval_value, end_time - start_time))
                last_print = time.time()

            #### DEBUG
            if end_time - last_plot > 10:
                plt.figure('trace_loss')
                plt.clf()
                plt.plot([s for s, l, e in trace], [l for s, l, e in trace])
                plt.title('Cross-entropy')
                plt.savefig('debug/loss.png')

                plt.figure('trace_eval')
                plt.clf()
                plt.plot([s for s, l, e in trace], [e for s, l, e in trace])
                plt.title('Accuracy')
                plt.savefig('debug/eval.png')

                last_plot = time.time()
            ####

            if end_time - last_checkpoint > 5 * 60:
                print('\n ==== Saving checkpoint ==== \n')
                save_model(step, eval_value)
                last_checkpoint = time.time()
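
input_data.make_dataqueue is not shown either. From the way both loops use it, it must return a queue of (images, labels) batches terminated by a None sentinel once the data set is exhausted. A rough sketch under those assumptions; the loader thread and the batch iterator are guesses, not the project's actual implementation:

import threading
from queue import Queue

def make_dataqueue(dataset, max_batches_in_flight=8):
    dataqueue = Queue(maxsize=max_batches_in_flight)

    def loader():
        for batch in dataset:   # assumed to yield (images, labels) tuples
            dataqueue.put(batch)
        dataqueue.put(None)     # sentinel: "Training queue exhausted"

    thread = threading.Thread(target=loader)
    thread.daemon = True
    thread.start()
    return dataqueue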