Example #1

import os

import tensorflow as tf

# Project-local imports assumed by this snippet (paths are not shown on
# this page): the `input` module, `model`, and `Config`. FLAGS comes from
# tf.app.flags; see the sketch after this example.
def main(_):

    # Choose between modes: train / finetune / evaluation
    if FLAGS.mode == 'train':
        summaries_dir = FLAGS.summaries_train_dir
        model_dir = FLAGS.model_train_dir
        print("\033[95m Mode selected: training from scratch... \033[0m")
    elif FLAGS.mode == 'finetune':
        summaries_dir = FLAGS.summaries_finetune_dir
        model_dir = FLAGS.model_finetune_dir
        print(
            "\033[95m Mode selected: finetuning the pre-trained model... \033[0m"
        )
    elif FLAGS.mode == 'evaluation':
        print(
            "\033[95m Mode selected: evaluating the model on the test set... \033[0m"
        )
        _evaluation_operation()
        return
    else:
        print(
            "\033[91m Unknown --mode; provide train, finetune, or evaluation. \033[0m"
        )
        return

    # Clear the summaries and model directories if they already exist
    if tf.gfile.Exists(summaries_dir):
        tf.gfile.DeleteRecursively(summaries_dir)
    tf.gfile.MakeDirs(summaries_dir)
    if tf.gfile.Exists(model_dir):
        tf.gfile.DeleteRecursively(model_dir)
    tf.gfile.MakeDirs(model_dir)

    checkpoint_path = os.path.join(model_dir, 'model.ckpt')

    sess = tf.Session()
    config = Config()
    print("\033[94m Loading the model... \033[0m")
    m = model(is_training=True, config=config)
    print("\033[94m Preparing the input stream for training... \033[0m")
    input_stream = input.input(FLAGS.data_path, config)
    print("\033[94m Initializing the network... \033[0m")
    merged = tf.summary.merge_all()
    saver = tf.train.Saver(tf.global_variables())
    train_writer = tf.summary.FileWriter(summaries_dir + '/train', sess.graph)
    test_writer = tf.summary.FileWriter(summaries_dir + '/test', sess.graph)
    init = tf.global_variables_initializer()
    sess.run(init)

    global_step = 0
    if FLAGS.mode == 'finetune':
        ckpt = tf.train.get_checkpoint_state(FLAGS.model_train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Recover the step count from the checkpoint filename
            # (e.g. model.ckpt-1500 -> 1500).
            global_step = int(
                os.path.basename(ckpt.model_checkpoint_path).split('-')[-1])
        else:
            print(
                "\033[91m Opted for finetune mode, but no pre-trained model found \033[0m"
            )
            return

    print("\033[94m Training begins... \033[0m")
    #state = sess.run(m.initial_state)
    for i in range(FLAGS.iterations):

        input_feature, input_targets = input_stream.next_batch(sess,
                                                               flag='train')
        feed_dict = {
            m.input_feature: input_feature,
            m.input_targets: input_targets
        }
        train_summary, _ = sess.run([merged, m.train_step],
                                    feed_dict=feed_dict)
        train_writer.add_summary(train_summary, i)
        train_accuracy, loss_value, learning_rate = sess.run(
            [m.evaluation_step, m.loss_mean, m.learning_rate],
            feed_dict=feed_dict)

        input_feature, input_targets = input_stream.next_batch(sess,
                                                               flag='test')
        feed_dict = {
            m.input_feature: input_feature,
            m.input_targets: input_targets
        }
        test_summary, test_accuracy = sess.run([merged, m.evaluation_step],
                                               feed_dict=feed_dict)
        test_writer.add_summary(test_summary, i)

        print(
            'Step %d:  Learning Rate = %f  Train Loss = %f  Train Accuracy = %f  Test Accuracy = %f'
            % (i, learning_rate, loss_value, train_accuracy, test_accuracy))
        if (i % FLAGS.saving_iter) == 0:
            saver.save(sess, checkpoint_path, global_step=i + global_step)
        if i % FLAGS.learning_rate_iter == 0 and i != 0:
            m.assign_learning_rate(sess,
                                   FLAGS.learning_rate_decay * learning_rate)
Example #2

import numpy as np
import tensorflow as tf

# Also assumed (project-local, paths not shown here): the `input` module,
# `model`, `Config`, `create_inception_graph`, and the FLAGS definitions
# sketched above.
def _evaluation_operation():
    if FLAGS.eval_mode == 'train':
        model_dir = FLAGS.model_train_dir
    elif FLAGS.eval_mode == 'finetune':
        model_dir = FLAGS.model_finetune_dir
    else:
        print(
            "\033[91m eval_mode not set; choose --eval_mode=train or --eval_mode=finetune \033[0m"
        )
        return

    graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
        create_inception_graph())
    sess = tf.Session()
    config = Config()
    config.batch_size = 1
    print("\033[94m Loading the model... \033[0m")
    m = model(is_training=False, config=config)
    print("\033[94m Preparing the input stream for testing... \033[0m")
    input_stream = input.input(FLAGS.data_path,
                               config,
                               image_data_tensor=jpeg_data_tensor,
                               bottleneck_tensor=bottleneck_tensor)

    saver = tf.train.Saver(tf.global_variables())
    ckpt = tf.train.get_checkpoint_state(model_dir)
    if ckpt and ckpt.model_checkpoint_path:
        print("\033[94m Loading the pre-trained weights of model... \033[0m")
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        print(
            "\033[91m Opted for evaluation mode, but no pre-trained/pre-finetuned model found \033[0m"
        )
        return

    # Per-class confusion counts at the frame level (one count per frame)
    # and at the cumulative level (one count per video, after majority-vote
    # pooling), plus the matching per-class totals used for normalization.
    dacc_frame = np.zeros([config.num_class, config.num_class])
    dacc_cumul = np.zeros([config.num_class, config.num_class])
    seqlen_frame = np.zeros([config.num_class])
    seqlen_cumul = np.zeros([config.num_class])

    total_videos = input_stream.test_total
    for i in range(input_stream.test_total):
        print(" \033[94m Currently on : %d / %d \033[0m " % (i, total_videos))
        input_feature, sequence_len, input_targets = input_stream.next_batch(
            sess, flag='eval')
        feed_dict = {
            m.input_feature: input_feature,
            m.input_targets: input_targets,
            m.sequence_len: sequence_len
        }
        final_result = sess.run([m.final_result], feed_dict=feed_dict)

        # All frames of a video share one target, so frame 0's label stands
        # in for the whole video.
        label = input_targets[0, 0]
        # Histogram of predicted classes over the frames of this video.
        temp_list = np.zeros([config.num_class])
        for j in range(sequence_len[0]):
            temp_list[final_result[0][j]] += 1

        # Frame-level confusion row, then the majority-vote (cumulative)
        # prediction for the video as a whole.
        dacc_frame[label, :] += temp_list
        seqlen_frame[label] += sequence_len[0]
        pool_result = np.argmax(temp_list)
        dacc_cumul[label, pool_result] += 1
        seqlen_cumul[label] += 1

    acc_frame = dacc_frame.trace() / sum(seqlen_frame)
    acc_cumul = dacc_cumul.trace() / sum(seqlen_cumul)
    print("\033[93m Accuracy:   Frame = %f  Cumulative = %f \033[0m " %
          (acc_frame, acc_cumul))
    for i in range(config.num_class):
        dacc_frame[i, :] /= seqlen_frame[i]
        dacc_cumul[i, :] /= seqlen_cumul[i]
    np.savetxt("delta_acc_frame.csv", np.asarray(dacc_frame), delimiter=",")
    np.savetxt("delta_acc_cumul.csv", np.asarray(dacc_cumul), delimiter=",")