Code example #1
def evaluate_10crop_3():
    val_filenames, val_labels = scene_input.list_images('validation')

    with tf.Graph().as_default() as g:
        keep_prob = tf.placeholder(tf.float32)

        images_10crop_batched, label, val_data_init_op \
            = scene_input.get_dataset_10crop_eval(val_filenames, val_labels)

        with tf.name_scope('inference'):
            conv_net = vgg.Vgg16()
            conv_net.build(images_10crop_batched, keep_prob,
                           scene_input.num_classes)
            # Combine the 10 crops of one image by taking the per-class
            # maximum of their softmax outputs.
            prob_10crop = tf.reduce_max(conv_net.get_softmax(), axis=0)

        # Debug: show the static shapes before and after reshaping.
        print("prob_10crop.shape: {}".format(prob_10crop.shape))
        print("label.shape: {}".format(label.shape))
        # tf.nn.in_top_k expects predictions of shape [batch, classes] and
        # labels of shape [batch], so reshape the single-image result.
        prob_10crop = tf.reshape(prob_10crop, [-1, scene_input.num_classes])
        label = tf.reshape(label, [-1])
        print("prob_10crop.shape: {}".format(prob_10crop.shape))
        print("label.shape: {}".format(label.shape))

        # Calculate predictions.
        top_1_op = tf.nn.in_top_k(prob_10crop, label, 1)
        top_3_op = tf.nn.in_top_k(prob_10crop, label, 3)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            scene.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Assuming model_checkpoint_path looks something like:
                #   /my-favorite-path/cifar10_train/model.ckpt-0,
                # extract global_step from it.
                # global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            else:
                raise ValueError(
                    "Cannot find checkpoint data in {}".format(checkpoint_dir))

            # accuracy on validation set.
            acc_top1, acc_top3 = get_accuracy_10crop(sess, top_1_op, top_3_op,
                                                     val_data_init_op,
                                                     keep_prob)
            print('Val: accuracy (top1){0:.4f} (top3){1:.4f}'.format(
                acc_top1, acc_top3))
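
The helper get_accuracy_10crop is not shown in this snippet. A minimal sketch of what it could look like, assuming the validation init op rewinds the 10-crop dataset and the loop runs until the iterator is exhausted (the helper name comes from the call above; the body is illustrative):

def get_accuracy_10crop(sess, top_1_op, top_3_op, val_data_init_op, keep_prob):
    # Rewind the validation pipeline, then accumulate top-1/top-3 hits
    # one image (i.e. one 10-crop batch) at a time.
    sess.run(val_data_init_op)
    num_correct_1, num_correct_3, num_samples = 0, 0, 0
    while True:
        try:
            correct_1, correct_3 = sess.run([top_1_op, top_3_op],
                                            {keep_prob: 1.0})
        except tf.errors.OutOfRangeError:
            break
        num_correct_1 += correct_1.sum()
        num_correct_3 += correct_3.sum()
        num_samples += correct_1.shape[0]
    return num_correct_1 / num_samples, num_correct_3 / num_samples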
Code example #2
def evaluate():
    # Get the list of filenames and the corresponding list of labels for training and validation
    train_filenames, train_labels = scene_input.list_images('train')
    val_filenames, val_labels = scene_input.list_images('validation')

    with tf.Graph().as_default() as g:
        keep_prob = tf.placeholder(tf.float32)

        images, labels, train_data_init_op, val_data_init_op \
            = scene_input.get_dataset(train_filenames, train_labels,
                                      val_filenames, val_labels, batch_size)

        with tf.name_scope('inference'):
            conv_net = vgg.Vgg16()
            conv_net.build(images, keep_prob, scene_input.num_classes)
            # get_softmax_linear() returns the pre-softmax activations;
            # tf.nn.in_top_k only needs a ranking, so raw logits suffice.
            logits = conv_net.get_softmax_linear()

        # Calculate predictions.
        top_1_op = tf.nn.in_top_k(logits, labels, 1)
        top_3_op = tf.nn.in_top_k(logits, labels, 3)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            scene.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Assuming model_checkpoint_path looks something like:
                #   /my-favorite-path/cifar10_train/model.ckpt-0,
                # extract global_step from it.
                # global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            else:
                raise ValueError(
                    "Cannot find checkpoint data in {}".format(checkpoint_dir))

            # accuracy on validation set.
            acc_top1, acc_top3 = get_accuracy(sess, top_1_op, top_3_op,
                                              val_data_init_op, keep_prob)
            print('Val: accuracy (top1){0:.4f} (top3){1:.4f}'.format(
                acc_top1, acc_top3))
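
scene_input.get_dataset is also defined outside this snippet. A minimal sketch of how the shared batch tensors and the two init ops could be produced with a reinitializable tf.data iterator (the decode/resize preprocessing is a placeholder, not the project's actual pipeline):

def get_dataset(train_filenames, train_labels,
                val_filenames, val_labels, batch_size):
    def _parse(filename, label):
        image = tf.image.decode_jpeg(tf.read_file(filename), channels=3)
        image = tf.image.resize_images(image, [224, 224])
        return image, label

    train_ds = (tf.data.Dataset
                .from_tensor_slices((train_filenames, train_labels))
                .shuffle(len(train_filenames))
                .map(_parse)
                .batch(batch_size))
    val_ds = (tf.data.Dataset
              .from_tensor_slices((val_filenames, val_labels))
              .map(_parse)
              .batch(batch_size))

    # One pair of output tensors, two init ops: running an init op points
    # the same iterator at the training or the validation pipeline.
    iterator = tf.data.Iterator.from_structure(train_ds.output_types,
                                               train_ds.output_shapes)
    images, labels = iterator.get_next()
    train_data_init_op = iterator.make_initializer(train_ds)
    val_data_init_op = iterator.make_initializer(val_ds)
    return images, labels, train_data_init_op, val_data_init_op

This is the pattern that lets evaluate() run only val_data_init_op, while main() below alternates between the two init ops inside one graph.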
Code example #3
File: scene.py  Project: GRSEB9S/ai_challenger_scene
def main():
    # Get the list of filenames and the corresponding list of labels for training and validation
    train_filenames, train_labels = scene_input.list_images('train')
    val_filenames, val_labels = scene_input.list_images('validation')

    # --------------------------------------------------------------------------
    # In TensorFlow, you first want to define the computation graph with all the
    # necessary operations: loss, training op, accuracy...
    # Any tensor created in the `graph.as_default()` scope will be part of `graph`
    graph = tf.Graph()
    with graph.as_default():
        global_step = tf.contrib.framework.get_or_create_global_step()

        keep_prob = tf.placeholder(tf.float32)

        images, labels, train_data_init_op, val_data_init_op \
            = scene_input.get_dataset_with_color_augmentation(train_filenames, train_labels,
                                                              val_filenames, val_labels, batch_size)

        with tf.name_scope('inference'):
            conv_net = vgg.Vgg16()
            conv_net.build(images, keep_prob, scene_input.num_classes)
            logits = conv_net.get_softmax_linear()

        with tf.name_scope('loss'):
            # Calculate the average cross entropy loss across the batch.
            labels = tf.cast(labels, tf.int64)
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=labels, logits=logits, name='cross_entropy_per_example')
            loss = tf.reduce_mean(cross_entropy, name='cross_entropy_loss')

        with tf.name_scope('train'):
            full_train_op = train_momentum_sgd(loss, global_step)

        with tf.name_scope('evaluation'):
            # Evaluation metrics
            prediction = tf.to_int32(tf.argmax(logits, 1))
            labels = tf.to_int32(labels)
            correct_prediction = tf.equal(prediction, labels)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            correct_prediction_top3 = tf.nn.in_top_k(logits, labels, 3)
            accuracy_top3 = tf.reduce_mean(tf.cast(correct_prediction_top3, tf.float32))

        init_var_op = tf.global_variables_initializer()

        merged_summary = tf.summary.merge_all()

        saver = tf.train.Saver()

        tf.get_default_graph().finalize()

    # --------------------------------------------------------------------------
    # Now that we have built the graph and finalized it, we define the session.
    # The session is the interface to *run* the computational graph.
    # We can call our training operations with `sess.run(train_op)` for instance
    with tf.Session(graph=graph) as sess:
        sess.run(init_var_op)

        # tensorboard writer.
        writer = tf.summary.FileWriter(log_dir, graph)

        # check model_dir for checkpoint file.
        restore_epoch = None
        ckpt = tf.train.get_checkpoint_state(model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Assuming model_checkpoint_path looks something like:
            #   /my-favorite-path/model/scene-10,
            # extract the epoch number from it. Cast it to int so it can be
            # compared with `epoch` and used in the arithmetic below.
            restore_epoch = int(
                ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            sess.run(global_step.assign((restore_epoch - 1) * steps_per_epoch))
            print("restore check point from: {}".format(ckpt.model_checkpoint_path))
            print("get epoch: {} step: {}".format(restore_epoch, (restore_epoch - 1) * steps_per_epoch))
        else:
            print('training whole conv net from scratch.')
        start_time = time.time()

        # Train the entire model for a few more epochs, continuing with the *same* weights.
        for epoch in range(num_epochs):
            if restore_epoch is not None and epoch < restore_epoch:
                continue
            else:
                print('epoch {} / {}'.format(epoch + 1, num_epochs))
            tick = time.time()
            sess.run(train_data_init_op)
            for i in tqdm.tqdm(range(steps_per_epoch)):
                try:
                    if tensorboard_write_frq > 0 and i % tensorboard_write_frq == 0:
                        _, summary = sess.run([full_train_op, merged_summary], {keep_prob: dropout_keep_prob})
                        writer.add_summary(summary, epoch * steps_per_epoch + i)
                    else:
                        _ = sess.run(full_train_op, {keep_prob: dropout_keep_prob})
                except tf.errors.OutOfRangeError:
                    break
            tock = time.time()
            print(check_time(tock - start_time, steps_per_epoch, tock - tick))

            # check point
            if (epoch + 1) % checkpoint_freq == 0:
                saver.save(sess, os.path.join(checkpoint_dir, 'scene'), global_step=epoch + 1)

            # Check on the train and val sets every epoch.
            train_acc, train_loss = check_train(sess, correct_prediction, keep_prob,
                                                train_data_init_op, n_batch=100, loss=loss)
            print('Train: accuracy {0:.4f} loss {1:.4f}'.format(train_acc, train_loss))
            val_acc, val_acc_top3 = check_val(sess, correct_prediction, keep_prob, val_data_init_op,
                                              correct_prediction_top3=correct_prediction_top3)
            print('Val: accuracy (top1){0:.4f} (top3){1:.4f}'.format(val_acc, val_acc_top3))
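
train_momentum_sgd, check_train and check_val come from elsewhere in scene.py. A minimal sketch of train_momentum_sgd, assuming plain momentum SGD plus the exponential moving average that both evaluate() functions above restore (the learning-rate schedule values are illustrative, not the project's):

def train_momentum_sgd(loss, global_step):
    # Illustrative stepwise learning-rate decay.
    lr = tf.train.exponential_decay(0.01, global_step,
                                    decay_steps=10000, decay_rate=0.1,
                                    staircase=True)
    tf.summary.scalar('learning_rate', lr)
    tf.summary.scalar('loss', loss)

    optimizer = tf.train.MomentumOptimizer(lr, momentum=0.9)
    apply_gradient_op = optimizer.minimize(loss, global_step=global_step)

    # Shadow all trainable variables with a moving average; these are the
    # variables that variables_to_restore() picks up at evaluation time.
    variable_averages = tf.train.ExponentialMovingAverage(
        scene.MOVING_AVERAGE_DECAY, global_step)
    with tf.control_dependencies([apply_gradient_op]):
        train_op = variable_averages.apply(tf.trainable_variables())
    return train_op

Creating the scalar summaries inside this helper is also what keeps tf.summary.merge_all() in main() from returning None.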