Example #1
def evaluate(model, dataset, batch_size=128, checkpoint_dir='./checkpoint'):
    with tf.Graph().as_default() as g:
        data = get_data_provider(dataset, training=False)
        with tf.device('/cpu:0'):
            if FLAGS.dataset == "imagenet":
                x, yt = image_processing.inputs(
                    data,
                    batch_size=batch_size,
                    num_preprocess_threads=FLAGS.num_threads)
            else:
                x, yt = data.generate_batches(batch_size)
            is_training = tf.placeholder(tf.bool, [], name='is_training')

        # Build the Graph that computes the logits predictions
        y = model(x, is_training=False)

        # Calculate predictions.
        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=yt,
                                                           logits=y))
        accuracy_top1 = tf.reduce_mean(
            tf.cast(tf.nn.in_top_k(y, yt, 1), tf.float32))
        accuracy_top5 = tf.reduce_mean(
            tf.cast(tf.nn.in_top_k(y, yt, 5), tf.float32))
        # Restore the moving average version of the learned variables for eval.
        #variable_averages = tf.train.ExponentialMovingAverage(
        #    MOVING_AVERAGE_DECAY)
        #variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver()  # variables_to_restore

        # Configure options for session
        gpu_options = tf.GPUOptions(allow_growth=True)
        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=False,
            allow_soft_placement=True,
            gpu_options=gpu_options,
        ))
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir + '/')
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found')
            return

        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    qr.create_threads(sess,
                                      coord=coord,
                                      daemon=True,
                                      start=True))

            num_batches = int(math.ceil(data.size[0] / batch_size))
            total_top1 = 0  # Accumulates per-batch top-1 accuracy.
            total_top5 = 0  # Accumulates per-batch top-5 accuracy.
            total_loss = 0  # Accumulates per-batch loss.
            step = 0
            while step < num_batches and not coord.should_stop():
                acc_top1, acc_top5, loss_val = sess.run(
                    [accuracy_top1, accuracy_top5, loss])
                total_top1 += acc_top1
                total_top5 += acc_top5
                total_loss += loss_val
                step += 1

            # Average accuracy and loss over all batches
            total_top1 /= num_batches
            total_top5 /= num_batches
            total_loss /= num_batches

        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads)
        return total_top1, total_top5, total_loss
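
A self-contained sketch (TF 1.x assumed, temporary directory) of the get_checkpoint_state/Saver restore pattern these snippets use: a tiny variable is saved and then restored the same way.

# Sketch only: saves and restores one variable with the same checkpoint pattern.
import os
import tempfile
import tensorflow as tf

ckpt_dir = tempfile.mkdtemp()
v = tf.Variable(42.0, name='v')
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, os.path.join(ckpt_dir, 'model.ckpt'), global_step=0)

with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print(sess.run(v))  # 42.0
    else:
        print('No checkpoint file found')
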
Example #2
def evaluate(model,
             dataset,
             batch_size=128,
             num_gpu=1,
             if_debug=False,
             checkpoint_dir='./checkpoint',
             checkpoint_file=''):
    with tf.Graph().as_default() as g:
        data = get_data_provider(dataset, training=False)
        with tf.device('/cpu:0'):
            x, yt = data.generate_batches(batch_size)

        # Build the Graph that computes the logits predictions
        assert batch_size % num_gpu == 0, (
            'Batch size must be divisible by number of GPUs')
        x_splits = tf.split(axis=0, num_or_size_splits=num_gpu, value=x)
        yt_splits = tf.split(axis=0, num_or_size_splits=num_gpu, value=yt)
        reuse = False

        for i in range(num_gpu):
            if if_debug:
                device_str = '/gpu:0'
            else:
                device_str = '/gpu:' + str(i)
            with tf.device(device_str):
                with tf.name_scope('%s_%d' % ('tower', i)) as scope:
                    y = model(x_splits[i], is_training=False, reuse=reuse)
                    cross_entropy_loss = tf.reduce_mean(
                        tf.nn.sparse_softmax_cross_entropy_with_logits(
                            labels=yt_splits[i], logits=y),
                        name='cross_entropy_losses')
                    regularizer_loss = tf.reduce_sum(tf.get_collection(
                        tf.GraphKeys.REGULARIZATION_LOSSES),
                                                     name='regularize_losses')
                    total_loss = tf.add(cross_entropy_loss,
                                        regularizer_loss,
                                        name='total_losses')
                    tf.add_to_collection('total_losses', total_loss)
                    reuse = True
            accuracy_top1 = tf.reduce_mean(tf.cast(
                tf.nn.in_top_k(y, yt_splits[i], 1), tf.float32),
                                           name='accuracies_top1')
            tf.add_to_collection('accuracies_top1', accuracy_top1)
            accuracy_top5 = tf.reduce_mean(tf.cast(
                tf.nn.in_top_k(y, yt_splits[i], 5), tf.float32),
                                           name='accuracies_top5')
            tf.add_to_collection('accuracies_top5', accuracy_top5)

        loss = tf.reduce_mean(tf.get_collection('total_losses'),
                              name='total_loss')
        accuracy_top1 = tf.reduce_mean(tf.get_collection('accuracies_top1'),
                                       name='accuracy_top1')
        accuracy_top5 = tf.reduce_mean(tf.get_collection('accuracies_top5'),
                                       name='accuracy_top5')

        # Restore the moving average version of the learned variables for eval.
        #variable_averages = tf.train.ExponentialMovingAverage(
        #    MOVING_AVERAGE_DECAY)
        #variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver()  # variables_to_restore

        # Configure options for session
        sess = tf.Session(config=tf.ConfigProto(log_device_placement=False,
                                                allow_soft_placement=True,
                                                gpu_options=tf.GPUOptions(
                                                    allow_growth=True)))
        if checkpoint_file == '':
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir + '/')
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                return 0., 0., 0.
        else:
            saver.restore(sess, checkpoint_file)

        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            num_batches = int(math.ceil(data.size[0] / batch_size))
            total_acc1 = 0  # Accumulates per-batch top-1 accuracy.
            total_acc5 = 0  # Accumulates per-batch top-5 accuracy.
            total_loss = 0  # Accumulates per-batch loss.
            step = 0
            while step < num_batches and not coord.should_stop():
                acc_val1, acc_val5, loss_val = sess.run(
                    [accuracy_top1, accuracy_top5, loss])
                total_acc1 += acc_val1
                total_acc5 += acc_val5
                total_loss += loss_val
                step += 1

            # Average accuracy and loss over all batches
            total_acc1 /= num_batches
            total_acc5 /= num_batches
            total_loss /= num_batches

        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads)
        coord.clear_stop()
        sess.close()
    return total_acc1, total_acc5, total_loss
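
A minimal, self-contained sketch (TF 1.x assumed, CPU only) of the split/tower/collection pattern the multi-GPU variant above relies on: per-tower scalars are pushed into a named collection and then averaged.

# Sketch only: split a batch into "towers", collect per-tower means, average them.
import tensorflow as tf

x = tf.random_normal([8, 4])
for i, xs in enumerate(tf.split(x, num_or_size_splits=2, axis=0)):
    with tf.name_scope('tower_%d' % i):
        tf.add_to_collection('tower_means', tf.reduce_mean(xs))
# reduce_mean over the collection averages the per-tower scalars.
overall_mean = tf.reduce_mean(tf.get_collection('tower_means'))

with tf.Session() as sess:
    print(sess.run(overall_mean))
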
Example #3
def evaluate(model, dataset, batch_size=128, checkpoint_dir='./checkpoint'):
    with tf.Graph().as_default() as g:
        data = get_data_provider(dataset, training=False)
        with tf.device('/cpu:1'):
            x, yt = data.generate_batches(batch_size)

        # Build the Graph that computes the logits predictions
        alpha = tf.Variable(initial_value=0.0)
        y = model(x, alpha, is_training=False)

        # Calculate predictions.
        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=yt,
                                                           logits=y))
        accuracy_top1 = tf.reduce_mean(
            tf.cast(tf.nn.in_top_k(y, yt, 1), tf.float32))
        accuracy_top5 = tf.reduce_mean(
            tf.cast(tf.nn.in_top_k(y, yt, 5), tf.float32))

        # var_list = [var for var in tf.global_variables() if "moving" in var.name]
        # var_list += tf.trainable_variables()
        # saver = tf.train.Saver(var_list=var_list)

        # Configure options for session
        gpu_options = tf.GPUOptions(allow_growth=True)
        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=False,
            allow_soft_placement=True,
            gpu_options=gpu_options,
        ))
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found')
            return
        saver.restore(sess, save_path='./results/vgg16.ckpt')  # NOTE: overrides the checkpoint restored above

        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    qr.create_threads(sess,
                                      coord=coord,
                                      daemon=True,
                                      start=True))
            num_batches = int(math.ceil(data.size[0] / batch_size))
            total_acc_top1 = 0  # Accumulates per-batch top-1 accuracy.
            total_acc_top5 = 0  # Accumulates per-batch top-5 accuracy.
            total_loss = 0  # Accumulates per-batch loss.
            step = 0
            #sess.run(tf.assign(alpha, 1.0))
            #bar = Bar('Evaluating', max=num_batches,suffix='%(percent)d%% eta: %(eta)ds')
            while step < num_batches and not coord.should_stop():
                a = datetime.now()
                acc_val_top1, acc_val_top5, loss_val = sess.run(
                    [accuracy_top1, accuracy_top5, loss])
                b = datetime.now()
                total_acc_top1 += acc_val_top1
                total_acc_top5 += acc_val_top5
                total_loss += loss_val
                step += 1

            # Average accuracy and loss over all batches
            total_acc_top1 /= num_batches
            total_acc_top5 /= num_batches
            total_loss /= num_batches

            #bar.finish()
            time = (b - a).total_seconds()  # timing covers only the last batch
            print('Calculate time: %.5f' % time)
            #print('alpha: %f' % alpha.eval(session=sess))

        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads)
        return total_acc_top1, total_acc_top5, total_loss
Example #4
def evaluate(model, dataset, batch_size=100, checkpoint_dir='./checkpoint'):
    with tf.Graph().as_default() as g:
        data = get_data_provider(dataset, training=False)
        x, yt = data.next_batch(batch_size)
        # is_training = tf.placeholder(tf.bool,[],name='is_training')
        # Build the Graph that computes the logits predictions
        y = model(x, is_training=False)

        # Calculate predictions.
        # loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=yt, logits=y))
        # accuracy = tf.reduce_mean(tf.cast(tf.nn.in_top_k(y,yt,1), tf.float32))
        yt_one = tf.one_hot(yt, 10)  # assumes a 10-class dataset
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=yt_one, logits=y))
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(
                yt, tf.cast(tf.argmax(y, axis=1), dtype=tf.int32)),
                    dtype=tf.float32))
        saver = tf.train.Saver()  # variables_to_restore
        # Configure options for session
        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=False,
            allow_soft_placement=True,
        ))
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir + '/')
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found')
            return

        # Start the queue runners.
        coord = tf.train.Coordinator()
        # threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    qr.create_threads(sess,
                                      coord=coord,
                                      daemon=True,
                                      start=True))
            tempsize = 10000 if dataset == 'MNIST' else data.size[0]
            num_batches = int(math.ceil(tempsize / batch_size))
            total_acc = 0  # Accumulates per-batch accuracy.
            total_loss = 0  # Accumulates per-batch loss.
            step = 0
            while step < num_batches and not coord.should_stop():

                acc_val, loss_val = sess.run([accuracy, loss])
                total_acc += acc_val
                total_loss += loss_val
                step += 1
            # Average accuracy and loss over all batches
            total_acc /= num_batches
            total_loss /= num_batches

        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads)
        return total_acc, total_loss
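
The commented-out in_top_k formulation and the argmax/tf.equal formulation above compute the same top-1 accuracy; a small self-contained check (TF 1.x assumed, toy batch):

# Sketch only: verifies tf.nn.in_top_k(..., k=1) agrees with the argmax/tf.equal form.
import tensorflow as tf

logits = tf.constant([[0.1, 2.0, 0.3], [1.5, 0.2, 0.1]])
labels = tf.constant([1, 2])
acc_in_top_k = tf.reduce_mean(
    tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32))
acc_argmax = tf.reduce_mean(
    tf.cast(tf.equal(labels, tf.cast(tf.argmax(logits, axis=1), tf.int32)),
            tf.float32))
with tf.Session() as sess:
    print(sess.run([acc_in_top_k, acc_argmax]))  # both 0.5 for this toy batch
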
Example #5
def evaluate(model, dataset, batch_size=256, checkpoint_dir='./checkpoint'):
    with tf.Graph().as_default() as g:
        print(FLAGS.checkpoint_dir)
        preds = []
        labels = []
        data = get_data_provider(dataset, training=False)
        with tf.device('/cpu:0'):
            if FLAGS.dataset == "imagenet":
                x, yt = image_processing.inputs(
                    data,
                    batch_size=batch_size,
                    num_preprocess_threads=FLAGS.num_threads)
            else:
                x, yt = data.generate_batches(batch_size)
            is_training = tf.placeholder(tf.bool, [], name='is_training')

        # Build the Graph that computes the logits predictions
        y = model(x, is_training=False)

        # Calculate predictions.
        softmax = tf.nn.softmax(y)
        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=yt,
                                                           logits=y))
        accuracy_top1 = tf.reduce_mean(
            tf.cast(tf.nn.in_top_k(y, yt, 1), tf.float32))
        accuracy_top5 = tf.reduce_mean(
            tf.cast(tf.nn.in_top_k(y, yt, 5), tf.float32))
        # Restore the moving average version of the learned variables for eval.
        #variable_averages = tf.train.ExponentialMovingAverage(
        #    MOVING_AVERAGE_DECAY)
        #variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver()  # variables_to_restore

        # Configure options for session
        gpu_options = tf.GPUOptions(allow_growth=True)
        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=False,
            allow_soft_placement=True,
            gpu_options=gpu_options,
        ))
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir + '/')
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found')
            return

        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    qr.create_threads(sess,
                                      coord=coord,
                                      daemon=True,
                                      start=True))

            num_batches = int(math.ceil(data.size[0] / batch_size))
            total_top1 = 0  # Accumulates per-batch top-1 accuracy.
            total_top5 = 0  # Accumulates per-batch top-5 accuracy.
            total_loss = 0  # Accumulates per-batch loss.
            step = 0
            bar = Bar('Evaluating',
                      max=num_batches,
                      suffix='%(percent)d%% eta: %(eta)ds')
            while step < num_batches and not coord.should_stop():
                image, pred_onehot, label, softmax_val, acc_top1, acc_top5, loss_val = sess.run(
                    [x, y, yt, softmax, accuracy_top1, accuracy_top5, loss])
                total_top1 += acc_top1
                total_top5 += acc_top5
                total_loss += loss_val
                step += 1
                print(total_top1 / step)  # running top-1 accuracy
                pred = np.argmax(pred_onehot, axis=1)
                preds.extend(pred)
                labels.extend(label)
                for i in range(batch_size):
                    if pred[i] != label[i]:
                        if not os.path.isdir(
                                "./" + FLAGS.checkpoint_dir.split('/')[-1] +
                                '_mis'):
                            os.makedirs("./" +
                                        FLAGS.checkpoint_dir.split('/')[-1] +
                                        '_mis')
                        img = image[i, :, :, :]
                        confidence = np.max(softmax_val[i, :])
                        label_string = CIFAR10_LABELS_LIST[label[i]]
                        mislabel = CIFAR10_LABELS_LIST[pred[i]]
                        # scipy.misc.imsave is removed in newer SciPy; imageio.imwrite is the usual replacement.
                        scipy.misc.imsave(
                            "./" + FLAGS.checkpoint_dir.split('/')[-1] +
                            '_mis/' + label_string + '_' + mislabel + "_" +
                            str(confidence) + '.jpg', img)
                bar.next()

            # Average accuracy and loss over all batches
            total_top1 /= num_batches
            total_top5 /= num_batches
            total_loss /= num_batches
            conf_mat = confusion_matrix(labels, preds)
            np.set_printoptions(precision=2)
            # plt.figure()
            plot_confusion_matrix(conf_mat,
                                  classes=CIFAR10_LABELS_LIST,
                                  normalize=True,
                                  title='{} - Accuracy: {}%'.format(
                                      FLAGS.model_name,
                                      np.around(total_top1 * 100, 1)))
            print(conf_mat)
            print(total_top1)
            bar.finish()

        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads)
        return total_top1, total_top5, total_loss
Example #6
def evaluate(model, dataset, batch_size=128, checkpoint_dir='./checkpoint'):
    with tf.Graph().as_default() as g:
        data = get_data_provider(dataset, training=False)
        with tf.device('/cpu:0'):
            x, yt = data.generate_batches(batch_size)
            is_training = tf.placeholder(tf.bool, [], name='is_training')

        # Build the Graph that computes the logits predictions
        y = model(x, is_training=False)

        # Calculate predictions.
        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=yt,
                                                           logits=y))
        accuracy = tf.reduce_mean(
            tf.cast(tf.nn.in_top_k(y, yt, 1), tf.float32))

        # Restore the moving average version of the learned variables for eval.
        #variable_averages = tf.train.ExponentialMovingAverage(
        #    MOVING_AVERAGE_DECAY)
        #variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver()  # variables_to_restore


        # Configure options for session
        gpu_options = tf.GPUOptions(allow_growth=True)
        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=False,
            allow_soft_placement=True,
            gpu_options=gpu_options,
        ))
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir + '/')
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found')
            return

        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    qr.create_threads(sess,
                                      coord=coord,
                                      daemon=True,
                                      start=True))

            num_batches = int(math.ceil(data.size[0] / batch_size))
            total_acc = 0  # Accumulates per-batch accuracy.
            total_loss = 0  # Accumulates per-batch loss.
            step = 0
            bar = Bar('Evaluating',
                      max=num_batches,
                      suffix='%(percent)d%% eta: %(eta)ds')
            while step < num_batches and not coord.should_stop():
                acc_val, loss_val = sess.run([accuracy, loss])
                total_acc += acc_val
                total_loss += loss_val
                step += 1
                bar.next()

            # Average accuracy and loss over all batches
            total_acc /= num_batches
            total_loss /= num_batches

            bar.finish()


        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads)
        return total_acc, total_loss
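
Finally, a self-contained sketch (TF 1.x assumed, toy FIFOQueue instead of a real input pipeline) of the Coordinator/queue-runner lifecycle every snippet above relies on: start the runner threads, consume until done or stopped, then request a stop and join.

# Sketch only: minimal queue-runner lifecycle with a toy enqueue op.
import tensorflow as tf

queue = tf.FIFOQueue(capacity=10, dtypes=[tf.float32])
enqueue_op = queue.enqueue(tf.random_normal([]))
tf.train.add_queue_runner(tf.train.QueueRunner(queue, [enqueue_op]))
dequeue_op = queue.dequeue()

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for _ in range(3):
            print(sess.run(dequeue_op))
    finally:
        coord.request_stop()
        coord.join(threads)
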