Example #1
    def __init__(self):
        self.train_dataset = imagenet.get_split(
            'train',
            FLAGS.data_dir,
            labels_dir=FLAGS.labels_dir,
            file_pattern=FLAGS.file_pattern)
        self.eval_dataset = imagenet.get_split(
            'validation',
            FLAGS.data_dir,
            labels_dir=FLAGS.labels_dir,
            file_pattern=FLAGS.file_pattern)
        self.image_preprocessing_fn = vgg_preprocessing.preprocess_image
        model = layers_resnet.get_model(FLAGS.model)
        self.network_fn = model(num_classes=self.train_dataset.num_classes)
        self.batches_per_epoch = (self.train_dataset.num_samples /
                                  FLAGS.batch_size)
Example #2
def main(_):
    if not FLAGS.output_file:
        raise ValueError(
            'You must supply the path to save to with --output_file')
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default() as graph:
        #dataset = dataset_factory.get_dataset(FLAGS.dataset_name, 'train',
        #                                       FLAGS.dataset_dir)
        image_net = imagenet.get_split('train',
                                       FLAGS.dataset_dir,
                                       file_pattern=None,
                                       reader=None)
        inception_network_fn = inception_v4_get_network_fn(
            num_classes=(image_net.num_classes - FLAGS.labels_offset),
            is_training=False)
        image_size = inception_network_fn.default_image_size
        input_shape = [FLAGS.batch_size, image_size, image_size, 3]
        placeholder = tf.placeholder(name='input',
                                     dtype=tf.float32,
                                     shape=input_shape)
        inception_network_fn(placeholder)

        if FLAGS.quantize:
            tf.contrib.quantize.create_eval_graph()

        graph_def = graph.as_graph_def()
        if FLAGS.write_text_graphdef:
            tf.io.write_graph(graph_def,
                              os.path.dirname(FLAGS.output_file),
                              os.path.basename(FLAGS.output_file),
                              as_text=True)
        else:
            with gfile.GFile(FLAGS.output_file, 'wb') as f:
                f.write(graph_def.SerializeToString())
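A brief sketch of loading the GraphDef exported above back into a graph for inference; the file path is illustrative, and 'input:0' matches the placeholder name defined in the example:

import tensorflow as tf

with tf.gfile.GFile('/tmp/inception_v4_graph.pb', 'rb') as f:  # illustrative path
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    # Import with an empty name scope so tensor names are preserved as-is.
    tf.import_graph_def(graph_def, name='')
    input_tensor = graph.get_tensor_by_name('input:0')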
Example #3
    def __call__(self, params):
        """Input function which provides a single batch for train or eval."""
        batch_size = params['batch_size']
        if FLAGS.use_data == 'real':
            train_dataset = imagenet.get_split('train', FLAGS.data_dir)
            eval_dataset = imagenet.get_split('validation', FLAGS.data_dir)

            dataset = train_dataset if self.is_training else eval_dataset

            capacity_multiplier = 20 if self.is_training else 2
            min_multiplier = 10 if self.is_training else 1

            provider = tf.contrib.slim.dataset_data_provider.DatasetDataProvider(
                dataset=dataset,
                num_readers=4,
                common_queue_capacity=capacity_multiplier * batch_size,
                common_queue_min=min_multiplier * batch_size)

            image, label = provider.get(['image', 'label'])

            image = vgg_preprocessing.preprocess_image(
                image=image,
                output_height=FLAGS.height,
                output_width=FLAGS.width,
                is_training=self.is_training,
                resize_side_min=_RESIZE_SIDE_MIN,
                resize_side_max=_RESIZE_SIDE_MAX)

            images, labels = tf.train.batch(tensors=[image, label],
                                            batch_size=batch_size,
                                            num_threads=4,
                                            capacity=5 * batch_size)

            labels = tf.one_hot(labels, FLAGS.num_classes)
        else:
            images = tf.random_uniform(
                [batch_size, FLAGS.height, FLAGS.width, 3],
                minval=-1,
                maxval=1)
            # Note: maxval is exclusive for integer dtypes, so these random
            # labels fall in [0, 999).
            labels = tf.random_uniform([batch_size],
                                       minval=0,
                                       maxval=999,
                                       dtype=tf.int32)
            labels = tf.one_hot(labels, FLAGS.num_classes)

        return images, labels
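Input callables with this signature follow the convention where the caller (for example, a TPU estimator) supplies params['batch_size']. A hypothetical manual invocation, assuming InputFunction is the class defining __call__ above and that it exposes an is_training attribute:

input_fn = InputFunction()        # assumed class name, not shown above
input_fn.is_training = False
images, labels = input_fn({'batch_size': 64})

with tf.Session() as sess:
    # Queue runners are required for the slim provider / tf.train.batch path.
    tf.train.start_queue_runners(sess=sess)
    image_batch, label_batch = sess.run([images, labels])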
Example #4
def gen_data(split_name, dataset, batch_size, img_size=224, aug=False):
    dataset = imagenet.get_split(split_name,
                                 dataset,
                                 file_pattern=None,
                                 reader=None)
    if split_name == 'train':
        num_file_readers = 16
        num_img_readers = 16
        shuffle = True
        batch_scale = 10
        if aug:
            preprocess_fn = preprocess_aug
        else:
            preprocess_fn = preprocess
    else:
        num_file_readers = 1
        num_img_readers = 4
        shuffle = False
        batch_scale = 1
        preprocess_fn = preprocess

    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        num_readers=num_file_readers,
        shuffle=shuffle,
        common_queue_capacity=2 * batch_scale * batch_size,
        common_queue_min=batch_scale * batch_size)

    [image, label] = provider.get(['image', 'label'])

    image = preprocess_fn(image, img_size, img_size)

    images, labels = tf.train.batch([image, label],
                                    batch_size=batch_size,
                                    num_threads=num_img_readers,
                                    capacity=5 * batch_size)
    return images, labels
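A hypothetical use of gen_data() above; the TFRecord path and batch size are illustrative:

import tensorflow as tf

images, labels = gen_data('validation', '/data/imagenet-tfrecords', 32)

with tf.Session() as sess:
    # tf.train.batch is queue-based, so queue runners must be started.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    image_batch, label_batch = sess.run([images, labels])
    coord.request_stop()
    coord.join(threads)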
Example #5
parser.add_argument(
    '--first_cycle_steps',
    type=int,
    default=None,
    help='The number of steps to run before the first evaluation. Useful if '
    'you have stopped partway through a training cycle.')

FLAGS = parser.parse_args()
_EVAL_STEPS = 50000 // FLAGS.eval_batch_size

# Scale the learning rate linearly with the batch size. When the batch size is
# 256, the learning rate should be 0.1.
_INITIAL_LEARNING_RATE = 0.1 * FLAGS.train_batch_size / 256
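# For example, train_batch_size=512 gives 0.1 * 512 / 256 = 0.2, and
# train_batch_size=128 gives 0.1 * 128 / 256 = 0.05.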

_MOMENTUM = 0.9
_WEIGHT_DECAY = 1e-4

train_dataset = imagenet.get_split('train', FLAGS.data_dir)
eval_dataset = imagenet.get_split('validation', FLAGS.data_dir)

image_preprocessing_fn = vgg_preprocessing.preprocess_image
network = resnet_model.resnet_v2(resnet_size=FLAGS.resnet_size,
                                 num_classes=train_dataset.num_classes)

batches_per_epoch = train_dataset.num_samples / FLAGS.train_batch_size


def input_fn(is_training):
    """Input function which provides a single batch for train or eval."""
    batch_size = FLAGS.train_batch_size if is_training else FLAGS.eval_batch_size
    dataset = train_dataset if is_training else eval_dataset
    capacity_multiplier = 20 if is_training else 2
    min_multiplier = 10 if is_training else 1
Example #6
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        tf_global_step = tf.train.get_or_create_global_step()

        ###################
        # Prepare dataset #
        ###################
        dataset = imagenet.get_split(FLAGS.split_name, FLAGS.dataset_dir)
        provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset,
            shuffle=False,
            common_queue_capacity=2 * FLAGS.batch_size,
            common_queue_min=FLAGS.batch_size)
        [dataset_image, label] = provider.get(['image', 'label'])
        dataset_image = preprocess_for_eval(dataset_image, IMAGE_SIZE,
                                            IMAGE_SIZE)
        dataset_images, labels = tf.train.batch(
            [dataset_image, label],
            batch_size=FLAGS.batch_size,
            num_threads=FLAGS.num_preprocessing_threads,
            capacity=5 * FLAGS.batch_size)

        #######################################
        # Define the model and input examples #
        #######################################
        create_model(tf.placeholder(tf.float32, shape=dataset_images.shape))
        input_images = get_input_images(dataset_images)
        logits, _ = create_model(input_images, reuse=True)

        if FLAGS.moving_average_decay > 0:
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, tf_global_step)
            variables_to_restore = variable_averages.variables_to_restore(
                slim.get_model_variables())
            variables_to_restore[tf_global_step.op.name] = tf_global_step
        else:
            variables_to_restore = slim.get_variables_to_restore()

        ######################
        # Define the metrics #
        ######################
        predictions = tf.argmax(logits, 1)
        labels = tf.squeeze(labels)
        names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
            'Accuracy':
            slim.metrics.streaming_accuracy(predictions, labels),
            'Recall_5':
            slim.metrics.streaming_sparse_recall_at_k(
                logits, tf.reshape(labels, [-1, 1]), 5),
        })

        ######################
        # Run evaluation     #
        ######################
        if FLAGS.max_num_batches:
            num_batches = FLAGS.max_num_batches
        else:
            # This ensures that we make a single pass over all of the data.
            num_batches = math.ceil(dataset.num_samples /
                                    float(FLAGS.batch_size))

        if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
        else:
            checkpoint_path = FLAGS.checkpoint_path

        tf.logging.info('Evaluating %s' % checkpoint_path)

        top1_accuracy, top5_accuracy = slim.evaluation.evaluate_once(
            master=FLAGS.master,
            checkpoint_path=checkpoint_path,
            logdir=None,
            summary_op=None,
            num_evals=num_batches,
            eval_op=list(names_to_updates.values()),
            final_op=[
                names_to_values['Accuracy'], names_to_values['Recall_5']
            ],
            variables_to_restore=variables_to_restore)

        print('Top1 Accuracy: ', top1_accuracy)
        print('Top5 Accuracy: ', top5_accuracy)
Example #7
    def __call__(self, params):
        """Input function which provides a single batch for train or eval."""
        batch_size = params['batch_size']
        if FLAGS.use_data == 'real' and FLAGS.use_slim:
            train_dataset = imagenet.get_split('train', FLAGS.data_dir)
            eval_dataset = imagenet.get_split('validation', FLAGS.data_dir)

            dataset = train_dataset if self.is_training else eval_dataset

            capacity_multiplier = 20 if self.is_training else 2
            min_multiplier = 10 if self.is_training else 1

            provider = tf.contrib.slim.dataset_data_provider.DatasetDataProvider(
                dataset=dataset,
                num_readers=4,
                common_queue_capacity=capacity_multiplier * batch_size,
                common_queue_min=min_multiplier * batch_size)

            image, label = provider.get(['image', 'label'])

            image = vgg_preprocessing.preprocess_image(
                image=image,
                output_height=FLAGS.height,
                output_width=FLAGS.width,
                is_training=self.is_training,
                resize_side_min=_RESIZE_SIDE_MIN,
                resize_side_max=_RESIZE_SIDE_MAX)

            images, labels = tf.train.batch(tensors=[image, label],
                                            batch_size=batch_size,
                                            num_threads=4,
                                            capacity=5 * batch_size)

            labels = tf.one_hot(labels, FLAGS.num_classes)

        elif FLAGS.use_data == 'real' and not FLAGS.use_slim:
            train_dataset = imagenet.get_split('train',
                                               FLAGS.data_dir,
                                               use_slim=False)
            eval_dataset = imagenet.get_split('validation',
                                              FLAGS.data_dir,
                                              use_slim=False)
            dataset = train_dataset if self.is_training else eval_dataset
            decoder = imagenet.get_decoder()

            def prefetch_dataset(filename):
                size = (FLAGS.prefetch_size
                        if FLAGS.prefetch_size > 0 else batch_size)
                return tf.contrib.data.TFRecordDataset(
                    filename,
                    buffer_size=FLAGS.dataset_reader_buffer_size).prefetch(
                        size)

            if FLAGS.input_files_shuffle_capacity > 0:
                dataset = dataset.shuffle(FLAGS.input_files_shuffle_capacity)
            dataset = dataset.repeat()

            if FLAGS.prefetch_enabled:
                dataset = dataset.interleave(prefetch_dataset,
                                             cycle_length=FLAGS.num_readers,
                                             block_length=batch_size)

            if FLAGS.input_shuffle_capacity > 0:
                dataset = dataset.shuffle(FLAGS.input_shuffle_capacity)

            def parser(serialized_example):
                image, label = decoder.decode(serialized_example,
                                              ['image', 'label'])
                image = vgg_preprocessing.preprocess_image(
                    image=image,
                    output_height=FLAGS.height,
                    output_width=FLAGS.width,
                    is_training=self.is_training,
                    resize_side_min=_RESIZE_SIDE_MIN,
                    resize_side_max=_RESIZE_SIDE_MAX)
                return image, tf.one_hot(label, FLAGS.num_classes)

            dataset = dataset.map(parser,
                                  num_threads=FLAGS.map_threads,
                                  output_buffer_size=batch_size)

            dataset = dataset.batch(batch_size)
            images, labels = dataset.make_one_shot_iterator().get_next()

            # TODO(xiejw,saeta): Consider removing the sharding dimension below.
            images.set_shape(images.get_shape().merge_with(
                tf.TensorShape([batch_size, None, None, None])))
            labels.set_shape(labels.get_shape().merge_with(
                tf.TensorShape([batch_size, None])))
        else:
            images = tf.random_uniform(
                [batch_size, FLAGS.height, FLAGS.width, 3],
                minval=-1,
                maxval=1)
            labels = tf.random_uniform([batch_size],
                                       minval=0,
                                       maxval=999,
                                       dtype=tf.int32)
            labels = tf.one_hot(labels, FLAGS.num_classes)

        output_transform_fn = TensorTranspose(batch_size, is_input=False)
        images = output_transform_fn(images)
        return images, labels
Example #8
def main(_):
  if not FLAGS.dataset_dir:
    raise ValueError('You must supply the dataset directory with --dataset_dir')

  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default():
    tf_global_step = tf.train.get_or_create_global_step()

    ###################
    # Prepare dataset #
    ###################
    dataset = imagenet.get_split(FLAGS.split_name, FLAGS.dataset_dir)
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        shuffle=False,
        common_queue_capacity=2 * FLAGS.batch_size,
        common_queue_min=FLAGS.batch_size)
    [dataset_image, label] = provider.get(['image', 'label'])
    dataset_image = preprocess_for_eval(dataset_image, IMAGE_SIZE, IMAGE_SIZE)
    dataset_images, labels = tf.train.batch(
        [dataset_image, label],
        batch_size=FLAGS.batch_size,
        num_threads=FLAGS.num_preprocessing_threads,
        capacity=5 * FLAGS.batch_size)

    #######################################
    # Define the model and input examples #
    #######################################
    create_model(tf.placeholder(tf.float32, shape=dataset_images.shape))
    input_images = get_input_images(dataset_images)
    logits, _ = create_model(input_images, reuse=True)

    if FLAGS.moving_average_decay is not None:
      variable_averages = tf.train.ExponentialMovingAverage(
          FLAGS.moving_average_decay, tf_global_step)
      variables_to_restore = variable_averages.variables_to_restore(
          slim.get_model_variables())
      variables_to_restore[tf_global_step.op.name] = tf_global_step
    else:
      variables_to_restore = slim.get_variables_to_restore()

    ######################
    # Define the metrics #
    ######################
    predictions = tf.argmax(logits, 1)
    labels = tf.squeeze(labels)
    names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
        'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
        'Recall_5': slim.metrics.streaming_sparse_recall_at_k(
            logits, tf.reshape(labels, [-1, 1]), 5),
    })

    ######################
    # Run evaluation     #
    ######################
    if FLAGS.max_num_batches:
      num_batches = FLAGS.max_num_batches
    else:
      # This ensures that we make a single pass over all of the data.
      num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))

    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
      checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
      checkpoint_path = FLAGS.checkpoint_path

    tf.logging.info('Evaluating %s' % checkpoint_path)

    sess = tf.Session()

    # Initialize variables BEFORE restoring the checkpoint; running the global
    # initializer after saver.restore() would overwrite the restored weights.
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, checkpoint_path)

    tf.logging.info('Checkpoint restored')

    tf.train.start_queue_runners(sess=sess)

    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    tf.logging.info('Start Profiling')
    eval_results = sess.run(list(names_to_updates.values()),
                            options=options, run_metadata=run_metadata)
    cg = CompGraph('adv_imagenet_models', run_metadata, tf.get_default_graph())
    tf.logging.info('Profiling finished')

    cg_tensor_dict = cg.get_tensors()
    cg_sorted_keys = sorted(cg_tensor_dict.keys())
    cg_sorted_items = []
    for cg_key in cg_sorted_keys:
      cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

    cg_sorted_shape = sess.run(cg_sorted_items)
    cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                   'adv_imagenet_models.pickle')

    top1_accuracy = eval_results[0]
    top5_accuracy = eval_results[1]

    print('Top1 Accuracy: ', top1_accuracy)
    print('Top5 Accuracy: ', top5_accuracy)
Example #9
    def __call__(self, params):
        """Input function which provides a single batch for train or eval."""
        batch_size = params['batch_size']

        if FLAGS.use_data == 'real':
            if self.is_training:
                dataset = imagenet.get_split('train',
                                             FLAGS.data_dir,
                                             use_slim=False)
            else:
                dataset = imagenet.get_split('validation',
                                             FLAGS.data_dir,
                                             use_slim=False)

            decoder = imagenet.get_decoder()

            # The operations that follow are based on guidelines from the
            # tf.data input-pipeline best-practices presentation.
            if FLAGS.initial_shuffle_buffer_size > 0:
                dataset = dataset.shuffle(
                    buffer_size=FLAGS.initial_shuffle_buffer_size)
            dataset = dataset.repeat()

            # use interleave() and prefetch() to read many files concurrently
            def prefetch_map_fn(filename):
                return tf.contrib.data.TFRecordDataset(
                    filename,
                    buffer_size=FLAGS.prefetch_dataset_buffer_size).prefetch(
                        FLAGS.prefetch_size or batch_size)

            if FLAGS.prefetch_enabled:
                dataset = dataset.interleave(prefetch_map_fn,
                                             cycle_length=FLAGS.cycle_length,
                                             block_length=FLAGS.block_length
                                             or batch_size)

            if FLAGS.followup_shuffle_buffer_size > 0:
                dataset = dataset.shuffle(
                    buffer_size=FLAGS.followup_shuffle_buffer_size)

            # use num_parallel_calls to parallelize map()
            def parser(serialized_example):
                image, label = decoder.decode(serialized_example,
                                              ['image', 'label'])
                image = vgg_preprocessing.preprocess_image(
                    image=image,
                    output_height=FLAGS.height,
                    output_width=FLAGS.width,
                    is_training=self.is_training,
                    resize_side_min=_RESIZE_SIDE_MIN,
                    resize_side_max=_RESIZE_SIDE_MAX)
                return image, label

            dataset = dataset.map(parser,
                                  num_parallel_calls=FLAGS.num_parallel_calls,
                                  output_buffer_size=batch_size)

            dataset = dataset.batch(batch_size)

            # use prefetch to overlap producer and consumer
            dataset = dataset.prefetch(1)

            images, labels = dataset.make_one_shot_iterator().get_next()
            labels = tf.one_hot(labels, FLAGS.num_classes, dtype=tf.int32)

            if FLAGS.input_layout == 'NHWC':
                images.set_shape([batch_size, FLAGS.height, FLAGS.width, 3])
            else:
                images.set_shape([batch_size, 3, FLAGS.height, FLAGS.width])
            labels.set_shape([batch_size, FLAGS.num_classes])

        else:
            if FLAGS.input_layout == 'NHWC':
                images = tf.random_uniform(
                    [batch_size, FLAGS.height, FLAGS.width, 3],
                    minval=-1,
                    maxval=1)
            else:
                images = tf.random_uniform(
                    [batch_size, 3, FLAGS.height, FLAGS.width],
                    minval=-1,
                    maxval=1)

            labels = tf.random_uniform([batch_size],
                                       minval=0,
                                       maxval=999,
                                       dtype=tf.int32)
            labels = tf.one_hot(labels, FLAGS.num_classes)

        images = tensor_transform_fn(images, params['output_perm'])
        return images, labels
Example #10
split_name = 'train'
dataset_dir = <PATH_TO_TFSLIM_IMAGENET_TFRECORDS>

# Recommended: run multiple copies of this script in parallel.
num_images = 4000  # (1 million images) / (250 parallel runs)

random_seed = args.seed  # set random seed (different for each run)
im_shape = 224

slim = tf.contrib.slim
# build the network
nets, sess, metamer_layers = build_network.main()

# metamer_layers is a list of strings which are the keys of nets
layers_activations = [nets[layer] for layer in metamer_layers] 
dataset = get_split(split_name, dataset_dir)
data_provider = slim.dataset_data_provider.DatasetDataProvider(dataset, 
                                                               shuffle=True, 
                                                               seed=random_seed)
image_raw, label = data_provider.get(['image', 'label'])
image = tf.expand_dims(image_center_crop(image_raw, im_shape=im_shape), 0)

# all of the losses. shape=(num_image_pairs, num_layers)
spearman_r = []
with slim.queues.QueueRunners(sess):
    for i in range(num_images):
        
        # use the first 2 images, then update the images_list index
        image1 = sess.run(image)
        image2 = sess.run(image)
        spearman_r_pair = []
Example #11
def main(_):
  if not FLAGS.dataset_dir:
    raise ValueError('You must supply the dataset directory with --dataset_dir')

  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default():
    tf_global_step = slim.get_or_create_global_step()

    ###########################
    # Select the eval dataset #
    ###########################
    assert FLAGS.dataset_name == 'imagenet'
    dataset = imagenet.get_split(FLAGS.dataset_split_name, FLAGS.dataset_dir)

    ####################
    # Select the model #
    ####################

    networks_map = {
        'mobilenet_v1': mobilenet_v1.mobilenet_v1,
        'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_075,
        'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_050,
        'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_025,
    }

    name = FLAGS.model_name
    num_classes = dataset.num_classes
    if name not in networks_map:
      raise ValueError('Unknown network name: %s' % name)
    def network_fn(images):
      arg_scope = mobilenet_v1.mobilenet_v1_arg_scope(is_training=False)
      with slim.arg_scope(arg_scope):
        return networks_map[name](images, num_classes, is_training=False)


    ##############################################################
    # Create a dataset provider that loads data from the dataset #
    ##############################################################
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        shuffle=False,
        common_queue_capacity=2 * FLAGS.batch_size,
        common_queue_min=FLAGS.batch_size)
    [image, label] = provider.get(['image', 'label'])

    #####################################
    # Select the preprocessing function #
    #####################################
    image = preprocess_for_eval(image, FLAGS.eval_image_size, FLAGS.eval_image_size)

    images, labels = tf.train.batch(
        [image, label],
        batch_size=FLAGS.batch_size,
        num_threads=FLAGS.num_preprocessing_threads,
        capacity=5 * FLAGS.batch_size)

    ####################
    # Define the model #
    ####################
    logits, _ = network_fn(images)
    variables_to_restore = slim.get_variables_to_restore()

    predictions = tf.argmax(logits, 1)
    labels = tf.squeeze(labels)

    # Define the metrics:
    names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
        'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
        'Recall_5': slim.metrics.streaming_recall_at_k(
            logits, labels, 5),
    })

    # Print the summaries to screen.
    for name, value in names_to_values.items():
      summary_name = 'eval/%s' % name
      op = tf.summary.scalar(summary_name, value, collections=[])
      op = tf.Print(op, [value], summary_name)
      tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

    num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))

    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
      checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
      checkpoint_path = FLAGS.checkpoint_path

    tf.logging.info('Evaluating %s' % checkpoint_path)

    slim.evaluation.evaluate_once(
        master='',
        checkpoint_path=checkpoint_path,
        logdir=FLAGS.eval_dir,
        num_evals=num_batches,
        eval_op=list(names_to_updates.values()),
        variables_to_restore=variables_to_restore)