def _preproc_image_batch(self, batch_size, num_threads=1):
        '''
        This function is used only by the queue input pipeline. It reads a filename
        from the filename queue, decodes the image, pushes it through a pre-processing
        function, and then uses tf.train.batch to generate batches.

        :param batch_size: int, batch size
        :param num_threads: int, number of input threads (default=1)
        :return: tf.Tensor, batch of pre-processed input images
        '''

        if ("resnet_v2"
                in self._network_name) and (self._preproc_func_name is None):
            raise ValueError(
                "When using ResNet, please perform the pre-processing "
                "function manually. See here for details: "
                "https://github.com/tensorflow/models/tree/master/slim")

        # Read image file from disk and decode JPEG
        reader = tf.WholeFileReader()
        image_filename, image_raw = reader.read(self._filename_queue)
        image = tf.image.decode_jpeg(image_raw, channels=3)
        # Image preprocessing
        preproc_func_name = (self._network_name
                             if self._preproc_func_name is None
                             else self._preproc_func_name)
        image_preproc_fn = preprocessing_factory.get_preprocessing(
            preproc_func_name, is_training=False)
        image_preproc = image_preproc_fn(image, self.image_size,
                                         self.image_size)
        # Batch the pre-processed images (and filenames) from the queue
        image_batch = tf.train.batch([image_preproc, image_filename],
                                     batch_size,
                                     num_threads=num_threads,
                                     allow_smaller_final_batch=True)
        return image_batch
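
For context, a minimal driver sketch for this method, assuming it lives on a feature-extractor class that exposes self._filename_queue. The FeatureExtractor name, its constructor, and all paths are hypothetical; only the method above comes from the source.

import tensorflow as tf

filenames = tf.gfile.Glob('/path/to/images/*.jpg')  # illustrative path
extractor = FeatureExtractor(network_name='inception_v3')  # hypothetical class
extractor._filename_queue = tf.train.string_input_producer(
    filenames, num_epochs=1, shuffle=False)

image_batch, filename_batch = extractor._preproc_image_batch(
    batch_size=32, num_threads=4)

with tf.Session() as sess:
    # num_epochs creates a local epoch counter that must be initialized
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            images, names = sess.run([image_batch, filename_batch])
    except tf.errors.OutOfRangeError:
        pass  # the filename queue is exhausted after one epoch
    finally:
        coord.request_stop()
        coord.join(threads)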
Example #2
	def preprocess(self, arch='inception', pr=False, add_batch=True):
		self.check_colour_channel()
		proc = pp.get_preprocessing(arch)
		self.image = tf.convert_to_tensor(self.image)
		self.image = proc(self.image, 224, 224)
		with tf.Session() as sess:
			self.image = sess.run(self.image)
		ready_img = self.image
		if add_batch:
			# Add a leading batch dimension: [224, 224, 3] -> [1, 224, 224, 3]
			ready_img = np.reshape(self.image, [1, 224, 224, 3])
		return ready_img
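
For reference, a minimal call sketch. The constructor signature ImgProc(path, 1) is taken from Example #5 below; the image path is illustrative.

from MUtils.img_proc import ImgProc

img = ImgProc('/path/to/image.jpeg', 1)  # illustrative path
batch = img.preprocess(arch='inception', add_batch=True)
assert batch.shape == (1, 224, 224, 3)  # one pre-processed NHWC image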
Example #3
def neualstyle(model_file, image_file):

    # Initialize parameters
    loss_model = 'vgg_16'
    height = 0
    width = 0

    # Read the image file and decode it into a tensor
    with open(image_file, 'rb') as img:
        with tf.Session() as sess:
            if image_file.lower().endswith('png'):
                # Decode into a tensor of shape [height, width, channels]
                image = sess.run(tf.image.decode_png(img.read()))
            else:
                image = sess.run(tf.image.decode_jpeg(img.read()))
            height = image.shape[0]
            width = image.shape[1]

    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Get the image pre-processing function
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                loss_model, is_training=False)
            # Get the pre-processed image tensor
            image = reader.get_image(image_file, height, width,
                                     image_preprocessing_fn)
            # Add a batch dimension: [height, width, 3] -> [1, height, width, 3]
            image = tf.expand_dims(image, 0)

            # Build the style-transfer network
            generated = model.net(image, training=False)
            generated = tf.cast(generated, tf.uint8)

            # Remove the added batch dimension
            generated = tf.squeeze(generated, [0])

            # Create a saver and restore the model variables
            saver = tf.train.Saver(tf.global_variables(),
                                   write_version=tf.train.SaverDef.V1)
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            saver.restore(sess, model_file)

            # Save the generated image
            generated_name = image_file.split('/')[-1]
            generated_file = (os.getcwd() +
                              '/wx_robot/handler/neuralstyle/neuralpic/' +
                              generated_name)
            with open(generated_file, 'wb') as img:
                img.write(sess.run(tf.image.encode_jpeg(generated)))
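
A hedged invocation sketch; both arguments are placeholders. Per the code above, the stylized image is written under <cwd>/wx_robot/handler/neuralstyle/neuralpic/ using the input file's basename.

# Illustrative call; the checkpoint and image paths are placeholders.
neualstyle('/path/to/style_model.ckpt-done', '/path/to/input.jpg')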
Example #4
def get_weights(model_name,
                dataset_name,
                dataset_dir,
                dataset_split_name,
                checkpoint_path,
                conv_scope=None,
                batch_size=1,
                is_training=False):

    with tf.Graph().as_default():
        dataset = dataset_factory.get_dataset(dataset_name, dataset_split_name,
                                              dataset_dir)
        preprocessing_fn = preprocessing_factory.get_preprocessing(
            model_name, is_training=is_training)
        network_fn = network_factory.get_network_fn(model_name,
                                                    FLAGS.num_classes)
        images, _, labels = train_test_utils.load_batch(
            dataset,
            preprocessing_fn,
            is_training=is_training,
            batch_size=batch_size,
            shuffle=False)
        logits, end_points = network_fn(images)

        model_vars = slim.get_model_variables()

        variables_to_restore = slim.get_variables_to_restore()
        restorer = tf.train.Saver(variables_to_restore)

        if os.path.isdir(checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)

        with tf.Session() as sess:
            restorer.restore(sess, checkpoint_path)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            vals = sess.run(model_vars)
            val_map = {}
            for i, var in enumerate(model_vars):
                if 'conv' in var.op.name:
                    val_map[var.op.name] = vals[i]

            coord.request_stop()
            coord.join(threads)

    return val_map
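
A possible call, for illustration. The model name, dataset name, and paths are placeholders, and FLAGS.num_classes must be defined by the surrounding script, since the function reads it.

val_map = get_weights(
    model_name='mobilenet_v1',
    dataset_name='cifar10',
    dataset_dir='/path/to/cifar10',
    dataset_split_name='test',
    checkpoint_path='/path/to/train_dir')

# val_map maps conv variable names to their numpy values
for name, value in sorted(val_map.items()):
    print('%s: shape=%s' % (name, value.shape))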
Example #5
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, '/home/marcelo/tensorflow/Scripts/')
sys.path.insert(0, '/home/marcelo/git/models/research/slim/preprocessing/')

import preprocessing_factory as pp
from MUtils.img_proc import ImgProc

bfp_out_module = tf.load_op_library('/home/marcelo/tensorflow/Scripts/BFP/lib/bfp_out.so')

proc = pp.get_preprocessing('inception')

######
# Loading Image to test
######
data_dir = "/mnt/d/Data/ILSVRC2012/ILSVRC2012_img_val/"
img_pth = data_dir+"ILSVRC2012_val_00000028.jpeg"
img = ImgProc(img_pth, 1).preprocess()
imgplot = plt.imshow(img[0])
plt.show()

with tf.Session():
	print(img)
	res1 = bfp_out_module.bfp_out(img, ShDepth=3, MWidth=2, EWidth=2).eval()
	imgplot = plt.imshow(res1[0])
	plt.show()
	print(res1[0])
Example #6
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    #os.environ["OMP_NUM_THREADS"] = "54"
    with tf.Graph().as_default():
        tf_global_step = slim.get_or_create_global_step()

        ######################
        # Select the dataset #
        ######################
        dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
                                              FLAGS.dataset_split_name,
                                              FLAGS.dataset_dir)

        ####################
        # Select the model #
        ####################
        network_fn = nets_factory.get_network_fn(
            FLAGS.model_name,
            num_classes=(dataset.num_classes - FLAGS.labels_offset),
            is_training=False)

        ##############################################################
        # Create a dataset provider that loads data from the dataset #
        ##############################################################
        provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset,
            shuffle=False,
            common_queue_capacity=2 * FLAGS.batch_size,
            common_queue_min=FLAGS.batch_size)
        [image, label] = provider.get(['image', 'label'])
        label -= FLAGS.labels_offset

        #####################################
        # Select the preprocessing function #
        #####################################
        preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name, is_training=False)

        eval_image_size = \
            FLAGS.eval_image_size or network_fn.default_image_size

        image = image_preprocessing_fn(image, eval_image_size, eval_image_size)

        images, labels = tf.train.batch(
            [image, label],
            batch_size=FLAGS.batch_size,
            num_threads=FLAGS.num_preprocessing_threads,
            capacity=5 * FLAGS.batch_size)

        ####################
        # Define the model #
        ####################
        logits, _ = network_fn(images)

        if FLAGS.moving_average_decay:
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, tf_global_step)
            variables_to_restore = variable_averages.variables_to_restore(
                slim.get_model_variables())
            variables_to_restore[tf_global_step.op.name] = tf_global_step
        else:
            variables_to_restore = slim.get_variables_to_restore()

        predictions = tf.argmax(logits, 1)
        #labels = tf.squeeze(labels)

        # Define the metrics:
        names_to_values, names_to_updates = \
            slim.metrics.aggregate_metric_map({
                'Accuracy': slim.metrics.streaming_accuracy(
                    predictions, labels),
                'Recall_5': slim.metrics.streaming_recall_at_k(
                    logits, labels, 5),
        })

        # Print the summaries to screen.
        for name, value in names_to_values.items():
            summary_name = 'eval/%s' % name
            op = tf.summary.scalar(summary_name, value, collections=[])
            op = tf.Print(op, [value], summary_name)
            tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

        # TODO(sguada) use num_epochs=1
        if FLAGS.max_num_batches:
            num_batches = FLAGS.max_num_batches
        else:
            # This ensures that we make a single pass over all of the data.
            num_batches = math.ceil(dataset.num_samples /
                                    float(FLAGS.batch_size))

        # num_batches = 100  # debug cap; overrides the single-pass count above

        config = tf.ConfigProto(
            inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
            intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads)

        if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
        else:
            checkpoint_path = FLAGS.checkpoint_path

        tf.logging.info('Evaluating %s' % checkpoint_path)

        slim.evaluation.evaluate_once(
            master=FLAGS.master,
            checkpoint_path=checkpoint_path,
            logdir=FLAGS.eval_dir,
            num_evals=num_batches,
            eval_op=list(names_to_updates.values()),
            variables_to_restore=variables_to_restore,
            hooks=[_LoggerHook()],
            session_config=config)
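
The snippet reads many command-line flags (and a _LoggerHook class) that are defined elsewhere in the script. As a sketch, matching tf.app.flags definitions could look like this; the flag names come from the FLAGS references above, while the defaults and help strings are illustrative.

tf.app.flags.DEFINE_string('master', '', 'Address of the TensorFlow master.')
tf.app.flags.DEFINE_string('dataset_dir', None, 'Directory with the dataset files.')
tf.app.flags.DEFINE_string('dataset_name', 'imagenet', 'Name of the dataset.')
tf.app.flags.DEFINE_string('dataset_split_name', 'validation', 'Split to evaluate.')
tf.app.flags.DEFINE_string('model_name', 'inception_v3', 'Model to evaluate.')
tf.app.flags.DEFINE_string('preprocessing_name', None,
                           'Pre-processing to use; defaults to model_name.')
tf.app.flags.DEFINE_string('checkpoint_path', None, 'Checkpoint file or directory.')
tf.app.flags.DEFINE_string('eval_dir', '/tmp/eval', 'Directory for eval summaries.')
tf.app.flags.DEFINE_integer('batch_size', 100, 'Images per batch.')
tf.app.flags.DEFINE_integer('labels_offset', 0, 'Label offset for the network.')
tf.app.flags.DEFINE_integer('eval_image_size', None,
                            'Eval image size; defaults to the network default.')
tf.app.flags.DEFINE_integer('num_preprocessing_threads', 4, 'Batching threads.')
tf.app.flags.DEFINE_integer('max_num_batches', None,
                            'Cap on the number of batches to evaluate.')
tf.app.flags.DEFINE_float('moving_average_decay', None,
                          'If set, restore moving-average variables.')
tf.app.flags.DEFINE_integer('inter_op_parallelism_threads', 0, 'Inter-op threads.')
tf.app.flags.DEFINE_integer('intra_op_parallelism_threads', 0, 'Intra-op threads.')

FLAGS = tf.app.flags.FLAGS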
Example #7
def eval_model(model_name,
               dataset_name,
               dataset_dir,
               train_dir,
               batch_size=32,
               use_mask=False,
               num_groups=2,
               init_from_pre_trained=False,
               init_pointwise_from_pre_trained=False,
               weights_map=None,
               **kwargs):
  """ Evaluate the performance of a model. """

  with tf.Graph().as_default():
    tf.logging.set_verbosity(tf.logging.INFO)

    tf_global_step = tf.train.get_or_create_global_step()

    dataset = dataset_factory.get_dataset(
        dataset_name, 'test', dataset_dir)

    preprocessing_fn = preprocessing_factory.get_preprocessing(
        model_name, is_training=False)

    network_fn = network_factory.get_network_fn(
        model_name, dataset.num_classes, use_mask=use_mask)

    images, images_raw, labels = load_batch(dataset,
                                            preprocessing_fn,
                                            shuffle=False,
                                            batch_size=batch_size,
                                            is_training=False)

    logits, _ = network_fn(images, **kwargs)

    predictions = tf.argmax(logits, 1)

    # Evaluation
    names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
        'eval/accuracy': slim.metrics.streaming_accuracy(predictions, labels),
        'eval/recall_5': slim.metrics.streaming_sparse_recall_at_k(logits, labels, 5),
    })

    summary_op = create_eval_summary_op(names_to_values)

    num_evals = int(math.ceil(dataset.num_samples / float(batch_size)))
    variables_to_restore = slim.get_variables_to_restore()

    # We don't want to mess up with the train dir
    eval_dir = train_dir + '_eval'

    metric_values = slim.evaluation.evaluate_once(
        '',
        tf.train.latest_checkpoint(train_dir),
        eval_dir,
        num_evals=num_evals,
        eval_op=list(names_to_updates.values()),
        # Fetch the metric value tensors as the final op (not the update ops,
        # which would run one extra update when evaluated).
        final_op=list(names_to_values.values()),
        variables_to_restore=variables_to_restore)

    names_to_values = dict(zip(names_to_values.keys(), metric_values))
    for name in names_to_values:
      print('%s: %f' % (name, names_to_values[name]))

  return names_to_values['eval/accuracy'], names_to_values['eval/recall_5']
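
For illustration, a minimal call with placeholder names and paths; train_dir must already contain a checkpoint, because the function restores tf.train.latest_checkpoint(train_dir).

accuracy, recall_5 = eval_model(
    'mobilenet_v1', 'cifar10',
    dataset_dir='/path/to/cifar10',
    train_dir='/path/to/train_dir',
    batch_size=32)
print('accuracy=%.4f, recall@5=%.4f' % (accuracy, recall_5))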
Example #8
def train_model(model_name,
                dataset_name,
                dataset_dir,
                train_dir,
                batch_size=32,
                checkpoint_path=None,
                max_number_of_epochs=10,
                trainable_scopes=None,
                checkpoint_exclude_scopes=None,
                use_mask=False,
                weight_decay=4e-5,
                learning_rate=1e-3,
                num_groups=2,
                init_from_pre_trained=False,
                init_pointwise_from_pre_trained=False,
                weights_map=None,
                bipartite_connections_map=None,
                **kwargs):
  """ Train the given model.

  Returns:
      The final loss of the training process.
  """

  tf.logging.set_verbosity(tf.logging.INFO)

  layer_replacement = kwargs.get('layer_replacement')

  with tf.Graph().as_default():
    dataset = dataset_factory.get_dataset(dataset_name, 'train', dataset_dir)

    preprocessing_fn = preprocessing_factory.get_preprocessing(
        model_name, is_training=True)

    network_fn = network_factory.get_network_fn(
        model_name, dataset.num_classes,
        use_mask=use_mask, weight_decay=weight_decay, is_training=True)

    max_number_of_steps = int(math.ceil(max_number_of_epochs * dataset.num_samples
                                        / batch_size))

    tf.logging.info('Training on %s' % train_dir)
    tf.logging.info('Number of samples: %d' % dataset.num_samples)
    tf.logging.info('Max number of steps: %d' % max_number_of_steps)

    """
    Load data from the dataset and pre-process.
    """
    images, images_raw, labels = load_batch(
        dataset, preprocessing_fn, is_training=True)

    # create arg_scope
    logits, end_points = network_fn(images,
        bipartite_connections_map=bipartite_connections_map,
        **kwargs)

    # compute losses
    one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
    slim.losses.softmax_cross_entropy(logits, one_hot_labels)
    total_loss = slim.losses.get_total_loss()

    # configure learning rate
    global_step = slim.create_global_step()
    learning_rate = configure_learning_rate(learning_rate,
                                            dataset.num_samples,
                                            global_step)

    # create summary op
    summary_op = create_train_summary_op(end_points, learning_rate,
                                         total_loss, images_raw, images,
                                         model_name=model_name)

    """
    Configure optimizer and training.

    if we do fine-tuning, just set trainable_scopes.
    """
    variables_to_train = get_variables_to_train(trainable_scopes)
    optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate,
                                          decay=0.9,
                                          momentum=0.9,
                                          epsilon=1.0)

    # training operator
    train_op = slim.learning.create_train_op(total_loss, optimizer,
                                             variables_to_train=variables_to_train)

    init_fn = get_init_fn(checkpoint_path,
                          checkpoint_exclude_scopes,
                          layer_replacement,
                          model_name,
                          weights_map=weights_map,
                          bipartite_connections_map=bipartite_connections_map,
                          init_from_pre_trained=init_from_pre_trained,
                          init_pointwise_from_pre_trained=init_pointwise_from_pre_trained,
                          num_groups=num_groups)
    # final loss value
    final_loss = slim.learning.train(train_op,
                                     logdir=train_dir,
                                     init_fn=init_fn,
                                     number_of_steps=max_number_of_steps,
                                     log_every_n_steps=10,
                                     save_summaries_secs=60)

    print('Finished training. Final batch loss %f' % final_loss)

    return final_loss
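
A minimal training call, sketched with placeholder paths; fine-tuning from a pre-trained checkpoint would additionally pass checkpoint_path and checkpoint_exclude_scopes.

final_loss = train_model(
    'mobilenet_v1', 'cifar10',
    dataset_dir='/path/to/cifar10',
    train_dir='/tmp/mobilenet_cifar10_train',
    batch_size=32,
    max_number_of_epochs=10,
    learning_rate=1e-3)
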
def evaluate(model_name,
             dataset_name,
             dataset_dir,
             dataset_split_name,
             eval_dir,
             checkpoint_path,
             conv_scope,
             batch_size=32,
             in_channel=0,
             out_channel=0,
             is_training=False):
    """
  Evaluate a single conv_scope
  """

    with tf.Graph().as_default():
        g = tf.get_default_graph()

        dataset = dataset_factory.get_dataset(dataset_name, dataset_split_name,
                                              dataset_dir)
        num_batches = int(math.ceil(dataset.num_samples / float(batch_size)))

        preprocessing_fn = preprocessing_factory.get_preprocessing(
            model_name, is_training=is_training)

        network_fn = network_factory.get_network_fn(model_name,
                                                    dataset.num_classes,
                                                    use_mask=True,
                                                    is_training=is_training)

        images, _, labels = train_test_utils.load_batch(
            dataset,
            preprocessing_fn,
            is_training=is_training,
            batch_size=batch_size,
            shuffle=False)

        logits, end_points = network_fn(images)
        predictions = tf.argmax(logits, 1)

        mask_assign_op = get_mask_assign_op(conv_scope, in_channel,
                                            out_channel, g)

        one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
        loss = tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=logits, labels=one_hot_labels)

        # Evaluation
        names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
            'eval/accuracy':
            tf.metrics.accuracy(predictions, labels),
            'eval/recall_5':
            slim.metrics.streaming_sparse_recall_at_k(logits, labels, 5),
            'eval/mean_loss':
            tf.metrics.mean(loss),
        })

        mask_variables = [
            var for var in slim.get_model_variables() if 'mask' in var.op.name
        ]

        variables_to_restore = slim.get_variables_to_restore()
        variables_to_restore = [
            x for x in variables_to_restore if 'mask' not in x.op.name
        ]
        restorer = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            tf.summary.FileWriter(eval_dir, sess.graph)

            restorer.restore(sess, tf.train.latest_checkpoint(checkpoint_path))

            sess.run(tf.local_variables_initializer())
            sess.run(tf.variables_initializer(mask_variables))
            sess.run(mask_assign_op)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            for batch_id in range(num_batches):
                if batch_id != 0 and batch_id % 10 == 0:
                    tf.logging.info('Evaluated [%5d/%5d]' %
                                    (batch_id, num_batches))

                # run accuracy evaluation
                sess.run(list(names_to_updates.values()))

            metric_values = sess.run(list(names_to_values.values()))
            for metric, value in zip(names_to_values.keys(), metric_values):
                print('Metric %s has value: %f' % (metric, value))

            coord.request_stop()
            coord.join(threads)
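
Finally, a matching call sketch for evaluate, which applies the mask assignment for the given (in_channel, out_channel) pair in conv_scope before measuring accuracy. The scope name and paths are placeholders.

evaluate('mobilenet_v1', 'cifar10',
         dataset_dir='/path/to/cifar10',
         dataset_split_name='test',
         eval_dir='/tmp/mask_eval',
         checkpoint_path='/path/to/train_dir',
         conv_scope='MobilenetV1/Conv2d_1_depthwise',  # hypothetical scope name
         in_channel=0, out_channel=0)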