Example #1
  def eval_graph(self, dataset='test'):
    """Constructs classifier evaluation graph.

    Args:
      dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}.

    Returns:
      eval_ops: dict<metric name, tuple(value, update_op)>
      var_restore_dict: dict mapping variable restoration names to variables.
        Trainable variables will be mapped to their moving average names.
    """
    inputs = _inputs(dataset, pretrain=False, bidir=True)
    embedded = [self.layers['embedding'](inp.tokens) for inp in inputs]
    _, next_states, logits, _ = self.cl_loss_from_embedding(
        embedded, inputs=inputs, return_intermediates=True)
    f_inputs, _ = inputs

    eval_ops = {
        'accuracy':
            tf.contrib.metrics.streaming_accuracy(
                layers_lib.predictions(logits), f_inputs.labels,
                f_inputs.weights)
    }

    # Save states on accuracy update
    saves = [inp.save_state(state) for (inp, state) in zip(inputs, next_states)]
    with tf.control_dependencies(saves):
      acc, acc_update = eval_ops['accuracy']
      acc_update = tf.identity(acc_update)
      eval_ops['accuracy'] = (acc, acc_update)

    var_restore_dict = make_restore_average_vars_dict()
    return eval_ops, var_restore_dict
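
Note the tf.identity wrapper under tf.control_dependencies(saves): it ties the recurrent-state saves to the metric update, so every evaluation of the accuracy update op also persists next_states. A minimal sketch of how the returned values might be consumed (the driver below is illustrative; model, checkpoint_path, and num_eval_batches are assumed names, not part of the source):

eval_ops, var_restore_dict = model.eval_graph(dataset='test')

# Restore trainable variables from their moving-average shadow copies.
saver = tf.train.Saver(var_restore_dict)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # metric accumulators
    saver.restore(sess, checkpoint_path)
    tf.train.start_queue_runners(sess)

    acc, acc_update = eval_ops['accuracy']
    for _ in range(num_eval_batches):
        sess.run(acc_update)  # also runs the state-save ops
    print('test accuracy: {:.4f}'.format(sess.run(acc)))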
Example #2
    def eval_graph(self, dataset='test'):
        """Constructs classifier evaluation graph.

    Args:
      dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}.

    Returns:
      eval_ops: dict<metric name, tuple(value, update_op)>
      var_restore_dict: dict mapping variable restoration names to variables.
        Trainable variables will be mapped to their moving average names.
    """
        inputs = _inputs(dataset, pretrain=False)
        embedded = self.layers['embedding'](inputs.tokens)
        _, next_state, logits, _ = self.cl_loss_from_embedding(
            embedded, inputs=inputs, return_intermediates=True)

        if FLAGS.single_label:
            indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1],
                               1)
            labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
            weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
        else:
            labels = inputs.labels
            weights = inputs.weights
        eval_ops = {
            'accuracy':
            tf.contrib.metrics.streaming_accuracy(
                layers_lib.predictions(logits), labels, weights)
        }

        with tf.control_dependencies([inputs.save_state(next_state)]):
            acc, acc_update = eval_ops['accuracy']
            acc_update = tf.identity(acc_update)
            eval_ops['accuracy'] = (acc, acc_update)

        var_restore_dict = make_restore_average_vars_dict()
        return eval_ops, var_restore_dict
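
When FLAGS.single_label is set, only the label at the last real timestep of each sequence is scored: tf.stack builds (row, length - 1) index pairs and tf.gather_nd picks those positions out of the [batch, time] label and weight tensors. A toy illustration (values are made up; batch size 2 stands in for FLAGS.batch_size):

labels = tf.constant([[1, 1, 1, 0],
                      [2, 2, 0, 0]])              # [batch, time]
length = tf.constant([4, 2])                      # true sequence lengths
indices = tf.stack([tf.range(2), length - 1], 1)  # [[0, 3], [1, 1]]
last = tf.gather_nd(labels, indices)              # [0, 2]: final label per row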
Example #3

import sys

import numpy as np
import tensorflow as tf
from cv_bridge import CvBridge

sys.path.append(path + '/segmentation/datasets/')
sys.path.append(path + '/segmentation/models')
sys.path.append(path + '/segmentation/notebooks')
import layers
import fcn8s
import util
import cityscapes
from colorize import colorize
from class_mean_iou import class_mean_iou

image_shape = [1, 256, 512, 3]
sess = tf.InteractiveSession()
image_op = tf.placeholder(tf.float32, shape=image_shape)

logits_op = fcn8s.inference(image_op)
predictions_op = layers.predictions(logits_op)
predictions_op_prob = tf.nn.softmax(logits_op)

init_op = tf.global_variables_initializer()
sess.run(init_op)

bridge = CvBridge()

saver = tf.train.Saver()
saver.restore(
    sess, path + '/tf_models/fcn8s_augment_finetune/' +
    'fcn8s_augment.checkpoint-30')
# prediction_publisher = rospy.Publisher('/prediction_color', Image, queue_size=1)


def predice_image(img_msg):
    # Assumed reconstruction: the callback body is truncated in this snippet.
    # Convert the incoming ROS image and run the network on it; this assumes
    # the image already matches image_shape (1 x 256 x 512 x 3 after reshape).
    image = bridge.imgmsg_to_cv2(img_msg, desired_encoding='rgb8')
    image = np.asarray(image, dtype=np.float32).reshape(image_shape)
    return sess.run(predictions_op, feed_dict={image_op: image})
Example #4

    def run(self, run_type):

        is_training = run_type == 'train'

        self.log('{} epoch: {}'.format(run_type, self.epoch))

        image_filenames, label_filenames = self.dataset.load_filenames(
            run_type)

        global_step = tf.Variable(1, name='global_step', trainable=False)

        images, labels = inputs.load_batches(image_filenames,
                                             label_filenames,
                                             shape=self.dataset.SHAPE,
                                             batch_size=self.batch_size,
                                             resize_shape=self.dataset.SHAPE,
                                             crop_shape=(256, 512),
                                             augment=True)

        with tf.name_scope('labels'):
            color_labels = util.colorize(labels, self.dataset.augmented_labels)
            labels = tf.cast(labels, tf.int32)
            ignore_mask = util.get_ignore_mask(labels,
                                               self.dataset.augmented_labels)
            tf.summary.image('label', color_labels, 1)
            tf.summary.image('weights', tf.cast(ignore_mask * 255, tf.uint8),
                             1)

        tf.summary.image('image', images, 1)

        logits = self.model.inference(images,
                                      num_classes=self.num_classes,
                                      is_training=is_training)

        with tf.name_scope('outputs'):
            predictions = layers.predictions(logits)
            color_predictions = util.colorize(predictions,
                                              self.dataset.augmented_labels)
            tf.summary.image('prediction', color_predictions, 1)

        # Add some metrics
        with tf.name_scope('metrics'):
            accuracy_op, accuracy_update_op = tf.contrib.metrics.streaming_accuracy(
                predictions, labels, weights=ignore_mask)
            mean_iou_op, mean_iou_update_op = tf.contrib.metrics.streaming_mean_iou(
                predictions,
                labels,
                num_classes=self.num_classes,
                weights=ignore_mask)

        if is_training:
            loss_op = layers.loss(logits,
                                  labels,
                                  mask=ignore_mask,
                                  weight_decay=self.weight_decay)
            train_op = layers.optimize(loss_op,
                                       learning_rate=self.learning_rate,
                                       global_step=global_step)

        # Merge all summaries into summary op
        summary_op = tf.summary.merge_all()

        # Create saver for checkpointing and restoring
        saver = tf.train.Saver()

        # Initialize session and local variables (for input pipeline and metrics)
        sess = tf.Session()
        sess.run(tf.local_variables_initializer())

        if self.checkpoint is None:
            sess.run(tf.global_variables_initializer())
            self.log('{} {} from scratch.'.format(run_type, self.model_name))
        else:
            start_time = time.time()
            saver.restore(sess, self.checkpoint)
            duration = time.time() - start_time
            self.log('{} from previous checkpoint {:s} ({:.2f}s)'.format(
                run_type, self.checkpoint, duration))

        # Create summary writer
        summary_path = os.path.join(self.model_path, run_type)
        step_writer = tf.summary.FileWriter(summary_path, sess.graph)

        # Start filling the input queues
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        num_examples = (self.dataset.NUM_TRAIN_EXAMPLES if is_training else
                        self.dataset.NUM_VALID_EXAMPLES)

        for local_step in range(num_examples // self.batch_size):

            # Take time!
            start_time = time.time()

            if is_training:
                _, loss, accuracy, mean_iou, summary = sess.run([
                    train_op, loss_op, accuracy_update_op, mean_iou_update_op,
                    summary_op
                ])
                duration = time.time() - start_time
                self.log('Epoch: {} train step: {} loss: {:.4f} accuracy: {:.2f}% duration: {:.2f}s' \
                    .format(self.epoch, local_step + 1, loss, accuracy * 100, duration))
            else:
                accuracy, mean_iou, summary = sess.run(
                    [accuracy_update_op, mean_iou_update_op, summary_op])
                duration = time.time() - start_time
                self.log('Epoch: {} eval step: {} accuracy: {:.2f}% duration: {:.2f}s'\
                    .format(self.epoch, local_step + 1, accuracy * 100, duration))

            # Write the step summary
            step_writer.add_summary(summary,
                                    global_step=global_step.eval(session=sess))

        # Write additional epoch summaries
        epoch_writer = tf.summary.FileWriter(summary_path)
        epoch_summaries = []
        if is_training:
            epoch_summaries.append(
                tf.summary.scalar('params/weight_decay', self.weight_decay))
            epoch_summaries.append(
                tf.summary.scalar('params/learning_rate', self.learning_rate))
        epoch_summaries.append(
            tf.summary.scalar('params/batch_size', self.batch_size))
        epoch_summaries.append(
            tf.summary.scalar('metrics/accuracy', accuracy_op))
        epoch_summaries.append(
            tf.summary.scalar('metrics/mean_iou', mean_iou_op))
        epoch_summary_op = tf.summary.merge(epoch_summaries)
        summary = sess.run(epoch_summary_op)
        epoch_writer.add_summary(summary, global_step=self.epoch)

        # Save after each epoch when training
        if is_training:
            checkpoint_path = os.path.join(self.model_path,
                                           self.model_name + '.checkpoint')
            start_time = time.time()
            self.checkpoint = saver.save(sess,
                                         checkpoint_path,
                                         global_step=self.epoch)
            duration = time.time() - start_time
            self.log('Model saved as {:s} ({:.2f}s)'.format(
                self.checkpoint, duration))

        # Stop queue runners and reset the graph
        coord.request_stop()
        coord.join(threads)
        sess.close()
        tf.reset_default_graph()
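
Because run() builds its graph from scratch and calls tf.reset_default_graph() before returning, a driver can alternate phases simply by calling it once per epoch. A hypothetical outer loop (Runner and its constructor arguments are stand-ins for whatever class owns run(); num_epochs is assumed):

runner = Runner(dataset=cityscapes, model=fcn8s, batch_size=4)
for epoch in range(1, num_epochs + 1):
    runner.epoch = epoch
    runner.run('train')  # trains one epoch and writes a checkpoint
    runner.run('valid')  # evaluates on the validation split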