Example #3
    def setup_record(self):
        """
        Setup record reading.
        """
        filename_queue = tf.train.string_input_producer([self.record],
                                                        num_epochs=10)

        self.image, self.annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
            filename_queue)
        self.resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(
            self.image, self.annotation, (self.size, self.size))
        self.resized_annotation = tf.squeeze(resized_annotation)
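A usage note, not part of the original snippet: string_input_producer with num_epochs keeps its epoch counter in a local variable, so nothing flows out of this pipeline until both initializers have run and the queue runners are started. A minimal driver sketch, assuming setup_record was called on an instance named model (a hypothetical name):

import tensorflow as tf

with tf.Session() as sess:
    # The epoch counter behind num_epochs is a *local* variable,
    # so both initializers are needed before the queues fill.
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        image, annotation = sess.run([model.resized_image,
                                      model.resized_annotation])
    finally:
        coord.request_stop()
        coord.join(threads)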
Example #4
    def setup_val(self, tfname):
        """
        Setups the model in case we need to validate.
        """
        self.restore = glob(os.path.join(self.checkpoint8, "FCN__*",
                                         "*.data*"))[0].split(".data")[0]

        filename_queue = tf.train.string_input_producer([tfname],
                                                        num_epochs=10)
        self.image_queue, self.annotation_queue = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
            filename_queue)
        # Default to the tensor just read from the queue, mirroring
        # the annotation placeholder below.
        self.image = tf.placeholder_with_default(self.image_queue,
                                                 shape=[None, None, 3])
        self.annotation = tf.placeholder_with_default(self.annotation_queue,
                                                      shape=[None, None, 1])
        self.resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(
            self.image, self.annotation, (self.size, self.size))
        self.resized_annotation = tf.squeeze(resized_annotation)
        image_batch_tensor = tf.expand_dims(self.image, axis=0)
        annotation_batch_tensor = tf.expand_dims(self.annotation, axis=0)
        # Be careful: after adaptation, network returns final labels
        # and not logits
        FCN_8s_bis = adapt_network_for_any_size_input(FCN_8s, 32)
        self.pred, fcn_16s_variables_mapping = FCN_8s_bis(
            image_batch_tensor=image_batch_tensor,
            number_of_classes=self.num_labels,
            is_training=False)
        # Walk back up the graph from the label prediction to the tensor
        # three ops upstream (the upsampled logits feeding the argmax).
        self.prob = self.pred.op.inputs[0].op.inputs[0].op.inputs[0]
        initializer = tf.local_variables_initializer()
        self.saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(initializer)
            self.saver.restore(sess, self.restore)
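The tensor recovered through the op.inputs walk above is the pre-argmax input, i.e. raw class scores rather than probabilities. A one-line follow-up, assuming that walk really lands on the upsampled logits:

# Per-pixel class probabilities, shape [1, height, width, num_labels].
self.softmax = tf.nn.softmax(self.prob)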
Example #5
import tensorflow as tf
from matplotlib import pyplot as plt
# Assumed location of FCN_8s within the same repository.
from tf_image_segmentation.models.fcn_8s import FCN_8s
from tf_image_segmentation.utils.pascal_voc import pascal_segmentation_lut
from tf_image_segmentation.utils.tf_records import read_tfrecord_and_decode_into_image_annotation_pair_tensors
from tf_image_segmentation.utils.inference import adapt_network_for_any_size_input
from tf_image_segmentation.utils.visualization import visualize_segmentation_adaptive

pascal_voc_lut = pascal_segmentation_lut()

tfrecord_filename = 'pascal_augmented_val.tfrecords'

number_of_classes = 21

filename_queue = tf.train.string_input_producer([tfrecord_filename],
                                                num_epochs=1)

image, annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
    filename_queue)

# Fake batch for image and annotation by adding
# leading empty axis.
image_batch_tensor = tf.expand_dims(image, axis=0)
annotation_batch_tensor = tf.expand_dims(annotation, axis=0)

# Be careful: after adaptation, network returns final labels
# and not logits
FCN_8s = adapt_network_for_any_size_input(FCN_8s, 32)

pred, fcn_16s_variables_mapping = FCN_8s(image_batch_tensor=image_batch_tensor,
                                         number_of_classes=number_of_classes,
                                         is_training=False)

# Take away the masked out values from evaluation
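The example is cut off here; the masking the final comment refers to is commonly done by dropping pixels labelled 255, which PASCAL VOC uses for ambiguous regions. A minimal sketch of that step under this convention:

# Keep only pixels whose annotation is a real class label
# (255 marks ambiguous regions in PASCAL VOC).
pred_flat = tf.reshape(pred, [-1])
annotation_flat = tf.reshape(annotation_batch_tensor, [-1])
valid_mask = tf.not_equal(annotation_flat, 255)
pred_valid = tf.boolean_mask(pred_flat, valid_mask)
annotation_valid = tf.boolean_mask(annotation_flat, valid_mask)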
Example #6
import tensorflow as tf
# The record/augmentation helpers used below are assumed to be
# project-local variants: their six-tensor signatures differ from the
# upstream tf_image_segmentation utilities of the same name.

def read_photo_annotation_depth(batch_size=1):
    def convert2float(image):
        """Transform an int image in [0, 255] into a float tensor in [-1., 1.]."""
        # convert_image_dtype already rescales integers to [0., 1.];
        # map that range to [-1., 1.].
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        return image * 2.0 - 1.0

    image_train_size = [256, 256]
    number_of_classes = 21

    # tfrecord_filename = '/home/yaxing/DATA/scene_net_augmented_train_200.tfrecords'
    tfrecord_filename = '/home/yaxing/data/scene5M/scene_net_augmented_train_16k.tfrecords'

    pascal_voc_lut = {
        0: 'Unknown',
        1: 'Bed',
        2: 'Books',
        3: 'Ceiling',
        4: 'Chair',
        5: 'Floor',
        6: 'Furniture',
        7: 'Objects',
        8: 'Picture',
        9: 'Sofa',
        10: 'Table',
        11: 'TV',
        12: 'Wall',
        13: 'Window'
    }
    class_labels = list(pascal_voc_lut.keys())

    filename_queue = tf.train.string_input_producer([tfrecord_filename],
                                                    num_epochs=50000)

    image1, image2, annotation1, depth2, annotation2, depth1 = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
        filename_queue)

    # Various data augmentation stages
    image1, annotation1, depth2, image2, annotation2, depth1 = flip_randomly_left_right_image_with_annotation(
        image1, annotation1, depth2, image2, annotation2, depth1)

    # image = distort_randomly_image_color(image)

    resized_image1, resized_annotation1, resized_depth2, resized_image2, resized_annotation2, resized_depth1 = scale_randomly_image_with_annotation_with_fixed_size_output(
        image1,
        annotation1,
        depth2,
        image_train_size,
        img_tensor2=image2,
        annotation_tensor2=annotation2,
        depth_tensor1=depth1)

    resized_annotation1 = tf.squeeze(resized_annotation1)
    resized_annotation2 = tf.squeeze(resized_annotation2)
    image_batch1, annotation_batch1, depth_batch2, image_batch2, annotation_batch2, depth_batch1 = tf.train.shuffle_batch(
        [
            resized_image1, resized_annotation1, resized_depth2,
            resized_image2, resized_annotation2, resized_depth1
        ],
        batch_size=batch_size,
        capacity=3000,
        num_threads=2,
        min_after_dequeue=1000)
    return (tf.to_float(image_batch1) / 127.5 - 1., annotation_batch1,
            (tf.exp(depth_batch1) - 1.) / 1000.,
            tf.to_float(image_batch2) / 127.5 - 1., annotation_batch2,
            (tf.exp(depth_batch2) - 1.) / 1000.)
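The decoding in the return statement implies the records store depth as log(1 + depth_in_millimetres). The inverse mapping below is inferred from that expression alone and is an assumption, not something the snippet confirms:

def encode_depth(depth_metres):
    # Inverse of the (tf.exp(d) - 1.) / 1000. decoding above.
    return tf.log(1.0 + depth_metres * 1000.0)

def decode_depth(encoded_depth):
    # Decoding as applied in read_photo_annotation_depth.
    return (tf.exp(encoded_depth) - 1.0) / 1000.0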
Example #7


    restoremodel = options.checkpoint
    lr = restoremodel.split('__')[1]
    slim = tf.contrib.slim


    tfrecord_filename = options.tf_records

    number_of_classes = options.labels

    filename_queue = tf.train.string_input_producer(
        [tfrecord_filename], num_epochs=1)

    image, annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(filename_queue)

    # Fake batch for image and annotation by adding
    # leading empty axis.
    image_batch_tensor = tf.expand_dims(image, axis=0)
    annotation_batch_tensor = tf.expand_dims(annotation, axis=0)
    # Be careful: after adaptation, network returns final labels
    # and not logits
    FCN_8s = adapt_network_for_any_size_input(FCN_8s, 32)


    pred, fcn_16s_variables_mapping = FCN_8s(image_batch_tensor=image_batch_tensor,
                                             number_of_classes=number_of_classes,
                                             is_training=False)

    # Take away the masked out values from evaluation
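This snippet also ends at the masking comment. One way to finish the evaluation in TF1 is tf.metrics.mean_iou with per-pixel weights that zero out the ignored label; the sketch below assumes the PASCAL convention of 255 for ambiguous pixels and remaps them to class 0 only because the metric indexes its confusion matrix by label value:

# Zero weight excludes ambiguous pixels from the score.
weights = tf.to_float(tf.not_equal(annotation_batch_tensor, 255))
safe_labels = tf.where(tf.equal(annotation_batch_tensor, 255),
                       tf.zeros_like(annotation_batch_tensor),
                       annotation_batch_tensor)
miou, update_op = tf.metrics.mean_iou(labels=safe_labels,
                                      predictions=pred,
                                      num_classes=number_of_classes,
                                      weights=weights)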