Example #1
    def setup_record(self):
        filename_queue = tf.train.string_input_producer([self.record],
                                                        num_epochs=10)

        self.image, self.annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
            filename_queue)
        self.resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(
            self.image, self.annotation, (self.size, self.size))
        self.resized_annotation = tf.squeeze(resized_annotation)
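These tensors are queue-fed, so nothing flows until queue runners are started. A minimal consumption sketch, assuming "model" is a hypothetical object on which setup_record() has been called:

import tensorflow as tf

with tf.Session() as sess:
    # string_input_producer(num_epochs=...) creates a local variable,
    # so local variables must be initialized too.
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        img, ann = sess.run([model.resized_image, model.resized_annotation])
    finally:
        coord.request_stop()
        coord.join(threads)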
Example #2
    def setup_val(self, tfname):
        self.restore = glob(os.path.join(self.checkpoint8, "FCN__*",
                                         "*.data*"))[0].split(".data")[0]

        filename_queue = tf.train.string_input_producer([tfname],
                                                        num_epochs=10)
        self.image_queue, self.annotation_queue = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
            filename_queue)
        self.image = tf.placeholder_with_default(self.image_queue,
                                                 shape=[None, None, 3])
        self.annotation = tf.placeholder_with_default(self.annotation_queue,
                                                      shape=[None, None, 1])
        self.resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(
            self.image, self.annotation, (self.size, self.size))
        self.resized_annotation = tf.squeeze(resized_annotation)
        image_batch_tensor = tf.expand_dims(self.image, axis=0)
        annotation_batch_tensor = tf.expand_dims(self.annotation, axis=0)
        # Be careful: after adaptation, network returns final labels
        # and not logits
        FCN_8s_bis = adapt_network_for_any_size_input(FCN_8s, 32)
        self.pred, fcn_16s_variables_mapping = FCN_8s_bis(
            image_batch_tensor=image_batch_tensor,
            number_of_classes=self.num_labels,
            is_training=False)
        # Step three ops back through the graph to recover the tensor that
        # feeds the final label computation.
        self.prob = self.pred.op.inputs[0].op.inputs[0].op.inputs[0]
        initializer = tf.local_variables_initializer()
        self.saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(initializer)
            self.saver.restore(sess, self.restore)
Example #3
    def setup_record(self):
        """
        Set up record reading.
        """
        filename_queue = tf.train.string_input_producer([self.record],
                                                        num_epochs=10)

        self.image, self.annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
            filename_queue)
        self.resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(
            self.image, self.annotation, (self.size, self.size))
        self.resized_annotation = tf.squeeze(resized_annotation)
Example #4
    def setup_val(self, tfname):
        """
        Sets up the model in case we need to validate.
        """
        self.restore = glob(os.path.join(self.checkpoint8, "FCN__*",
                                         "*.data*"))[0].split(".data")[0]

        filename_queue = tf.train.string_input_producer([tfname],
                                                        num_epochs=10)
        self.image_queue, self.annotation_queue = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
            filename_queue)
        self.image = tf.placeholder_with_default(self.image_queue,
                                                 shape=[None, None, 3])
        self.annotation = tf.placeholder_with_default(self.annotation_queue,
                                                      shape=[None, None, 1])
        self.resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(
            self.image, self.annotation, (self.size, self.size))
        self.resized_annotation = tf.squeeze(resized_annotation)
        image_batch_tensor = tf.expand_dims(self.image, axis=0)
        annotation_batch_tensor = tf.expand_dims(self.annotation, axis=0)
        # Be careful: after adaptation, network returns final labels
        # and not logits
        FCN_8s_bis = adapt_network_for_any_size_input(FCN_8s, 32)
        self.pred, fcn_16s_variables_mapping = FCN_8s_bis(
            image_batch_tensor=image_batch_tensor,
            number_of_classes=self.num_labels,
            is_training=False)
        # Step three ops back through the graph to recover the tensor that
        # feeds the final label computation (see the comment above: the
        # adapted network returns labels, not logits).
        self.prob = self.pred.op.inputs[0].op.inputs[0].op.inputs[0]
        initializer = tf.local_variables_initializer()
        self.saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(initializer)
            self.saver.restore(sess, self.restore)
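Note that setup_val restores the weights inside a short-lived session and never starts queue runners, so evaluating self.pred as-is would block on the input queue. A hedged sketch of the missing evaluation loop ("model" and "num_val_batches" are illustrative names, not from the source):

import tensorflow as tf

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    model.saver.restore(sess, model.restore)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for _ in range(num_val_batches):
            # pred holds the final labels; prob is the tensor feeding them.
            labels, probs = sess.run([model.pred, model.prob])
    finally:
        coord.request_stop()
        coord.join(threads)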
Example #5
pascal_voc_lut = pascal_segmentation_lut()
class_labels = [0, 1, 255] #pascal_voc_lut.keys()
samples = 79


filename_queue = tf.train.string_input_producer(
    [tfrecord_filename], num_epochs=10)

image, annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(filename_queue)

# Various data augmentation stages
image, annotation = flip_randomly_left_right_image_with_annotation(image, annotation)

# image = distort_randomly_image_color(image)

resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(image, annotation, image_train_size)


resized_annotation = tf.squeeze(resized_annotation)

image_batch, annotation_batch = tf.train.shuffle_batch(
    [resized_image, resized_annotation],
    batch_size=1,
    capacity=samples,
    num_threads=2,
    min_after_dequeue=70)

upsampled_logits_batch, vgg_16_variables_mapping = FCN_32s(image_batch_tensor=image_batch,
                                                           number_of_classes=number_of_classes,
                                                           is_training=True)

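The snippet stops at the logits and does not show a training objective. A sketch of one common way to finish it, masking the 255 (void) label before a sparse cross-entropy loss; the loss, optimizer, learning rate, and step count are assumptions, not taken from the source:

labels = tf.cast(annotation_batch, tf.int32)
# Drop pixels carrying the 255 ambiguous/void label before the loss.
valid_mask = tf.not_equal(labels, 255)
valid_labels = tf.boolean_mask(labels, valid_mask)
valid_logits = tf.boolean_mask(upsampled_logits_batch, valid_mask)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=valid_labels, logits=valid_logits)
loss = tf.reduce_mean(cross_entropy)
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in range(10000):  # illustrative step count
            _, loss_value = sess.run([train_step, loss])
    finally:
        coord.request_stop()
        coord.join(threads)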
Example #6
def read_photo_annotation_depth(batch_size=1):
    def convert2float(image):
        """Transform an int image ([0, 255]) into a float tensor ([-1., 1.])."""
        # convert_image_dtype already rescales integer inputs to [0, 1],
        # so map [0, 1] onto [-1, 1] here.
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        return image * 2.0 - 1.0

    image_train_size = [256, 256]
    number_of_classes = 21

    #	tfrecord_filename = '/home/yaxing/DATA/scene_net_augmented_train_200.tfrecords'
    tfrecord_filename = '/home/yaxing/data/scene5M/scene_net_augmented_train_16k.tfrecords'

    # NB: despite the variable name, these are indoor-scene (SceneNet-style)
    # labels rather than PASCAL VOC classes.
    pascal_voc_lut = {
        0: 'Unknown',
        1: 'Bed',
        2: 'Books',
        3: 'Ceiling',
        4: 'Chair',
        5: 'Floor',
        6: 'Furniture',
        7: 'Objects',
        8: 'Picture',
        9: 'Sofa',
        10: 'Table',
        11: 'TV',
        12: 'Wall',
        13: 'Window'
    }
    class_labels = pascal_voc_lut.keys()

    filename_queue = tf.train.string_input_producer([tfrecord_filename],
                                                    num_epochs=50000)

    image1, image2, annotation1, depth2, annotation2, depth1 = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
        filename_queue)

    # Various data augmentation stages
    image1, annotation1, depth2, image2, annotation2, depth1 = flip_randomly_left_right_image_with_annotation(
        image1, annotation1, depth2, image2, annotation2, depth1)

    # image = distort_randomly_image_color(image)

    resized_image1, resized_annotation1, resized_depth2, resized_image2, resized_annotation2, resized_depth1 = scale_randomly_image_with_annotation_with_fixed_size_output(
        image1,
        annotation1,
        depth2,
        image_train_size,
        img_tensor2=image2,
        annotation_tensor2=annotation2,
        depth_tensor1=depth1)

    resized_annotation1 = tf.squeeze(resized_annotation1)
    resized_annotation2 = tf.squeeze(resized_annotation2)
    image_batch1, annotation_batch1, depth_batch2, image_batch2, annotation_batch2, depth_batch1 = tf.train.shuffle_batch(
        [
            resized_image1, resized_annotation1, resized_depth2,
            resized_image2, resized_annotation2, resized_depth1
        ],
        batch_size=batch_size,
        capacity=3000,
        num_threads=2,
        min_after_dequeue=1000)
    # Rescale the images to [-1, 1] and map the depths back from log space.
    image_batch1 = tf.to_float(image_batch1) / 127.5 - 1.
    image_batch2 = tf.to_float(image_batch2) / 127.5 - 1.
    depth_batch1 = (tf.exp(depth_batch1) - 1.) / 1000.
    depth_batch2 = (tf.exp(depth_batch2) - 1.) / 1000.
    return (image_batch1, annotation_batch1, depth_batch1,
            image_batch2, annotation_batch2, depth_batch2)
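A minimal usage sketch for the reader above (the batch size and the fetched subset are illustrative):

image1, annotation1, depth1, image2, annotation2, depth2 = \
    read_photo_annotation_depth(batch_size=4)

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        # Images arrive scaled to [-1, 1]; depths are decoded from log space.
        im1, an1, d1 = sess.run([image1, annotation1, depth1])
    finally:
        coord.request_stop()
        coord.join(threads)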
Example #7
    fcn_8s_checkpoint_path = glob(options.checkpoint + "/*.data*")[0].split(".data")[0] 

    filename_queue = tf.train.string_input_producer(
        [tfrecord_filename], num_epochs=10)

    image, annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(filename_queue)
    # Remap the 255 (ambiguous/void) label to class 1 so that every pixel
    # carries a valid class index.
    condition = tf.equal(annotation, 255)
    case_true = tf.ones([options.size, options.size, 1], dtype=tf.uint8)
    case_false = annotation
    annotation = tf.where(condition, case_true, case_false)
    # Various data augmentation stages
    #image, annotation = flip_randomly_left_right_image_with_annotation(image, annotation)

    # image = distort_randomly_image_color(image)

    resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(image, annotation, image_train_size)


    resized_annotation = tf.squeeze(resized_annotation)

    image_batch, annotation_batch = tf.train.shuffle_batch(
        [resized_image, resized_annotation],
        batch_size=1,
        capacity=3000,
        num_threads=2,
        min_after_dequeue=1000)

    upsampled_logits_batch, fcn_8s_variables_mapping = FCN_8s(image_batch_tensor=image_batch,
                                                               number_of_classes=number_of_classes,
                                                               is_training=True)
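The checkpoint path extracted at the top of this example is typically consumed together with the returned variables mapping; a sketch of the usual TF1 restore pattern (not code from the source):

saver = tf.train.Saver(var_list=fcn_8s_variables_mapping)

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    saver.restore(sess, fcn_8s_checkpoint_path)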