Example #1
import tensorflow as tf

from tf_image_segmentation.models.fcn_8s import FCN_8s
from tf_image_segmentation.utils.tf_records import read_tfrecord_and_decode_into_image_annotation_pair_tensors
from tf_image_segmentation.utils.inference import adapt_network_for_any_size_input

slim = tf.contrib.slim

number_of_classes = 21  # PASCAL VOC: 20 object classes plus background

# Reconstructed from the truncated original; the TFRecord file name is a
# hypothetical placeholder.
tfrecord_filename = 'dataset_val.tfrecords'
filename_queue = tf.train.string_input_producer([tfrecord_filename],
                                                num_epochs=1)

image, annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
    filename_queue)

# Fake a batch for the image and annotation by adding a
# leading empty axis.
image_batch_tensor = tf.expand_dims(image, axis=0)
annotation_batch_tensor = tf.expand_dims(annotation, axis=0)

# Be careful: after adaptation the network returns final labels,
# not logits.
FCN_8s = adapt_network_for_any_size_input(FCN_8s, 32)

pred, fcn_16s_variables_mapping = FCN_8s(image_batch_tensor=image_batch_tensor,
                                         number_of_classes=number_of_classes,
                                         is_training=False)

# Exclude the masked-out pixels (255 = PASCAL VOC void label) from evaluation.
weights = tf.to_float(tf.not_equal(annotation_batch_tensor, 255))

# Define the accuracy metric: Mean Intersection Over Union
miou, update_op = slim.metrics.streaming_mean_iou(
    predictions=pred,
    labels=annotation_batch_tensor,
    num_classes=number_of_classes,
    weights=weights)

# The op for initializing the variables.
initializer = tf.local_variables_initializer()
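
For reference, a minimal sketch of driving this evaluation graph in a session; the checkpoint path is borrowed from Example #3 below, and the sample count is a placeholder assumption, not part of the original example.

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(initializer)
    saver.restore(sess, './fcn_8s_checkpoint/model_fcn8s_final.ckpt')

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    num_samples = 100  # hypothetical: number of records in the TFRecord file
    for _ in range(num_samples):
        sess.run(update_op)  # accumulates the running confusion matrix

    coord.request_stop()
    coord.join(threads)

    print('Mean IoU:', sess.run(miou))
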
Example #2
# This fragment assumes `image` and `annotation` tensors (for example, from
# the TFRecord reader in Example #1), plus `image_train_size`, `class_labels`
# and `number_of_classes` defined earlier in the original script.
# image = distort_randomly_image_color(image)

resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(
    image, annotation, image_train_size)

resized_annotation = tf.squeeze(resized_annotation)

image_batch, annotation_batch = tf.train.shuffle_batch(
    [resized_image, resized_annotation],
    batch_size=1,
    capacity=3000,
    num_threads=2,
    min_after_dequeue=1000)

upsampled_logits_batch, fcn_16s_variables_mapping = FCN_8s(
    image_batch_tensor=image_batch,
    number_of_classes=number_of_classes,
    is_training=True)

valid_labels_batch_tensor, valid_logits_batch_tensor = get_valid_logits_and_labels(
    annotation_batch_tensor=annotation_batch,
    logits_batch_tensor=upsampled_logits_batch,
    class_labels=class_labels)

cross_entropies = tf.nn.softmax_cross_entropy_with_logits(
    logits=valid_logits_batch_tensor, labels=valid_labels_batch_tensor)

# cross_entropy_sum = tf.reduce_sum(cross_entropies)

# Despite its name, this is the mean cross-entropy over the valid pixels.
cross_entropy_sum = tf.reduce_mean(cross_entropies)

pred = tf.argmax(upsampled_logits_batch, axis=3)
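
The fragment stops at the loss and prediction ops; below is a hedged sketch of attaching an optimizer and running a few steps, mirroring the Adam setup that Example #5 uses. The learning rate and step count are assumptions.

with tf.variable_scope("adam_vars"):
    train_step = tf.train.AdamOptimizer(
        learning_rate=1e-4).minimize(cross_entropy_sum)

init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())

with tf.Session() as sess:
    sess.run(init_op)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    for step in range(10):  # hypothetical number of iterations
        loss, _ = sess.run([cross_entropy_sum, train_step])
        print('step %d: loss %.4f' % (step, loss))

    coord.request_stop()
    coord.join(threads)
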
Example #3

import os
import sys
import time

import numpy as np
import tensorflow as tf
from skimage import io


def main(argv):

    sys.path.append("./tf-image-segmentation/")
    sys.path.append("./models/slim/")

    os.environ["CUDA_VISIBLE_DEVICES"] = '1'

    import logging

    logging.basicConfig(filename='log_examp.log', level=logging.INFO)
    logging.debug('This message should go to the log file')
    logging.info('So should this')
    logging.warning('And this, too')

    from tf_image_segmentation.models.fcn_8s import FCN_8s
    from tf_image_segmentation.utils.inference import adapt_network_for_any_size_input

    number_of_classes = 21

    image_filename = argv[1]

    image_filename_placeholder = tf.placeholder(tf.string)

    feed_dict_to_use = {image_filename_placeholder: image_filename}

    image_tensor = tf.read_file(image_filename_placeholder)

    image_tensor = tf.image.decode_jpeg(image_tensor, channels=3)

    image_batch_tensor = tf.expand_dims(image_tensor, axis=0)

    FCN_8s = adapt_network_for_any_size_input(FCN_8s, 32)

    pred, fcn_16s_variables_mapping = FCN_8s(
        image_batch_tensor=image_batch_tensor,
        number_of_classes=number_of_classes,
        is_training=False)

    initializer = tf.local_variables_initializer()

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(initializer)

        saver.restore(sess, './fcn_8s_checkpoint/model_fcn8s_final.ckpt')

        image_np, pred_np = sess.run([image_tensor, pred],
                                     feed_dict=feed_dict_to_use)

        # io.imshow(image_np)
        # io.show()
        #
        # io.imshow(pred_np.squeeze())
        # io.show()

        import skimage.morphology

        imageSegmentationSign = int(argv[2])

        prediction_mask = (pred_np.squeeze() == imageSegmentationSign)

        cropped_object = image_np * np.dstack((prediction_mask, ) * 3)

        square = skimage.morphology.square(5)

        temp = skimage.morphology.binary_erosion(prediction_mask, square)

        # Pixels that belong to the mask but not to its erosion form the
        # object's contour.
        negative_mask = np.logical_not(temp)

        eroding_contour = negative_mask * prediction_mask

        eroding_contour_img = np.dstack((eroding_contour, ) * 3)

        cropped_object[eroding_contour_img] = 248

        png_transparency_mask = np.uint8(prediction_mask * 255)

        image_shape = cropped_object.shape

        png_array = np.zeros(shape=[image_shape[0], image_shape[1], 4],
                             dtype=np.uint8)

        png_array[:, :, :3] = cropped_object

        png_array[:, :, 3] = png_transparency_mask

        # io.imshow(cropped_object)

        # Timestamp string (unused below; the output name comes from argv[3]).
        nowTime = time.strftime('%Y_%m_%d_%H%M%S', time.localtime(time.time()))

        io.imsave('image_' + argv[3], png_array)
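
The original example does not show its entry point; a hypothetical one is sketched below, together with the argument convention the function body implies.

# Hypothetical entry point, not part of the original example.
# Usage: python fcn8s_crop_object.py input.jpg 15 output.png
#   argv[1]: path to the input JPEG
#   argv[2]: PASCAL VOC class index to keep (e.g. 15 = person)
#   argv[3]: output name suffix (the file is saved as 'image_' + argv[3])
if __name__ == '__main__':
    main(sys.argv)
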
Example #4
import tensorflow as tf
from tensorflow.python.framework import graph_util

slim = tf.contrib.slim

from tf_image_segmentation.models.fcn_8s import FCN_8s

from tf_image_segmentation.utils.pascal_voc import pascal_segmentation_lut
from tf_image_segmentation.utils.tf_records import read_tfrecord_and_decode_into_image_annotation_pair_tensors
from tf_image_segmentation.utils.inference import adapt_network_for_any_size_input
# from tf_image_segmentation.utils.visualization import visualize_segmentation_adaptive

# FLAGS (ckpt_dir, output_file) are defined elsewhere in the original
# script; hypothetical definitions are given here so the fragment runs.
tf.app.flags.DEFINE_string('ckpt_dir', './fcn_8s_checkpoint',
                           'Directory holding the FCN-8s checkpoint.')
tf.app.flags.DEFINE_string('output_file', './fcn8s_frozen.pb',
                           'Path for the frozen GraphDef.')
FLAGS = tf.app.flags.FLAGS

pascal_voc_lut = pascal_segmentation_lut()

number_of_classes = 21
image_holder = tf.placeholder(tf.float32, [None, None, None, None],
                              name='input')
logits, _ = FCN_8s(image_batch_tensor=image_holder,
                   number_of_classes=number_of_classes,
                   is_training=False)

saver = tf.train.Saver()

with tf.Session() as sess:

    saver.restore(sess, FLAGS.ckpt_dir + "/model_fcn8s_final.ckpt")

    print("Export the Model...")
    graph_def = sess.graph.as_graph_def()
    # import ipdb
    # ipdb.set_trace()
    freeze_graph_def = graph_util.convert_variables_to_constants(
        sess, graph_def, ["fcn_8s/prediction"])
    with open(FLAGS.output_file, 'wb') as f:
        f.write(freeze_graph_def.SerializeToString())
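
A hedged sketch of reading the frozen graph back for inference; the tensor names follow from the 'input' placeholder and the 'fcn_8s/prediction' node referenced above, and the dummy input shape is an assumption.

import numpy as np

with tf.gfile.GFile(FLAGS.output_file, 'rb') as f:
    frozen_graph_def = tf.GraphDef()
    frozen_graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(frozen_graph_def, name='')
    image_in = graph.get_tensor_by_name('input:0')
    prediction = graph.get_tensor_by_name('fcn_8s/prediction:0')

with tf.Session(graph=graph) as sess:
    dummy_image = np.zeros((1, 384, 512, 3), dtype=np.float32)
    print(sess.run(prediction, feed_dict={image_in: dummy_image}).shape)
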
Example #5
File: FCN_Object.py  Project: trasse/DRFNS
    def setup_train8(self, lr):
        """
        Setups queues and model evaluation.
        """

        image_batch, annotation_batch = tf.train.shuffle_batch(
            [self.resized_image, self.resized_annotation],
            batch_size=1,
            capacity=3000,
            num_threads=2,
            min_after_dequeue=1000)

        upsampled_logits_batch, fcn_8s_variables_mapping = FCN_8s(
            image_batch_tensor=image_batch,
            number_of_classes=self.num_labels,
            is_training=True)

        valid_labels_batch_tensor, valid_logits_batch_tensor = get_valid_logits_and_labels(
            annotation_batch_tensor=annotation_batch,
            logits_batch_tensor=upsampled_logits_batch,
            class_labels=self.class_labels)

        # Flatten the ground-truth labels; used later to count true/false
        # positives and negatives.
        actual = tf.contrib.layers.flatten(tf.cast(annotation_batch, tf.int64))

        self.predicted_img = tf.argmax(upsampled_logits_batch, axis=3)
        cross_entropies = tf.nn.softmax_cross_entropy_with_logits(
            logits=valid_logits_batch_tensor, labels=valid_labels_batch_tensor)
        self.cross_entropy_sum = tf.reduce_mean(cross_entropies)

        pred = tf.argmax(upsampled_logits_batch, axis=3)

        probabilities = tf.nn.softmax(upsampled_logits_batch)

        with tf.variable_scope("adam_vars"):
            self.train_step = tf.train.AdamOptimizer(
                learning_rate=lr).minimize(self.cross_entropy_sum)

        # Variable's initialization functions

        self.init_fn = slim.assign_from_checkpoint_fn(
            model_path=self.fcn_8s_checkpoint_path,
            var_list=fcn_8s_variables_mapping)

        global_vars_init_op = tf.global_variables_initializer()

        self.merged_summary_op = tf.summary.merge_all()

        self.summary_string_writer = tf.summary.FileWriter(
            "./log_8_{}".format(lr))

        if not os.path.exists(self.checkpointnew):
            os.makedirs(self.checkpointnew)

        # The op for initializing the local variables.
        local_vars_init_op = tf.local_variables_initializer()

        self.combined_op = tf.group(local_vars_init_op, global_vars_init_op)

        # We need this to save only model variables and omit
        # optimization-related and other variables.
        model_variables = slim.get_model_variables()
        self.saver = tf.train.Saver(model_variables)
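
A hedged sketch of consuming these handles after setup_train8 has run; `net` stands in for an already-constructed FCN_Object instance, and the iteration count is an assumption.

import os

with tf.Session() as sess:
    sess.run(net.combined_op)  # initialize local and global variables
    net.init_fn(sess)          # load the pretrained FCN-8s weights

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    for step in range(1000):   # hypothetical number of training steps
        loss, _ = sess.run([net.cross_entropy_sum, net.train_step])

    coord.request_stop()
    coord.join(threads)

    net.saver.save(sess, os.path.join(net.checkpointnew, 'model.ckpt'))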