def getIimg(image_filename, net, number_of_classes):
    """Return the per-class integral image of FCN-32s class probabilities.

    Builds an FCN-32s inference graph for the JPEG at ``image_filename``,
    restores weights from the checkpoint ``net``, runs softmax over the
    upsampled logits, and returns the 2-D cumulative sum (integral image)
    of the resulting ``(height, width, number_of_classes)`` probability map.

    Args:
        image_filename: Path to a JPEG image file on disk.
        net: Checkpoint path passed to ``tf.train.Saver.restore``.
        number_of_classes: Number of segmentation classes the model predicts.

    Returns:
        ``numpy.ndarray`` of shape ``(height, width, number_of_classes)``,
        dtype float64: the probabilities cumulatively summed along axis 0
        then axis 1.
    """
    from tf_image_segmentation.models.fcn_32s import FCN_32s

    image_filename_placeholder = tf.placeholder(tf.string)
    feed_dict_to_use = {image_filename_placeholder: image_filename}
    image_tensor = tf.read_file(image_filename_placeholder)
    image_tensor = tf.image.decode_jpeg(image_tensor, channels=3)
    # Fake batch for the image by adding a leading empty axis.
    image_batch_tensor = tf.expand_dims(image_tensor, axis=0)
    # Be careful: after adaptation, network returns final labels and not logits
    FCN_32s = adapt_network_for_any_size_input(FCN_32s, 32)

    # NOTE(review): imgNum and net_reuse are read from global scope and are
    # not defined anywhere in this chunk -- confirm they exist before calling,
    # otherwise this raises NameError.
    # Create the network variables on the first call, reuse them afterwards;
    # the two original branches differed only in the `reuse` flag.
    reuse_vars = None if (imgNum == 0 and net_reuse == 0) else True
    upsampled_logit, fcn_16s_variables_mapping_train = FCN_32s(
        image_batch_tensor=image_batch_tensor,
        number_of_classes=number_of_classes,
        is_training=False,
        reuse=reuse_vars)

    probabilities = tf.nn.softmax(upsampled_logit)
    initializer = tf.local_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(initializer)
        saver.restore(sess, net)
        # Only the probabilities are consumed; the original also fetched the
        # decoded image and an argmax prediction and discarded both.
        probabilities_np = sess.run(probabilities, feed_dict=feed_dict_to_use)

    # Drop the batch axis explicitly: a plain .squeeze() would also collapse
    # the class axis when number_of_classes == 1. Cast to float64 to keep the
    # original accumulation dtype (np.empty defaulted to float64).
    heatMap = probabilities_np.squeeze(axis=0).astype(np.float64)
    return heatMap.cumsum(axis=0).cumsum(axis=1)
    # NOTE(review): everything from here down is unreachable -- the function
    # returns on the line above. It looks like residue from a second pasted
    # snippet (note the inconsistent indentation: some lines are at function
    # depth, others at module level, and `imgNum += 1` would hit an undefined
    # name if it ever ran). Strong candidate for deletion.
    image_tensor = tf.image.decode_jpeg(image_tensor, channels=3)

    # Fake batch for image and annotation by adding leading empty axis.
    image_batch_tensor = tf.expand_dims(image_tensor, axis=0)

    # Be careful: after adaptation, network returns final labels and not logits
    FCN_32s = adapt_network_for_any_size_input(FCN_32s, 32)

# if imgNum == 0:
#     reuse_var = False
# else:
#     reuse_var = True

upsampled_logit, fcn_16s_variables_mapping_train = FCN_32s(
    image_batch_tensor=image_batch_tensor,
    number_of_classes=number_of_classes,
    is_training=False,
    reuse=False)

imgNum += 1

# NOTE(review): resized logits are computed but never used below.
upsampled_logit_resized = tf.image.resize_images(upsampled_logit,
                                                 (720, 1280))

# `dimension=` is the deprecated spelling of `axis=` in tf.argmax.
pred = tf.argmax(upsampled_logit, dimension=3)
probabilities = tf.nn.softmax(upsampled_logit)

initializer = tf.local_variables_initializer()

saver = tf.train.Saver()
# Beispiel #3  (scrape-pagination artifact, not Python -- kept as a comment)
# 0
# image = distort_randomly_image_color(image)

# NOTE(review): this fragment relies on names defined outside this chunk --
# image, annotation, image_train_size, samples, FCN_32s,
# get_valid_logits_and_labels, class_labels -- verify they are in scope.
# Randomly scale image + annotation to a fixed training size.
resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(image, annotation, image_train_size)


# Remove the singleton channel axis from the annotation.
resized_annotation = tf.squeeze(resized_annotation)

# Shuffled single-example batches for training.
image_batch, annotation_batch = tf.train.shuffle_batch( [resized_image, resized_annotation],
                                             batch_size=1,
                                             capacity=samples,
                                             num_threads=2,
                                                        min_after_dequeue=70)

upsampled_logits_batch, vgg_16_variables_mapping = FCN_32s(image_batch_tensor=image_batch,
                                                           number_of_classes=number_of_classes,
                                                           is_training=True)


# Keep only pixels with a valid class label (masked-out regions dropped).
valid_labels_batch_tensor, valid_logits_batch_tensor = get_valid_logits_and_labels(annotation_batch_tensor=annotation_batch,
                                                                                     logits_batch_tensor=upsampled_logits_batch,
                                                                                    class_labels=class_labels)



cross_entropies = tf.nn.softmax_cross_entropy_with_logits(logits=valid_logits_batch_tensor,
                                                          labels=valid_labels_batch_tensor)

# Normalize the cross entropy -- the number of elements
# is different during each step due to mask out regions.
# NOTE(review): despite the name, this is a mean (reduce_mean), not a sum;
# the name is kept because later (unseen) code may reference it.
cross_entropy_sum = tf.reduce_mean(cross_entropies)
# Beispiel #4  (scrape-pagination artifact, not Python -- kept as a comment)
# 0
slim = tf.contrib.slim

from tf_image_segmentation.models.fcn_32s import FCN_32s

from tf_image_segmentation.utils.pascal_voc import pascal_segmentation_lut
from tf_image_segmentation.utils.tf_records import read_tfrecord_and_decode_into_image_annotation_pair_tensors
from tf_image_segmentation.utils.inference import adapt_network_for_any_size_input
# from tf_image_segmentation.utils.visualization import visualize_segmentation_adaptive

# Label lookup table for PASCAL VOC (21 classes incl. background).
pascal_voc_lut = pascal_segmentation_lut()

number_of_classes = 21
# Fully dynamic NHWC placeholder so the exported graph accepts any input size.
image_holder = tf.placeholder(tf.float32, [None, None, None, None],
                              name='input')
logits, _ = FCN_32s(image_batch_tensor=image_holder,
                    number_of_classes=number_of_classes,
                    is_training=False)

saver = tf.train.Saver()

# NOTE(review): FLAGS and graph_util are not defined in this chunk --
# presumably tf.app.flags / tf.graph_util imported earlier; confirm.
with tf.Session() as sess:

    saver.restore(sess, FLAGS.ckpt_dir + "/model_fcn32s_final.ckpt")

    print("Export the Model...")
    graph_def = sess.graph.as_graph_def()
    # import ipdb
    # ipdb.set_trace()
    # Bake checkpoint variables into constants, keeping only the
    # "fcn_32s/prediction" output subgraph.
    freeze_graph_def = graph_util.convert_variables_to_constants(
        sess, graph_def, ["fcn_32s/prediction"])
    # NOTE(review): the body of this `with` block is truncated in this chunk.
    with open(FLAGS.output_file, 'wb') as f:
# Beispiel #5  (scrape-pagination artifact, not Python -- kept as a comment)
# 0
                                                num_epochs=1)
# NOTE(review): the line above is the tail of a call (presumably
# tf.train.string_input_producer(...) building filename_queue) whose head
# lies outside this chunk.

image, annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
    filename_queue)

# Fake batch for image and annotation by adding
# leading empty axis.
image_batch_tensor = tf.expand_dims(image, axis=0)
annotation_batch_tensor = tf.expand_dims(annotation, axis=0)

# Be careful: after adaptation, network returns final labels
# and not logits
FCN_32s = adapt_network_for_any_size_input(FCN_32s, 32)

pred, fcn_32s_variables_mapping = FCN_32s(
    image_batch_tensor=image_batch_tensor,
    number_of_classes=number_of_classes,
    is_training=False)

# Take away the masked out values from evaluation
# (255 is the ignore label: zero weight excludes those pixels).
weights = tf.to_float(tf.not_equal(annotation_batch_tensor, 255))

# Define the accuracy metric: Mean Intersection Over Union
miou, update_op = slim.metrics.streaming_mean_iou(
    predictions=pred,
    labels=annotation_batch_tensor,
    num_classes=number_of_classes,
    weights=weights)

# The op for initializing the variables.
# (streaming_mean_iou keeps its counts in local variables.)
initializer = tf.local_variables_initializer()