Code example #1
    def setup_val(self, tfname):
        # Latest checkpoint prefix: strip the ".data*" suffix so the path
        # can be passed to Saver.restore.
        self.restore = glob(os.path.join(self.checkpoint8, "FCN__*",
                                         "*.data*"))[0].split(".data")[0]

        filename_queue = tf.train.string_input_producer([tfname],
                                                        num_epochs=10)
        self.image_queue, self.annotation_queue = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
            filename_queue)
        # Defaults come from the TFRecord queue; a feed_dict can override
        # them. (The original passed self.image as its own default, which
        # is undefined at this point; the queue tensor is the intended
        # value.)
        self.image = tf.placeholder_with_default(self.image_queue,
                                                 shape=[None, None, 3])
        self.annotation = tf.placeholder_with_default(self.annotation_queue,
                                                      shape=[None, None, 1])
        self.resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(
            self.image, self.annotation, (self.size, self.size))
        self.resized_annotation = tf.squeeze(resized_annotation)
        image_batch_tensor = tf.expand_dims(self.image, axis=0)
        annotation_batch_tensor = tf.expand_dims(self.annotation, axis=0)
        # Be careful: after adaptation, network returns final labels
        # and not logits
        FCN_8s_bis = adapt_network_for_any_size_input(FCN_8s, 32)
        self.pred, fcn_16s_variables_mapping = FCN_8s_bis(
            image_batch_tensor=image_batch_tensor,
            number_of_classes=self.num_labels,
            is_training=False)
        # Equivalent to the original nested comprehensions: follow
        # inputs[0] three ops upstream of the final label output to
        # recover the pre-argmax score tensor.
        self.prob = self.pred.op.inputs[0].op.inputs[0].op.inputs[0]
        initializer = tf.local_variables_initializer()
        self.saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(initializer)
            self.saver.restore(sess, self.restore)
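The placeholder_with_default pattern above lets the same graph run either from the TFRecord queue (no feed) or from an image supplied by the caller. A minimal usage sketch, assuming an object `model` on which setup_val has already been called and a NumPy image `img` of shape (H, W, 3); `model` and `img` are illustrative names, not part of the original code:

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    model.saver.restore(sess, model.restore)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # No feed_dict: self.image falls back to the TFRecord queue.
    pred_from_queue = sess.run(model.pred)
    # Feeding the placeholder overrides the queue input.
    pred_from_array = sess.run(model.pred, feed_dict={model.image: img})
    coord.request_stop()
    coord.join(threads)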
Code example #2
File: FCN_Object.py  Project: trasse/DRFNS
    def test8(self, steps, restore, p1):
        """
        Tests the model.
        """
        # Fake batch for image and annotation by adding
        # leading empty axis.
        image_batch_tensor = tf.expand_dims(self.image, axis=0)
        annotation_batch_tensor = tf.expand_dims(self.annotation, axis=0)
        # Be careful: after adaptation, network returns final labels
        # and not logits
        FCN_8s_bis = adapt_network_for_any_size_input(FCN_8s, 32)

        pred, fcn_16s_variables_mapping = FCN_8s_bis(
            image_batch_tensor=image_batch_tensor,
            number_of_classes=self.num_labels,
            is_training=False)
        # Equivalent to the original nested comprehensions: follow
        # inputs[0] three ops upstream of the final label output to
        # recover the pre-argmax score tensor.
        prob = pred.op.inputs[0].op.inputs[0].op.inputs[0]

        initializer = tf.local_variables_initializer()
        saver = tf.train.Saver()
        loss, roc = 0., 0.
        acc, F1, recall = 0., 0., 0.
        precision, jac, AJI = 0., 0., 0.
        with tf.Session() as sess:

            sess.run(initializer)
            saver.restore(sess, restore)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            for i in range(steps):

                image_np, annotation_np, pred_np, prob_np = sess.run(
                    [self.image, self.annotation, pred, prob])
                prob_float = np.exp(-prob_np[0, :, :, 0]) / (
                    np.exp(-prob_np[0, :, :, 0]) +
                    np.exp(-prob_np[0, :, :, 1]))
                prob_int8 = misc.imresize(prob_float,
                                          size=image_np[:, :, 0].shape)

                prob_float = (prob_int8.copy().astype(float) / 255)
                out = ComputeMetrics(prob_float, annotation_np[:, :, 0], p1,
                                     0.5)
                acc += out[0]
                roc += out[1]
                jac += out[2]
                recall += out[3]
                precision += out[4]
                F1 += out[5]
                AJI += out[6]
            coord.request_stop()
            coord.join(threads)
            loss, acc, F1 = np.array([loss, acc, F1]) / steps
            recall, precision, roc = np.array([recall, precision, roc]) / steps
            jac, AJI = np.array([jac, AJI]) / steps
            return loss, acc, F1, recall, precision, roc, jac, AJI
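The prob_float expression above is a two-class softmax over negated scores; algebraically it reduces to a sigmoid of the score difference. A small NumPy sketch of the equivalence (illustrative, not part of the original project):

import numpy as np

def foreground_prob(scores):
    """exp(-s0) / (exp(-s0) + exp(-s1)), rewritten as 1 / (1 + exp(s0 - s1))."""
    s0, s1 = scores[..., 0], scores[..., 1]
    return 1.0 / (1.0 + np.exp(s0 - s1))

# foreground_prob(prob_np[0]) matches the prob_float computed above.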
Code example #3
File: main.py  Project: dav-sap/school_project
def analyze_image(file_loc):

    image_filename = file_loc

    image_filename_placeholder = tf.placeholder(tf.string)

    feed_dict_to_use = {image_filename_placeholder: image_filename}

    image_tensor = tf.read_file(image_filename_placeholder)

    image_tensor = tf.image.decode_jpeg(image_tensor, channels=3)

    # Fake batch for image and annotation by adding
    # leading empty axis.
    image_batch_tensor = tf.expand_dims(image_tensor, axis=0)

    # Be careful: after adaptation, network returns final labels
    # and not logits
    FCN_8s = adapt_network_for_any_size_input(FCN_8, 32)

    pred, fcn_16s_variables_mapping = FCN_8s(image_batch_tensor=image_batch_tensor,
                                             number_of_classes=number_of_classes,
                                             is_training=False)

    # The op for initializing the variables.
    initializer = tf.local_variables_initializer()

    saver = tf.train.Saver()

    with tf.Session() as sess:
        t0 = time.time()
        sess.run(initializer)

        saver.restore(sess, fcn_8s_checkpoint_path)
        t1 = time.time()
        print("Part1: " + str(t1 - t0))
        t0 = time.time()

        image_np, pred_np = sess.run([image_tensor, pred], feed_dict=feed_dict_to_use)
        #
        # io.imshow(image_np)
        # io.show()
        # io.imshow(pred_np.squeeze())
        # io.show()
        cut_img = pred_np.squeeze()
        complete = np.copy(image_np)
        t1 = time.time()
        print("Part2: " + str(t1 - t0))
        for row in range(cut_img.shape[0]):
            for col in range(cut_img.shape[1]):
                if cut_img[row, col] == 0:
                    complete[row, col] = 0
        io.imsave(server.ANALYZED_IMG_LOC, complete)
        return server.ANALYZED_IMG_LOC
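The per-pixel loop above that blanks background pixels has a one-line vectorized equivalent; a sketch using the same complete and cut_img arrays:

# Broadcasting zeroes all three channels wherever the predicted label is 0.
complete[cut_img == 0] = 0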
Code example #4
    def test8(self, steps, restore, p1):
        # Fake batch for image and annotation by adding
        # leading empty axis.
        image_batch_tensor = tf.expand_dims(self.image, axis=0)
        annotation_batch_tensor = tf.expand_dims(self.annotation, axis=0)
        # Be careful: after adaptation, network returns final labels
        # and not logits
        FCN_8s_bis = adapt_network_for_any_size_input(FCN_8s, 32)

        pred, fcn_16s_variables_mapping = FCN_8s_bis(image_batch_tensor=image_batch_tensor,
                                                     number_of_classes=self.num_labels,
                                                     is_training=False)
        # Equivalent to the original nested comprehensions: follow
        # inputs[0] three ops upstream of the final label output.
        prob = pred.op.inputs[0].op.inputs[0].op.inputs[0]

        initializer = tf.local_variables_initializer()
        saver = tf.train.Saver()
        loss, roc = 0., 0.
        acc, F1, recall = 0., 0., 0.
        precision, jac, AJI = 0., 0., 0.
        with tf.Session() as sess:
            
            sess.run(initializer)
            saver.restore(sess, restore)
            
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            
            
            for i in range(steps):
                    
                image_np, annotation_np, pred_np, prob_np = sess.run([self.image, self.annotation, pred, prob])
                prob_float = np.exp(-prob_np[0,:,:,0]) / (np.exp(-prob_np[0,:,:,0]) + np.exp(-prob_np[0,:,:,1]))
                prob_int8 = misc.imresize(prob_float, size=image_np[:,:,0].shape)

                prob_float = (prob_int8.copy().astype(float) / 255)
                out = ComputeMetrics(prob_float, annotation_np[:,:,0], p1, 0.5)
                acc += out[0]
                roc += out[1]
                jac += out[2]
                recall += out[3]
                precision += out[4]
                F1 += out[5]
                AJI += out[6]
            coord.request_stop()
            coord.join(threads)
            loss, acc, F1 = np.array([loss, acc, F1]) / steps
            recall, precision, roc = np.array([recall, precision, roc]) / steps
            jac, AJI = np.array([jac, AJI]) / steps
            return loss, acc, F1, recall, precision, roc, jac, AJI 
Code example #5
File: FCN_Object.py  Project: trasse/DRFNS
    def setup_val(self, tfname):
        """
        Setups the model in case we need to validate.
        """
        self.restore = glob(os.path.join(self.checkpoint8, "FCN__*",
                                         "*.data*"))[0].split(".data")[0]

        filename_queue = tf.train.string_input_producer([tfname],
                                                        num_epochs=10)
        self.image_queue, self.annotation_queue = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
            filename_queue)
        # The original passed self.image as its own default (undefined at
        # this point); the queue tensor is the intended value.
        self.image = tf.placeholder_with_default(self.image_queue,
                                                 shape=[None, None, 3])
        self.annotation = tf.placeholder_with_default(self.annotation_queue,
                                                      shape=[None, None, 1])
        self.resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(
            self.image, self.annotation, (self.size, self.size))
        self.resized_annotation = tf.squeeze(resized_annotation)
        image_batch_tensor = tf.expand_dims(self.image, axis=0)
        annotation_batch_tensor = tf.expand_dims(self.annotation, axis=0)
        # Be careful: after adaptation, network returns final labels
        # and not logits
        FCN_8s_bis = adapt_network_for_any_size_input(FCN_8s, 32)
        self.pred, fcn_16s_variables_mapping = FCN_8s_bis(
            image_batch_tensor=image_batch_tensor,
            number_of_classes=self.num_labels,
            is_training=False)
        # Equivalent to the original nested comprehensions: follow
        # inputs[0] three ops upstream of the final label output.
        self.prob = self.pred.op.inputs[0].op.inputs[0].op.inputs[0]
        initializer = tf.local_variables_initializer()
        self.saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(initializer)
            self.saver.restore(sess, self.restore)
Code example #6
number_of_classes = 21

filename_queue = tf.train.string_input_producer([tfrecord_filename],
                                                num_epochs=1)

image, annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
    filename_queue)

# Fake batch for image and annotation by adding
# leading empty axis.
image_batch_tensor = tf.expand_dims(image, axis=0)
annotation_batch_tensor = tf.expand_dims(annotation, axis=0)

# Be careful: after adaptation, network returns final labels
# and not logits
FCN_8s = adapt_network_for_any_size_input(FCN_8s, 32)

pred, fcn_16s_variables_mapping = FCN_8s(image_batch_tensor=image_batch_tensor,
                                         number_of_classes=number_of_classes,
                                         is_training=False)

# Take away the masked out values from evaluation
weights = tf.to_float(tf.not_equal(annotation_batch_tensor, 255))

# Define the accuracy metric: Mean Intersection Over Union
miou, update_op = slim.metrics.streaming_mean_iou(
    predictions=pred,
    labels=annotation_batch_tensor,
    num_classes=number_of_classes,
    weights=weights)
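streaming_mean_iou only defines the metric ops: the counters live in local variables, and update_op must be run once per record before miou holds the final value. A minimal evaluation loop under those assumptions (the step count is illustrative, and the model weights would still need to be restored from a checkpoint, as in the other examples):

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # initializes the mIoU counters
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    num_eval_steps = 100  # illustrative: one step per record in the TFRecord
    for _ in range(num_eval_steps):
        sess.run(update_op)  # accumulates the per-class confusion matrix
    print("Mean IoU:", sess.run(miou))
    coord.request_stop()
    coord.join(threads)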
Code example #7
def test_fcn_featurizer(test_size, x, train_fcn=False, checkpoint_path=cpstandard):
    """
    ========== Args ==========
      checkpoint_path: Str. Path to `.npy` file containing AlexNet parameters.
       can be found here: `https://github.com/warmspringwinds/tf-image-segmentation/`
      num_channels: Int. number of channels in the input image to be featurized.
       FCN is pretrained with 3 channels.
      train_fcn: Boolean. Whether or not to train the preloaded weights.
      
    ========== Returns ==========
        A featurizer function that takes in a tensor with shape (b, h, w, c) and
        returns a tensor with shape (b, dim).
    """
    size_muliple=32
    num_class=21
    num_channels=3
    image_shape = (test_size, None, None, num_channels)  # RGB + Segmentation id
    images = tf.placeholder(tf.uint8, shape=image_shape)
    # preprocessed_images = tf.image.resize_images(images, size=(229, 229))

    # # Be careful: after adaptation, network returns final labels
    # # and not logits
    # with tf.variable_scope("conv_to_channel3"):
    #     filter_m = tf.Variable(tf.random_normal([1,1,num_channels,3]))
    #     preprocessed_images_3_channels = tf.nn.conv2d(preprocessed_images, filter_m, strides=[1, 1, 1, 1], padding='VALID')
    #     shape_of_this = tf.shape(preprocessed_images_3_channels)

    model = adapt_network_for_any_size_input(FCN_8s, size_multiple)
    pred, fcn_16s_variables_mapping = model(image_batch_tensor=images,
                                            number_of_classes=num_class,
                                            is_training=train_fcn)
    # binary_pred = tf.nn.sigmoid(tf.cast(pred, tf.float32), name="sigmoid")
    binary_pred = pred
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # restore checkpoint
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint_path)
        # a = sess.run([shape_of_this], feed_dict={images: x})
        # print(a)
        original_imgs, output_masks = sess.run([images, binary_pred], feed_dict={images: x})
        for i in range(len(original_imgs)):
            # io.imshow(original_imgs[i])
            # io.show()
            first_mask = output_masks[i]
            # Map selected class ids to distinct gray levels for display.
            first_mask[first_mask == 0] = 0.0
            first_mask[first_mask == 3] = 50.0
            first_mask[first_mask == 8] = 100.0
            first_mask[first_mask == 12] = 150.0
            first_mask[first_mask == 13] = 200.0
            first_mask[first_mask == 15] = 255.0

            first_mask = first_mask.squeeze()
            shape = first_mask.shape
            three_d_first_mask = np.zeros((shape[0], shape[1], 3))
            three_d_first_mask[:, :, 0] = first_mask
            three_d_first_mask[:, :, 1] = first_mask
            three_d_first_mask[:, :, 2] = first_mask
            print(set(first_mask.flatten()))
            three_d_first_mask = three_d_first_mask.astype(np.uint8)
            io.imshow(three_d_first_mask)
            misc.imsave(str(i) + '.png', three_d_first_mask)
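Replicating the gray mask into three identical channels, as done above, has a compact NumPy equivalent; a sketch with the same (already squeezed) first_mask:

# Stack the 2-D mask into an (H, W, 3) uint8 image in one call.
three_d_first_mask = np.dstack([first_mask] * 3).astype(np.uint8)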
Code example #8
    tfrecord_filename = options.tf_records

    number_of_classes = options.labels

    filename_queue = tf.train.string_input_producer(
        [tfrecord_filename], num_epochs=1)

    image, annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(filename_queue)

    # Fake batch for image and annotation by adding
    # leading empty axis.
    image_batch_tensor = tf.expand_dims(image, axis=0)
    annotation_batch_tensor = tf.expand_dims(annotation, axis=0)
    # Be careful: after adaptation, network returns final labels
    # and not logits
    FCN_8s = adapt_network_for_any_size_input(FCN_8s, 32)

    pred, fcn_16s_variables_mapping = FCN_8s(image_batch_tensor=image_batch_tensor,
                                             number_of_classes=number_of_classes,
                                             is_training=False)

    # Take away the masked out values from evaluation
    weights = tf.to_float(tf.not_equal(annotation_batch_tensor, 255))

    # Define the accuracy metric: Mean Intersection Over Union
    miou, update_op = slim.metrics.streaming_mean_iou(predictions=pred,
                                                      labels=annotation_batch_tensor,
                                                      num_classes=number_of_classes,
                                                      weights=weights)
Code example #9
def main(argv):

    sys.path.append("./tf-image-segmentation/")
    sys.path.append("./models/slim/")

    os.environ["CUDA_VISIBLE_DEVICES"] = '1'

    import logging

    logging.basicConfig(filename='log_examp.log', level=logging.INFO)
    logging.debug('This message should go to the log file')
    logging.info('So should this')
    logging.warning('And this, too')

    from tf_image_segmentation.models.fcn_8s import FCN_8s
    from tf_image_segmentation.utils.inference import adapt_network_for_any_size_input

    number_of_classes = 21

    image_filename = argv[1]

    image_filename_placeholder = tf.placeholder(tf.string)

    feed_dict_to_use = {image_filename_placeholder: image_filename}

    image_tensor = tf.read_file(image_filename_placeholder)

    image_tensor = tf.image.decode_jpeg(image_tensor, channels=3)

    image_batch_tensor = tf.expand_dims(image_tensor, axis=0)

    FCN_8s = adapt_network_for_any_size_input(FCN_8s, 32)

    pred, fcn_16s_variables_mapping = FCN_8s(
        image_batch_tensor=image_batch_tensor,
        number_of_classes=number_of_classes,
        is_training=False)

    initializer = tf.local_variables_initializer()

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(initializer)

        saver.restore(sess, './fcn_8s_checkpoint/model_fcn8s_final.ckpt')

        image_np, pred_np = sess.run([image_tensor, pred],
                                     feed_dict=feed_dict_to_use)

        # io.imshow(image_np)
        # io.show()
        #
        # io.imshow(pred_np.squeeze())
        # io.show()

        import skimage.morphology

        imageSegmentationSign = int(argv[2])

        prediction_mask = (pred_np.squeeze() == imageSegmentationSign)

        cropped_object = image_np * np.dstack((prediction_mask, ) * 3)

        # The outline of the mask is the set of pixels removed by a
        # binary erosion: mask AND NOT erode(mask).
        square = skimage.morphology.square(5)

        temp = skimage.morphology.binary_erosion(prediction_mask, square)

        negative_mask = np.logical_not(temp)

        eroding_contour = negative_mask * prediction_mask

        eroding_contour_img = np.dstack((eroding_contour, ) * 3)

        cropped_object[eroding_contour_img] = 248

        png_transparency_mask = np.uint8(prediction_mask * 255)

        image_shape = cropped_object.shape

        png_array = np.zeros(shape=[image_shape[0], image_shape[1], 4],
                             dtype=np.uint8)

        png_array[:, :, :3] = cropped_object

        png_array[:, :, 3] = png_transparency_mask

        # io.imshow(cropped_object)

        # Timestamp for a unique filename (computed but not used in the
        # imsave call below).
        nowTime = time.strftime('%Y_%m_%d_%H%M%S', time.localtime(time.time()))

        io.imsave('image_' + argv[3], png_array)
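The erosion trick above (the mask minus its own erosion) is a standard way to extract a mask outline before painting it a highlight color. A compact standalone sketch of the same idea (illustrative helper, not from the original script):

import skimage.morphology

def mask_outline(mask, width=5):
    """Boundary of a boolean mask: the pixels a binary erosion removes."""
    eroded = skimage.morphology.binary_erosion(
        mask, skimage.morphology.square(width))
    return mask & ~eroded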