def export_sparse_encoding(self, filename, dataset_path):
        """Convert color ground-truth images to sparse label PNGs on disk.

        Each ground-truth image under ``SegmentationClass`` is mapped from
        colors to integer labels, replicated into three identical channels,
        and written under ``SegmentationSparseClass``.

        :param filename: text file listing the image base names.
        :param dataset_path: root directory of the dataset.
        :return: None
        """
        # Base names of the images to convert.
        names = self.get_basenames(filename, dataset_path)

        src_dir = os.path.join(dataset_path, 'SegmentationClass')
        dst_dir = os.path.join(dataset_path, 'SegmentationSparseClass')

        # Make sure the output folder exists before writing anything.
        if os.path.exists(dst_dir):
            print('Sparse labels folder already exists')
        else:
            print('Creating sparse labels folder')
            os.makedirs(dst_dir)

        for name in names:
            labels = imread(os.path.join(src_dir, name + '.png'))
            labels = colors2labels(labels, self.cmap, one_hot=False)
            # Replicate the single-channel label map into a 3-channel image
            # so it can be saved as a regular PNG.
            labels = np.dstack([labels, np.copy(labels), np.copy(labels)])
            imwrite(os.path.join(dst_dir, name + '.png'), labels)
    def export_tfrecord(self, basenames, dataset_path, tfrecord_filename):
        """Export a semantic image segmentation dataset to TFRecords.

        :param basenames: pairs whose first item is the image file name and
            whose second item is the ground-truth file name.
        :param dataset_path: root directory of the dataset.
        :param tfrecord_filename: name of the TFRecord file to write.
        :return: None
        """
        print('Loading dataset...')

        # Ensure the TFRecords output folder exists.
        tfrecords_path = os.path.join(dataset_path, 'training/TFRecords')
        if os.path.exists(tfrecords_path):
            print('TFRecords folder already exists')
        else:
            print('Creating TFRecords folder')
            os.makedirs(tfrecords_path)

        images, labels, shapes = [], [], []
        for pair in basenames:
            # Images are kept as raw bytes; ground truth is decoded and
            # converted from colors to an integer label map.
            images.append(bytesread(os.path.join(dataset_path, TRAIN_IM_PATH, pair[0])))
            gt = imread(os.path.join(dataset_path, TRAIN_GT_PATH, pair[1]))
            shapes.append(gt.shape)
            labels.append(colors2labels(gt, self.cmap))

        print('Saving to ' + tfrecord_filename)
        self._export(images, labels, shapes,
                     os.path.join(tfrecords_path, tfrecord_filename))
    def export_sparse_encoding(self, dataset_path):
        """Convert training ground-truth images to sparse label PNGs.

        Ground truth images are only available for the training set. Each
        color-coded image is mapped to integer labels, replicated into three
        identical channels, and saved to the sparse labels folder.

        :param dataset_path: The root path of the dataset.
        :return: None
        """
        # (index, base name) pairs for the training set.
        names = self.get_basenames(is_training=True, dataset_path=dataset_path)

        src_dir = os.path.join(dataset_path, TRAIN_GT_PATH)
        dst_dir = os.path.join(dataset_path, GT_SPARSE_PATH)

        # Ensure the output folder exists before writing anything.
        if os.path.exists(dst_dir):
            print('Sparse labels folder already exists')
        else:
            print('Creating sparse labels folder')
            os.makedirs(dst_dir)

        for _, name in names:
            labels = imread(os.path.join(src_dir, name))
            labels = colors2labels(labels, self.cmap, one_hot=False)
            # Replicate the label map into three channels for PNG output.
            labels = np.dstack([labels, np.copy(labels), np.copy(labels)])
            imwrite(os.path.join(dst_dir, name), labels)
def decompose_image(filename_in, path_out):
    """Run the intrinsic image decomposition with caffe.

    :param filename_in: path of the input image.
    :param path_out: directory where result images are written.
    :return: the grayscale reflectance image predicted by the network.
    """
    # Network definition and learned weights live next to this module.
    here = os.path.dirname(__file__)
    net = caffe.Net(os.path.join(here, 'network_definition.prototxt'),
                    caffe.TEST,
                    weights=os.path.join(here, 'learned_weights.caffemodel'))

    image = iu.imread(filename_in)

    # Base name (without extension) used for all output files.
    basename = os.path.splitext(os.path.basename(filename_in))[0]

    # Grayscale reflectance predicted by the network.
    reflectance_gray = get_reflectance_caffe(net, image)
    iu.imwrite(os.path.join(path_out, basename + '-r.png'), reflectance_gray)

    # Recover color reflectance and shading using the input image.
    reflectance, shading = iu.colorize(reflectance_gray, image)

    # Save the colorized versions in sRGB.
    iu.imwrite(os.path.join(path_out, basename + '-r_colorized.png'),
               reflectance, sRGB=True)
    iu.imwrite(os.path.join(path_out, basename + '-s_colorized.png'),
               shading, sRGB=True)

    return reflectance_gray
def read_filter_write(filter_type, filename_in, guidance_in, sigma_color,
                      sigma_spatial, path_out):
    """Read input and guidance image, apply filter and write result.

    :param filter_type: identifier of the filter to apply.
    :param filename_in: path of the image to filter.
    :param guidance_in: path of the guidance (joint) image.
    :param sigma_color: color sigma passed to the filter.
    :param sigma_spatial: spatial sigma passed to the filter.
    :param path_out: directory where the filtered image is written.
    :return: the filtered image.
    """
    # Base name (without extension) used to build the output file name.
    base = os.path.splitext(os.path.basename(filename_in))[0]

    filtered = apply_filter(filter_type,
                            iu.imread(filename_in),
                            iu.imread(guidance_in),
                            sigma_color,
                            sigma_spatial)

    # Encode the filter and its parameters in the output file name.
    params = "_{}_c{}s{}".format(filter_type, sigma_color, sigma_spatial)
    iu.imwrite(os.path.join(path_out, base + params + '.png'), filtered)

    return filtered
    def export_tfrecord(self, filename, dataset_path, tfrecord_filename):
        """Export a semantic image segmentation dataset to TFRecords.

        Images are stored in JPEG format, and segmentation ground truth in
        PNG format.

        :param filename: the text file name, either 'train.txt' or 'val.txt'
        :param dataset_path: root directory of the dataset.
        :param tfrecord_filename: name of the TFRecord file to write.
        :return: None
        """
        print('Loading images...')
        names = self.get_basenames(filename, dataset_path)

        # Ensure the TFRecords output folder exists.
        tfrecords_path = os.path.join(dataset_path, 'TFRecords')
        if os.path.exists(tfrecords_path):
            print('TFRecords folder already exists')
        else:
            print('Creating TFRecords folder')
            os.makedirs(tfrecords_path)

        images, labels, shapes = [], [], []
        for name in names:
            # Raw JPEG bytes for the image; decoded label map for the GT.
            images.append(bytesread(
                os.path.join(dataset_path, 'JPEGImages', name + '.jpg')))
            gt = imread(
                os.path.join(dataset_path, 'SegmentationClass',
                             name + '.png'))
            shapes.append(gt.shape)
            labels.append(colors2labels(gt, self.cmap))

        print('Saving to ' + tfrecord_filename)
        self._export(images, labels, shapes,
                     os.path.join(tfrecords_path, tfrecord_filename))
 def process_func(self, example_line):
     """Load and return the image referenced by *example_line*.

     :param example_line: presumably a path to an image file — verify
         against the caller that feeds lines into this hook.
     :return: the decoded image as returned by ``imread``.
     """
     return imread(example_line)
Example #8
0
        # NOTE(review): fragment — the enclosing function and the loop over
        # `variable` (presumably iterating tf.trainable_variables()) begin
        # before this excerpt; `total_parameters`, `a_list`, `crop_size`,
        # `sess`, `a2b`, `a_input` and `a_save_dir` are defined there.
        shape = variable.get_shape()
        print(shape)
        print(len(shape))
        variable_parameters = 1
        for dim in shape:
            print(dim)
            # Multiply the size of every dimension to count this
            # variable's parameters.
            variable_parameters *= dim.value
        print(variable_parameters)
        total_parameters += variable_parameters
    print("\nTotal parameters:\n", total_parameters)

    # start = time.time()
    # Inference
    for i in range(len(a_list)):
        # Define shapes for images fed to the graph
        a_feed = im.imresize(im.imread(a_list[i]), [crop_size, crop_size])
        a_feed.shape = 1, crop_size, crop_size, 3

        # Feed in images to the graph
        a2b_result = sess.run(a2b, feed_dict={a_input: a_feed})

        # Create and save the output image: input and translation merged
        # side by side (1 row, 2 columns).
        a_img_opt = np.concatenate((a_feed, a2b_result), axis=0)
        img_name = os.path.basename(a_list[i])
        im.imwrite(im.immerge(a_img_opt, 1, 2), a_save_dir + '/' + img_name)
        print('Save %s' % (a_save_dir + '/' + img_name))

    #     if i == 100:
    #         end = time.time()
    # end2 = time.time()
    # print("Time to process first 100 images:", end - start)
    # NOTE(review): fragment — the enclosing function, `sess`, `dataset`,
    # `crop_size`, and the graph tensors (`a2b`, `a2b2a`, `b2a`, `b2a2b`,
    # `a_real`, `b_real`) are defined before this excerpt.
    # Restore model variables from the latest checkpoint.
    saver = tf.train.Saver()
    ckpt_path = utils.load_checkpoint('./checkpoints/' + dataset, sess, saver)
    if ckpt_path is None:
        raise Exception('No checkpoint!')
    else:
        print('Copy variables from % s' % ckpt_path)

    # test
    a_list = glob('./datasets/' + dataset + '/testA/*.jpg')
    b_list = glob('./datasets/' + dataset + '/testB/*.jpg')

    a_save_dir = './test_predictions/' + dataset + '/testA/'
    b_save_dir = './test_predictions/' + dataset + '/testB/'
    utils.mkdir([a_save_dir, b_save_dir])
    # Translate every testA image A->B and back (cycle), then save the
    # three images merged side by side (1 row, 3 columns).
    for i in range(len(a_list)):
        a_real_ipt = im.imresize(im.imread(a_list[i]), [crop_size, crop_size])
        a_real_ipt.shape = 1, crop_size, crop_size, 3
        a2b_opt, a2b2a_opt = sess.run([a2b, a2b2a],
                                      feed_dict={a_real: a_real_ipt})
        a_img_opt = np.concatenate((a_real_ipt, a2b_opt, a2b2a_opt), axis=0)

        img_name = os.path.basename(a_list[i])
        im.imwrite(im.immerge(a_img_opt, 1, 3), a_save_dir + img_name)
        print('Save %s' % (a_save_dir + img_name))

    # Same procedure for the testB images, direction B->A.
    for i in range(len(b_list)):
        b_real_ipt = im.imresize(im.imread(b_list[i]), [crop_size, crop_size])
        b_real_ipt.shape = 1, crop_size, crop_size, 3
        b2a_opt, b2a2b_opt = sess.run([b2a, b2a2b],
                                      feed_dict={b_real: b_real_ipt})
        # NOTE(review): the fragment is cut here — the save of b_img_opt
        # is not visible in this excerpt.
        b_img_opt = np.concatenate((b_real_ipt, b2a_opt, b2a2b_opt), axis=0)
                # NOTE(review): fragment — the enclosing function, the loop
                # over `op` (presumably graph1.get_operations()), `graph1`,
                # `sess`, `a_list`, `crop_size` and `im` are defined before
                # this excerpt.
                print("\nOperation name :", op.name)  # Operation name
                print("Tensor details :", str(op.values()))  # Tensor name

            # Assign input and output tensors
            a_input = graph1.get_tensor_by_name('inputA:0')  # Input Tensor
            a_output = graph1.get_tensor_by_name(
                'a2b_generator/Conv_7/Relu:0')  # Output Tensor

            # Initialize_all_variables
            # NOTE(review): this creates the init op but never runs it
            # (no sess.run) — presumably variables are restored elsewhere;
            # verify before relying on this path.
            tf.global_variables_initializer()

            start = time.time()
            # Inference
            for i in range(len(a_list)):
                # Define shapes for images fed to the graph
                a_feed = im.imresize(im.imread(a_list[i]),
                                     [crop_size, crop_size])
                a_feed.shape = 1, crop_size, crop_size, 3

                # Feed in images to the graph
                a2b_result = sess.run(a_output, feed_dict={a_input: a_feed})
                print(type(a2b_result))
                print(a2b_result.shape)

                # Create and save the output image
                # NOTE(review): `output = tf` looks truncated/incomplete in
                # this excerpt; the commented lines below suggest the
                # intended merge-and-save logic.
                output = tf
                # a_img_opt = a2b_result
                # img_name = os.path.basename(a_list[i])
                #
                # output = im.immerge(a_img_opt, 1, 1)
                #
    # NOTE(review): fragment — the enclosing function, `sess`, `dataset`,
    # `crop_size`, and the graph tensors (`a2b`, `a2b2a`, `b2a`, `b2a2b`,
    # `a_real`, `b_real`) are defined before this excerpt, and the excerpt
    # is cut off after the last visible line.
    # Restore model variables from the latest checkpoint.
    saver = tf.train.Saver()
    ckpt_path = utils.load_checkpoint('./checkpoints/' + dataset, sess, saver)
    if ckpt_path is None:
        raise Exception('No checkpoint!')
    else:
        print('Copy variables from % s' % ckpt_path)

    # test
    a_list = glob('./datasets/' + dataset + '/testA/*.jpg')
    b_list = glob('./datasets/' + dataset + '/testB/*.jpg')

    a_save_dir = './test_predictions/' + dataset + '/testA/'
    b_save_dir = './test_predictions/' + dataset + '/testB/'
    utils.mkdir([a_save_dir, b_save_dir])
    # Translate every testA image A->B and back (cycle), then save the
    # three images merged side by side (1 row, 3 columns).
    for i in range(len(a_list)):
        a_real_ipt = im.imresize(im.imread(a_list[i]), [crop_size, crop_size])
        a_real_ipt.shape = 1, crop_size, crop_size, 3
        a2b_opt, a2b2a_opt = sess.run([a2b, a2b2a], feed_dict={a_real: a_real_ipt})
        a_img_opt = np.concatenate((a_real_ipt, a2b_opt, a2b2a_opt), axis=0)

        img_name = os.path.basename(a_list[i])
        im.imwrite(im.immerge(a_img_opt, 1, 3), a_save_dir + img_name)
        print('Save %s' % (a_save_dir + img_name))

    # Same procedure for the testB images, direction B->A.
    for i in range(len(b_list)):
        b_real_ipt = im.imresize(im.imread(b_list[i]), [crop_size, crop_size])
        b_real_ipt.shape = 1, crop_size, crop_size, 3
        b2a_opt, b2a2b_opt = sess.run([b2a, b2a2b], feed_dict={b_real: b_real_ipt})
        b_img_opt = np.concatenate((b_real_ipt, b2a_opt, b2a2b_opt), axis=0)

        img_name = os.path.basename(b_list[i])