Code example #1
    def test_image_segmentation_generator_preprocessing(self):
        image_seg_pairs = data_loader.get_pairs_from_paths(
            self.train_temp_dir, self.test_temp_dir)

        # Shuffle the expected pairs, then reset the seed so the generator's
        # internal shuffle produces the same ordering.
        random.seed(0)
        random.shuffle(image_seg_pairs)

        random.seed(0)

        generator = data_loader.image_segmentation_generator(
            self.train_temp_dir,
            self.test_temp_dir,
            1,  # batch size
            self.image_size * self.image_size,  # number of classes
            self.image_size,  # input height
            self.image_size,  # input width
            self.image_size,  # output height
            self.image_size,  # output width
            preprocessing=lambda x: x + 1)

        i = 0
        for (aug_im, aug_an), (expt_im_f,
                               expt_an_f) in zip(generator, image_seg_pairs):
            if i >= len(image_seg_pairs):
                break

            expt_im = data_loader.get_image_array(expt_im_f,
                                                  self.image_size,
                                                  self.image_size,
                                                  ordering='channel_last')

            expt_im += 1
            self.assertTrue(np.equal(expt_im, aug_im[0, :, :]).all())

            i += 1
Code example #2
def evaluate(model=None,
             inp_images=None,
             annotations=None,
             inp_images_dir=None,
             annotations_dir=None,
             checkpoints_path=None):

    if model is None:
        assert (
            checkpoints_path
            is not None), "Please provide the model or the checkpoints_path"
        model = model_from_checkpoint_path(checkpoints_path)

    if inp_images is None:
        assert (inp_images_dir
                is not None), "Please provide inp_images or inp_images_dir"
        assert (annotations_dir
                is not None), "Please provide annotations or annotations_dir"

        paths = get_pairs_from_paths(inp_images_dir, annotations_dir)
        paths = list(zip(*paths))
        inp_images = list(paths[0])
        annotations = list(paths[1])

    assert type(inp_images) is list
    assert type(annotations) is list

    tp = np.zeros(model.n_classes)
    fp = np.zeros(model.n_classes)
    fn = np.zeros(model.n_classes)
    n_pixels = np.zeros(model.n_classes)

    for inp, ann in tqdm(zip(inp_images, annotations)):
        pr = predict(model, inp)
        gt = get_segmentation_array(ann,
                                    model.n_classes,
                                    model.output_width,
                                    model.output_height,
                                    no_reshape=True)
        gt = gt.argmax(-1)
        pr = pr.flatten()
        gt = gt.flatten()

        for cl_i in range(model.n_classes):

            tp[cl_i] += np.sum((pr == cl_i) * (gt == cl_i))
            fp[cl_i] += np.sum((pr == cl_i) * (gt != cl_i))
            fn[cl_i] += np.sum((pr != cl_i) * (gt == cl_i))
            n_pixels[cl_i] += np.sum(gt == cl_i)

    cl_wise_score = tp / (tp + fp + fn + 1e-12)  # epsilon guards against division by zero
    n_pixels_norm = n_pixels / np.sum(n_pixels)
    frequency_weighted_IU = np.sum(cl_wise_score * n_pixels_norm)
    mean_IU = np.mean(cl_wise_score)
    return {
        "frequency_weighted_IU": frequency_weighted_IU,
        "mean_IU": mean_IU,
        "class_wise_IU": cl_wise_score
    }
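
For context, here is a minimal usage sketch of evaluate. It assumes the function is exposed as keras_segmentation.predict.evaluate (as in the keras_segmentation package this code appears to come from); the directories and checkpoint prefix are placeholders, not part of the excerpt above.

# Hedged usage sketch: import path, directories and checkpoint prefix are
# assumptions for illustration only.
from keras_segmentation.predict import evaluate

results = evaluate(inp_images_dir="dataset/images_test/",
                   annotations_dir="dataset/annotations_test/",
                   checkpoints_path="checkpoints/vgg_unet_1")

print(results["mean_IU"])
print(results["frequency_weighted_IU"])
print(results["class_wise_IU"])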
Code example #3
    def test_get_pairs_from_paths_with_no_matching_segs_with_escape(self):
        images = ["A.jpg", "B.jpg", "C.jpeg", "D.png"]
        segs = ["A.png", "B.png", "C.png"]
        self._setup_images_and_segs(images, segs)

        expected = [("A.jpg", "A.png"),
                    ("B.jpg", "B.png"),
                    ("C.jpeg", "C.png")]
        expected_values = []
        # Transform paths
        for (x, y) in expected:
            expected_values.append((os.path.join(self.img_path, x),
                                    os.path.join(self.seg_path, y)))
        self.assertEqual(
            expected_values,
            sorted(data_loader.get_pairs_from_paths(
                self.img_path, self.seg_path, ignore_non_matching=True)))
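
The pairing tests call self._setup_images_and_segs without showing it. A plausible sketch follows; it assumes the helper only needs to create empty files with the given names inside the temporary image and segmentation directories (get_pairs_from_paths matches on file names, not contents), and that os is already imported in the test module.

    # Hedged sketch of the helper referenced above (not part of the excerpt).
    def _setup_images_and_segs(self, images, segs):
        for name in images:
            open(os.path.join(self.img_path, name), "a").close()
        for name in segs:
            open(os.path.join(self.seg_path, name), "a").close()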
Code example #4
    def test_get_pairs_from_paths_2(self):
        """ Normal execution with extra files """
        images = ["A.jpg", "B.jpg", "C.jpeg", "D.png", "E.txt"]
        segs = ["A.png", "B.png", "C.png", "D.png", "E.png"]
        self._setup_images_and_segs(images, segs)

        expected = [("A.jpg", "A.png"),
                    ("B.jpg", "B.png"),
                    ("C.jpeg", "C.png"),
                    ("D.png", "D.png")]
        expected_values = []
        # Transform paths
        for (x, y) in expected:
            expected_values.append((os.path.join(self.img_path, x),
                                    os.path.join(self.seg_path, y)))
        self.assertEqual(
            expected_values,
            sorted(data_loader.get_pairs_from_paths(
                self.img_path, self.seg_path)))
Code example #5
    def test_image_segmentation_generator_custom_augmentation_with_other_inputs(
            self):
        other_paths = [self.other_temp_dir, self.other_temp_dir_2]
        random.seed(0)
        image_seg_pairs = data_loader.get_pairs_from_paths(
            self.train_temp_dir,
            self.test_temp_dir,
            other_inputs_paths=other_paths)

        random.seed(0)
        random.shuffle(image_seg_pairs)

        random.seed(0)
        generator = data_loader.image_segmentation_generator(
            self.train_temp_dir,
            self.test_temp_dir,
            1,
            self.image_size * self.image_size,
            self.image_size,
            self.image_size,
            self.image_size,
            self.image_size,
            do_augment=True,
            custom_augmentation=self.custom_aug,
            other_inputs_paths=other_paths)

        i = 0
        for (aug_im, aug_an), (expt_im_f, expt_an_f,
                               expt_oth) in zip(generator, image_seg_pairs):
            if i >= len(image_seg_pairs):
                break

            ims = [expt_im_f]
            ims.extend(expt_oth)

            # Use a separate index for the stacked inputs so the outer counter
            # i is not overwritten by this inner loop.
            for j in range(aug_im.shape[1]):
                expt_im = data_loader.get_image_array(ims[j],
                                                      self.image_size,
                                                      self.image_size,
                                                      ordering='channel_last')

                expt_im = cv2.flip(expt_im, flipCode=1)

                self.assertTrue(np.equal(expt_im, aug_im[0, j, :, :]).all())

            i += 1
Code example #6
    def test_multi_image_segmentation_generator_preprocessing_with_other_inputs(
            self):
        other_paths = [self.other_temp_dir, self.other_temp_dir_2]
        random.seed(0)
        image_seg_pairs = data_loader.get_pairs_from_paths(
            self.train_temp_dir,
            self.test_temp_dir,
            other_inputs_paths=other_paths)

        random.seed(0)
        random.shuffle(image_seg_pairs)

        random.seed(0)
        generator = data_loader.image_segmentation_generator(
            self.train_temp_dir,
            self.test_temp_dir,
            1,
            self.image_size * self.image_size,
            self.image_size,
            self.image_size,
            self.image_size,
            self.image_size,
            preprocessing=[lambda x: x + 1, lambda x: x + 2, lambda x: x + 3],
            other_inputs_paths=other_paths)

        i = 0
        for (aug_im, aug_an), (expt_im_f, expt_an_f,
                               expt_oth) in zip(generator, image_seg_pairs):
            if i >= len(image_seg_pairs):
                break

            ims = [expt_im_f]
            ims.extend(expt_oth)

            # Use a separate index so the outer counter i is not overwritten;
            # each stacked input has its own preprocessing offset (+1, +2, +3).
            for j in range(aug_im.shape[1]):
                expt_im = data_loader.get_image_array(ims[j],
                                                      self.image_size,
                                                      self.image_size,
                                                      ordering='channel_last')

                self.assertTrue(
                    np.equal(expt_im + (j + 1), aug_im[0, j, :, :]).all())

            i += 1
Code example #7
    def test_image_segmentation_generator_custom_augmentation(self):
        random.seed(0)
        image_seg_pairs = data_loader.get_pairs_from_paths(
            self.train_temp_dir, self.test_temp_dir)

        random.seed(0)
        random.shuffle(image_seg_pairs)

        random.seed(0)

        generator = data_loader.image_segmentation_generator(
            self.train_temp_dir,
            self.test_temp_dir,
            1,
            self.image_size * self.image_size,
            self.image_size,
            self.image_size,
            self.image_size,
            self.image_size,
            do_augment=True,
            custom_augmentation=self.custom_aug)

        i = 0
        for (aug_im, aug_an), (expt_im_f,
                               expt_an_f) in zip(generator, image_seg_pairs):
            if i >= len(image_seg_pairs):
                break

            expt_im = data_loader.get_image_array(expt_im_f,
                                                  self.image_size,
                                                  self.image_size,
                                                  ordering='channel_last')

            expt_im = cv2.flip(expt_im, flipCode=1)
            self.assertTrue(np.equal(expt_im, aug_im).all())

            i += 1
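
The augmentation tests above pass self.custom_aug without showing its definition. A plausible sketch follows, assuming the augmentation is built with imgaug (which the library's augmentation utilities use): a deterministic horizontal flip is exactly what makes the cv2.flip(expt_im, flipCode=1) assertions hold.

    def custom_aug(self):
        # Hedged sketch (not part of the excerpt): a deterministic horizontal
        # flip matching cv2.flip(..., flipCode=1) in the assertions above.
        import imgaug.augmenters as iaa
        return iaa.Sequential([iaa.Fliplr(1.0)])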