Example #1
    def __getitem__(self, idx):

        # When ground-truth masks are resized, interpolation changes their
        # values, so we threshold them to recover a binary mask.

        im = self.imgs[idx]
        im = self.reshaper_img.augment_image(im)

        shape = im.shape

        truth = None

        if (self.truth_paths is not None):
            truth_path = self.truth_paths[idx]
            truth = imread(truth_path, scale=False)[..., :1]
            truth = truth > 0

            if (len(truth.shape) == 3):
                truth = np.mean(truth, axis=-1)

            truth = truth.astype(np.uint8)
        else:
            truth = np.zeros(im.shape[:2]).astype(np.uint8)

        # Apply data augmentation
        if (self.augmentations is not None):
            aug_det = self.augmentations.to_deterministic()
        else:
            aug_det = iaa.Noop()

        im_aug = aug_det.augment_images([im])[0]
        im_unnormal = im_aug.copy()

        truth = ia.SegmentationMapsOnImage(truth, shape=truth.shape)
        labels = ia.SegmentationMapsOnImage(self.labels[..., idx].astype(
            np.int16),
                                            shape=self.labels[..., idx].shape)

        if (self.normalization is not None):
            im = self.normalization.augment_image(im)
            im_aug = self.normalization.augment_image(im_aug)

        truth = truth.get_arr()[..., np.newaxis]
        labels = labels.get_arr()[..., np.newaxis]

        im_aug = self.reshaper_img.augment_image(im_aug)
        im_unnormal = self.reshaper_img.augment_image(im_unnormal)
        truth = self.reshaper_seg.augment_image(truth)
        labels = self.reshaper_seg.augment_image(labels)

        return {
            'image': im_aug,
            'image_noaug': im,
            'image_unnormal': im_unnormal,
            'frame_name': os.path.split(self.img_paths[idx])[-1],
            'labels': labels,
            'frame_idx': idx,
            'n_frames': self.__len__(),
            'label/segmentation': truth
        }
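Most of the dataset loaders in this collection follow the same wrap/augment/unwrap pattern seen in Example #1. The following is a minimal, self-contained sketch of that pattern, for illustration only (assuming imgaug >= 0.4 and numpy; the image, mask, and augmenters are made up for the example):

import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa

# toy image and integer class mask with the same spatial size
image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
mask = np.zeros((64, 64), dtype=np.int32)
mask[16:48, 16:48] = 1

# wrap the mask so geometric augmenters can transform it together with the image
segmap = ia.SegmentationMapsOnImage(mask, shape=image.shape)

# freeze the random parameters so image and mask receive identical transforms
aug_det = iaa.Sequential([iaa.Fliplr(0.5), iaa.Affine(rotate=(-15, 15))]).to_deterministic()

image_aug = aug_det.augment_image(image)
mask_aug = aug_det.augment_segmentation_maps(segmap).get_arr()  # back to a plain numpy array

assert image_aug.shape[:2] == mask_aug.shape[:2]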
Example #2
def test_SegmentationMapsOnImage_get_arr():
    dtypes = ["int8", "int16", "int32", "uint8", "uint16"]
    ndims = [2, 3]

    for dtype, ndim in itertools.product(dtypes, ndims):
        dtype = np.dtype(dtype)
        shape = (3, 3) if ndim == 2 else (3, 3, 1)
        arr = np.array([[0, 0, 1], [0, 2, 1], [1, 3, 1]],
                       dtype=dtype).reshape(shape)
        segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3))
        observed = segmap.get_arr()
        assert segmap.arr.dtype.name == "int32"
        assert segmap.arr.ndim == 3
        assert np.array_equal(observed, arr)
        assert observed.dtype.name == dtype.name
        assert observed.ndim == ndim
        assert np.array_equal(observed, arr)

    for ndim in ndims:
        shape = (3, 3) if ndim == 2 else (3, 3, 1)
        arr = np.array([[0, 0, 1], [0, 1, 1], [1, 1, 1]],
                       dtype=bool).reshape(shape)
        segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3))
        observed = segmap.get_arr()
        assert segmap.arr.dtype.name == "int32"
        assert segmap.arr.ndim == 3
        assert np.array_equal(observed, arr)
        assert observed.dtype.kind == "b"
        assert observed.ndim == ndim
        assert np.array_equal(observed, arr)
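The test above pins down a useful property: SegmentationMapsOnImage always stores the map internally as a 3D int32 array, while get_arr() returns an array with the caller's original dtype and dimensionality. A short sketch of that round trip (assuming imgaug >= 0.4):

import numpy as np
import imgaug as ia

arr = np.array([[0, 0, 1], [0, 2, 1], [1, 3, 1]], dtype=np.uint8)
segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3))

print(segmap.arr.dtype, segmap.arr.shape)   # internal storage: int32, (3, 3, 1)
restored = segmap.get_arr()
print(restored.dtype, restored.shape)       # original view: uint8, (3, 3)
assert np.array_equal(restored, arr)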
Example #3
    def test_uint_int_arrs(self):
        dtypes = ["int8", "int16", "int32", "uint8", "uint16"]
        ndims = [2, 3]
        img_shapes = [(3, 3), (3, 3, 3), (4, 5, 3)]

        gen = itertools.product(dtypes, ndims, img_shapes)
        for dtype, ndim, img_shape in gen:
            with self.subTest(dtype=dtype, ndim=ndim, shape=img_shape):
                dtype = np.dtype(dtype)
                shape = (3, 3) if ndim == 2 else (3, 3, 1)
                arr = np.array([[0, 0, 1], [0, 2, 1], [1, 3, 1]],
                               dtype=dtype).reshape(shape)
                segmap = ia.SegmentationMapsOnImage(arr, shape=img_shape)
                assert segmap.shape == img_shape
                assert segmap.arr.dtype.name == "int32"
                assert segmap.arr.shape == (3, 3, 1)
                assert np.array_equal(segmap.arr,
                                      arr.reshape((3, 3, 1)).astype(np.int32))

                if ndim == 3:
                    arr = np.array([[0, 0, 1], [0, 2, 1], [1, 3, 1]],
                                   dtype=dtype).reshape((3, 3, 1))
                    arr = np.tile(arr, (1, 1, 5))
                    segmap = ia.SegmentationMapsOnImage(arr, shape=img_shape)
                    assert segmap.shape == img_shape
                    assert segmap.arr.dtype.name == "int32"
                    assert segmap.arr.shape == (3, 3, 5)
                    assert np.array_equal(segmap.arr, arr.astype(np.int32))
Example #4
    def __getitem__(self, index):
        # load image and crop
        img = Image.open(self.image_lists[index]).convert('RGB')
        img = img.resize(self.resize)
        img = np.array(img)
        labels = self.label_lists[index]
        # load label
        if self.mode == 'train':
            label_ori = Image.open(self.label_lists[index]).convert('RGB')
            label_ori = label_ori.resize(self.resize)
            label_ori = np.array(label_ori)
            label = np.ones(shape=(label_ori.shape[0], label_ori.shape[1]), dtype=np.uint8)

            # convert RGB color labels to a class-index map

            for i in range(len(self.COLOR_DICT)):
                equality = np.equal(label_ori, self.COLOR_DICT[i])
                class_map = np.all(equality, axis=-1)
                label[class_map] = i

            # augment image and label
            if self.mode == 'train':
                seq_det = self.flip.to_deterministic()  # freeze the random transform parameters (deterministic)
                segmap = ia.SegmentationMapsOnImage(label, shape=label.shape)
                img = seq_det.augment_image(img)
                label = seq_det.augment_segmentation_maps([segmap])[0].get_arr().astype(np.uint8)

            label_img = torch.from_numpy(label.copy()).float()
            labels = label_img
        imgs = img.transpose(2, 0, 1) / 255.0
        img = torch.from_numpy(imgs.copy()).float()
        labels = labels.unsqueeze(0)
        return img, labels
Example #5
def _augment_seg(img, seg, augmentation_name="aug_all", other_imgs=None):

    global loaded_augmentation_name

    if (not IMAGE_AUGMENTATION_SEQUENCE) or\
       (augmentation_name != loaded_augmentation_name):
        _load_augmentation(augmentation_name)
        loaded_augmentation_name = augmentation_name

    # Create a deterministic augmentation from the random one
    aug_det = IMAGE_AUGMENTATION_SEQUENCE.to_deterministic()
    # Augment the input image
    image_aug = aug_det.augment_image(img)

    if other_imgs is not None:
        image_aug = [image_aug]

        for other_img in other_imgs:
            image_aug.append(aug_det.augment_image(other_img))

    segmap = ia.SegmentationMapsOnImage(seg, shape=img.shape)
    segmap_aug = aug_det.augment_segmentation_maps(segmap)
    segmap_aug = segmap_aug.get_arr()

    return image_aug, segmap_aug
Example #6
    def segmap(self):
        arr = np.int32([
            [0, 1, 1],
            [0, 2, 1],
            [0, 1, 3]
        ])
        return ia.SegmentationMapsOnImage(arr, shape=(3, 3))
Example #7
    def preprocess_all(self, img, mask, scale):
        w, h = img.size
        newW, newH = int(scale * w), int(scale * h)
        assert newW > 0 and newH > 0, 'Scale is too small'
        pil_img = img.resize((newW, newH))
        pil_mask = mask.resize((newW, newH))
        img_nd = np.array(pil_img)
        mask_nd = np.array(pil_mask)
        seq = iaa.Sequential([
            iaa.Sometimes(0.5, iaa.Crop(px=(0, 16))),
            iaa.Affine(rotate=(-90, 90)),
            iaa.Sometimes(0.5, iaa.Fliplr(0.5)),
            iaa.Sometimes(0.5,
                          iaa.GaussianBlur((0, 0.5)),
                          iaa.Sometimes(
                              0.5,
                              iaa.AdditiveGaussianNoise(loc=0,
                                                        scale=(0.0,
                                                               0.05 * 255),
                                                        per_channel=0.5)),
                          random_state=True)
        ])
        seg_map = ia.SegmentationMapsOnImage(mask_nd, shape=img_nd.shape)
        image_aug, seg_aug = seq(image=img_nd, segmentation_maps=seg_map)
        seg_map = seg_aug.get_arr()
        img_trans = image_aug.transpose((2, 0, 1))
        if img_trans.max() > 1:
            img_trans = img_trans / 255
        seg_map = np.expand_dims(seg_map, axis=2)
        seg_trans = seg_map.transpose((2, 0, 1))
        if seg_trans.max() > 1:
            seg_trans = seg_trans / 255

        return img_trans, seg_trans
Example #8
    def test_alpha_with_draw_background_and_more_than_one_channel(self):
        # only segmap visible in foreground + multiple channels in segmap
        image = self.image

        arr_channel_1 = np.int32([[0, 1, 5], [0, 1, 1], [0, 4, 1]])
        arr_channel_2 = np.int32([[1, 1, 0], [2, 2, 0], [1, 1, 0]])
        arr_channel_3 = np.int32([[1, 0, 0], [0, 1, 0], [0, 0, 3]])
        arr_multi = np.stack([arr_channel_1, arr_channel_2, arr_channel_3],
                             axis=-1)

        col = ia.SegmentationMapsOnImage.DEFAULT_SEGMENT_COLORS
        expected_channel_1 = np.uint8([[image[0, 0, :], col[1], col[5]],
                                       [image[1, 0, :], col[1], col[1]],
                                       [image[2, 0, :], col[4], col[1]]])
        expected_channel_2 = np.uint8([[col[1], col[1], image[0, 2, :]],
                                       [col[2], col[2], image[1, 2, :]],
                                       [col[1], col[1], image[2, 2, :]]])
        expected_channel_3 = np.uint8(
            [[col[1], image[0, 1, :], image[0, 2, :]],
             [image[1, 0, :], col[1], image[1, 2, :]],
             [image[2, 0, :], image[2, 1, :], col[3]]])

        segmap_multi = ia.SegmentationMapsOnImage(arr_multi, shape=(3, 3, 3))

        observed = segmap_multi.draw_on_image(image,
                                              alpha=1.0,
                                              draw_background=False)

        assert isinstance(observed, list)
        assert len(observed) == 3
        assert np.array_equal(observed[0], expected_channel_1)
        assert np.array_equal(observed[1], expected_channel_2)
        assert np.array_equal(observed[2], expected_channel_3)
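As the expected arrays above suggest, draw_on_image() returns a list with one blended RGB image per channel of the segmentation map, colored with DEFAULT_SEGMENT_COLORS. A minimal usage sketch (assuming imgaug >= 0.4; image and map are toy data):

import numpy as np
import imgaug as ia

image = np.zeros((3, 3, 3), dtype=np.uint8)
arr = np.int32([[0, 1, 1], [0, 2, 1], [0, 1, 3]])
segmap = ia.SegmentationMapsOnImage(arr, shape=image.shape)

# one overlay per channel of the map; here the map has a single channel
overlays = segmap.draw_on_image(image, alpha=0.75, draw_background=False)
assert isinstance(overlays, list) and len(overlays) == 1
assert overlays[0].shape == image.shape and overlays[0].dtype == np.uint8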
Example #9
    def __getitem__(self, index):
        # read the image as a PIL Image
        img = Image.open(self.image_lists[index])
        # convert the image to a numpy array
        img = np.array(img)
        labels = self.label_lists[index]
        if self.mode != 'test':
            # read the label as a PIL Image
            label = Image.open(self.label_lists[index])
            # convert the label to a numpy array
            label = np.array(label)
            # binarize the label (255 -> 1, everything else -> 0)
            label[label != 255] = 0
            label[label == 255] = 1
            # augment image and label during training
            if (self.mode == 'train'):
                # freeze the augmentation sequence (deterministic)
                seq_det = self.seq.to_deterministic()
                # wrap the label as a SegmentationMapsOnImage so it can be
                # augmented together with the image (and visualized later)
                segmap = ia.SegmentationMapsOnImage(label, shape=label.shape)
                # augment the image
                img = seq_det.augment_image(img)
                # apply the same augmentation to the label and convert it
                # back to a numpy array
                label = seq_det.augment_segmentation_maps(
                    [segmap])[0].get_arr().astype(np.uint8)

            label = np.reshape(label, (1, ) + label.shape)
            label = torch.from_numpy(label.copy()).float()
            labels = label
        img = np.reshape(img, img.shape + (1, ))
        img = self.to_tensor(img.copy()).float()
        return img, labels
Example #10
def base_apply_augs(sample, aug):
    truth = ia.SegmentationMapsOnImage(
        sample['label/segmentation'], shape=sample['label/segmentation'].shape)
    labels = ia.SegmentationMapsOnImage(sample['labels'],
                                        shape=sample['labels'].shape)
    im = sample['image']

    im = aug(image=im)
    truth = aug(segmentation_maps=truth).get_arr()
    labels = aug(segmentation_maps=labels).get_arr()

    sample['label/segmentation'] = truth
    sample['labels'] = labels
    sample['image'] = im

    return sample
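base_apply_augs() above calls the augmenter separately for the image and each map, which keeps them aligned only when aug has already been made deterministic, since each call of a stochastic augmenter draws new random parameters. With imgaug's callable interface the image and the maps can instead be passed in a single call, which guarantees they share the same sampled transform. A small sketch of that variant (illustrative data and augmenter, assuming imgaug >= 0.4):

import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa

image = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
labels = np.zeros((32, 32, 1), dtype=np.int32)
segmap = ia.SegmentationMapsOnImage(labels, shape=image.shape)

aug = iaa.Affine(translate_px=(-4, 4))
# one call -> image and segmentation map receive identical parameters
image_aug, segmap_aug = aug(image=image, segmentation_maps=segmap)
labels_aug = segmap_aug.get_arr()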
Example #11
    def test_resize_image_to_segmentation_map(self):
        # resizing of image to segmap
        arr = np.int32([[0, 1, 1], [0, 1, 1], [0, 1, 1]])
        segmap = ia.SegmentationMapsOnImage(arr, shape=(1, 3))

        image = np.uint8([[0, 10, 20]])
        image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
        image_rs = ia.imresize_single_image(image,
                                            arr.shape[0:2],
                                            interpolation="cubic")

        a1 = 0.7
        a0 = 1.0 - a1

        observed = segmap.draw_on_image(image,
                                        alpha=a1,
                                        draw_background=True,
                                        resize="image")

        col0 = self.col(0)
        col1 = self.col(1)
        expected = np.uint8([[col0, col1, col1], [col0, col1, col1],
                             [col0, col1, col1]])
        expected = a0 * image_rs + a1 * expected
        d_max = np.max(
            np.abs(observed[0].astype(np.float32) -
                   expected.astype(np.float32)))
        assert isinstance(observed, list)
        assert len(observed) == 1
        assert observed[0].shape == expected.shape
        assert d_max <= 1.0 + 1e-4
Example #12
    def __getitem__(self, index):
        # load image
        img = cv2.imread(self.images_list[index],
                         cv2.IMREAD_GRAYSCALE).astype(np.uint8)
        labels = self.labels_list[index]  # value returned as the label in test mode
        # load label
        label = cv2.imread(self.labels_list[index],
                           cv2.IMREAD_GRAYSCALE).astype(np.uint8)
        label[label != 255] = 0
        label[label == 255] = 1

        if (self.mode == 'train'):  # augment image and label during training
            seq_det = self.seq.to_deterministic()  # freeze the augmentation sequence
            segmap = ia.SegmentationMapsOnImage(
                label,
                shape=label.shape)  # wrap the label for joint augmentation/visualization
            img = seq_det.augment_image(img)  # augment the image
            label = seq_det.augment_segmentation_maps([
                segmap
            ])[0].get_arr().astype(np.uint8)  # apply the same augmentation to the label
        elif self.mode == 'val':
            label = np.reshape(label, (1, ) + label.shape)
            label = torch.from_numpy(label.copy()).float()
            labels = label

        else:
            label = np.reshape(label, (1, ) + label.shape)
            label = torch.from_numpy(label.copy()).float()
            labels = [label, labels]

        img = np.reshape(img, img.shape + (1, ))
        img = self.to_tensor(img.copy()).float()

        return img, labels
Example #13
    def test_resize_segmentation_map_to_image(self):
        # resizing of segmap to image
        arr = np.int32([[0, 1, 1]])
        segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3))

        image = np.uint8([[0, 10, 20], [30, 40, 50], [60, 70, 80]])
        image = np.tile(image[:, :, np.newaxis], (1, 1, 3))

        a1 = 0.7
        a0 = 1.0 - a1

        observed = segmap.draw_on_image(image,
                                        alpha=a1,
                                        draw_background=True,
                                        resize="segmentation_map")

        col0 = self.col(0)
        col1 = self.col(1)
        expected = np.uint8([[col0, col1, col1], [col0, col1, col1],
                             [col0, col1, col1]])
        expected = a0 * image + a1 * expected
        d_max = np.max(
            np.abs(observed[0].astype(np.float32) -
                   expected.astype(np.float32)))
        assert isinstance(observed, list)
        assert len(observed) == 1
        assert observed[0].shape == expected.shape
        assert d_max <= 1.0 + 1e-4
Example #14
    def _aug(self, image, mask):
        mask = ia.SegmentationMapsOnImage(mask, image.shape[:2])

        seq = iaa.Sequential(
            [
                iaa.Fliplr(0.5),
                iaa.Rot90([0, 3]),
                iaa.SomeOf(
                    1,
                    [
                        iaa.Affine(scale={"x": (0.7, 1.5), "y": (1.6, 1.5)}),
                        iaa.Affine(rotate=(-30, 30)),
                        #     iaa.Add((-110, 111)),
                        #     iaa.GaussianBlur(sigma=1.8 * np.random.rand()),
                        #     iaa.Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5)),
                        #     iaa.AdditiveGaussianNoise(scale=0.05*255),
                        #     iaa.Multiply((0.5, 1.5)),
                        #     iaa.Affine(shear=(-20, 20)),
                        #     iaa.PiecewiseAffine(scale=(0.01, 0.02)),
                        iaa.PerspectiveTransform(scale=(0.01, 0.1)),
                    ],
                ),
            ],
            random_order=True,
        )

        image, mask = seq(image=image, segmentation_maps=mask)
        mask = mask.get_arr_int().astype(np.uint8)
        return image, mask
Example #15
def _augment_seg(img: np.ndarray,
                 seg: np.ndarray,
                 augmentation_name: str,
                 prefix="_apply_aug",
                 resize_shape: t.Tuple[int, int] = ()):

    augmentation_func = getattr(sys.modules[__name__],
                                f"{prefix}_{augmentation_name}")

    # Create a deterministic augmentation from the random one
    aug_det = augmentation_func().to_deterministic()
    if resize_shape:
        img = Image.fromarray(img).resize(resize_shape, Image.BICUBIC)
        img = np.array(img, dtype='uint8')
    segmap = ia.SegmentationMapsOnImage(seg,
                                        nb_classes=np.max(seg) + 1,
                                        shape=img.shape)
    # Augment the input image
    image_aug, segmap = _safe_augmentation(image=img,
                                           segmentations=segmap,
                                           aug_func=aug_det)

    segmap_aug = segmap.get_arr_int()

    return image_aug, segmap_aug
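This example still passes the legacy nb_classes argument and reads the result with get_arr_int(); with the current SegmentationMapsOnImage class the set of classes is inferred from the array itself, so neither is needed. A minimal sketch of the newer form (assuming imgaug >= 0.4):

import numpy as np
import imgaug as ia

img = np.zeros((3, 3, 3), dtype=np.uint8)
seg = np.array([[0, 1, 2], [0, 1, 2], [0, 0, 2]], dtype=np.int32)

# no nb_classes: the class ids are taken from the array values
segmap = ia.SegmentationMapsOnImage(seg, shape=img.shape)
assert np.array_equal(segmap.get_arr(), seg)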
Example #16
    def applyAugmentations(self, img, gt_bboxes, gt_masks, gt_bboxes_ignore, isTrain):

        bboxesDType = gt_bboxes.dtype
        masksDType = gt_masks.dtype

        bbox_split = len(gt_bboxes)
        all_bboxes = np.concatenate((gt_bboxes, gt_bboxes_ignore), axis=0)

        imgaugBBoxes = [imgaug.BoundingBox(x[0], x[1], x[2], x[3]) for x in all_bboxes]
        imgaugBBoxesOnImage = imgaug.BoundingBoxesOnImage(imgaugBBoxes, img.shape)

        imgaugSegmentationMapsOnImage = imgaug.SegmentationMapsOnImage(gt_masks.transpose(1, 2, 0),
                                                                       tuple(gt_masks.shape[1:]))

        batch = imgaug.Batch(images=[img], segmentation_maps=imgaugSegmentationMapsOnImage,
                             bounding_boxes=imgaugBBoxesOnImage)
        aug = self.augmentor(isTrain)
        augmentedBatch = aug.augment_batch(batch)

        img_aug = augmentedBatch.images_aug[0]
        all_bboxes_aug = [np.array([bbox.x1, bbox.y1, bbox.x2, bbox.y2], dtype=bboxesDType) for bbox in
                          augmentedBatch.bounding_boxes_aug.bounding_boxes]
        all_bboxes_aug = np.array(all_bboxes_aug, dtype=bboxesDType)
        gt_bboxes_aug = all_bboxes_aug[:bbox_split]
        gt_bboxes_ignore_aug = all_bboxes_aug[bbox_split:]

        masks_aug = augmentedBatch.segmentation_maps_aug.arr.transpose(2, 0, 1).astype(masksDType)

        return img_aug, gt_bboxes_aug, masks_aug, gt_bboxes_ignore_aug
Example #17
    def test_segmap_with_more_than_one_channel(self):
        # test segmentation maps with multiple channels
        arr_channel_1 = np.int32([[0, 1, 5], [0, 1, 1], [0, 4, 1]])
        arr_channel_2 = np.int32([[1, 1, 0], [2, 2, 0], [1, 1, 0]])
        arr_channel_3 = np.int32([[1, 0, 0], [0, 1, 0], [0, 0, 3]])
        arr_multi = np.stack([arr_channel_1, arr_channel_2, arr_channel_3],
                             axis=-1)

        col = ia.SegmentationMapsOnImage.DEFAULT_SEGMENT_COLORS
        expected_channel_1 = np.uint8([[col[0], col[1], col[5]],
                                       [col[0], col[1], col[1]],
                                       [col[0], col[4], col[1]]])
        expected_channel_2 = np.uint8([[col[1], col[1], col[0]],
                                       [col[2], col[2], col[0]],
                                       [col[1], col[1], col[0]]])
        expected_channel_3 = np.uint8([[col[1], col[0], col[0]],
                                       [col[0], col[1], col[0]],
                                       [col[0], col[0], col[3]]])

        segmap = ia.SegmentationMapsOnImage(arr_multi, shape=(3, 3, 3))

        observed = segmap.draw()

        assert isinstance(observed, list)
        assert len(observed) == 3
        assert np.array_equal(observed[0], expected_channel_1)
        assert np.array_equal(observed[1], expected_channel_2)
        assert np.array_equal(observed[2], expected_channel_3)
Example #18
def augment_seg(img, seg, seq):
    aug_det = seq.to_deterministic()
    image_aug = aug_det.augment_image(img)
    segmap = ia.SegmentationMapsOnImage(seg, shape=img.shape)
    segmap_aug = aug_det.augment_segmentation_maps(segmap)
    segmap_aug = segmap_aug.get_arr()

    return image_aug, segmap_aug
Example #19
    def test_uint64_fails(self):
        got_exception = False
        try:
            arr = np.array([[0, 0, 1], [0, 2, 1], [1, 3, 1]], dtype=np.int64)
            _segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3, 3))
        except Exception as exc:
            assert "only int8, int16 and int32 " in str(exc)
            got_exception = True
        assert got_exception
Example #20
    def __getitem__(self, index):
        # load image
        img_cur = cv2.imread(self.images_list[index], cv2.IMREAD_GRAYSCALE)
        length = len(self.images_list)

        # 2.5D
        pre_index = 0 if index == 0 else index - 1
        next_index = index if index >= length - 1 else index + 1

        if pre_index == index:
            img_pre = img_cur
        else:
            if self.images_list[pre_index].split(
                    '/')[-2] == self.images_list[index].split('/')[-2]:
                img_pre = cv2.imread(self.images_list[pre_index],
                                     cv2.IMREAD_GRAYSCALE)
            else:
                img_pre = img_cur

        if next_index == index:
            img_next = img_cur
        else:
            if self.images_list[next_index].split(
                    '/')[-2] == self.images_list[index].split('/')[-2]:
                img_next = cv2.imread(self.images_list[next_index],
                                      cv2.IMREAD_GRAYSCALE)
            else:
                img_next = img_cur

        img = np.stack((img_pre, img_cur, img_next),
                       axis=2).astype(np.uint8)  # stack 3 neighboring slices into one 3-channel image
        labels = self.labels_list[index]
        # load label
        label = cv2.imread(self.labels_list[index], cv2.IMREAD_GRAYSCALE)
        label = np.array(label)
        # label = np.ones(shape=(label.shape[0],label.shape[1]),dtype=np.uint8)
        label[label != 255] = 0
        label[label == 255] = 1

        # augment image and label
        if (self.mode == 'train'):  # augment image and label during training
            seq_det = self.seq.to_deterministic()  # freeze the augmentation sequence
            segmap = ia.SegmentationMapsOnImage(
                label,
                shape=label.shape)  # wrap the label for joint augmentation/visualization
            img = seq_det.augment_image(img)  # augment the image
            label = seq_det.augment_segmentation_maps([
                segmap
            ])[0].get_arr().astype(np.uint8)  # apply the same augmentation to the label
        label = np.reshape(label, (1, ) + label.shape)
        label = torch.from_numpy(label.copy()).float()
        labels = label
        img = self.to_tensor(img.copy()).float()
        # print(img.shape)
        # print(labels.shape)
        return img, labels
Example #21
    def __getitem__(self, index):
        """
        Example:
            >>> # DISABLE_DOCTEST
            >>> self = SegmentationDataset.demo(augmenter=True)
            >>> output = self[10]
            >>> # xdoctest: +REQUIRES(--show)
            >>> import kwplot
            >>> plt = kwplot.autoplt()
            >>> colored_labels = self._colorized_labels(output['class_idxs'])
            >>> kwplot.figure(doclf=True)
            >>> kwplot.imshow(output['im'])
            >>> kwplot.imshow(colored_labels, alpha=.4)
        """
        outer, inner = self.subindex.unravel(index)
        gid = self._gids[outer]
        slider = self._sliders[outer]
        slices = slider[inner]

        tr = {'gid': gid, 'slices': slices}
        sample = self.sampler.load_sample(tr, with_annots=['segmentation'])

        imdata = sample['im']
        heatmap = self._sample_to_sseg_heatmap(sample)

        heatmap = heatmap.numpy()
        heatmap.data['class_idx'] = heatmap.data['class_idx'].astype(np.int32)
        cidx_segmap = heatmap.data['class_idx']

        if self.augmenter:
            augdet = self.augmenter.to_deterministic()
            imdata = augdet.augment_image(imdata)
            if hasattr(imgaug, 'SegmentationMapsOnImage'):
                # Oh imgaug, stop breaking.
                cidx_segmap_oi = imgaug.SegmentationMapsOnImage(cidx_segmap, cidx_segmap.shape)
                cidx_segmap_oi = augdet.augment_segmentation_maps(cidx_segmap_oi)
                assert cidx_segmap_oi.arr.shape[2] == 1
                cidx_segmap = cidx_segmap_oi.arr[..., 0]
                cidx_segmap = np.ascontiguousarray(cidx_segmap)
            else:
                cidx_segmap_oi = imgaug.SegmentationMapOnImage(cidx_segmap, cidx_segmap.shape, nb_classes=len(self.classes))
                cidx_segmap_oi = augdet.augment_segmentation_maps([cidx_segmap_oi])[0]
                cidx_segmap = cidx_segmap_oi.arr.argmax(axis=2)

        im_chw = torch.FloatTensor(
            imdata.transpose(2, 0, 1).astype(np.float32) / 255.)

        cidxs = torch.LongTensor(cidx_segmap)
        weight = (1 - (cidxs == 0).float())

        output = {
            'im': im_chw,
            'class_idxs': cidxs,
            'weight': weight,
        }
        return output
Example #22
    def __augment__(self, img, seg):
        aug_det1 = self.augmenter1.to_deterministic()
        # change only orientation and border (mirror boundaries) for both img and ground truth
        seg_aug = aug_det1.augment_image(seg)
        segmap_aug = ia.SegmentationMapsOnImage(seg_aug, shape=img.shape)
        segmap_aug = 1 * segmap_aug.get_arr()
        image_aug1 = aug_det1.augment_image(img)
        # add some noise and blurring to the image
        image_aug = self.augmenter2.augment_image(image_aug1)
        return image_aug, segmap_aug
Example #23
def test_SegmentationMapsOnImage_bool():
    # Test for #189 (boolean mask inputs into SegmentationMapsOnImage not working)
    for dt in [bool, np.bool_]:  # np.bool_ for compatibility with newer numpy
        arr = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=dt)
        assert arr.dtype.kind == "b"
        segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3))
        assert np.array_equal(
            segmap.arr,
            np.int32([[0, 0, 0], [0, 1, 0], [0, 0, 0]])[:, :, np.newaxis])
        assert segmap.get_arr().dtype.name == arr.dtype.name
        assert np.array_equal(segmap.get_arr(), arr)
Example #24
    def test_bool_arr_3d(self):
        arr = np.array([[0, 0, 1], [0, 1, 1], [1, 1, 1]], dtype=bool).reshape(
            (3, 3, 1))
        arr = np.tile(arr, (1, 1, 5))

        segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3))

        assert segmap.shape == (3, 3)
        assert segmap.arr.dtype.name == "int32"
        assert segmap.arr.shape == (3, 3, 5)
        assert np.array_equal(segmap.arr, arr.astype(np.int32))
Example #25
    def test_background_threshold_leads_to_deprecation_warning(self):
        arr = np.zeros((1, 1, 1), dtype=np.int32)
        segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3))
        image = np.zeros((1, 1, 3), dtype=np.uint8)

        with warnings.catch_warnings(record=True) as caught_warnings:
            warnings.simplefilter("always")
            _ = segmap.draw_on_image(image, background_threshold=0.01)

        assert len(caught_warnings) == 1
        assert ("The argument `background_threshold` is deprecated"
                in str(caught_warnings[0].message))
Example #26
    def test_two_images_and_segmaps(self):
        rng = iarandom.RNG(0)
        images = rng.integers(0, 256, size=(2, 256, 256, 3), dtype=np.uint8)
        sm1 = np.zeros((256, 256, 1), dtype=np.int32)
        sm1[128 - 25:128 + 25, 128 - 25:128 + 25] = 1
        sm2 = np.zeros((256, 256, 1), dtype=np.int32)
        sm2[64 - 25:64 + 25, 64 - 25:64 + 25] = 2
        sm2[192 - 25:192 + 25, 192 - 25:192 + 25] = 3
        segmap1 = ia.SegmentationMapsOnImage(sm1, shape=images[0].shape)
        segmap2 = ia.SegmentationMapsOnImage(sm2, shape=images[1].shape)
        image1_w_overlay = segmap1.draw_on_image(images[0],
                                                 draw_background=True)[0]
        image2_w_overlay = segmap2.draw_on_image(images[1],
                                                 draw_background=True)[0]

        debug_image = iaa.draw_debug_image(
            images, segmentation_maps=[segmap1, segmap2])

        assert self._image_contains(images[0, ...], debug_image)
        assert self._image_contains(images[1, ...], debug_image)
        assert self._image_contains(image1_w_overlay, debug_image)
        assert self._image_contains(image2_w_overlay, debug_image)
Example #27
    def apply_label(self, label):

        if self.img_type == 'numpy':
            label = 255 * label
        elif self.img_type == 'pil':
            label = numpy_to_pil(255 * pil_to_numpy(label))
        elif self.img_type == 'imgaug':
            label = ia.SegmentationMapsOnImage(255 * label.get_arr(),
                                               label.shape)
        elif self.img_type == 'tensor':
            label = label.mul(255)

        return label
Example #28
    def test_fill_from_augmented_normalized_batch(self):
        batch = ia.UnnormalizedBatch(
            images=np.zeros((1, 2, 2, 3), dtype=np.uint8),
            heatmaps=[np.zeros((2, 2, 1), dtype=np.float32)],
            segmentation_maps=[np.zeros((2, 2, 1), dtype=np.int32)],
            keypoints=[[(0, 0)]],
            bounding_boxes=[[ia.BoundingBox(0, 0, 1, 1)]],
            polygons=[[ia.Polygon([(0, 0), (1, 0), (1, 1)])]],
            line_strings=[[ia.LineString([(0, 0), (1, 0)])]])
        batch_norm = ia.Batch(
            images=np.zeros((1, 2, 2, 3), dtype=np.uint8),
            heatmaps=[
                ia.HeatmapsOnImage(np.zeros((2, 2, 1), dtype=np.float32),
                                   shape=(2, 2, 3))
            ],
            segmentation_maps=[
                ia.SegmentationMapsOnImage(np.zeros((2, 2, 1), dtype=np.int32),
                                           shape=(2, 2, 3))
            ],
            keypoints=[
                ia.KeypointsOnImage([ia.Keypoint(0, 0)], shape=(2, 2, 3))
            ],
            bounding_boxes=[
                ia.BoundingBoxesOnImage([ia.BoundingBox(0, 0, 1, 1)],
                                        shape=(2, 2, 3))
            ],
            polygons=[
                ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
                                   shape=(2, 2, 3))
            ],
            line_strings=[
                ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
                                      shape=(2, 2, 3))
            ])
        batch_norm.images_aug = batch_norm.images_unaug
        batch_norm.heatmaps_aug = batch_norm.heatmaps_unaug
        batch_norm.segmentation_maps_aug = batch_norm.segmentation_maps_unaug
        batch_norm.keypoints_aug = batch_norm.keypoints_unaug
        batch_norm.bounding_boxes_aug = batch_norm.bounding_boxes_unaug
        batch_norm.polygons_aug = batch_norm.polygons_unaug
        batch_norm.line_strings_aug = batch_norm.line_strings_unaug

        batch = batch.fill_from_augmented_normalized_batch(batch_norm)

        assert batch.images_aug.shape == (1, 2, 2, 3)
        assert ia.is_np_array(batch.heatmaps_aug[0])
        assert ia.is_np_array(batch.segmentation_maps_aug[0])
        assert batch.keypoints_aug[0][0] == (0, 0)
        assert batch.bounding_boxes_aug[0][0].x1 == 0
        assert batch.polygons_aug[0][0].exterior[0][0] == 0
        assert batch.line_strings_aug[0][0].coords[0][0] == 0
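The test above drives the Batch/UnnormalizedBatch conversion machinery by hand; in application code the same result is usually obtained by handing an UnnormalizedBatch to an augmenter's augment_batches(), which normalizes, augments, and de-normalizes in one step. A short sketch (assuming imgaug >= 0.4; the augmenter and data are placeholders):

import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa

batch = ia.UnnormalizedBatch(
    images=np.zeros((1, 2, 2, 3), dtype=np.uint8),
    segmentation_maps=[np.zeros((2, 2, 1), dtype=np.int32)])

aug = iaa.Fliplr(1.0)
# augment_batches() yields augmented batches of the same (unnormalized) type
batch_aug = list(aug.augment_batches([batch], background=False))[0]

assert batch_aug.images_aug.shape == (1, 2, 2, 3)
assert batch_aug.segmentation_maps_aug[0].shape == (2, 2, 1)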
Example #29
def numpy_to_imgaug(img, is_label=False, is_weight=False, img_shape=None):

    if img_shape is None:
        img_shape = img.shape

    if is_label:
        if img.dtype != 'uint8':
            img = img.astype(np.uint8)
        return ia.SegmentationMapsOnImage(img, img_shape)
    elif is_weight:
        return ia.HeatmapsOnImage(img, img_shape)
    else:
        # Assume a uint8 numpy array
        return img
Example #30
def _augment_seg(img, seg, augmentation_name="aug_all"):

    global loaded_augmentation_name

    if (not IMAGE_AUGMENTATION_SEQUENCE) or\
       (augmentation_name != loaded_augmentation_name):
        _load_augmentation(augmentation_name)
        loaded_augmentation_name = augmentation_name

    segmap = ia.SegmentationMapsOnImage(seg, shape=img.shape)  # note the plural "Maps" (vs. the older SegmentationMapOnImage)
    image_aug, segmap_aug = IMAGE_AUGMENTATION_SEQUENCE(image=img, segmentation_maps=segmap)
    segmap_aug = segmap_aug.get_arr()

    return image_aug, segmap_aug