Example #1
def normalize_heatmaps(inputs, shapes=None):
    # TODO get rid of this deferred import
    from imgaug.augmentables.heatmaps import HeatmapsOnImage

    shapes = _preprocess_shapes(shapes)
    ntype = estimate_heatmaps_norm_type(inputs)
    _assert_exactly_n_shapes_partial = functools.partial(
        _assert_exactly_n_shapes,
        from_ntype=ntype, to_ntype="List[HeatmapsOnImage]", shapes=shapes)

    if ntype == "None":
        return None
    elif ntype == "array[float]":
        _assert_single_array_ndim(inputs, 4, "(N,H,W,C)", "HeatmapsOnImage")
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [HeatmapsOnImage(attr_i, shape=shape_i)
                for attr_i, shape_i in zip(inputs, shapes)]
    elif ntype == "HeatmapsOnImage":
        return [inputs]
    elif ntype == "iterable[empty]":
        return None
    elif ntype == "iterable-array[float]":
        _assert_many_arrays_ndim(inputs, 3, "(H,W,C)", "HeatmapsOnImage")
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [HeatmapsOnImage(attr_i, shape=shape_i)
                for attr_i, shape_i in zip(inputs, shapes)]
    else:
        assert ntype == "iterable-HeatmapsOnImage"
        return inputs  # len allowed to differ from len of images
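The normalizer above maps several input forms to List[HeatmapsOnImage]. A minimal sketch with assumed shapes and random data, illustrating the array, single-object, and iterable-of-arrays cases:

import numpy as np
from imgaug.augmentables.heatmaps import HeatmapsOnImage

# "array[float]": one (N, H, W, C) float array plus one image shape per heatmap.
batch = np.random.rand(2, 4, 4, 1).astype(np.float32)
shapes = [(8, 8, 3), (8, 8, 3)]
normalized = [HeatmapsOnImage(arr_i, shape=shape_i)
              for arr_i, shape_i in zip(batch, shapes)]

# "HeatmapsOnImage": a single object simply becomes a one-element list.
single = HeatmapsOnImage(np.random.rand(4, 4, 1).astype(np.float32), shape=(8, 8, 3))

# "iterable-array[float]": a list of per-image (H, W, C) float arrays,
# again paired with one image shape each.
per_image = [np.random.rand(4, 4, 1).astype(np.float32) for _ in range(2)]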
Example #2
    def _weight_on_image(weight, image=None, wandb_image=True):
        """Visualize a C-dimensional weight tensor as a RGB heatmap.
            If `image` is given, the heatmap will be drawn on image.
            Otherwise return a standalone heatmap.

        Args:
            weight (Tensor): C x H_b x W_b, with C being the number of channels.
                In case of a multi-channel heatmap, each channel will be visualized
                separately and stacked together in the horizontal direction.
            image (Optional[Tensor]): 3 x H x W
            wandb_image (bool): if True, return a wandb.Image object,
                otherwise return a numpy.ndarray

        Return:
            wandb.Image or numpy.ndarray: shape H x (C*W) x 3, where H and W
                come from `image` if given, otherwise H_b and W_b.
        """
        if image is not None:
            image = image.cpu().permute(1, 2, 0).numpy()
            heatmap_size = image.shape[:2]
        else:
            heatmap_size = weight.shape[-2:]

        weight = weight.cpu().detach().permute(1, 2, 0).numpy()
        heatmap = HeatmapsOnImage(weight, heatmap_size, weight.min(),
                                  weight.max())

        if image is not None:
            h_image = np.hstack(heatmap.draw_on_image(image, alpha=0.5))
        else:
            h_image = np.hstack(heatmap.draw())

        if wandb_image:
            return wandb.Image(h_image, caption='weights_heatmap')
        return h_image
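A self-contained sketch of the core idea in the helper above: wrap an arbitrary-range float array in HeatmapsOnImage with explicit min/max values and render one RGB panel per channel (array values and sizes are assumed):

import numpy as np
from imgaug.augmentables.heatmaps import HeatmapsOnImage

weights = np.random.randn(16, 16, 3).astype(np.float32)   # H x W x C, arbitrary range
image = np.zeros((64, 64, 3), dtype=np.uint8)

hm = HeatmapsOnImage(weights, shape=image.shape,
                     min_value=float(weights.min()),
                     max_value=float(weights.max()))
panels = hm.draw_on_image(image, alpha=0.5)   # list with one RGB image per channel
stacked = np.hstack(panels)                   # H x (C*W) x 3, as in the docstring above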
Example #3
    def __call__(self, img, segmap, softmax):

        segmap = SegmentationMapsOnImage(segmap, shape=segmap.shape)
        softmax = HeatmapsOnImage(softmax, shape=softmax.shape)

        assert img.shape[:2] == segmap.shape[:2] == softmax.shape[:2]
        for a in self.augmentations:
            img, segmap, softmax = a(img, segmap, softmax)

        return img, segmap.get_arr(), softmax.get_arr()
Example #4
    def test_augment_heatmaps__kernel_size_is_two__no_keep_size(self):
        from imgaug.augmentables.heatmaps import HeatmapsOnImage
        arr = np.float32([[0.5, 0.6, 0.7], [0.4, 0.5, 0.6]])
        heatmaps = HeatmapsOnImage(arr, shape=(6, 6, 3))
        aug = self.augmenter(2, keep_size=False)

        heatmaps_aug = aug.augment_heatmaps(heatmaps)

        expected = heatmaps.resize((1, 2))
        assert heatmaps_aug.shape == (3, 3, 3)
        assert heatmaps_aug.arr_0to1.shape == (1, 2, 1)
        assert np.allclose(heatmaps_aug.arr_0to1, expected.arr_0to1)
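The test relies on HeatmapsOnImage.resize() rescaling only the heatmap array while the stored image shape stays untouched; a minimal sketch of that behaviour (values assumed):

import numpy as np
from imgaug.augmentables.heatmaps import HeatmapsOnImage

hm = HeatmapsOnImage(np.float32([[0.5, 0.6, 0.7],
                                 [0.4, 0.5, 0.6]]), shape=(6, 6, 3))
small = hm.resize((1, 2))
print(small.arr_0to1.shape)   # (1, 2, 1): the heatmap array was resized
print(small.shape)            # (6, 6, 3): the underlying image shape is unchanged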
Example #5
    def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
        """
        Convert segmentation map to heatmaps object.

        Each segmentation map class will be represented as a single heatmap channel.

        Parameters
        ----------
        only_nonempty : bool, optional
            If True, then only heatmaps for classes that appear in the segmentation map will be
            generated. Additionally, a list of these class ids will be returned.

        not_none_if_no_nonempty : bool, optional
            If `only_nonempty` is True and for a segmentation map no channel was non-empty,
            this function usually returns None as the heatmaps object. If however this parameter
            is set to True, a heatmaps object with one channel (representing class 0)
            will be returned as a fallback in these cases.

        Returns
        -------
        imgaug.HeatmapsOnImage or None
            Segmentation map as a heatmaps object.
            If `only_nonempty` was set to True and no class appeared in the segmentation map,
            then this is None.

        class_indices : list of int
            Class ids (0 to C-1) of the classes that were actually added to the heatmaps.
            Only returned if `only_nonempty` was set to True.

        """
        # TODO get rid of this deferred import
        from imgaug.augmentables.heatmaps import HeatmapsOnImage

        if not only_nonempty:
            return HeatmapsOnImage.from_0to1(self.arr,
                                             self.shape,
                                             min_value=0.0,
                                             max_value=1.0)
        else:
            nonempty_mask = np.sum(self.arr, axis=(0, 1)) > 0 + 1e-4
            if np.sum(nonempty_mask) == 0:
                if not_none_if_no_nonempty:
                    nonempty_mask[0] = True
                else:
                    return None, []

            class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
            channels = self.arr[..., class_indices]
            return HeatmapsOnImage(channels,
                                   self.shape,
                                   min_value=0.0,
                                   max_value=1.0), class_indices
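A minimal sketch mirroring the non-`only_nonempty` branch above: one-hot encode an integer label map into a per-class 0/1 array (labels and shape assumed) and wrap it with HeatmapsOnImage.from_0to1, one heatmap channel per class:

import numpy as np
from imgaug.augmentables.heatmaps import HeatmapsOnImage

labels = np.int32([[0, 1, 1],
                   [0, 2, 2],
                   [0, 0, 2]])
n_classes = 3
one_hot = np.eye(n_classes, dtype=np.float32)[labels]   # (H, W, C) array in [0, 1]

heatmaps = HeatmapsOnImage.from_0to1(one_hot, shape=(3, 3, 3),
                                     min_value=0.0, max_value=1.0)
print(heatmaps.arr_0to1.shape)   # (3, 3, 3): one channel per class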
Example #6
    def __getitem__(self, idx):
        """Gets the next data

        :param idx:             id of element inside the pandas dataset
        :return:                input image, pafmap gt, keypoint gt, number of pafs, number of keypoints
        """
        row = self.dataset.iloc[idx]

        img = cv2.imread(row['path'])
        img_keypoints = row['keypoints']

        pafmap_mask, n_pafs = self.apply_pafmap_mask(
            img_keypoints, img.shape, self.coco_pafmap_joints
            if 'coco' in row['path'] else self.mpii_pafmap_joints)

        keypoint_mask, n_kps = self.apply_keypoint_mask(
            img_keypoints, img.shape, self.coco_keypoints
            if 'coco' in row['path'] else self.mpii_keypoints)

        img = cv2.resize(img, (self.input_shape[1], self.input_shape[0]))

        if self.augment:
            seq_det = self.seq.to_deterministic()

            pafmap_mask = HeatmapsOnImage(pafmap_mask,
                                          shape=img.shape,
                                          min_value=-1.0,
                                          max_value=1.0)

            keypoint_mask = HeatmapsOnImage(keypoint_mask,
                                            shape=img.shape,
                                            min_value=0.0,
                                            max_value=1.0)

            img = seq_det.augment_image(img)
            pafmap_mask = seq_det.augment_heatmaps(pafmap_mask).get_arr()
            keypoint_mask = seq_det.augment_heatmaps(keypoint_mask).get_arr()

        pafmap_mask = cv2.resize(pafmap_mask,
                                 (self.input_shape[1] // self.downscale,
                                  self.input_shape[0] // self.downscale))
        keypoint_mask = cv2.resize(keypoint_mask,
                                   (self.input_shape[1] // self.downscale,
                                    self.input_shape[0] // self.downscale))

        img = np.transpose(img, (2, 0, 1)).copy() / 255.
        pafmap_mask = np.transpose(pafmap_mask, (2, 0, 1)).copy()
        keypoint_mask = np.transpose(keypoint_mask, (2, 0, 1)).copy()

        return img, pafmap_mask, keypoint_mask, n_pafs, n_kps
Example #7
File: test_flip.py  Project: tujizju/imgaug
    def heatmaps_flipped(self):
        heatmaps_arr = np.float32([
            [0.75, 0.75, 0.75],
            [0.00, 0.50, 0.75],
            [0.00, 0.50, 0.75],
        ])
        return HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
Example #8
    def _depth_image(self, depth_tensor, caption='depth', wandb_image=True):
        depth = np.float32(
            depth_tensor.permute(1, 2, 0).cpu().detach().numpy())
        if self.use_log_depth:
            mask = np.isfinite(depth)
            depth = -depth
            depth[~mask] = depth[mask].min()
        else:
            mask = depth > 0
            depth[mask] = -np.log(
                depth[mask])  # plotting in log of inverse depth
            depth[~mask] = depth[mask].min(
            )  # assigning invalid pixels to minimum value
        heatmap = HeatmapsOnImage(depth, depth.shape, depth.min(), depth.max())
        heatmap_rgb = heatmap.draw(cmap='viridis')[0]

        if wandb_image:
            return wandb.Image(heatmap_rgb, caption=caption)
        return heatmap_rgb
Example #9
    def test_augment_heatmaps__kernel_size_is_two__keep_size(self):
        from imgaug.augmentables.heatmaps import HeatmapsOnImage
        arr = np.float32([[0.5, 0.6, 0.7], [0.4, 0.5, 0.6]])
        heatmaps = HeatmapsOnImage(arr, shape=(6, 6, 3))
        aug = self.augmenter(2, keep_size=True)

        heatmaps_aug = aug.augment_heatmaps(heatmaps)

        assert heatmaps_aug.shape == (6, 6, 3)
        assert np.allclose(heatmaps_aug.arr_0to1, arr[..., np.newaxis])
Example #10
def process_augmentation_depth_estimation(augmenter, image, depth):
    """
    processes data augmentation for the current image and the appropriate mask 
    """
    # convert depth into heatmap to use it in imgaug
    heatmap = HeatmapsOnImage(depth.astype(
        np.float32), shape=image.shape, min_value=0.0, max_value=255.0)
    aug_image, aug_depth = augmenter(
        image=image, heatmaps=heatmap)
    return aug_image, aug_depth.get_arr().astype(np.uint8)
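Hedged usage sketch for the helper above; the horizontal-flip augmenter and the random image/depth arrays are illustrative stand-ins, not part of the original code:

import numpy as np
import imgaug.augmenters as iaa

augmenter = iaa.Fliplr(1.0)
image = np.zeros((32, 32, 3), dtype=np.uint8)
depth = (np.random.rand(32, 32) * 255).astype(np.uint8)

aug_image, aug_depth = process_augmentation_depth_estimation(augmenter, image, depth)
# aug_image and aug_depth are both flipped; aug_depth is back to uint8 via get_arr()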
Example #11
    def __getitem__(self, index) -> dict:
        res = super().__getitem__(index)
        heat_keypoints = res[HK.AUG_KEYPOINTS] * self.h_r_ratio  # Shape: (P, J, X)
        res[HK.HEAT_KEYPOINTS] = heat_keypoints

        w, h = self.heat_size
        xy_mesh = np.meshgrid(np.arange(0, w), np.arange(0, h))  # shape: (X, H, W)
        xy_mesh = np.asarray(xy_mesh, dtype=np.float32)
        distance_map = heat_keypoints[..., np.newaxis][..., np.newaxis] - xy_mesh  # Shape: (PJXHW)
        pcm_map = np.exp(-(np.square(distance_map).sum(axis=2)) / np.square(self.__theta))  # Shape: (PJHW)
        vis = res[HK.VISIBILITIES]  # Shape: (PJ)
        vis = np.expand_dims(vis, (2, 3))  # Shape: (PJHW)
        # Occlusion masks
        vis_all = np.logical_or(vis == 1, vis == 2).astype(np.float32)  # Shape: (PJHW)
        vis_no_occ = (vis == 1).astype(np.float32)  # Shape: (PJHW)
        pcm_vis_all = pcm_map * vis_all
        pcm_vis_no_occ = pcm_map * vis_no_occ
        # Merge the same joint across different persons
        pcm_vis_all = np.amax(pcm_vis_all, axis=0)  # Shape: (JHW)
        pcm_vis_no_occ = np.amax(pcm_vis_no_occ, axis=0)
        res[HK.PCM_ALL] = pcm_vis_all
        res[HK.PCM_NOT_OCC] = pcm_vis_no_occ
        # Visual Debug
        if 'visual_debug' in self.kwargs and self.kwargs.get('visual_debug'):
            debug_pcm_all = pcm_vis_all.max(axis=0, initial=0)  # HW
            debug_pcm_all = HeatmapsOnImage(debug_pcm_all, shape=self.heat_size, min_value=0.0, max_value=1.0)
            res[HK.DEBUG_PCM_ALL] = debug_pcm_all.draw_on_image(res[HK.AUG_IMAGE])[0]

            debug_pcm_noocc = pcm_vis_no_occ.max(axis=0, initial=0)
            debug_pcm_noocc = HeatmapsOnImage(debug_pcm_noocc, shape=self.heat_size, min_value=0.0, max_value=1.0)
            res[HK.DEBUG_PCM_NOOCC] = debug_pcm_noocc.draw_on_image(res[HK.AUG_IMAGE])[0]

        return res
Example #12
def process_segmentation_preds(segmentation_predictions, test_df):
    nums = [None for i in range(len(test_df))]

    for i in tqdm(range(len(test_df)), desc='postprocessing...'):
        image = Image.open(os.path.join(hparams.data_path,
                                        test_df['file'][i])).convert('RGB')
        image = np.asarray(image)
        image = pad_transform(image=image)

        mask = segmentation_predictions[i][0].astype(np.float32)
        heatmap = HeatmapsOnImage(mask, shape=image.shape[:2])
        heatmap = heatmap.resize(image.shape[:2])
        mask = heatmap.get_arr()
        mask = (mask > 0.5).astype(np.uint8)
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        contours = [
            np.int0(cv2.boxPoints(cv2.minAreaRect(contour)))
            for contour in contours if cv2.contourArea(contour) > MIN_AREA
        ]
        nums[i] = [{'box': contour.tolist()} for contour in contours]
    return nums
Example #13
    def test_augment_heatmaps__kernel_size_is_two__no_keep_size(self):
        from imgaug.augmentables.heatmaps import HeatmapsOnImage
        arr = np.float32([[0.5, 0.6, 0.7], [0.4, 0.5, 0.6]])
        heatmaps = HeatmapsOnImage(arr, shape=(6, 6, 3))
        aug = self.augmenter(2, keep_size=False)

        heatmaps_aug = aug.augment_heatmaps(heatmaps)

        # heatmap augmentation is only supposed to update the image shape, since
        # the library can handle heatmaps of a different size than the image,
        # so the heatmap array itself stays the same
        assert heatmaps_aug.shape == (3, 3, 3)
        assert np.allclose(heatmaps_aug.arr_0to1, arr[..., np.newaxis])
Example #14
    def transform(self, img, lbl, lbl_seg, index):
        """ perform standardisation and resizing """

        img = np.array(
            Image.fromarray(img).resize(
                (self.img_size[1], self.img_size[0])))  # uint8 with RGB mode
        img = img[:, :, ::-1]  # RGB -> BGR
        img = img.astype(np.float64)
        if self.img_norm:
            img = (img - [103.53, 116.28, 123.675
                          ]) / [57.375, 57.120000000000005, 58.395]
        img = img.transpose(2, 0, 1)  # NHWC -> NCHW

        classes = np.unique(lbl)
        lbl = lbl.astype(float)
        lbl = np.array(
            Image.fromarray(lbl).resize((self.img_size[1], self.img_size[0]),
                                        resample=Image.NEAREST))
        lbl = lbl.astype(int)

        if not np.all(classes == np.unique(lbl)):
            print("WARN: resizing labels yielded fewer classes")

        if not np.all(
                np.unique(lbl[lbl != self.ignore_index]) < self.n_classes):
            print("after det", classes, np.unique(lbl))
            print(self.n_classes, index)
            raise ValueError("Segmentation map contained invalid class values")

        lbl_seg = HeatmapsOnImage(lbl_seg, shape=lbl_seg.shape)
        lbl_seg = self.softmax_resize_seq(heatmaps=lbl_seg).get_arr()
        lbl_seg = lbl_seg.transpose(2, 0, 1)

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        lbl_seg = torch.from_numpy(lbl_seg).float()

        return img, lbl, lbl_seg
Example #15
File: segmaps.py  Project: AtomWrite/imgaug
    def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
        """
        Convert segmentation map to heatmaps object.

        Each segmentation map class will be represented as a single heatmap channel.

        Parameters
        ----------
        only_nonempty : bool, optional
            If True, then only heatmaps for classes that appear in the segmentation map will be
            generated. Additionally, a list of these class ids will be returned.

        not_none_if_no_nonempty : bool, optional
            If `only_nonempty` is True and for a segmentation map no channel was non-empty,
            this function usually returns None as the heatmaps object. If however this parameter
            is set to True, a heatmaps object with one channel (representing class 0)
            will be returned as a fallback in these cases.

        Returns
        -------
        imgaug.HeatmapsOnImage or None
            Segmentation map as a heatmaps object.
            If `only_nonempty` was set to True and no class appeared in the segmentation map,
            then this is None.

        class_indices : list of int
            Class ids (0 to C-1) of the classes that were actually added to the heatmaps.
            Only returned if `only_nonempty` was set to True.

        """
        # TODO get rid of this deferred import
        from imgaug.augmentables.heatmaps import HeatmapsOnImage

        if not only_nonempty:
            return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=0.0, max_value=1.0)
        else:
            nonempty_mask = np.sum(self.arr, axis=(0, 1)) > 0 + 1e-4
            if np.sum(nonempty_mask) == 0:
                if not_none_if_no_nonempty:
                    nonempty_mask[0] = True
                else:
                    return None, []

            class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
            channels = self.arr[..., class_indices]
            return HeatmapsOnImage(channels, self.shape, min_value=0.0, max_value=1.0), class_indices
Example #16
    def test_augment_heatmaps(self):
        from imgaug.augmentables.heatmaps import HeatmapsOnImage

        def do_return_augmentables(heatmaps, parents, hooks):
            return heatmaps

        aug_mock = mock.MagicMock(spec=meta.Augmenter)
        aug_mock.augment_heatmaps.side_effect = do_return_augmentables
        hm = np.ones((8, 12, 1), dtype=np.float32)
        hmoi = HeatmapsOnImage(hm, shape=(16, 24, 3))

        aug = iaa.WithHueAndSaturation(aug_mock)
        hmoi_aug = aug.augment_heatmaps(hmoi)
        assert hmoi_aug.shape == (16, 24, 3)
        assert hmoi_aug.arr_0to1.shape == (8, 12, 1)

        assert aug_mock.augment_heatmaps.call_count == 1
Example #17
def process_augmentation_segmentation(augmenter, image, mask, depth=None):
    """
    Applies data augmentation to the current image and its corresponding mask for semantic segmentation.
    """
    segmap = SegmentationMapsOnImage(mask, shape=image.shape)

    # use depth image
    if depth is not None:
        # convert depth into heatmap to use it in imgaug
        heatmap = HeatmapsOnImage(depth.astype(
            np.float32), shape=image.shape, min_value=0.0, max_value=255.0)
        aug_image, aug_mask, aug_depth = augmenter(
            image=image, segmentation_maps=segmap, heatmaps=heatmap)
        return aug_image, aug_mask.get_arr(), aug_depth.get_arr().astype(np.uint8)
    # only RGB
    else:
        aug_image, aug_seg = augmenter(image=image, segmentation_maps=segmap)
        return aug_image, aug_seg.get_arr(), None
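Hedged usage sketch for the segmentation helper above; the affine augmenter, toy mask, and depth map are assumptions for illustration:

import numpy as np
import imgaug.augmenters as iaa

augmenter = iaa.Affine(translate_px={"x": 4})
image = np.zeros((32, 32, 3), dtype=np.uint8)
mask = np.zeros((32, 32), dtype=np.int32)
mask[8:16, 8:16] = 1
depth = (np.random.rand(32, 32) * 255).astype(np.uint8)

# With a depth map: image, mask and depth are transformed consistently.
aug_image, aug_mask, aug_depth = process_augmentation_segmentation(
    augmenter, image, mask, depth=depth)

# RGB only: the third return value is None.
aug_image, aug_mask, _ = process_augmentation_segmentation(augmenter, image, mask)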
Example #18
File: data.py  Project: zylove006/imgaug
def quokka_heatmap(size=None, extract=None):
    """Return a heatmap (here: depth map) for the standard example quokka image.

    Added in 0.5.0. (Moved from ``imgaug.imgaug``.)

    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`~imgaug.imgaug.quokka`.

    extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage
        See :func:`~imgaug.imgaug.quokka`.

    Returns
    -------
    imgaug.augmentables.heatmaps.HeatmapsOnImage
        Depth map as a heatmap object. Values close to ``0.0`` denote objects
        that are close to the camera. Values close to ``1.0`` denote objects
        that are furthest away (among all shown objects).

    """
    # TODO get rid of this deferred import
    from . import imgaug as ia
    from imgaug.augmentables.heatmaps import HeatmapsOnImage

    img = imageio.imread(_QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
    img = ia.imresize_single_image(img, (643, 960), interpolation="cubic")

    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img = bb.extract_from_image(img)
    if size is None:
        size = img.shape[0:2]

    shape_resized = _compute_resized_shape(img.shape, size)
    img = ia.imresize_single_image(img, shape_resized[0:2])
    img_0to1 = img[..., 0]  # depth map was saved as 3-channel RGB
    img_0to1 = img_0to1.astype(np.float32) / 255.0
    img_0to1 = 1 - img_0to1  # depth map was saved as 0 being furthest away

    return HeatmapsOnImage(img_0to1, shape=img_0to1.shape[0:2] + (3,))
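Usage sketch for the example-data helper above: load the quokka depth heatmap together with the quokka image and blend them (both helpers live in imgaug's top-level namespace):

import imgaug as ia

image = ia.quokka(size=0.25)
depth = ia.quokka_heatmap(size=0.25)
overlay = depth.draw_on_image(image)[0]   # RGB visualization of the depth map
print(depth.get_arr().shape, overlay.shape)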
Example #19
    def augment(self, rgb_image: np.ndarray,
                bounding_boxes: List[Dict],
                ndvi_image: np.ndarray = None,
                forest_mask: np.ndarray = None):
        assert all(bb['bbox_mode'] == BoxMode.XYXY_ABS
                   for bb in bounding_boxes), \
            "Unsupported bbox_mode"

        bbox_on_image = BoundingBoxesOnImage([
            BoundingBox(x1=annotation["bbox"][0],
                        y1=annotation["bbox"][1],
                        x2=annotation["bbox"][2],
                        y2=annotation["bbox"][3],
                        label=annotation["category_id"])
            for annotation in bounding_boxes
        ], rgb_image.shape)

        hmap_on_image = None
        if ndvi_image is not None:
            hmap_on_image = HeatmapsOnImage(ndvi_image.astype(np.float32), rgb_image.shape,
                                            min_value=ndvi_image.min(),
                                            max_value=ndvi_image.max() + 1e-9)
        smap_on_image = None
        if forest_mask is not None:
            smap_on_image = SegmentationMapsOnImage(forest_mask,
                                                    rgb_image.shape)

        (aug_img, aug_smap,
         aug_hmap, aug_bbox) = self.augmentation_pipeline(image=rgb_image,
                                                          segmentation_maps=smap_on_image,
                                                          heatmaps=hmap_on_image,
                                                          bounding_boxes=bbox_on_image)

        return {"rgb_image": aug_img,
                "mask": aug_smap and aug_smap.arr.squeeze(),
                "ndvi": aug_hmap and aug_hmap.get_arr().squeeze(),
                "annotations": [{"bbox": [bb.x1_int, bb.y1_int, bb.x2_int, bb.y2_int],
                                 "bbox_mode": BoxMode.XYXY_ABS,
                                 "category_id": bb.label}
                                for bb in aug_bbox.clip_out_of_image()]}
Example #20
def test_Alpha():
    reseed()

    base_img = np.zeros((3, 3, 1), dtype=np.uint8)

    heatmaps_arr = np.float32([[0.0, 0.0, 1.0],
                               [0.0, 0.0, 1.0],
                               [0.0, 1.0, 1.0]])
    heatmaps_arr_r1 = np.float32([[0.0, 0.0, 0.0],
                                  [0.0, 0.0, 0.0],
                                  [0.0, 0.0, 1.0]])
    heatmaps_arr_l1 = np.float32([[0.0, 1.0, 0.0],
                                  [0.0, 1.0, 0.0],
                                  [1.0, 1.0, 0.0]])
    heatmaps = HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))

    segmaps_arr = np.int32([[0, 0, 1],
                            [0, 0, 1],
                            [0, 1, 1]])
    segmaps_arr_r1 = np.int32([[0, 0, 0],
                               [0, 0, 0],
                               [0, 0, 1]])
    segmaps_arr_l1 = np.int32([[0, 1, 0],
                               [0, 1, 0],
                               [1, 1, 0]])
    segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(3, 3, 3))

    aug = iaa.Alpha(1, iaa.Add(10), iaa.Add(20))
    observed = aug.augment_image(base_img)
    expected = np.round(base_img + 10).astype(np.uint8)
    assert np.allclose(observed, expected)

    for per_channel in [False, True]:
        aug = iaa.Alpha(1, iaa.Affine(translate_px={"x": 1}), iaa.Affine(translate_px={"x": -1}),
                        per_channel=per_channel)
        observed = aug.augment_heatmaps([heatmaps])[0]
        assert observed.shape == heatmaps.shape
        assert 0 - 1e-6 < heatmaps.min_value < 0 + 1e-6
        assert 1 - 1e-6 < heatmaps.max_value < 1 + 1e-6
        assert np.allclose(observed.get_arr(), heatmaps_arr_r1)

    for per_channel in [False, True]:
        aug = iaa.Alpha(1,
                        iaa.Affine(translate_px={"x": 1}),
                        iaa.Affine(translate_px={"x": -1}),
                        per_channel=per_channel)
        observed = aug.augment_segmentation_maps([segmaps])[0]
        assert observed.shape == segmaps.shape
        assert np.array_equal(observed.get_arr(), segmaps_arr_r1)

    aug = iaa.Alpha(0, iaa.Add(10), iaa.Add(20))
    observed = aug.augment_image(base_img)
    expected = np.round(base_img + 20).astype(np.uint8)
    assert np.allclose(observed, expected)

    for per_channel in [False, True]:
        aug = iaa.Alpha(0,
                        iaa.Affine(translate_px={"x": 1}),
                        iaa.Affine(translate_px={"x": -1}),
                        per_channel=per_channel)
        observed = aug.augment_heatmaps([heatmaps])[0]
        assert observed.shape == heatmaps.shape
        assert 0 - 1e-6 < heatmaps.min_value < 0 + 1e-6
        assert 1 - 1e-6 < heatmaps.max_value < 1 + 1e-6
        assert np.allclose(observed.get_arr(), heatmaps_arr_l1)

    for per_channel in [False, True]:
        aug = iaa.Alpha(0,
                        iaa.Affine(translate_px={"x": 1}),
                        iaa.Affine(translate_px={"x": -1}),
                        per_channel=per_channel)
        observed = aug.augment_segmentation_maps([segmaps])[0]
        assert observed.shape == segmaps.shape
        assert np.array_equal(observed.get_arr(), segmaps_arr_l1)

    aug = iaa.Alpha(0.75, iaa.Add(10), iaa.Add(20))
    observed = aug.augment_image(base_img)
    expected = np.round(base_img + 0.75 * 10 + 0.25 * 20).astype(np.uint8)
    assert np.allclose(observed, expected)

    aug = iaa.Alpha(0.75, None, iaa.Add(20))
    observed = aug.augment_image(base_img + 10)
    expected = np.round(base_img + 0.75 * 10 + 0.25 * (10 + 20)).astype(np.uint8)
    assert np.allclose(observed, expected)

    aug = iaa.Alpha(0.75, iaa.Add(10), None)
    observed = aug.augment_image(base_img + 10)
    expected = np.round(base_img + 0.75 * (10 + 10) + 0.25 * 10).astype(np.uint8)
    assert np.allclose(observed, expected)

    base_img = np.zeros((1, 2, 1), dtype=np.uint8)
    nb_iterations = 1000
    aug = iaa.Alpha((0.0, 1.0), iaa.Add(10), iaa.Add(110))
    values = []
    for _ in sm.xrange(nb_iterations):
        observed = aug.augment_image(base_img)
        observed_val = np.round(np.average(observed)) - 10
        values.append(observed_val / 100)

    nb_bins = 5
    hist, _ = np.histogram(values, bins=nb_bins, range=(0.0, 1.0), density=False)
    density_expected = 1.0/nb_bins
    density_tolerance = 0.05
    for nb_samples in hist:
        density = nb_samples / nb_iterations
        assert density_expected - density_tolerance < density < density_expected + density_tolerance

    # bad datatype for factor
    got_exception = False
    try:
        _ = iaa.Alpha(False, iaa.Add(10), None)
    except Exception as exc:
        assert "Expected " in str(exc)
        got_exception = True
    assert got_exception

    # per_channel
    aug = iaa.Alpha(1.0, iaa.Add((0, 100), per_channel=True), None, per_channel=True)
    observed = aug.augment_image(np.zeros((1, 1, 1000), dtype=np.uint8))
    uq = np.unique(observed)
    assert len(uq) > 1
    assert np.max(observed) > 80
    assert np.min(observed) < 20

    aug = iaa.Alpha((0.0, 1.0), iaa.Add(100), None, per_channel=True)
    observed = aug.augment_image(np.zeros((1, 1, 1000), dtype=np.uint8))
    uq = np.unique(observed)
    assert len(uq) > 1
    assert np.max(observed) > 80
    assert np.min(observed) < 20

    aug = iaa.Alpha((0.0, 1.0), iaa.Add(100), iaa.Add(0), per_channel=0.5)
    seen = [0, 0]
    for _ in sm.xrange(200):
        observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
        uq = np.unique(observed)
        if len(uq) == 1:
            seen[0] += 1
        elif len(uq) > 1:
            seen[1] += 1
        else:
            assert False
    assert 100 - 50 < seen[0] < 100 + 50
    assert 100 - 50 < seen[1] < 100 + 50

    # bad datatype for per_channel
    got_exception = False
    try:
        _ = iaa.Alpha(0.5, iaa.Add(10), None, per_channel="test")
    except Exception as exc:
        assert "Expected " in str(exc)
        got_exception = True
    assert got_exception

    # propagating
    aug = iaa.Alpha(0.5, iaa.Add(100), iaa.Add(50), name="AlphaTest")

    def propagator(images, augmenter, parents, default):
        if "Alpha" in augmenter.name:
            return False
        else:
            return default

    hooks = ia.HooksImages(propagator=propagator)
    image = np.zeros((10, 10, 3), dtype=np.uint8) + 1
    observed = aug.augment_image(image, hooks=hooks)
    assert np.array_equal(observed, image)

    # -----
    # keypoints
    # -----
    kps = [ia.Keypoint(x=5, y=10), ia.Keypoint(x=6, y=11)]
    kpsoi = ia.KeypointsOnImage(kps, shape=(20, 20, 3))

    aug = iaa.Alpha(1.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_keypoints([kpsoi])[0]
    expected = kpsoi.deepcopy()
    assert keypoints_equal([observed], [expected])

    aug = iaa.Alpha(0.501, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_keypoints([kpsoi])[0]
    expected = kpsoi.deepcopy()
    assert keypoints_equal([observed], [expected])

    aug = iaa.Alpha(0.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_keypoints([kpsoi])[0]
    expected = kpsoi.shift(x=1)
    assert keypoints_equal([observed], [expected])

    aug = iaa.Alpha(0.499, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_keypoints([kpsoi])[0]
    expected = kpsoi.shift(x=1)
    assert keypoints_equal([observed], [expected])

    # per_channel
    aug = iaa.Alpha(1.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
    observed = aug.augment_keypoints([kpsoi])[0]
    expected = kpsoi.deepcopy()
    assert keypoints_equal([observed], [expected])

    aug = iaa.Alpha(0.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
    observed = aug.augment_keypoints([kpsoi])[0]
    expected = kpsoi.shift(x=1)
    assert keypoints_equal([observed], [expected])

    aug = iaa.Alpha(iap.Choice([0.49, 0.51]), iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
    expected_same = kpsoi.deepcopy()
    expected_shifted = kpsoi.shift(x=1)
    seen = [0, 0]
    for _ in sm.xrange(200):
        observed = aug.augment_keypoints([kpsoi])[0]
        if keypoints_equal([observed], [expected_same]):
            seen[0] += 1
        elif keypoints_equal([observed], [expected_shifted]):
            seen[1] += 1
        else:
            assert False
    assert 100 - 50 < seen[0] < 100 + 50
    assert 100 - 50 < seen[1] < 100 + 50

    # empty keypoints
    aug = iaa.Alpha(0.501, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_keypoints(ia.KeypointsOnImage([], shape=(1, 2, 3)))
    assert len(observed.keypoints) == 0
    assert observed.shape == (1, 2, 3)

    # propagating
    aug = iaa.Alpha(0.0, iaa.Affine(translate_px={"x": 1}), iaa.Affine(translate_px={"y": 1}), name="AlphaTest")

    def propagator(kpsoi_to_aug, augmenter, parents, default):
        if "Alpha" in augmenter.name:
            return False
        else:
            return default

    hooks = ia.HooksKeypoints(propagator=propagator)
    observed = aug.augment_keypoints([kpsoi], hooks=hooks)[0]
    assert keypoints_equal([observed], [kpsoi])

    # -----
    # polygons
    # -----
    ps = [ia.Polygon([(5, 5), (10, 5), (10, 10)])]
    psoi = ia.PolygonsOnImage(ps, shape=(20, 20, 3))

    aug = iaa.Alpha(1.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_polygons([psoi])
    assert len(observed) == 1
    assert len(observed[0].polygons) == 1
    assert observed[0].shape == psoi.shape
    assert observed[0].polygons[0].exterior_almost_equals(psoi.polygons[0])
    assert observed[0].polygons[0].is_valid

    aug = iaa.Alpha(0.501, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_polygons([psoi])
    assert len(observed) == 1
    assert len(observed[0].polygons) == 1
    assert observed[0].shape == psoi.shape
    assert observed[0].polygons[0].exterior_almost_equals(psoi.polygons[0])
    assert observed[0].polygons[0].is_valid

    aug = iaa.Alpha(0.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_polygons([psoi])
    expected = psoi.shift(left=1)
    assert len(observed) == 1
    assert len(observed[0].polygons) == 1
    assert observed[0].shape == psoi.shape
    assert observed[0].polygons[0].exterior_almost_equals(expected.polygons[0])
    assert observed[0].polygons[0].is_valid

    aug = iaa.Alpha(0.499, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_polygons([psoi])
    expected = psoi.shift(left=1)
    assert len(observed) == 1
    assert len(observed[0].polygons) == 1
    assert observed[0].shape == psoi.shape
    assert observed[0].polygons[0].exterior_almost_equals(expected.polygons[0])
    assert observed[0].polygons[0].is_valid

    # per_channel
    aug = iaa.Alpha(1.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
    observed = aug.augment_polygons([psoi])
    assert len(observed) == 1
    assert len(observed[0].polygons) == 1
    assert observed[0].shape == psoi.shape
    assert observed[0].polygons[0].exterior_almost_equals(psoi.polygons[0])
    assert observed[0].polygons[0].is_valid

    aug = iaa.Alpha(0.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
    observed = aug.augment_polygons([psoi])
    expected = psoi.shift(left=1)
    assert len(observed) == 1
    assert len(observed[0].polygons) == 1
    assert observed[0].shape == psoi.shape
    assert observed[0].polygons[0].exterior_almost_equals(expected.polygons[0])
    assert observed[0].polygons[0].is_valid

    aug = iaa.Alpha(iap.Choice([0.49, 0.51]), iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
    expected_same = psoi.deepcopy()
    expected_shifted = psoi.shift(left=1)
    seen = [0, 0]
    for _ in sm.xrange(200):
        observed = aug.augment_polygons([psoi])[0]
        if observed.polygons[0].exterior_almost_equals(expected_same.polygons[0]):
            seen[0] += 1
        elif observed.polygons[0].exterior_almost_equals(expected_shifted.polygons[0]):
            seen[1] += 1
        else:
            assert False
    assert 100 - 50 < seen[0] < 100 + 50
    assert 100 - 50 < seen[1] < 100 + 50

    # empty polygons
    aug = iaa.Alpha(0.501, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_polygons(ia.PolygonsOnImage([], shape=(1, 2, 3)))
    assert len(observed.polygons) == 0
    assert observed.shape == (1, 2, 3)

    # propagating
    aug = iaa.Alpha(0.0, iaa.Affine(translate_px={"x": 1}), iaa.Affine(translate_px={"y": 1}), name="AlphaTest")

    def propagator(psoi_to_aug, augmenter, parents, default):
        if "Alpha" in augmenter.name:
            return False
        else:
            return default

    hooks = ia.HooksKeypoints(propagator=propagator)  # no hooks for polygons yet, so we use HooksKeypoints
    observed = aug.augment_polygons([psoi], hooks=hooks)[0]
    assert observed.polygons[0].exterior_almost_equals(psoi.polygons[0])

    # -----
    # get_parameters()
    # -----
    first = iaa.Noop()
    second = iaa.Sequential([iaa.Add(1)])
    aug = iaa.Alpha(0.65, first, second, per_channel=1)
    params = aug.get_parameters()
    assert isinstance(params[0], iap.Deterministic)
    assert isinstance(params[1], iap.Deterministic)
    assert 0.65 - 1e-6 < params[0].value < 0.65 + 1e-6
    assert params[1].value == 1

    # -----
    # get_children_lists()
    # -----
    first = iaa.Noop()
    second = iaa.Sequential([iaa.Add(1)])
    aug = iaa.Alpha(0.65, first, second, per_channel=1)
    children_lsts = aug.get_children_lists()
    assert len(children_lsts) == 2
    assert ia.is_iterable([lst for lst in children_lsts])
    assert first in children_lsts[0]
    assert second == children_lsts[1]
Example #21
def temporal_prop(image_list, region_list, labels_list, file_paths):
    sigma = 9
    span = 21

    new_region_list = deepcopy(region_list)
    new_image_list = deepcopy(image_list)
    agg_conf = "/home/ash/Small-Obs-Project/vid_visualisation/methodology/agg_conf/"
    for i in tqdm(range(len(region_list))):

        img = image_list[i]
        label_frame = labels_list[i]
        img_height, img_width = img.shape[0], img.shape[1]

        orig_region = region_list[i]
        orig_mask = orig_region != 0.
        region_id_1, num_region_1 = sp.label(orig_mask)

        # print("file_name:",file_paths[i])

        # for regions in range(1,num_region_1+1):
        #     x, y = np.where(region_id_1 == regions)
        #     c_x, c_y = int(np.mean(x)), int(np.mean(y))
        #     cv2.circle(img, (c_y, c_x), 3, (255,0,0), 2)

        for j in range(-3, 0):
            if j == 0 or i + j < 0 or i + j > len(image_list) - 1:
                continue

            frame_img = new_image_list[i + j]
            frame_region = new_region_list[i + j]

            frame_mask = frame_region != 0
            region_id, num_region = sp.label(frame_mask)
            # print("history file name",file_paths[i+j],num_region)

            for k in range(1, num_region + 1):
                x, y = np.where(region_id == k)
                c_x, c_y = int(np.mean(x)), int(np.mean(y))
                # cv2.circle(frame_label, (c_y, c_x), 2, (255), 2)

                # Get crop around region
                (bound_left,
                 bound_right), (bound_down, bound_up) = get_crop_bounds(
                     c_x, c_y, 15, img_height, img_width)
                template = frame_img[bound_left:bound_right,
                                     bound_down:bound_up]
                src_region = frame_region[bound_left:bound_right,
                                          bound_down:bound_up]

                h, w = template.shape[0], template.shape[1]
                method = eval(methods[0])

                # Get area region to search in current image
                (left_margin,
                 right_margin), (down_margin, up_margin) = get_crop_bounds(
                     c_x, c_y, 75, img_height, img_width)
                dest_img = new_image_list[i]
                dest_template = dest_img[left_margin:right_margin,
                                         down_margin:up_margin]

                # Apply template Matching
                try:
                    res = cv2.matchTemplate(dest_template, template, method)
                    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
                    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
                        top_left = min_loc
                    else:
                        top_left = max_loc
                    bottom_right = (top_left[0] + w, top_left[1] + h)
                except:
                    print("Template match error")
                    continue

                # cv2.imshow("template", template)
                # print(max_val)
                # cv2.imshow("dest_template", dest_template)
                # cv2.waitKey(0)

                if max_val >= 0.85:
                    center_point = (int(
                        (top_left[0] + bottom_right[0]) / 2) + down_margin,
                                    int((top_left[1] + bottom_right[1]) / 2) +
                                    left_margin)
                    x_0, y_0 = center_point[1], center_point[0]
                    left_corner = [0, 0]
                    right_corner = [0, 0]
                    left_corner[0] = top_left[0] + down_margin
                    left_corner[1] = top_left[1] + left_margin
                    right_corner[0] = bottom_right[0] + down_margin
                    right_corner[1] = bottom_right[1] + left_margin

                    # Check if a region is already there
                    if new_region_list[i][x_0, y_0] == 0:
                        new_region_list[i][
                            left_corner[1]:right_corner[1],
                            left_corner[0]:right_corner[0]] += src_region
                        # for x in range(x_0 - span, x_0 + span + 1):
                        #     for y in range(y_0 - span, y_0 + span + 1):
                        #         if 0 < x < img_height and 0 < y < img_width:
                        #             new_region_list[i][x, y] += np.exp(-0.5 * ((x - x_0) ** 2 + (y - y_0) ** 2) / sigma ** 2)
                        # cv2.circle(img, (y_0, x_0), 3, (0, 255, 0), 2)
                        # cv2.rectangle(img, tuple(left_corner), tuple(right_corner), 255, 2)

                # else:
                #     print("not detected",max_val)
                # cv2.imshow("template",template)
                # cv2.imshow("dest_template",dest_template)
                # cv2.imshow("src_image",frame_img)
                # cv2.imshow("dest_image",new_image_list[i])
                # cv2.waitKey(0)

        new_region_list[i] = np.clip(new_region_list[i], 0, 1)
        # new_region_list[i] = new_region_list[i].astype(np.float16)
        # np.save(file_paths[i],new_region_list[i])
        to_copy = new_region_list[i].copy()
        to_copy = to_copy.astype(np.float32)
        heatmap = HeatmapsOnImage(to_copy,
                                  shape=img.shape,
                                  min_value=0,
                                  max_value=1)
        new_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)
        overlayed_heatmap = heatmap.draw_on_image(new_img,
                                                  alpha=0.35,
                                                  cmap='jet')[0]
        overlayed_heatmap = cv2.cvtColor(overlayed_heatmap, cv2.COLOR_RGB2BGR)
        # overlayed_heatmap = Image.fromarray(overlayed_heatmap)
        # overlayed_heatmap.save(os.path.join(agg_conf,"temp_conf_" + file_paths[i].split('.npy')[0]+'.png'))
        cv2.imwrite(file_paths[i].split('.npy')[0] + '.png', overlayed_heatmap)
        # cv2.imshow("temporal confidence",overlayed_heatmap)
        # cv2.waitKey(0)
        if cv2.waitKey(10) == ord('q'):
            print('Quitting....')
            break
Example #22
    def get_train_sample(self, fg_ind, bg_ind, second_fg_ind=-1):
        first_fg_path = os.path.join(self.data_root, self.fg_list[fg_ind])
        first_alpha_path = os.path.join(self.data_root,
                                        self.alpha_list[fg_ind])
        bg_path = os.path.join(self.data_root, self.bg_list[bg_ind])

        def check_path(pp):
            if not os.path.isfile(pp):
                print("not exist ", pp)
                exit(0)

        # check_path(first_fg_path)
        # check_path(first_alpha_path)
        # check_path(bg_path)

        fg = cv2.imread(first_fg_path)
        alpha = self.load_alpha(first_alpha_path)
        bg = cv2.imread(bg_path)

        fg = self.color_aug(image=fg)

        heat_map_alpha = HeatmapsOnImage(alpha,
                                         shape=fg.shape,
                                         min_value=0.0,
                                         max_value=1.0)
        # fg, aug_alpha = self.compose_shape_aug(image=fg, heatmaps=heat_map_alpha)
        fg, aug_alpha = self.fg_simple_aug(image=fg, heatmaps=heat_map_alpha)
        alpha = np.squeeze(aug_alpha.get_arr())
        fg, alpha = self.random_crop_fg_on_transition(fg, alpha)

        # to avoid having all pixels masked, scale down overly large fg objects
        while np.sum(alpha < 0.01) < 100:
            # print("warning: fg-{} mask is to large. mask_area={}".format(fg_ind, np.sum(alpha<0.01)))
            heat_map_alpha = HeatmapsOnImage(alpha,
                                             shape=fg.shape,
                                             min_value=0.0,
                                             max_value=1.0)
            fg, aug_alpha = self.scale_down(image=fg, heatmaps=heat_map_alpha)
            alpha = np.squeeze(aug_alpha.get_arr())

        # compose fg bg
        bg = self.bg_aug(image=bg)
        # bg = cv2.resize(bg, dsize=(fg.shape[1], fg.shape[0]), interpolation=cv2.INTER_CUBIC)
        # TODO: augment bg
        compose = self.compose_fg_bg(fg, alpha, bg)

        if second_fg_ind >= 0:
            second_fg_path = os.path.join(self.data_root,
                                          self.fg_list[second_fg_ind])
            second_alpha_path = os.path.join(self.data_root,
                                             self.alpha_list[second_fg_ind])

            fg2 = cv2.imread(second_fg_path)
            fg2 = self.color_aug(image=fg2)

            alpha2 = self.load_alpha(second_alpha_path)
            # second_fg, second_alpha = self.resize_keep_ratio(second_fg, fg.shape[0], fg.shape[1], alpha=second_alpha)
            heat_map_alpha = HeatmapsOnImage(alpha2,
                                             shape=fg2.shape,
                                             min_value=0.0,
                                             max_value=1.0)
            # fg2, aug_alpha2 = self.compose_shape_aug(image=fg2, heatmaps=heat_map_alpha)
            fg2, aug_alpha2 = self.fg_simple_aug(image=fg2,
                                                 heatmaps=heat_map_alpha)
            alpha2 = np.squeeze(aug_alpha2.get_arr())  # dsize
            fg2, alpha2 = self.random_crop_fg_on_transition(fg2, alpha2)

            while np.sum(alpha2 < 0.01) < 100:
                # print("warning: fg-{} mask is to large. mask_area={}".format(second_fg_ind, np.sum(alpha2 < 0.01)))
                heat_map_alpha = HeatmapsOnImage(alpha2,
                                                 shape=fg2.shape,
                                                 min_value=0.0,
                                                 max_value=1.0)
                fg2, aug_alpha2 = self.scale_down(image=fg2,
                                                  heatmaps=heat_map_alpha)
                alpha2 = np.squeeze(aug_alpha2.get_arr())

            new_alpha = (alpha + alpha2) - alpha * alpha2
            if np.sum(new_alpha < 0.01) < 100:
                if not self.silence:
                    print(
                        "warning: skip compose second fg. mask_area={}".format(
                            np.sum(new_alpha < 0.01)))
            else:

                compose = self.compose_fg_bg(fg2, alpha2, compose)
                fg = self.vis_2fg(
                    fg, alpha, fg2, alpha2, compose,
                    bg)  # just for visualization # TODO: not real fg color

                alpha = (alpha + alpha2) - alpha * alpha2

        trimask = np.logical_and(alpha > 0.01, alpha < 0.99).astype('uint8')
        full_mask = np.uint8(alpha > 0.01)  # translucent+fg

        d = np.random.randint(5, 36)
        # d = 6
        # print("dilate", d)
        element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (d, d))
        dilate_trimask = cv2.dilate(trimask, element)

        # trimask translation
        _x = np.random.randint(-d // 2 + 1, d // 2)
        _y = np.random.randint(-d // 2 + 1, d // 2)
        translation_matrix = np.float32([[1, 0, _x], [0, 1, _y]])
        dilate_trimask = cv2.warpAffine(src=dilate_trimask,
                                        M=translation_matrix,
                                        dsize=(dilate_trimask.shape[1],
                                               dilate_trimask.shape[0]),
                                        flags=cv2.INTER_NEAREST,
                                        borderMode=cv2.BORDER_REPLICATE)

        temp_full_mask = np.where(dilate_trimask > 0, 1,
                                  full_mask)  # unknown + fg
        if np.sum(temp_full_mask == 0) < 100:
            # print("warning: skip random dilate. area={}".format(np.sum(temp_full_mask==0)))
            temp_full_mask = np.where(trimask > 0, 1, full_mask)
            dilate_trimask = trimask
        full_mask = temp_full_mask
        trimask = dilate_trimask

        trimask = trimask[..., np.newaxis]
        alpha = alpha[..., np.newaxis]

        trimap = np.where(alpha > 0.99, 2, 0)
        trimap = np.where(trimask > 0, 1, trimap)  # (h,w,1)
        small_trimap = cv2.resize(trimap,
                                  dsize=(self.image_shape[1] // 8,
                                         self.image_shape[0] // 8),
                                  interpolation=cv2.INTER_NEAREST)
        small_trimap = small_trimap[..., np.newaxis]
        # print("small trimap shape", small_trimap.shape)

        compose = compose / 127.5 - 1.0
        bg = bg / 127.5 - 1.0
        fg = fg / 127.5 - 1.0

        compose = np.transpose(compose, (2, 0, 1))
        fg = np.transpose(fg, (2, 0, 1))
        bg = np.transpose(bg, (2, 0, 1))
        alpha = np.transpose(alpha, (2, 0, 1))

        trimap = np.transpose(trimap, (2, 0, 1))
        small_trimap = np.transpose(small_trimap, (2, 0, 1))

        # return compose, fg, bg, trimask, full_mask, small_mask, alpha, small_fg_mask, trimap, small_trimap
        return compose, fg, bg, alpha, trimap, small_trimap
Example #23
    # raw_label += 1
    # aug = iaa.AddToBrightness(add=(30))

    segmap = SegmentationMapsOnImage(pred_mask, shape=img.shape)
    segmap = segmap.pad(top=0, right=0, bottom=0, left=0)
    overlayed_seg = segmap.draw_on_image(img,
                                         alpha=0.6,
                                         colors=[(0, 0, 0), (0, 0, 255)])[0]
    # overlayed_seg = aug(overlayed_seg)
    # segmap_img = SegmentationMapsOnImage(pred_img_mask, shape=img.shape)
    # overlayed_seg_img = segmap_img.draw_on_image(img, alpha=0.6, colors=[(0, 0, 0), (0, 0, 255)])[0]

    # label_seg = SegmentationMapsOnImage(raw_label, shape=img.shape)
    # overlayed_label = label_seg.draw_on_image(img,alpha=0.5,colors=[(0,0,0),(0,0,0),(0,0,153),(0,204,0)])[0]
    heatmap = HeatmapsOnImage(confidence_map,
                              shape=img.shape,
                              min_value=0,
                              max_value=1)
    new_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    overlayed_heatmap = heatmap.draw_on_image(new_img, alpha=0.45,
                                              cmap='jet')[0]
    overlayed_heatmap = cv2.cvtColor(overlayed_heatmap, cv2.COLOR_RGB2BGR)

    # cv2.imshow("prediction_image",overlayed_seg_img)
    # cv2.imshow("prediction_ours",overlayed_seg)
    # print(overlayed_seg.shape)
    # cap.write(overlayed_seg)
    # cap_2.write(overlayed_seg_img)
    # cv2.imshow("label",overlayed_label)
    # cv2.imshow("confidence map",overlayed_heatmap)
    # cv2.imshow("image",img)
    # cv2.imwrite(os.path.join(vis_path,file_names[index]),img)
Example #24
def test_AlphaElementwise():
    reseed()

    base_img = np.zeros((3, 3, 1), dtype=np.uint8)

    heatmaps_arr = np.float32([[0.0, 0.0, 1.0],
                               [0.0, 0.0, 1.0],
                               [0.0, 1.0, 1.0]])
    heatmaps_arr_r1 = np.float32([[0.0, 0.0, 0.0],
                                  [0.0, 0.0, 0.0],
                                  [0.0, 0.0, 1.0]])
    heatmaps_arr_l1 = np.float32([[0.0, 1.0, 0.0],
                                  [0.0, 1.0, 0.0],
                                  [1.0, 1.0, 0.0]])
    heatmaps = HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))

    segmaps_arr = np.int32([[0, 0, 1],
                            [0, 0, 1],
                            [0, 1, 1]])
    segmaps_arr_r1 = np.int32([[0, 0, 0],
                               [0, 0, 0],
                               [0, 0, 1]])
    segmaps_arr_l1 = np.int32([[0, 1, 0],
                               [0, 1, 0],
                               [1, 1, 0]])
    segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(3, 3, 3))

    aug = iaa.AlphaElementwise(1, iaa.Add(10), iaa.Add(20))
    observed = aug.augment_image(base_img)
    expected = base_img + 10
    assert np.allclose(observed, expected)

    aug = iaa.AlphaElementwise(1,
                               iaa.Affine(translate_px={"x": 1}),
                               iaa.Affine(translate_px={"x": -1}))
    observed = aug.augment_heatmaps([heatmaps])[0]
    assert observed.shape == (3, 3, 3)
    assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
    assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
    assert np.allclose(observed.get_arr(), heatmaps_arr_r1)

    aug = iaa.AlphaElementwise(1,
                               iaa.Affine(translate_px={"x": 1}),
                               iaa.Affine(translate_px={"x": -1}))
    observed = aug.augment_segmentation_maps([segmaps])[0]
    assert observed.shape == (3, 3, 3)
    assert np.array_equal(observed.get_arr(), segmaps_arr_r1)

    aug = iaa.AlphaElementwise(0, iaa.Add(10), iaa.Add(20))
    observed = aug.augment_image(base_img)
    expected = base_img + 20
    assert np.allclose(observed, expected)

    aug = iaa.AlphaElementwise(0,
                               iaa.Affine(translate_px={"x": 1}),
                               iaa.Affine(translate_px={"x": -1}))
    observed = aug.augment_heatmaps([heatmaps])[0]
    assert observed.shape == (3, 3, 3)
    assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
    assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
    assert np.allclose(observed.get_arr(), heatmaps_arr_l1)

    aug = iaa.AlphaElementwise(0,
                               iaa.Affine(translate_px={"x": 1}),
                               iaa.Affine(translate_px={"x": -1}))
    observed = aug.augment_segmentation_maps([segmaps])[0]
    assert observed.shape == (3, 3, 3)
    assert np.array_equal(observed.get_arr(), segmaps_arr_l1)

    aug = iaa.AlphaElementwise(0.75, iaa.Add(10), iaa.Add(20))
    observed = aug.augment_image(base_img)
    expected = np.round(base_img + 0.75 * 10 + 0.25 * 20).astype(np.uint8)
    assert np.allclose(observed, expected)

    aug = iaa.AlphaElementwise(0.75, None, iaa.Add(20))
    observed = aug.augment_image(base_img + 10)
    expected = np.round(base_img + 0.75 * 10 + 0.25 * (10 + 20)).astype(np.uint8)
    assert np.allclose(observed, expected)

    aug = iaa.AlphaElementwise(0.75, iaa.Add(10), None)
    observed = aug.augment_image(base_img + 10)
    expected = np.round(base_img + 0.75 * (10 + 10) + 0.25 * 10).astype(np.uint8)
    assert np.allclose(observed, expected)

    base_img = np.zeros((100, 100), dtype=np.uint8)
    aug = iaa.AlphaElementwise((0.0, 1.0), iaa.Add(10), iaa.Add(110))
    observed = (aug.augment_image(base_img) - 10) / 100
    nb_bins = 10
    hist, _ = np.histogram(observed.flatten(),  bins=nb_bins, range=(0.0, 1.0), density=False)
    density_expected = 1.0/nb_bins
    density_tolerance = 0.05
    for nb_samples in hist:
        density = nb_samples / observed.size
        assert density_expected - density_tolerance < density < density_expected + density_tolerance

    base_img = np.zeros((1, 1, 100), dtype=np.uint8)
    aug = iaa.AlphaElementwise((0.0, 1.0), iaa.Add(10), iaa.Add(110), per_channel=True)
    observed = aug.augment_image(base_img)
    assert len(set(observed.flatten())) > 1

    # propagating
    aug = iaa.AlphaElementwise(0.5, iaa.Add(100), iaa.Add(50), name="AlphaElementwiseTest")

    def propagator(images, augmenter, parents, default):
        if "AlphaElementwise" in augmenter.name:
            return False
        else:
            return default

    hooks = ia.HooksImages(propagator=propagator)
    image = np.zeros((10, 10, 3), dtype=np.uint8) + 1
    observed = aug.augment_image(image, hooks=hooks)
    assert np.array_equal(observed, image)

    # -----
    # heatmaps and per_channel
    # -----
    class _DummyMaskParameter(iap.StochasticParameter):
        def __init__(self, inverted=False):
            super(_DummyMaskParameter, self).__init__()
            self.nb_calls = 0
            self.inverted = inverted

        def _draw_samples(self, size, random_state):
            self.nb_calls += 1
            h, w = size
            ones = np.ones((h, w), dtype=np.float32)
            zeros = np.zeros((h, w), dtype=np.float32)
            if self.nb_calls == 1:
                return zeros if not self.inverted else ones
            elif self.nb_calls in [2, 3]:
                return ones if not self.inverted else zeros
            else:
                assert False

    aug = iaa.AlphaElementwise(
        _DummyMaskParameter(inverted=False),
        iaa.Affine(translate_px={"x": 1}),
        iaa.Affine(translate_px={"x": -1}),
        per_channel=True
    )
    observed = aug.augment_heatmaps([heatmaps])[0]
    assert observed.shape == (3, 3, 3)
    assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
    assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
    assert np.allclose(observed.get_arr(), heatmaps_arr_r1)

    aug = iaa.AlphaElementwise(
        _DummyMaskParameter(inverted=True),
        iaa.Affine(translate_px={"x": 1}),
        iaa.Affine(translate_px={"x": -1}),
        per_channel=True
    )
    observed = aug.augment_heatmaps([heatmaps])[0]
    assert observed.shape == (3, 3, 3)
    assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
    assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
    assert np.allclose(observed.get_arr(), heatmaps_arr_l1)

    # -----
    # segmaps and per_channel
    # -----
    aug = iaa.AlphaElementwise(
        _DummyMaskParameter(inverted=False),
        iaa.Affine(translate_px={"x": 1}),
        iaa.Affine(translate_px={"x": -1}),
        per_channel=True
    )
    observed = aug.augment_segmentation_maps([segmaps])[0]
    assert observed.shape == (3, 3, 3)
    assert np.array_equal(observed.get_arr(), segmaps_arr_r1)

    aug = iaa.AlphaElementwise(
        _DummyMaskParameter(inverted=True),
        iaa.Affine(translate_px={"x": 1}),
        iaa.Affine(translate_px={"x": -1}),
        per_channel=True
    )
    observed = aug.augment_segmentation_maps([segmaps])[0]
    assert observed.shape == (3, 3, 3)
    assert np.array_equal(observed.get_arr(), segmaps_arr_l1)

    # -----
    # keypoints
    # -----
    kps = [ia.Keypoint(x=5, y=10), ia.Keypoint(x=6, y=11)]
    kpsoi = ia.KeypointsOnImage(kps, shape=(20, 20, 3))

    aug = iaa.AlphaElementwise(1.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_keypoints([kpsoi])[0]
    expected = kpsoi.deepcopy()
    assert keypoints_equal([observed], [expected])

    aug = iaa.AlphaElementwise(0.501, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_keypoints([kpsoi])[0]
    expected = kpsoi.deepcopy()
    assert keypoints_equal([observed], [expected])

    aug = iaa.AlphaElementwise(0.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_keypoints([kpsoi])[0]
    expected = kpsoi.shift(x=1)
    assert keypoints_equal([observed], [expected])

    aug = iaa.AlphaElementwise(0.499, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_keypoints([kpsoi])[0]
    expected = kpsoi.shift(x=1)
    assert keypoints_equal([observed], [expected])

    # per_channel
    aug = iaa.AlphaElementwise(1.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
    observed = aug.augment_keypoints([kpsoi])[0]
    expected = kpsoi.deepcopy()
    assert keypoints_equal([observed], [expected])

    aug = iaa.AlphaElementwise(0.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
    observed = aug.augment_keypoints([kpsoi])[0]
    expected = kpsoi.shift(x=1)
    assert keypoints_equal([observed], [expected])

    """
    TODO this test currently doesn't work as AlphaElementwise augments keypoints without sampling
    overlay factors per (x, y) location. (i.e. similar behaviour to Alpha)

    aug = iaa.AlphaElementwise(iap.Choice([0.49, 0.51]), iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
    expected_same = kpsoi.deepcopy()
    expected_both_shifted = kpsoi.shift(x=1)
    expected_first_shifted = ia.KeypointsOnImage([kps[0].shift(x=1), kps[1]], shape=kpsoi.shape)
    expected_second_shifted = ia.KeypointsOnImage([kps[0], kps[1].shift(x=1)], shape=kpsoi.shape)
    seen = [0, 0, 0, 0]
    for _ in sm.xrange(200):
        observed = aug.augment_keypoints([kpsoi])[0]
        if keypoints_equal([observed], [expected_same]):
            seen[0] += 1
        elif keypoints_equal([observed], [expected_both_shifted]):
            seen[1] += 1
        elif keypoints_equal([observed], [expected_first_shifted]):
            seen[2] += 1
        elif keypoints_equal([observed], [expected_second_shifted]):
            seen[3] += 1
        else:
            assert False
    assert 100 - 50 < seen[0] < 100 + 50
    assert 100 - 50 < seen[1] < 100 + 50
    """

    # propagating
    aug = iaa.AlphaElementwise(0.0, iaa.Affine(translate_px={"x": 1}), iaa.Affine(translate_px={"y": 1}),
                               name="AlphaElementwiseTest")

    def propagator(kpsoi_to_aug, augmenter, parents, default):
        if "AlphaElementwise" in augmenter.name:
            return False
        else:
            return default

    hooks = ia.HooksKeypoints(propagator=propagator)
    observed = aug.augment_keypoints([kpsoi], hooks=hooks)[0]
    assert keypoints_equal([observed], [kpsoi])

    # -----
    # polygons
    # -----
    ps = [ia.Polygon([(5, 5), (10, 5), (10, 10)])]
    psoi = ia.PolygonsOnImage(ps, shape=(20, 20, 3))

    aug = iaa.AlphaElementwise(1.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_polygons([psoi])
    assert len(observed) == 1
    assert len(observed[0].polygons) == 1
    assert observed[0].shape == psoi.shape
    assert observed[0].polygons[0].exterior_almost_equals(psoi.polygons[0])
    assert observed[0].polygons[0].is_valid

    aug = iaa.AlphaElementwise(0.501, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_polygons([psoi])
    assert len(observed) == 1
    assert len(observed[0].polygons) == 1
    assert observed[0].shape == psoi.shape
    assert observed[0].polygons[0].exterior_almost_equals(psoi.polygons[0])
    assert observed[0].polygons[0].is_valid

    aug = iaa.AlphaElementwise(0.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_polygons([psoi])
    expected = psoi.shift(left=1)
    assert len(observed) == 1
    assert len(observed[0].polygons) == 1
    assert observed[0].shape == psoi.shape
    assert observed[0].polygons[0].exterior_almost_equals(expected.polygons[0])
    assert observed[0].polygons[0].is_valid

    aug = iaa.AlphaElementwise(0.499, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_polygons([psoi])
    expected = psoi.shift(left=1)
    assert len(observed) == 1
    assert len(observed[0].polygons) == 1
    assert observed[0].shape == psoi.shape
    assert observed[0].polygons[0].exterior_almost_equals(expected.polygons[0])
    assert observed[0].polygons[0].is_valid

    # per_channel
    aug = iaa.AlphaElementwise(1.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
    observed = aug.augment_polygons([psoi])
    assert len(observed) == 1
    assert len(observed[0].polygons) == 1
    assert observed[0].shape == psoi.shape
    assert observed[0].polygons[0].exterior_almost_equals(psoi.polygons[0])
    assert observed[0].polygons[0].is_valid

    aug = iaa.AlphaElementwise(0.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
    observed = aug.augment_polygons([psoi])
    expected = psoi.shift(left=1)
    assert len(observed) == 1
    assert len(observed[0].polygons) == 1
    assert observed[0].shape == psoi.shape
    assert observed[0].polygons[0].exterior_almost_equals(expected.polygons[0])
    assert observed[0].polygons[0].is_valid

    aug = iaa.AlphaElementwise(iap.Choice([0.49, 0.51]), iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
    expected_same = psoi.deepcopy()
    expected_shifted = psoi.shift(left=1)
    seen = [0, 0]
    for _ in sm.xrange(200):
        observed = aug.augment_polygons([psoi])[0]
        if observed.polygons[0].exterior_almost_equals(expected_same.polygons[0]):
            seen[0] += 1
        elif observed.polygons[0].exterior_almost_equals(expected_shifted.polygons[0]):
            seen[1] += 1
        else:
            assert False
    assert 100 - 50 < seen[0] < 100 + 50
    assert 100 - 50 < seen[1] < 100 + 50

    # empty polygons
    aug = iaa.AlphaElementwise(0.501, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
    observed = aug.augment_polygons(ia.PolygonsOnImage([], shape=(1, 2, 3)))
    assert len(observed.polygons) == 0
    assert observed.shape == (1, 2, 3)

    # propagating
    aug = iaa.AlphaElementwise(0.0, iaa.Affine(translate_px={"x": 1}), iaa.Affine(translate_px={"y": 1}), name="AlphaTest")

    def propagator(psoi_to_aug, augmenter, parents, default):
        if "Alpha" in augmenter.name:
            return False
        else:
            return default

    hooks = ia.HooksKeypoints(propagator=propagator)  # no hooks for polygons yet, so we use HooksKeypoints
    observed = aug.augment_polygons([psoi], hooks=hooks)[0]
    assert observed.polygons[0].exterior_almost_equals(psoi.polygons[0])
Example #25
0
def test_Flipud():
    reseed()

    base_img = np.array([[0, 0, 1], [0, 0, 1], [0, 1, 1]], dtype=np.uint8)
    base_img = base_img[:, :, np.newaxis]

    base_img_flipped = np.array([[0, 1, 1], [0, 0, 1], [0, 0, 1]],
                                dtype=np.uint8)
    base_img_flipped = base_img_flipped[:, :, np.newaxis]

    images = np.array([base_img])
    images_flipped = np.array([base_img_flipped])

    keypoints = [
        ia.KeypointsOnImage([
            ia.Keypoint(x=0, y=0),
            ia.Keypoint(x=1, y=1),
            ia.Keypoint(x=2, y=2)
        ],
                            shape=base_img.shape)
    ]
    keypoints_flipped = [
        ia.KeypointsOnImage([
            ia.Keypoint(x=0, y=3 - 0),
            ia.Keypoint(x=1, y=3 - 1),
            ia.Keypoint(x=2, y=3 - 2)
        ],
                            shape=base_img.shape)
    ]

    polygons = [
        ia.PolygonsOnImage([ia.Polygon([(0, 0), (2, 0), (2, 2)])],
                           shape=base_img.shape)
    ]
    polygons_flipped = [
        ia.PolygonsOnImage([ia.Polygon([(0, 3 - 0), (2, 3 - 0), (2, 3 - 2)])],
                           shape=base_img.shape)
    ]

    # 0% chance of flip
    aug = iaa.Flipud(0)
    aug_det = aug.to_deterministic()

    for _ in sm.xrange(10):
        observed = aug.augment_images(images)
        expected = images
        assert np.array_equal(observed, expected)

        observed = aug_det.augment_images(images)
        expected = images
        assert np.array_equal(observed, expected)

        observed = aug.augment_keypoints(keypoints)
        expected = keypoints
        assert keypoints_equal(observed, expected)

        observed = aug_det.augment_keypoints(keypoints)
        expected = keypoints
        assert keypoints_equal(observed, expected)

        for aug_ in [aug, aug_det]:
            observed = aug_.augment_polygons(polygons)
            assert len(observed) == 1
            assert len(observed[0].polygons) == 1
            assert observed[0].shape == polygons[0].shape
            assert observed[0].polygons[0].exterior_almost_equals(
                polygons[0].polygons[0])
            assert observed[0].polygons[0].is_valid

    # 0% chance of flip, heatmaps
    aug = iaa.Flipud(0)
    heatmaps = HeatmapsOnImage(np.float32([
        [0, 0.5, 0.75],
        [0, 0.5, 0.75],
        [0.75, 0.75, 0.75],
    ]),
                               shape=(3, 3, 3))
    observed = aug.augment_heatmaps([heatmaps])[0]
    expected = heatmaps.get_arr()
    assert observed.shape == heatmaps.shape
    assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
    assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
    assert np.array_equal(observed.get_arr(), expected)

    # 0% chance of flip, segmaps
    aug = iaa.Flipud(0)
    segmaps = SegmentationMapsOnImage(np.int32([
        [0, 1, 2],
        [0, 1, 2],
        [2, 2, 2],
    ]),
                                      shape=(3, 3, 3))
    observed = aug.augment_segmentation_maps([segmaps])[0]
    expected = segmaps.get_arr()
    assert observed.shape == segmaps.shape
    assert np.array_equal(observed.get_arr(), expected)

    # 100% chance of flip
    aug = iaa.Flipud(1.0)
    aug_det = aug.to_deterministic()

    for _ in sm.xrange(10):
        observed = aug.augment_images(images)
        expected = images_flipped
        assert np.array_equal(observed, expected)

        observed = aug_det.augment_images(images)
        expected = images_flipped
        assert np.array_equal(observed, expected)

        observed = aug.augment_keypoints(keypoints)
        expected = keypoints_flipped
        assert keypoints_equal(observed, expected)

        observed = aug_det.augment_keypoints(keypoints)
        expected = keypoints_flipped
        assert keypoints_equal(observed, expected)

        for aug_ in [aug, aug_det]:
            observed = aug_.augment_polygons(polygons)
            assert len(observed) == 1
            assert len(observed[0].polygons) == 1
            assert observed[0].shape == polygons[0].shape
            assert observed[0].polygons[0].exterior_almost_equals(
                polygons_flipped[0].polygons[0])
            assert observed[0].polygons[0].is_valid

    # 100% chance of flip, heatmaps
    aug = iaa.Flipud(1.0)
    heatmaps = ia.HeatmapsOnImage(np.float32([
        [0, 0.5, 0.75],
        [0, 0.5, 0.75],
        [0.75, 0.75, 0.75],
    ]),
                                  shape=(3, 3, 3))
    observed = aug.augment_heatmaps([heatmaps])[0]
    expected = np.flipud(heatmaps.get_arr())
    assert observed.shape == heatmaps.shape
    assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
    assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
    assert np.array_equal(observed.get_arr(), expected)

    # 100% chance of flip, segmaps
    aug = iaa.Flipud(1.0)
    segmaps = SegmentationMapsOnImage(np.int32([
        [0, 1, 2],
        [0, 1, 2],
        [2, 2, 2],
    ]),
                                      shape=(3, 3, 3))
    observed = aug.augment_segmentation_maps([segmaps])[0]
    expected = np.flipud(segmaps.get_arr())
    assert observed.shape == segmaps.shape
    assert np.array_equal(observed.get_arr(), expected)

    # 50% chance of flip
    aug = iaa.Flipud(0.5)
    aug_det = aug.to_deterministic()

    nb_iterations = 1000
    nb_images_flipped = 0
    nb_images_flipped_det = 0
    nb_keypoints_flipped = 0
    nb_keypoints_flipped_det = 0
    nb_polygons_flipped = 0
    nb_polygons_flipped_det = 0
    for _ in sm.xrange(nb_iterations):
        observed = aug.augment_images(images)
        if np.array_equal(observed, images_flipped):
            nb_images_flipped += 1

        observed = aug_det.augment_images(images)
        if np.array_equal(observed, images_flipped):
            nb_images_flipped_det += 1

        observed = aug.augment_keypoints(keypoints)
        if keypoints_equal(observed, keypoints_flipped):
            nb_keypoints_flipped += 1

        observed = aug_det.augment_keypoints(keypoints)
        if keypoints_equal(observed, keypoints_flipped):
            nb_keypoints_flipped_det += 1

        observed = aug.augment_polygons(polygons)
        if observed[0].polygons[0].exterior_almost_equals(
                polygons_flipped[0].polygons[0]):
            nb_polygons_flipped += 1

        observed = aug_det.augment_polygons(polygons)
        if observed[0].polygons[0].exterior_almost_equals(
                polygons_flipped[0].polygons[0]):
            nb_polygons_flipped_det += 1

    assert int(nb_iterations * 0.3) <= nb_images_flipped <= int(
        nb_iterations * 0.7)
    assert int(nb_iterations * 0.3) <= nb_keypoints_flipped <= int(
        nb_iterations * 0.7)
    assert int(nb_iterations * 0.3) <= nb_polygons_flipped <= int(
        nb_iterations * 0.7)
    assert nb_images_flipped_det in [0, nb_iterations]
    assert nb_keypoints_flipped_det in [0, nb_iterations]
    assert nb_polygons_flipped_det in [0, nb_iterations]

    # 50% chance of flipped, multiple images, list as input
    images_multi = [base_img, base_img]
    aug = iaa.Flipud(0.5)
    aug_det = aug.to_deterministic()
    nb_iterations = 1000
    nb_flipped_by_pos = [0] * len(images_multi)
    nb_flipped_by_pos_det = [0] * len(images_multi)
    for _ in sm.xrange(nb_iterations):
        observed = aug.augment_images(images_multi)
        for i in sm.xrange(len(images_multi)):
            if np.array_equal(observed[i], base_img_flipped):
                nb_flipped_by_pos[i] += 1

        observed = aug_det.augment_images(images_multi)
        for i in sm.xrange(len(images_multi)):
            if np.array_equal(observed[i], base_img_flipped):
                nb_flipped_by_pos_det[i] += 1

    for val in nb_flipped_by_pos:
        assert int(nb_iterations * 0.3) <= val <= int(nb_iterations * 0.7)

    for val in nb_flipped_by_pos_det:
        assert val in [0, nb_iterations]

    # test StochasticParameter as p
    aug = iaa.Flipud(p=iap.Choice([0, 1], p=[0.7, 0.3]))
    seen = [0, 0]
    for _ in sm.xrange(1000):
        observed = aug.augment_image(base_img)
        if np.array_equal(observed, base_img):
            seen[0] += 1
        elif np.array_equal(observed, base_img_flipped):
            seen[1] += 1
        else:
            assert False
    assert 700 - 75 < seen[0] < 700 + 75
    assert 300 - 75 < seen[1] < 300 + 75

    # test exceptions for wrong parameter types
    got_exception = False
    try:
        _ = iaa.Flipud(p="test")
    except Exception:
        got_exception = True
    assert got_exception

    # test get_parameters()
    aug = iaa.Flipud(p=0.5)
    params = aug.get_parameters()
    assert isinstance(params[0], iap.Binomial)
    assert isinstance(params[0].p, iap.Deterministic)
    assert 0.5 - 1e-4 < params[0].p.value < 0.5 + 1e-4

    ###################
    # test other dtypes
    ###################
    aug = iaa.Flipud(1.0)

    image = np.zeros((3, 3), dtype=bool)
    image[0, 0] = True
    expected = np.zeros((3, 3), dtype=bool)
    expected[2, 0] = True
    image_aug = aug.augment_image(image)
    assert image_aug.dtype.type == image.dtype.type
    assert np.all(image_aug == expected)

    for dtype in [
            np.uint8, np.uint16, np.uint32, np.uint64, np.int8, np.int32,
            np.int64
    ]:
        min_value, center_value, max_value = iadt.get_value_range_of_dtype(
            dtype)
        value = max_value
        image = np.zeros((3, 3), dtype=dtype)
        image[0, 0] = value
        expected = np.zeros((3, 3), dtype=dtype)
        expected[2, 0] = value
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == dtype
        assert np.array_equal(image_aug, expected)

    for dtype, value in zip([np.float16, np.float32, np.float64, np.float128],
                            [5000, 1000**2, 1000**3, 1000**4]):
        atol = 1e-9 * value if dtype != np.float16 else 1e-3 * value
        image = np.zeros((3, 3), dtype=dtype)
        image[0, 0] = value
        expected = np.zeros((3, 3), dtype=dtype)
        expected[2, 0] = value
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == dtype
        assert np.allclose(image_aug, expected, atol=atol)

def working():
    return render_template("works.html")


@app.route('/analyst')
def analysing():
    model = pickle.load(open("C:/Users/LENOVO/Desktop/project/modelhog.pickle", "rb"))
    path1 = "C:/Users/LENOVO/Desktop/project/static/frame re2 sec.jpg"

import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables.heatmaps import HeatmapsOnImage

ia.seed(1)

# Any 128x128 RGB image works here; we assume imgaug's built-in quokka sample.
image = ia.quokka(size=(128, 128), extract="square")

# Example depth map (float32, 128x128): a horizontal gradient from 0.0 to 50.0.
depth = np.linspace(0, 50, 128).astype(np.float32)
depth = np.tile(depth.reshape(1, 128), (128, 1))

# Draw a cross through the center so the augmentation effects are easy to see.
depth[64-2:64+2, 16:128-16] = 0.75 * 50.0  # horizontal bar
depth[16:128-16, 64-2:64+2] = 1.0 * 50.0   # vertical bar

# Convert our numpy array depth map to a heatmap object. The shape of the
# underlying image is required so that augmentations stay aligned with it.
depth = HeatmapsOnImage(depth, shape=image.shape, min_value=0.0, max_value=50.0)


# Downscale the ground-truth heatmap from 128x128 to 64x64 via 2x2 average
# pooling, e.g. to match a model that predicts at half the input resolution.
depth = depth.avg_pool(2)

# Define our augmentation pipeline.
seq = iaa.Sequential([
    iaa.Dropout([0.05, 0.2]),      # drop 5% or 20% of all pixels
    iaa.Sharpen((0.0, 1.0)),       # sharpen the image
    iaa.Affine(rotate=(-45, 45)),  # rotate by -45 to 45 degrees (also affects the heatmap)
    iaa.ElasticTransformation(alpha=50, sigma=5)  # local distortions (also affect the heatmap)
], random_order=True)

# Augment images and heatmaps.
images_aug = []
heatmaps_aug = []
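# A minimal sketch of the loop that typically follows such a pipeline, assuming
# imgaug's call-style interface seq(image=..., heatmaps=...), which returns the
# augmented image together with its augmented heatmap:
for _ in range(5):
    image_aug_i, heatmap_aug_i = seq(image=image, heatmaps=depth)
    images_aug.append(image_aug_i)
    heatmaps_aug.append(heatmap_aug_i)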