Code Example #1
    def __call__(self, dataset_dict):
        dataset_dict = copy.deepcopy(
            dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format="BGR")

        aug_input = T.AugInput(image)
        transforms = self.augmentations(aug_input)
        image = aug_input.image

        # if not self.is_train:
        #     # USER: Modify this if you want to keep them for some reason.
        #     dataset_dict.pop("annotations", None)
        #     dataset_dict.pop("sem_seg_file_name", None)
        #     return dataset_dict

        image_shape = image.shape[:2]  # h, w
        dataset_dict["image"] = torch.as_tensor(
            image.transpose(2, 0, 1).astype("float32"))
        annos = [
            utils.transform_instance_annotations(obj, transforms, image_shape)
            for obj in dataset_dict.pop("annotations")
            if obj.get("iscrowd", 0) == 0
        ]
        instances = utils.annotations_to_instances(annos, image_shape)
        dataset_dict["instances"] = utils.filter_empty_instances(instances)
        return dataset_dict
Code Example #2
def custom_mapper(dataset_dict):
    # Implement a mapper, similar to the default DatasetMapper, but with your own customizations
    dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
    image = utils.read_image(dataset_dict["file_name"], format="BGR")
    # transform_list = [T.Resize(800,600),
    #                   T.RandomFlip(prob=0.5, horizontal=True, vertical=True),
    #                   T.RandomContrast(0.8, 3),
    #                   T.RandomBrightness(0.8, 1.6),
    #                   ]

    transform_list = [#T.Resize((800, 800)),
                      T.RandomContrast(0.8, 3),
                      T.RandomBrightness(0.8, 1.6),
                      T.RandomFlip(prob=0.5, horizontal=False, vertical=True),
                      T.RandomFlip(prob=0.5, horizontal=True, vertical=False)]  # augmentation methods

    image, transforms = T.apply_transform_gens(transform_list, image)  # apply augmentations
    dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))  # convert to Tensor

    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]

    instances = utils.annotations_to_instances(annos, image.shape[:2])  # convert annotations to Instances
    dataset_dict["instances"] = utils.filter_empty_instances(instances)  # drop empty instances
    return dataset_dict
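
A mapper like custom_mapper above is not used on its own; it has to be handed to detectron2's training data loader. A minimal sketch of the usual wiring, assuming the standard detectron2 DefaultTrainer API (the Trainer subclass name here is illustrative):

from detectron2.data import build_detection_train_loader
from detectron2.engine import DefaultTrainer

class Trainer(DefaultTrainer):
    # Route every dataset record through custom_mapper instead of the
    # default DatasetMapper when building the training loader.
    @classmethod
    def build_train_loader(cls, cfg):
        return build_detection_train_loader(cfg, mapper=custom_mapper)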
Code Example #3
File: train.py Project: KaivinC/CV_HW3
def mapper(dataset_dict):
    # Implement a mapper, similar to the default DatasetMapper, but with your own customizations
    dataset_dict = copy.deepcopy(
        dataset_dict)  # it will be modified by code below
    image = utils.read_image(dataset_dict["file_name"], format="BGR")

    image, transforms = T.apply_transform_gens([
        T.RandomFlip(prob=0.50, horizontal=True, vertical=False),
        T.RandomApply(tfm_or_aug=T.RandomBrightness(intensity_min=0.7,
                                                    intensity_max=1.1),
                      prob=0.40),
        T.RandomApply(tfm_or_aug=T.RandomSaturation(intensity_min=0.7,
                                                    intensity_max=1.1),
                      prob=0.40)
    ], image)

    dataset_dict["image"] = torch.as_tensor(
        image.transpose(2, 0, 1).astype("float32"))

    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = utils.annotations_to_instances(annos, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict
Code Example #4
def augment(record):
    record = deepcopy(record)
    image = plt.imread(record["filepath"])
    annotations = record["annotations"]

    boxes = [annotation["bbox"] for annotation in annotations]
    classes = [annotation["category_id"] for annotation in annotations]
    boxes = BoundingBoxesOnImage(
        [
            BoundingBox(*box, label=class_)
            for box, class_ in zip(boxes, classes)
        ],
        shape=image.shape,
    )
    image, boxes = AUGMENTER(image=image, bounding_boxes=boxes)
    classes = [bbox.label for bbox in boxes.bounding_boxes]
    boxes = np.array([[box.x1, box.y1, box.x2, box.y2] for box in boxes.items])
    image = image[..., [2, 1, 0]]  # reorder channels RGB (matplotlib) -> BGR
    image = image.transpose(2, 0, 1).astype(np.float32)

    annotations = [{
        "bbox": box,
        "bbox_mode": BoxMode.XYXY_ABS,
        "category_id": class_
    } for box, class_ in zip(boxes, classes)]
    record["image"] = torch.as_tensor(image)
    instances = utils.annotations_to_instances(annotations, image.shape[1:])
    record["instances"] = utils.filter_empty_instances(instances)
    return record
Code Example #5
def custom_mapper(dataset_dict, size, flip_prob, min_brightness, max_brightness,
                  min_contrast, max_contrast, min_saturation, max_saturation):
    # Implement a mapper, similar to the default DatasetMapper, but with your own customizations
    dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
    image = detection_utils.read_image(dataset_dict["file_name"], format="BGR")
    transform_list = [ 
                    T.Resize(size),
                    T.RandomBrightness(min_brightness, max_brightness),
                    T.RandomContrast(min_contrast, max_contrast),
                    T.RandomSaturation(min_saturation, max_saturation),

                    T.RandomFlip(prob=flip_prob, horizontal=False, vertical=True),
                    T.RandomFlip(prob=flip_prob, horizontal=True, vertical=False), 
                ]
    image, transforms = T.apply_transform_gens(transform_list, image)
    dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))

    annos = [
        detection_utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = detection_utils.annotations_to_instances(annos, image.shape[:2])
    dataset_dict["instances"] = detection_utils.filter_empty_instances(instances)
    return dataset_dict
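
Because this mapper takes augmentation hyperparameters in addition to dataset_dict, it cannot be passed to the data loader directly, which expects a one-argument callable. A small sketch of the usual fix with functools.partial (the parameter values below are illustrative, not from the original project):

from functools import partial

# Bind the extra hyperparameters so the result takes only dataset_dict,
# as build_detection_train_loader expects from a mapper.
mapper = partial(custom_mapper, size=(800, 800), flip_prob=0.5,
                 min_brightness=0.8, max_brightness=1.2,
                 min_contrast=0.8, max_contrast=1.2,
                 min_saturation=0.8, max_saturation=1.2)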
Code Example #6
def custom_mapper(dataset_dict):
    # it will be modified by code below
    dataset_dict = copy.deepcopy(dataset_dict)
    image = utils.read_image(dataset_dict["file_name"], format="BGR")
    transform_list = [
        T.Resize((512, 512)),
        T.RandomBrightness(0.8, 1.8),
        T.RandomContrast(0.6, 1.3),
        T.RandomSaturation(0.8, 1.4),
        T.RandomRotation(angle=[30, 30]),
        T.RandomLighting(0.7),
        T.RandomFlip(prob=0.4, horizontal=False, vertical=True),
    ]
    image, transforms = T.apply_transform_gens(transform_list, image)
    dataset_dict["image"] = torch.as_tensor(
        image.transpose(2, 0, 1).astype("float32"))

    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = utils.annotations_to_instances(annos, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict
Code Example #7
    def __call__(self, dataset_dict):
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format="BGR")

        # aug_input = T.AugInput(image)
        # transforms = self.augmentations(aug_input)
        # image = aug_input.image

        prev_anno = dataset_dict["annotations"]
        bboxes = np.array([obj["bbox"] for obj in prev_anno], dtype=np.float32)
        # category_id = np.array([obj["category_id"] for obj in dataset_dict["annotations"]], dtype=np.int64)
        category_id = np.arange(len(dataset_dict["annotations"]))

        transformed = self.transform(image=image, bboxes=bboxes, category_ids=category_id)
        image = transformed["image"]
        annos = []
        for i, j in enumerate(transformed["category_ids"]):
            d = prev_anno[j]
            d["bbox"] = transformed["bboxes"][i]
            annos.append(d)
        dataset_dict.pop("annotations", None)  # Remove unnecessary field.

        # if not self.is_train:
        #     # USER: Modify this if you want to keep them for some reason.
        #     dataset_dict.pop("annotations", None)
        #     dataset_dict.pop("sem_seg_file_name", None)
        #     return dataset_dict

        image_shape = image.shape[:2]  # h, w
        dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))
        instances = utils.annotations_to_instances(annos, image_shape)
        dataset_dict["instances"] = utils.filter_empty_instances(instances)
        return dataset_dict
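
The self.transform callable in this mapper is never shown, but its keyword signature (image=, bboxes=, category_ids=) matches an albumentations pipeline. A hypothetical reconstruction under that assumption; the bbox format must match the bbox_mode actually stored in the annotations:

import albumentations as A

# Hypothetical stand-in for self.transform; "category_ids" in label_fields
# must match the keyword passed in __call__ above.
transform = A.Compose(
    [
        A.HorizontalFlip(p=0.5),
        A.RandomBrightnessContrast(p=0.4),
    ],
    bbox_params=A.BboxParams(format="pascal_voc", label_fields=["category_ids"]),
)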
Code Example #8
def mapper(dataset_dict):
    # custom mapper
    dataset_dict = copy.deepcopy(dataset_dict)  # the dict is modified below, so copy it first
    image = utils.read_image(dataset_dict["file_name"], format="BGR")  # read the image as a numpy array
#     image, transforms = T.apply_transform_gens(
#         [T.Resize((800, 800)), T.RandomContrast(0.1, 3), T.RandomSaturation(0.1, 2), T.RandomRotation(angle=[0, 180]),
#          T.RandomFlip(prob=0.4, horizontal=False, vertical=True), T.RandomCrop('relative_range', (0.4, 0.6))], image)  # apply augmentations

#     image, transforms = T.apply_transform_gens(
#         [T.Resize((800, 800)), T.RandomContrast(0.1, 3), T.RandomSaturation(0.1, 2),
#          T.RandomFlip(prob=0.4, horizontal=True, vertical=False), T.RandomCrop('relative_range', (0.4, 0.6))], image)
    image, transforms = T.apply_transform_gens(
        [T.Resize((800, 800)), T.RandomContrast(0.1, 3), T.RandomSaturation(0.1, 2),
         T.RandomFlip(prob=0.4, horizontal=True, vertical=False)], image)  # apply augmentations

    dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))  # convert to Tensor

    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]  # annotations must be transformed in sync with the image
    instances = utils.annotations_to_instances(annos, image.shape[:2])  # convert annotations to Instances
    dataset_dict["instances"] = utils.filter_empty_instances(instances)  # drop empty instances
    return dataset_dict
Code Example #9
File: custom.py Project: JoanFM/M5_VisualRecognition
    def __call__(self, dataset_dict):

        self.tfm_gens = []

        dataset_dict = deepcopy(dataset_dict)
        image = utils.read_image(dataset_dict["file_name"],
                                 format=self.img_format)
        utils.check_image_size(dataset_dict, image)

        if self.is_train:
            # Crop
            if 'crop' in self.da.keys():
                crop_gen = T.RandomCrop(self.da['crop']['type'],
                                        self.da['crop']['size'])
                self.tfm_gens.append(crop_gen)
            # Horizontal flip
            if 'flip' in self.da.keys():
                flip_gen = T.RandomFlip(
                    prob=self.da['flip']['prob'],
                    horizontal=self.da['flip']['horizontal'],
                    vertical=self.da['flip']['vertical'])
                self.tfm_gens.append(flip_gen)

        image, transforms = T.apply_transform_gens(self.tfm_gens, image)

        image_shape = image.shape[:2]  # h, w

        dataset_dict["image"] = torch.as_tensor(
            np.ascontiguousarray(image.transpose(2, 0, 1)))

        if not self.is_train:
            dataset_dict.pop("annotations", None)
            dataset_dict.pop("sem_seg_file_name", None)
            return dataset_dict

        if "annotations" in dataset_dict:
            for anno in dataset_dict["annotations"]:
                if not self.mask_on:
                    anno.pop("segmentation", None)
                if not self.keypoint_on:
                    anno.pop("keypoints", None)

            annos = [
                utils.transform_instance_annotations(
                    obj,
                    transforms,
                    image_shape,
                    keypoint_hflip_indices=self.keypoint_hflip_indices)
                for obj in dataset_dict.pop("annotations")
                if obj.get("iscrowd", 0) == 0
            ]
            instances = utils.annotations_to_instances(
                annos, image_shape, mask_format=self.mask_format)

            if self.crop_gen and instances.has("gt_masks"):
                instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
            dataset_dict["instances"] = utils.filter_empty_instances(instances)

        return dataset_dict
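
This mapper drives its augmentations from a self.da configuration dict. The exact contents are project-specific, but the key lookups above imply a shape like the following (values are illustrative):

# Hypothetical self.da config; keys mirror the lookups in __call__
# (da['crop']['type'], da['crop']['size'], da['flip']['prob'], ...).
da = {
    "crop": {"type": "relative_range", "size": [0.8, 0.8]},
    "flip": {"prob": 0.5, "horizontal": True, "vertical": False},
}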
Code Example #10
    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        dataset_dict = copy.deepcopy(
            dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"],
                                 format=self.img_format)
        utils.check_image_size(dataset_dict, image)

        image, transforms = T.apply_transform_gens(self.augmentation, image)
        image_shape = image.shape[:2]  # h, w
        dataset_dict["image"] = torch.as_tensor(
            image.transpose(2, 0, 1).astype("float32"))

        if not self.is_train:
            dataset_dict.pop("annotations", None)
            return dataset_dict

        for anno in dataset_dict["annotations"]:
            if not self.mask_on:
                anno.pop("segmentation", None)
            if not self.keypoint_on:
                anno.pop("keypoints", None)

        # USER: Implement additional transformations if you have other types of data
        # USER: Don't call transpose_densepose if you don't need
        annos = [
            self._transform_densepose(
                utils.transform_instance_annotations(
                    obj,
                    transforms,
                    image_shape,
                    keypoint_hflip_indices=self.keypoint_hflip_indices),
                transforms,
            ) for obj in dataset_dict.pop("annotations")
            if obj.get("iscrowd", 0) == 0
        ]

        if self.mask_on:
            self._add_densepose_masks_as_segmentation(annos, image_shape)

        instances = utils.annotations_to_instances(annos,
                                                   image_shape,
                                                   mask_format="bitmask")
        densepose_annotations = [obj.get("densepose") for obj in annos]
        if densepose_annotations and not all(v is None
                                             for v in densepose_annotations):
            instances.gt_densepose = DensePoseList(densepose_annotations,
                                                   instances.gt_boxes,
                                                   image_shape)

        dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()]
        return dataset_dict
Code Example #11
File: mapper.py Project: vlfom/CSD-detectron2
        def apply_image_augmentations(image, dataset_dict, sem_seg_gt,
                                      augmentations):
            """Applies given augmentation to the given image and its attributes (segm, instances, etc).

            Almost no changes from D2's original code (apart from erasing non-relevant portions, e.g. for
            keypoints), just wrapped it in a function to avoid duplicate code."""

            aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
            transforms = augmentations(aug_input)
            image, sem_seg_gt = aug_input.image, aug_input.sem_seg

            image_shape = image.shape[:2]  # h, w
            # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
            # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
            # Therefore it's important to use torch.Tensor.
            dataset_dict["image"] = torch.as_tensor(
                np.ascontiguousarray(image.transpose(2, 0, 1)))
            if sem_seg_gt is not None:
                dataset_dict["sem_seg"] = torch.as_tensor(
                    sem_seg_gt.astype("long"))

            if not self.is_train:
                dataset_dict.pop("annotations", None)
                dataset_dict.pop("sem_seg_file_name", None)
                return dataset_dict

            if "annotations" in dataset_dict:
                for anno in dataset_dict["annotations"]:
                    if not self.use_instance_mask:
                        anno.pop("segmentation", None)
                    if not self.use_keypoint:
                        anno.pop("keypoints", None)

                annos = [
                    utils.transform_instance_annotations(
                        obj,
                        transforms,
                        image_shape,
                        keypoint_hflip_indices=self.keypoint_hflip_indices,
                    ) for obj in dataset_dict.pop("annotations")
                    if obj.get("iscrowd", 0) == 0
                ]
                instances = utils.annotations_to_instances(
                    annos, image_shape, mask_format=self.instance_mask_format)

                # After transforms such as cropping are applied, the bounding box may no longer
                # tightly bound the object. As an example, imagine a triangle object
                # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
                # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
                # the intersection of original bounding box and the cropping box.
                if self.recompute_boxes:
                    instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
                dataset_dict["instances"] = utils.filter_empty_instances(
                    instances)

            return dataset_dict, transforms
Code Example #12
    def test_random_instance_crop(self):
        from detectron2.data import detection_utils as du
        from detectron2.data.transforms.augmentation import (
            AugInput,
            AugmentationList,
        )
        from detectron2.structures import BoxMode

        aug = tf_crop.RandomInstanceCrop([1.0, 1.0])

        img_w, img_h = 10, 7
        annotations = [
            {
                "category_id": 0,
                "bbox": [1, 1, 4, 3],
                "bbox_mode": BoxMode.XYWH_ABS,
            },
            {
                "category_id": 0,
                "bbox": [2, 2, 4, 3],
                "bbox_mode": BoxMode.XYWH_ABS,
            },
            {
                "category_id": 0,
                "bbox": [6, 5, 3, 2],
                "bbox_mode": BoxMode.XYWH_ABS,
            },
        ]

        img = np.ones([img_h, img_w, 3]) * 3

        inputs = AugInput(image=img)
        # pass additional arguments
        inputs.annotations = annotations
        transforms = AugmentationList([aug])(inputs)

        self.assertIn(inputs.image.shape,
                      [torch.Size([3, 4, 3]),
                       torch.Size([2, 3, 3])])

        # from dataset mapper unused annotations will be filtered out due to the
        #  iscrowd flag
        image_shape = inputs.image.shape[:2]
        annos = [
            du.transform_instance_annotations(
                obj,
                transforms,
                image_shape,
            ) for obj in annotations if obj.get("iscrowd", 0) == 0
        ]
        instances = du.annotations_to_instances(annos, image_shape)
        filtered_instances = du.filter_empty_instances(instances)
        self.assertEqual(len(filtered_instances), 1)
        self.assertArrayEqual(
            filtered_instances.gt_boxes.tensor.tolist(),
            [[0, 0, image_shape[1], image_shape[0]]],
        )
Code Example #13
    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        dataset_dict = copy.deepcopy(
            dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"],
                                 format=self.img_format)
        utils.check_image_size(dataset_dict, image)

        if self.crop_gen is None:
            image, transforms = T.apply_transform_gens(self.tfm_gens, image)
        else:
            if np.random.rand() > 0.5:
                image, transforms = T.apply_transform_gens(
                    self.tfm_gens, image)
            else:
                image, transforms = T.apply_transform_gens(
                    self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:],
                    image)

        image_shape = image.shape[:2]  # h, w

        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = torch.as_tensor(
            np.ascontiguousarray(image.transpose(2, 0, 1)))

        if not self.is_train:
            # USER: Modify this if you want to keep them for some reason.
            dataset_dict.pop("annotations", None)
            return dataset_dict

        if "annotations" in dataset_dict:
            # USER: Modify this if you want to keep them for some reason.
            for anno in dataset_dict["annotations"]:
                if not self.mask_on:
                    anno.pop("segmentation", None)
                anno.pop("keypoints", None)

            # USER: Implement additional transformations if you have other types of data
            annos = [
                utils.transform_instance_annotations(obj, transforms,
                                                     image_shape)
                for obj in dataset_dict.pop("annotations")
                if obj.get("iscrowd", 0) == 0
            ]
            instances = utils.annotations_to_instances(annos, image_shape)
            dataset_dict["instances"] = utils.filter_empty_instances(instances)

        return dataset_dict
Code Example #14
    def __call__(self, dataset_dict):
        """
        :dataset_dict: this contains a single record(as per dataset) which the dataloader requested
        :return: Apply augmentation and convert record(as per dataset) into format required by
        model (https://detectron2.readthedocs.io/en/v0.2.1/tutorials/models.html#model-input-format)
        """
        dataset_dict = copy.deepcopy(dataset_dict)

        # detectron utils reads images in batch, thus shape of image = (Height x Width x Channel)
        image = utils.read_image(dataset_dict["file_name"], format="BGR")

        # convert record into model input format
        prev_anno = dataset_dict["annotations"]

        bboxes = np.array([obj["bbox"] for obj in prev_anno], dtype=np.float32)

        # make dummy category ids for indexing purposes
        category_id = np.arange(len(dataset_dict["annotations"]))

        # transform the input image
        print(dataset_dict["file_name"])  # debug: log which file is being mapped
        transformed = self.transform(image=image, bboxes=bboxes, category_ids=category_id)

        # extract transformed image
        image = transformed["image"]

        # prep annotations for model input
        annos = []

        for i, j in enumerate(transformed["category_ids"]):
            # fetch old annotation using dummy category_id
            temp = prev_anno[j]

            # updating the bboxes of old annotation
            temp["bbox"] = transformed["bboxes"][i]

            annos.append(temp)

        # delete annotations as model will use "instances" instead of annotations
        dataset_dict.pop("annotations", None)

        # image.shape returns H x W x C
        # image.shape[:2] returns H x W
        image_shape = image.shape[:2]

        # transpose operation converts H x W x C --> C x H x W
        dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))

        # convert annotations to instances format
        instances = utils.annotations_to_instances(annos, image_shape)
        # delete any images which do not contain annotations
        dataset_dict["instances"] = utils.filter_empty_instances(instances)

        return dataset_dict
Code Example #15
File: dataset.py Project: EddieMG/T4M5
    def __call__(self, dataset_dict):

        self.tfm_gens = []

        dataset_dict = deepcopy(dataset_dict)
        image = utils.read_image(dataset_dict["file_name"],
                                 format=self.img_format)
        utils.check_image_size(dataset_dict, image)

        if self.is_train:
            # Crop
            '''print("Augmentation: ", "T.RandomCrop('relative', [0.8, 0.4])")
            crop_gen = T.RandomCrop('relative', [0.8, 0.4])
            self.tfm_gens.append(crop_gen)'''
            # Horizontal flip
            print("Augmentation: ",
                  "T.RandomFlip(prob=0.5, horizontal=True, vertical=False)")
            flip_gen = T.RandomFlip(prob=0.5, horizontal=True, vertical=False)
            self.tfm_gens.append(flip_gen)

        image, transforms = T.apply_transform_gens(self.tfm_gens, image)

        image_shape = image.shape[:2]  # h, w

        dataset_dict["image"] = torch.as_tensor(
            np.ascontiguousarray(image.transpose(2, 0, 1)))

        if not self.is_train:
            dataset_dict.pop("annotations", None)
            return dataset_dict

        if "annotations" in dataset_dict:
            for anno in dataset_dict["annotations"]:
                if not self.mask_on:
                    anno.pop("segmentation", None)

            annos = [
                utils.transform_instance_annotations(
                    obj,
                    transforms,
                    image_shape,
                    keypoint_hflip_indices=self.keypoint_hflip_indices)
                for obj in dataset_dict.pop("annotations")
                if obj.get("iscrowd", 0) == 0
            ]
            instances = utils.annotations_to_instances(
                annos, image_shape, mask_format=self.mask_format)

            if self.crop_gen and instances.has("gt_masks"):
                instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
            dataset_dict["instances"] = utils.filter_empty_instances(instances)

        return dataset_dict
Code Example #16
    def __call__(self, dataset_dict):
        if DEBUG:
            visualize_data_dict(dataset_dict, save_path=Path('erase.png'))

        dataset_dict = copy.deepcopy(dataset_dict)
        dataset_dict['image'] = load_input(dataset_dict['file_name'])
        image_shape = (dataset_dict["height"], dataset_dict["width"])

        annos = dataset_dict["annotations"]
        instances = detection_utils.annotations_to_instances(
            annos, image_shape, mask_format="bitmask")
        dataset_dict["instances"] = instances
        return dataset_dict
Code Example #17
    def test_transform_simple_annotation(self):
        transforms = T.TransformList([T.HFlipTransform(400)])
        anno = {
            "bbox": np.asarray([10, 10, 200, 300]),
            "bbox_mode": BoxMode.XYXY_ABS,
            "category_id": 3,
            "segmentation": [[10, 10, 100, 100, 100, 10],
                             [150, 150, 200, 150, 200, 200]],
        }

        output = detection_utils.transform_instance_annotations(
            anno, transforms, (400, 400))
        self.assertTrue(np.allclose(output["bbox"], [200, 10, 390, 300]))
        self.assertEqual(len(output["segmentation"]),
                         len(anno["segmentation"]))
        self.assertTrue(
            np.allclose(output["segmentation"][0],
                        [390, 10, 300, 100, 300, 10]))

        detection_utils.annotations_to_instances([output, output], (400, 400))
Code Example #18
def mapper(dataset_dict):
    # Implement a mapper, similar to the default DatasetMapper, but with your own customizations
    dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
    image = utils.read_image(dataset_dict["file_name"], format="BGR")
    image, transforms = T.apply_transform_gens([T.Resize((800, 800))], image)
    dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))
    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = utils.annotations_to_instances(annos, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict
Code Example #19
File: trainer.py Project: stepp1/compVision-DCC
def custom_mapper(dataset_dict, transform_list):
    dataset_dict = copy.deepcopy(
        dataset_dict)  # it will be modified by code below
    image = utils.read_image(dataset_dict["file_name"], format="BGR")

    image, transforms = T.apply_transform_gens(transform_list, image)
    dataset_dict["image"] = torch.as_tensor(
        image.transpose(2, 0, 1).astype("float32"))

    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
    ]
    instances = utils.annotations_to_instances(annos, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict
Code Example #20
def __call__(self, dataset_dict):
    """
    Args:
        dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
    Returns:
        dict: a format that builtin models in detectron2 accept
    """
    dataset_dict = copy.deepcopy(
        dataset_dict)  # it will be modified by code below
    # USER: Write your own image loading if it's not from a file
    image = utils.read_image(dataset_dict["file_name"],
                             format=self.image_format)
    utils.check_image_size(dataset_dict, image)

    aug_input = T.AugInput(image)
    transforms = self.augmentations(aug_input)
    image = aug_input.image

    image_shape = image.shape[:2]  # h, w
    # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
    # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
    # Therefore it's important to use torch.Tensor.
    dataset_dict["image"] = torch.as_tensor(
        np.ascontiguousarray(image.transpose(2, 0, 1)))

    if "annotations" in dataset_dict:
        # USER: Implement additional transformations if you have other types of data
        annos = [
            utils.transform_instance_annotations(
                obj,
                transforms,
                image_shape,
                keypoint_hflip_indices=self.keypoint_hflip_indices)
            for obj in dataset_dict.pop("annotations")
            if obj.get("iscrowd", 0) == 0
        ]
        instances = utils.annotations_to_instances(
            annos, image_shape, mask_format=self.instance_mask_format)

        # After transforms such as cropping are applied, the bounding box may no longer
        # tightly bound the object. As an example, imagine a triangle object
        # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
        # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
        # the intersection of original bounding box and the cropping box.

        dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict
Code Example #21
File: training.py Project: 818ajian/uav-forests
    def __call__(self, data_dict):
        data_dict = copy.deepcopy(data_dict)

        image = cv2.imread(data_dict["file_name"], cv2.IMREAD_UNCHANGED)
        image = image[:, :, :self.nb_channels]

        # if image.shape[2] == 4:
        #     conversion = cv2.COLOR_BGRA2RGBA
        # else:
        #     conversion = cv2.COLOR_BGR2RGB
        # image = cv2.cvtColor(image, conversion)

        # data_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1)
        #                                      .astype("float32"))

        if self.augmenter:
            rgb_image = image[:, :, :3]
            ndvi_image = None
            if image.shape[2] == 4:
                ndvi_image = image[:, :, 3]

            data_dict["original_annotations"] = data_dict["annotations"]
            data_dict["original_image"] = image

            augmented = self.augmenter(rgb_image, data_dict["annotations"],
                                       ndvi_image)

            data_dict["annotations"] = augmented["annotations"]
            if augmented['ndvi'] is not None:
                image = np.dstack([augmented["rgb_image"], augmented["ndvi"]])
            else:
                image = augmented["rgb_image"]

        if self.cfg.INPUT.FORMAT == "BGR":
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        elif self.cfg.INPUT.FORMAT == "BGRN":
            image = cv2.cvtColor(image, cv2.COLOR_RGBA2BGRA)

        data_dict["image"] = torch.as_tensor(
            image.transpose(2, 0, 1).astype("float32"))

        instances = det_utils.annotations_to_instances(
            data_dict["annotations"], image.shape[:2])

        data_dict["instances"] = det_utils.filter_empty_instances(instances)

        return data_dict
Code Example #22
    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        dataset_dict = copy.deepcopy(
            dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"],
                                 format=self.img_format)
        utils.check_image_size(dataset_dict, image)

        image, transforms = T.apply_transform_gens(self.augmentation, image)
        image_shape = image.shape[:2]  # h, w
        dataset_dict["image"] = torch.as_tensor(
            image.transpose(2, 0, 1).astype("float32"))

        if not self.is_train:
            dataset_dict.pop("annotations", None)
            return dataset_dict

        annos = [
            utils.transform_instance_annotations(obj, transforms,
                                                 image.shape[:2])
            for obj in dataset_dict.pop("annotations")
        ]
        dataset_dict["instances"] = utils.annotations_to_instances(
            annos, image.shape[:2])

        #         # USER: Implement additional transformations if you have other types of data
        #         # USER: Don't call transpose_densepose if you don't need
        #         annos = [
        #             self._transform_densepose(
        #                 utils.transform_instance_annotations(
        #                     obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
        #                 ),
        #                 transforms,
        #             )
        #             for obj in dataset_dict.pop("annotations")
        #             if obj.get("iscrowd", 0) == 0
        #         ]

        #         instances = utils.annotations_to_instances(annos, image_shape, mask_format="bitmask")

        #         dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()]
        return dataset_dict
Code Example #23
    def __call__(self, dataset_dict):
        dataset_dict = copy.deepcopy(dataset_dict)
        image = utils.read_image(dataset_dict["file_name"], format="BGR")
        aug_input = T.AugInput(image)
        transforms = self.augmentations(aug_input)
        image = aug_input.image

        image_shape = image.shape[:2]  # h, w
        dataset_dict["image"] = torch.as_tensor(
            image.transpose(2, 0, 1).astype("float32"))
        annos = [
            utils.transform_instance_annotations(obj, transforms, image_shape)
            for obj in dataset_dict.pop("annotations")
            if obj.get("iscrowd", 0) == 0
        ]
        instances = utils.annotations_to_instances(annos, image_shape)
        dataset_dict["instances"] = utils.filter_empty_instances(instances)

        return dataset_dict
Code Example #24
    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        image_shape = dataset_dict['height'], dataset_dict['width']
        annos = dataset_dict["annotations"]
        mislabeled = [obj["mislabeled"] for obj in annos]

        instances = utils.annotations_to_instances(
            annos, image_shape, mask_format=self.instance_mask_format)
        instances.gt_mislabeled = torch.tensor(mislabeled, dtype=torch.bool)
        dataset_dict = super(CustomDatasetMapper, self).__call__(dataset_dict)
        dataset_dict["instances"] = utils.filter_empty_instances(instances)

        return dataset_dict
Code Example #25
def customMapper(dataset_dict):
  dataset_dict = copy.deepcopy(dataset_dict)
  image = utils.read_image(dataset_dict["file_name"], format="BGR")

  transform_list = [
                    T.Resize((600, 800)),
                    T.RandomFlip(prob=0.6, horizontal=True, vertical=False),
                    T.RandomFlip(prob=0.6, horizontal=False, vertical=True),
                    ]
  image, transforms = T.apply_transform_gens(transform_list, image)
  dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))
  annos = [
      utils.transform_instance_annotations(obj, transforms, image.shape[:2])
      for obj in dataset_dict.pop("annotations")
      if obj.get("iscrowd", 0) == 0
  ]
  instances = utils.annotations_to_instances(annos, image.shape[:2])
  dataset_dict["instances"] = utils.filter_empty_instances(instances)
  return dataset_dict
Code Example #26
def mapper(dataset_dict):
    dataset_dict = copy.deepcopy(dataset_dict)
    image = np.stack([
        utils.read_image(img, format="BGR") for img in dataset_dict["filename"]
    ])
    image = torch.from_numpy(image)
    annos = [
        # utils.transform_instance_annotations(annotation, [], image.shape[1:])
        annotation for annotation in dataset_dict.pop("annotations")
    ]
    # The model's input; image is stacked as (N, H, W, C), so (H, W) is shape[1:3]
    return {
        "image": image,
        "instances": utils.annotations_to_instances(
            annos, image.shape[1:3], mask_format="bitmask"),
    }
Code Example #27
    def test_transform_RLE_resize(self):
        transforms = T.TransformList(
            [T.HFlipTransform(400), T.ScaleTransform(300, 400, 400, 400, "bilinear")]
        )
        mask = np.zeros((300, 400), order="F").astype("uint8")
        mask[:, :200] = 1

        anno = {
            "bbox": np.asarray([10, 10, 200, 300]),
            "bbox_mode": BoxMode.XYXY_ABS,
            "segmentation": mask_util.encode(mask[:, :, None])[0],
            "category_id": 3,
        }
        output = detection_utils.transform_instance_annotations(
            copy.deepcopy(anno), transforms, (400, 400)
        )

        inst = detection_utils.annotations_to_instances(
            [output, output], (400, 400), mask_format="bitmask"
        )
        self.assertTrue(isinstance(inst.gt_masks, BitMasks))
Code Example #28
    def __call__(self, dataset_dict):
        dataset_dict = copy.deepcopy(dataset_dict)
        # it will be modified by code below
        # can use other ways to read image
        image = utils.read_image(dataset_dict["file_name"], format="BGR")
        # See "Data Augmentation" tutorial for detailed usage
        auginput = T.AugInput(image)
        transform = T.Resize((800, 800))(auginput)
        print(f'resized image {dataset_dict["file_name"]}')  # image is an array; the file name lives in dataset_dict
        image = torch.from_numpy(auginput.image.transpose(2, 0, 1))
        annos = [
            utils.transform_instance_annotations(annotation, [transform],
                                                 image.shape[1:])
            for annotation in dataset_dict.pop("annotations")
        ]
        return {
            # create the format that the model expects
            "image": image,
            "instances": utils.annotations_to_instances(annos, image.shape[1:])
        }
Code Example #29
def custom_mapper(input_dict):
    dataset_dict = copy.deepcopy(
        input_dict)  # it will be modified by code below
    image = utils.read_image(dataset_dict["file_name"], format="BGR")
    transform_list = [
        T.Resize((1200, 1200)),
        T.RandomFlip(prob=0.6, horizontal=True, vertical=False),
        T.RandomFlip(prob=0.6, horizontal=False, vertical=True),
        T.RandomContrast(0.7, 3.2),
        T.RandomBrightness(0.6, 1.8),
    ]
    image, transforms = T.apply_transform_gens(transform_list, image)
    dataset_dict["image"] = torch.as_tensor(
        image.transpose(2, 0, 1).astype("float32"))
    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
    ]
    instances = utils.annotations_to_instances(annos, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict
Code Example #30
File: detectron2_neurons.py Project: nel-lab/volpy
def mapper(dataset_dict):
    # Implement a mapper, similar to the default DatasetMapper, but with your own customizations
    dataset_dict = copy.deepcopy(
        dataset_dict)  # it will be modified by code below
    image = utils.read_image(dataset_dict["file_name"], format="BGR")
    masks = anno_to_mask(image, dataset_dict['annotations'])
    #plt.figure(1);plt.subplot(211);plt.imshow(image);plt.subplot(212);plt.imshow(masks.sum(axis=2))
    # Crop and augmentation
    image, masks = random_crop(image, masks)
    masks = masks[:, :, np.where(masks.sum(axis=(0, 1)) > 50)[0]]
    augmentation = create_augmentation()
    image, masks = imgaug_augmentation(image, masks, augmentation)
    masks = masks[:, :, np.where(masks.sum(axis=(0, 1)) > 50)[0]]
    masks = masks.transpose([2, 0, 1])
    annotations = masks_to_anno(masks)
    #plt.figure(2);plt.subplot(211);plt.imshow(image);plt.subplot(212);plt.imshow(masks.sum(axis=0))
    # Save into dataset_dict
    dataset_dict["image"] = torch.as_tensor(
        image.transpose(2, 0, 1).astype("float32"))
    instances = utils.annotations_to_instances(annotations, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict