Example #1
import copy

import torch
import detectron2.data.transforms as T
from detectron2.data import detection_utils as utils


def mapper(dataset_dict):
    # Work on a copy; the loader may reuse the original dict across epochs.
    dataset_dict = copy.deepcopy(dataset_dict)
    image = utils.read_image(dataset_dict["file_name"], format="BGR")
    # Resize to a fixed 1152x1152 input; `transforms` records the applied ops
    # so the same geometry can be replayed on the annotations below.
    image, transforms = T.apply_transform_gens([T.Resize((1152, 1152))], image)
    # HWC uint8 -> CHW float32 tensor, as detectron2 models expect.
    dataset_dict["image"] = torch.as_tensor(
        image.transpose(2, 0, 1).astype("float32"))

    # `transform_instance_annotations` is assumed to be a user-defined,
    # rotation-aware variant (see Examples #2 and #3); crowd regions are skipped.
    annos = [
        transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = utils.annotations_to_instances_rotated(annos, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict
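
A function-style mapper like this can be handed straight to detectron2's loader builder. A minimal usage sketch, assuming a standard detectron2 `cfg` object:

from detectron2.data import build_detection_train_loader

# Each training batch now flows through the custom mapper above.
train_loader = build_detection_train_loader(cfg, mapper=mapper)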
Example #2
    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        # Implement a mapper, similar to the default DatasetMapper, but with our own customizations.
        dataset_dict = copy.deepcopy(
            dataset_dict)  # it will be modified by the code below
        image = utils.read_image(dataset_dict["file_name"], format="BGR")

        # Custom augs to be used while training.
        # Only HFlip and Resize are supported for rotated boxes, so the
        # rotation aug stays disabled:  # [T.RandomRotation([0, 90])]
        augs = [T.RandomFlip(0.4, horizontal=True, vertical=False)]

        if self.is_train:
            tfm_gens = self.tfm_gens + augs
        else:
            tfm_gens = self.tfm_gens

        logging.getLogger(__name__).info("Original Augmentation: " +
                                         str(self.tfm_gens))

        logging.getLogger(__name__).info("Updated Augmentation List: " +
                                         str(tfm_gens))

        image, transforms = T.apply_transform_gens(tfm_gens, image)
        dataset_dict["image"] = torch.as_tensor(
            image.transpose(2, 0, 1).astype("float32"))

        # Boxes are assumed to be in (cx, cy, w, h, angle) / XYWHA_ABS format;
        # replay the recorded transforms on each rotated box in place.
        for a in dataset_dict['annotations']:
            a['bbox'] = transforms.apply_rotated_box(np.asarray(
                [a['bbox']]))[0].tolist()

        annos = dataset_dict['annotations']
        instances = utils.annotations_to_instances_rotated(
            annos, image.shape[:2])
        dataset_dict["instances"] = utils.filter_empty_instances(instances)
        return dataset_dict
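
Examples #1 and #3 call a rotation-aware annotation transform that is not shown in the snippets. A minimal sketch of such a helper, generalizing the in-place loop above (the function name and the XYWHA_ABS assumption are ours, not from the original code):

import numpy as np
from detectron2.structures import BoxMode


def transform_instance_annotations_rotated(annotation, transforms, image_size):
    # Assumes the box is stored as (cx, cy, w, h, angle) in absolute units.
    bbox = np.asarray([annotation["bbox"]], dtype=np.float64)
    annotation["bbox"] = transforms.apply_rotated_box(bbox)[0].tolist()
    annotation["bbox_mode"] = BoxMode.XYWHA_ABS
    return annotation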
Example #3
    def __call__(self, dataset_dict):

        dataset_dict = copy.deepcopy(dataset_dict)
        image = utils.read_image(dataset_dict["file_name"],
                                 format=self.image_format)
        utils.check_image_size(dataset_dict, image)

        # AugInput carries the image through detectron2's Augmentation API;
        # calling self.augmentations mutates it in place and returns the
        # TransformList that was actually applied.
        aug_input = T.AugInput(image, sem_seg=None)
        transforms = self.augmentations(aug_input)

        image = aug_input.image

        image_shape = image.shape[:2]
        dataset_dict["image"] = torch.as_tensor(
            np.ascontiguousarray(image.transpose(2, 0, 1)))

        if not self.is_train:
            # Annotations are not needed at inference time.
            dataset_dict.pop("annotations", None)
            return dataset_dict

        if "annotations" in dataset_dict:

            annos = [
                self.transform_instance_annotations_rotated(
                    obj, transforms, image_shape)
                for obj in dataset_dict.pop("annotations")
                if obj.get("iscrowd", 0) == 0
            ]

            instances = utils.annotations_to_instances_rotated(
                annos, image_shape)

            # recompute_boxes needs mask annotations: boxes are re-derived
            # from the transformed masks.
            if self.recompute_boxes:
                instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
            dataset_dict["instances"] = utils.filter_empty_instances(instances)

        return dataset_dict
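
A class-based mapper like this one is usually wired in through a custom trainer. A minimal sketch, assuming the class above is named RotatedDatasetMapper and takes the usual (cfg, is_train) constructor arguments (both names are assumptions):

from detectron2.data import build_detection_train_loader
from detectron2.engine import DefaultTrainer


class RotatedTrainer(DefaultTrainer):
    @classmethod
    def build_train_loader(cls, cfg):
        # Hand an instance of the custom mapper to the standard loader builder.
        return build_detection_train_loader(
            cfg, mapper=RotatedDatasetMapper(cfg, is_train=True))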
import copy

import numpy as np
import torch
import detectron2.data.transforms as T
from detectron2.data import detection_utils
from detectron2.structures import RotatedBoxes


def rotated_mapper(original_dataset_dict):
    # Implement a mapper, similar to the default DatasetMapper, but with our own customizations.

    dataset_dict = copy.deepcopy(
        original_dataset_dict)  # it will be modified by the code below
    # Resample the image to a randomized ground sample distance (GSD):
    # crop a (target_size / scale)-pixel window, then resize it to
    # target_size so the result has ~target_gsd metres per pixel.
    original_gsd = dataset_dict["gsd"]
    target_gsd = np.random.uniform(0.09, 0.13)  # randomize target GSD
    scale = original_gsd / target_gsd

    target_size = 400
    target_crop = int(target_size / scale)
    target_crop = (target_crop, target_crop)

    image_np = detection_utils.read_image(dataset_dict["file_name"],
                                          format="BGR")

    # Rotated boxes, assumed to be (cx, cy, w, h, angle) per annotation.
    boxes = np.asarray([anno['bbox'] for anno in dataset_dict['annotations']])

    # Make sure the random crop contains annotations: retry up to 150 times,
    # then return None so the sample is skipped (detectron2's MapDataset
    # draws a replacement when the mapper returns None).
    attempts = 0
    while True:
        random_crop = T.RandomCrop('absolute',
                                   target_crop).get_transform(image_np)
        # CropTransform.apply_coords only translates the first two columns,
        # leaving the (w, h, angle) part of each XYWHA box untouched.
        cropped_boxes = RotatedBoxes(
            random_crop.apply_coords(copy.deepcopy(boxes)))
        inside_ind = cropped_boxes.inside_box(target_crop)
        # Accept crops containing at least two and at most 100 box centers.
        if 1 < sum(inside_ind) <= 100:
            break
        attempts += 1
        if attempts > 150:
            return None

    image, transforms = T.apply_transform_gens([
        random_crop,
        T.Resize((target_size, target_size)),
    ], image_np)
    dataset_dict["image"] = torch.as_tensor(
        image.transpose(2, 0, 1).astype("float32"))

    # rotated_transform_instance_annotations is assumed to be a user-defined,
    # rotation-aware helper (compare the sketch after Example #2).
    annos = [
        rotated_transform_instance_annotations(obj, transforms,
                                               image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = detection_utils.annotations_to_instances_rotated(
        annos, image.shape[:2])
    instances = detection_utils.filter_empty_instances(instances)
    # Drop instances whose centers fall outside the transformed image.
    inside_ind = instances.gt_boxes.inside_box(image.shape[:2])
    instances = instances[inside_ind]

    assert ((instances.gt_boxes.tensor.numpy()[:, 2] > 0).all().item()
            ), "width not > 0\n\n" + str(instances.gt_boxes.tensor.numpy())

    dataset_dict["instances"] = instances
    return dataset_dict
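
For concreteness, a worked instance of the GSD arithmetic in rotated_mapper, with assumed values:

# Worked example of the crop sizing above (all values assumed):
original_gsd = 0.10                  # metres per pixel of the source image
target_gsd = 0.12                    # one draw from uniform(0.09, 0.13)
scale = original_gsd / target_gsd    # ~0.833
target_crop = int(400 / scale)       # 480 px window
# A 480 px crop at 0.10 m/px covers 48 m; resized to 400 px it becomes
# 48 m / 400 px = 0.12 m/px, i.e. exactly the sampled target GSD.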