Example #1
    def load_mask(self, img, index):
        # external mask, random order
        if self.mask_type == 0:
            mask_index = random.randint(0, len(self.mask_data) - 1)
            mask = load_grayscale(self.mask_data[mask_index])
            mask = self.resize(mask, False)
            # binarize: resize interpolation can introduce intermediate values
            mask = (mask > 0).astype(np.uint8)
            if self.mask_reverse:
                return (1 - mask) * 255
            return mask * 255

        # generated random free-form stroke mask
        if self.mask_type == 1:
            mask = 1 - generate_stroke_mask([256, 256])
            return (mask * 255).astype(np.uint8)

        # external mask, fixed order (mask index follows the sample index)
        if self.mask_type == 2:
            mask = load_grayscale(self.mask_data[index])
            mask = self.resize(mask, False)
            # binarize: resize interpolation can introduce intermediate values
            mask = (mask > 0).astype(np.uint8)
            if self.mask_reverse:
                return (1 - mask) * 255
            return mask * 255
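
All four examples call a project helper named load_grayscale whose implementation is not shown here. A minimal sketch with OpenCV, assuming it simply reads a single-channel uint8 image from disk, could look like this:

import cv2
import numpy as np
from pathlib import Path


def load_grayscale(file_path: Path) -> np.ndarray:
    # Read the file as a single-channel uint8 array.
    # cv2.imread returns None instead of raising when the file is unreadable.
    image = cv2.imread(str(file_path), cv2.IMREAD_GRAYSCALE)
    if image is None:
        raise FileNotFoundError(f"Could not read image: {file_path}")
    return image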
Example #2
def confusion_matrix_from_files(y_true_path: Path, y_pred_path: Path,
                                num_classes: int) -> np.ndarray:
    y_true = load_grayscale(y_true_path)
    y_pred = load_grayscale(y_pred_path)

    if y_true.shape != y_pred.shape:
        raise ValueError(
            f"y_true and y_pred should have the same shape. "
            f"y_true shape = {y_true.shape} y_pred.shape = {y_pred.shape} "
            f"y_pred_path = {y_pred_path} "
            f"y_true_path = {y_true_path}")

    return calculate_confusion_matrix_from_arrays(y_true,
                                                  y_pred,
                                                  num_classes=num_classes)
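
The counting itself is delegated to calculate_confusion_matrix_from_arrays. A common way to implement such a helper is a single np.bincount over the flattened (true, predicted) label pairs; the following is a sketch of that technique, not necessarily the project's own code, and it assumes both arrays hold integer class labels in [0, num_classes):

import numpy as np


def calculate_confusion_matrix_from_arrays(y_true: np.ndarray,
                                           y_pred: np.ndarray,
                                           num_classes: int) -> np.ndarray:
    # Encode each (true, pred) pair as a single index, count with bincount,
    # then reshape to a matrix: rows = ground truth, columns = predictions.
    pairs = (num_classes * y_true.astype(np.int64).ravel()
             + y_pred.astype(np.int64).ravel())
    counts = np.bincount(pairs, minlength=num_classes ** 2)
    return counts.reshape(num_classes, num_classes)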
Example #3
def get_annotation_info(annotation: pd.DataFrame, i: int, hash2id: dict,
                        image_sizes: dict, mask_path: Path) -> dict:
    """

    Args:
        annotation
        i
        hash2id:
        image_sizes:
        mask_path:

    Returns:

    """
    image_id = annotation.loc[i, "ImageID"]

    image_width, image_height = image_sizes[image_id]

    mask_file_name = annotation.loc[i, "MaskPath"]

    png = (load_grayscale(mask_path / mask_file_name) > 0).astype(np.uint8)
    png = cv2.resize(png, (image_width, image_height),
                     interpolation=cv2.INTER_NEAREST)

    segmentation = binary_mask2coco(png)

    if not segmentation:
        return {}

    class_name = annotation.loc[i, "LabelName"]

    bbox = coco_seg2bbox(segmentation, image_height, image_width)

    # note: built-in hash() is salted per process, so ids differ across runs
    annotation_id = str(hash(f"{image_id}_{i}"))

    area = bbox[2] * bbox[3]  # bbox_width * bbox_height

    return {
        "id": annotation_id,
        "image_id": image_id,
        "category_id": hash2id[class_name],
        "iscrowd": 0,
        "area": area,
        "bbox": bbox,
        "segmentation": segmentation,
    }
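
Example #3 depends on two helpers that are not shown: binary_mask2coco (binary mask to COCO polygon segmentation) and coco_seg2bbox (polygons to a bounding box). For orientation, coco_seg2bbox could plausibly compute the tight box around all polygon vertices; this is a hypothetical sketch matching the call signature above, not the project's implementation:

import numpy as np
from typing import List


def coco_seg2bbox(segmentation: List[List[float]],
                  image_height: int,
                  image_width: int) -> List[int]:
    # COCO polygons are flat [x0, y0, x1, y1, ...] lists; stack all vertices.
    points = np.concatenate(
        [np.asarray(polygon).reshape(-1, 2) for polygon in segmentation])
    x_min, y_min = points.min(axis=0)
    x_max, y_max = points.max(axis=0)
    # Clip to the image bounds and return COCO-style [x, y, width, height].
    x_min = int(max(0, np.floor(x_min)))
    y_min = int(max(0, np.floor(y_min)))
    x_max = int(min(image_width - 1, np.ceil(x_max)))
    y_max = int(min(image_height - 1, np.ceil(y_max)))
    return [x_min, y_min, x_max - x_min + 1, y_max - y_min + 1]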
Example #4
    def __getitem__(self, idx: int) -> Dict[str, Any]:
        image_path, mask_path = self.samples[idx]

        image = load_rgb(image_path)
        mask = load_grayscale(mask_path)

        # apply augmentations
        sample = self.transform(image=image, mask=mask)
        image, mask = sample["image"], sample["mask"]

        # binarize after augmentation: interpolation may blur mask values
        mask = (mask > 0).astype(np.uint8)

        mask = torch.from_numpy(mask)

        return {
            "image_id": image_path.stem,
            "features": tensor_from_rgb_image(image),
            "masks": torch.unsqueeze(mask, 0).float(),
        }
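
The method above plugs into the usual PyTorch pipeline. The dataset class name, the samples layout and the concrete transform below are assumptions for illustration; only the returned dictionary keys come from the example itself:

import albumentations as A
from pathlib import Path
from torch.utils.data import DataLoader

# Hypothetical: SegmentationDataset is the class that owns __getitem__ above,
# and samples is its list of (image_path, mask_path) tuples.
samples = [(Path("images/0001.jpg"), Path("masks/0001.png"))]  # placeholder paths

transform = A.Compose([
    A.Resize(256, 256),       # uniform size so samples can be batched
    A.HorizontalFlip(p=0.5),  # applied to image and mask together
    A.Normalize(),            # image-only normalization
])

dataset = SegmentationDataset(samples=samples, transform=transform)
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)

batch = next(iter(loader))
print(batch["features"].shape)  # (batch, 3, 256, 256)
print(batch["masks"].shape)     # (batch, 1, 256, 256)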