def test_get_callback_body(self, mask_file):
    instance_mask = InstanceMask(mask_file)
    instance_mask.all_attributes = {
        1: {"occluded": True},
        2: {"occluded": False},
    }
    assert instance_mask.get_callback_body() == {
        "checksum": "c86aca4e348b051f60c2d7d1bf750fb3accdfeaf",
        "fileSize": 12,
        "info": [
            {"attributes": {"occluded": True}, "instanceId": 1},
            {"attributes": {"occluded": False}, "instanceId": 2},
        ],
    }
Example #2
def _get_mots_data(
    image_path: str,
    original_mask_subdir: str,
    mask_subdir: str,
    stem: str,
    label_content: Dict[str, Any],
) -> Data:
    data = Data(image_path)
    labeled_multipolygons = []
    for label_info in label_content.get("labels", ()):
        if "poly2d" not in label_info:
            continue
        labeled_multipolygon = LabeledMultiPolygon(
            polygons=(poly2d_info["vertices"]
                      for poly2d_info in label_info["poly2d"]),
            category=label_info["category"],
            attributes=label_info["attributes"],
            instance=str(label_info["id"]),
        )
        labeled_multipolygons.append(labeled_multipolygon)

    mask_path = os.path.join(mask_subdir, f"{stem}.png")
    mask_info = _save_and_get_mask_info(
        os.path.join(original_mask_subdir, f"{stem}.png"),
        mask_path,
        os.path.join(mask_subdir, f"{stem}.json"),
    )
    ins_mask = InstanceMask(mask_path)
    ins_mask.all_attributes = mask_info["all_attributes"]

    label = data.label
    label.multi_polygon = labeled_multipolygons
    label.instance_mask = ins_mask
    return data
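
_get_mots_data only reads a handful of keys from label_content; a minimal, hypothetical payload it would accept looks like the following (all field values are invented for illustration):

label_content = {
    "labels": [
        {
            "id": 7,
            "category": "pedestrian",
            "attributes": {"occluded": False},
            "poly2d": [
                {"vertices": [[10, 10], [60, 10], [60, 80], [10, 80]]},
            ],
        },
    ],
}
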
Example #3
def _get_instance_mask(stem: str, original_mask_dir: str, mask_dir: str) -> InstanceMask:
    mask_path = os.path.join(mask_dir, f"{stem}.png")
    mask_info = _save_and_get_mask_info(
        os.path.join(original_mask_dir, f"{stem}.png"),
        mask_path,
        os.path.join(mask_dir, f"{stem}.json"),
        "ins",
    )

    ins_mask = InstanceMask(mask_path)
    ins_mask.all_attributes = mask_info["all_attributes"]
    return ins_mask
Example #4
    def test_get_data(self, accesskey, url, tmp_path, mask_file):
        gas_client = GAS(access_key=accesskey, url=url)
        dataset_name = get_dataset_name()
        dataset_client = gas_client.create_dataset(dataset_name)

        dataset_client.create_draft("draft-1")
        dataset_client.upload_catalog(Catalog.loads(CATALOG))
        segment_client = dataset_client.get_or_create_segment("segment1")
        path = tmp_path / "sub"
        path.mkdir()

        # Upload data with label
        for i in range(10):
            local_path = path / f"hello{i}.txt"
            local_path.write_text(f"CONTENT{i}")
            data = Data(local_path=str(local_path))
            data.label = Label.loads(LABEL)

            semantic_mask = SemanticMask(str(mask_file))
            semantic_mask.all_attributes = {0: {"occluded": True}, 1: {"occluded": False}}
            data.label.semantic_mask = semantic_mask

            instance_mask = InstanceMask(str(mask_file))
            instance_mask.all_attributes = {0: {"occluded": True}, 1: {"occluded": False}}
            data.label.instance_mask = instance_mask

            panoptic_mask = PanopticMask(str(mask_file))
            panoptic_mask.all_category_ids = {100: 0, 101: 1}
            data.label.panoptic_mask = panoptic_mask
            segment_client.upload_data(data)

        for i in range(10):
            data = segment_client.get_data(f"hello{i}.txt")
            assert data.path == f"hello{i}.txt"
            assert data.label.box2d == Label.loads(LABEL).box2d

            stem = os.path.splitext(data.path)[0]
            remote_semantic_mask = data.label.semantic_mask
            semantic_mask = RemoteSemanticMask.from_response_body(SEMANTIC_MASK_LABEL)
            assert remote_semantic_mask.path == f"{stem}.png"
            assert remote_semantic_mask.all_attributes == semantic_mask.all_attributes

            remote_instance_mask = data.label.instance_mask
            instance_mask = RemoteInstanceMask.from_response_body(INSTANCE_MASK_LABEL)
            assert remote_instance_mask.path == f"{stem}.png"
            assert remote_instance_mask.all_attributes == instance_mask.all_attributes

            remote_panoptic_mask = data.label.panoptic_mask
            panoptic_mask = RemotePanopticMask.from_response_body(PANOPTIC_MASK_LABEL)
            assert remote_panoptic_mask.path == f"{stem}.png"
            assert remote_panoptic_mask.all_category_ids == panoptic_mask.all_category_ids

        gas_client.delete_dataset(dataset_name)
Example #5
    def test_upload_dataset_with_mask(self, accesskey, url, tmp_path, mask_file):
        gas_client = GAS(access_key=accesskey, url=url)
        dataset_name = get_dataset_name()
        gas_client.create_dataset(dataset_name)

        dataset = Dataset(name=dataset_name)
        segment = dataset.create_segment("Segment1")
        # When uploading labels, the catalog needs to be uploaded first.
        dataset._catalog = Catalog.loads(CATALOG_CONTENTS)

        path = tmp_path / "sub"
        path.mkdir()
        local_path = path / "hello.txt"
        local_path.write_text("CONTENT")
        data = Data(local_path=str(local_path))
        remote_semantic_mask = SemanticMask(str(mask_file))
        remote_semantic_mask.all_attributes = {0: {"occluded": True}, 1: {"occluded": False}}
        data.label.semantic_mask = remote_semantic_mask

        instance_mask = InstanceMask(str(mask_file))
        instance_mask.all_attributes = {0: {"occluded": True}, 1: {"occluded": False}}
        data.label.instance_mask = instance_mask

        panoptic_mask = PanopticMask(str(mask_file))
        panoptic_mask.all_category_ids = {100: 0, 101: 1}
        data.label.panoptic_mask = panoptic_mask
        segment.append(data)

        dataset_client = gas_client.upload_dataset(dataset)
        dataset_client.commit("upload dataset with label")
        dataset = Dataset(dataset_name, gas_client)
        remote_semantic_mask = dataset[0][0].label.semantic_mask
        semantic_mask = RemoteSemanticMask.from_response_body(SEMANTIC_MASK_LABEL)
        assert dataset.catalog == Catalog.loads(CATALOG_CONTENTS)
        assert remote_semantic_mask.path == semantic_mask.path
        assert remote_semantic_mask.all_attributes == semantic_mask.all_attributes

        remote_instance_mask = dataset[0][0].label.instance_mask
        instance_mask = RemoteInstanceMask.from_response_body(INSTANCE_MASK_LABEL)
        assert dataset.catalog == Catalog.loads(CATALOG_CONTENTS)
        assert remote_instance_mask.path == instance_mask.path
        assert remote_instance_mask.all_attributes == instance_mask.all_attributes

        remote_panoptic_mask = dataset[0][0].label.panoptic_mask
        panoptic_mask = RemotePanopticMask.from_response_body(PANOPTIC_MASK_LABEL)
        assert dataset.catalog == Catalog.loads(CATALOG_CONTENTS)
        assert remote_panoptic_mask.path == panoptic_mask.path
        assert remote_panoptic_mask.all_category_ids == panoptic_mask.all_category_ids

        gas_client.delete_dataset(dataset_name)
Example #6
def _get_instance_mask(
    image_path: str,
    mask_subdir: str,
    original_mask_subdir: str,
    instance_num: int,
    filename_reformatter: Callable[[str], str],
) -> InstanceMask:
    filename = filename_reformatter(os.path.basename(image_path))
    mask_path = os.path.join(mask_subdir,
                             f"{os.path.splitext(filename)[0]}.png")
    if instance_num == 1:
        mask = _get_reformatted_mask(
            os.path.join(original_mask_subdir, filename))
    else:
        mask = _get_reformatted_mask(
            os.path.join(original_mask_subdir, "1", filename))
        for instance_id in range(2, instance_num + 1):
            alter_mask = np.array(
                Image.open(
                    os.path.join(original_mask_subdir, str(instance_id),
                                 filename)), )[:, :, 0]
            mask[alter_mask == 255] = instance_id

    Image.fromarray(mask).save(mask_path)
    return InstanceMask(mask_path)
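
A toy sketch of the merge loop above, assuming each per-instance mask marks its foreground with 255: instance 1 has already been written into mask, and instance 2 is folded in by overwriting the pixels where its binary mask is set.

import numpy as np

mask = np.array([[0, 1, 0],
                 [0, 1, 0]], dtype=np.uint8)          # instance 1 already labeled
alter_mask = np.array([[0, 0, 255],
                       [0, 0, 255]], dtype=np.uint8)  # binary mask for instance 2
mask[alter_mask == 255] = 2
# mask is now [[0, 1, 2],
#              [0, 1, 2]]
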
Example #7
def _get_data(image_path: str, root_path: str, segment_name: str,
              folder_name: str) -> Data:
    filename = os.path.basename(image_path)
    city = filename.split("_", 1)[0]
    image_prefix = filename.rsplit("_", 1)[0]
    label_dir = os.path.join(root_path, folder_name, segment_name, city)
    data = Data(image_path)
    # get semantic mask and instance mask
    label = data.label
    label.semantic_mask = SemanticMask(
        os.path.join(label_dir, f"{image_prefix}_{folder_name}_labelIds.png"))
    label.instance_mask = InstanceMask(
        os.path.join(label_dir,
                     f"{image_prefix}_{folder_name}_instanceIds.png"))
    # get polygons
    polygons: List[LabeledPolygon] = []
    with open(
            os.path.join(label_dir,
                         f"{image_prefix}_{folder_name}_polygons.json"),
            encoding="utf-8",
    ) as fp:
        objects = json.load(fp)["objects"]
    for obj in objects:
        polygons.append(LabeledPolygon(obj["polygon"], category=obj["label"]))
    label.polygon = polygons

    return data
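
The filename handling above appears to assume Cityscapes-style names; a small sketch with a made-up filename shows how the label paths are derived (folder_name is assumed to be "gtFine" here):

filename = "aachen_000000_000019_leftImg8bit.png"   # hypothetical example
city = filename.split("_", 1)[0]                    # "aachen"
image_prefix = filename.rsplit("_", 1)[0]           # "aachen_000000_000019"
# _get_data would then look for:
#   aachen_000000_000019_gtFine_labelIds.png      (semantic mask)
#   aachen_000000_000019_gtFine_instanceIds.png   (instance mask)
#   aachen_000000_000019_gtFine_polygons.json     (polygons)
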
Example #8
def VOC2012Segmentation(path: str) -> Dataset:
    """`VOC2012Segmentation <http://host.robots.ox.ac.uk/pascal/VOC/voc2012/>`_ dataset.

    The file structure should be like::

        <path>/
            JPEGImages/
                <image_name>.jpg
                ...
            SegmentationClass/
                <mask_name>.png
                ...
            SegmentationObject/
                <mask_name>.png
                ...
            ImageSets/
                Segmentation/
                    train.txt
                    val.txt
                    ...
                ...
            ...

    Arguments:
        path: The root directory of the dataset.

    Returns:
        Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.

    """
    root_path = os.path.abspath(os.path.expanduser(path))

    image_path = os.path.join(root_path, "JPEGImages")
    semantic_mask_path = os.path.join(root_path, "SegmentationClass")
    instance_mask_path = os.path.join(root_path, "SegmentationObject")
    image_set_path = os.path.join(root_path, "ImageSets", "Segmentation")

    dataset = Dataset(DATASET_NAME)
    dataset.load_catalog(
        os.path.join(os.path.dirname(__file__), "catalog.json"))

    for segment_name in _SEGMENT_NAMES:
        segment = dataset.create_segment(segment_name)
        with open(os.path.join(image_set_path, f"{segment_name}.txt"),
                  encoding="utf-8") as fp:
            for stem in fp:
                stem = stem.strip()
                data = Data(os.path.join(image_path, f"{stem}.jpg"))
                label = data.label
                mask_filename = f"{stem}.png"
                label.semantic_mask = SemanticMask(
                    os.path.join(semantic_mask_path, mask_filename))
                label.instance_mask = InstanceMask(
                    os.path.join(instance_mask_path, mask_filename))

                segment.append(data)

    return dataset
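
A hedged usage sketch for the loader above; the local path is a placeholder and the indexing mirrors the dataset[0][0] access used in the other examples:

dataset = VOC2012Segmentation("path/to/VOC2012")  # placeholder path
data = dataset[0][0]                              # first data of the first segment
print(data.label.semantic_mask.path)
print(data.label.instance_mask.path)
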
Example #9
    def test_cache_dataset(self, accesskey, url, tmp_path):
        gas_client = GAS(access_key=accesskey, url=url)
        dataset_name = get_dataset_name()
        gas_client.create_dataset(dataset_name)

        dataset = Dataset(name=dataset_name)
        segment = dataset.create_segment("Segment1")
        # When uploading labels, the catalog needs to be uploaded first.
        dataset._catalog = Catalog.loads(_CATALOG)

        path = tmp_path / "sub"
        semantic_path = tmp_path / "semantic_mask"
        instance_path = tmp_path / "instance_mask"
        path.mkdir()
        semantic_path.mkdir()
        instance_path.mkdir()
        for i in range(_SEGMENT_LENGTH):
            local_path = path / f"hello{i}.txt"
            local_path.write_text("CONTENT")
            data = Data(local_path=str(local_path))
            data.label = Label.loads(_LABEL)

            semantic_mask = semantic_path / f"semantic_mask{i}.png"
            semantic_mask.write_text("SEMANTIC_MASK")
            data.label.semantic_mask = SemanticMask(str(semantic_mask))

            instance_mask = instance_path / f"instance_mask{i}.png"
            instance_mask.write_text("INSTANCE_MASK")
            data.label.instance_mask = InstanceMask(str(instance_mask))
            segment.append(data)

        dataset_client = gas_client.upload_dataset(dataset)
        dataset_client.commit("commit-1")
        cache_path = tmp_path / "cache_test"
        dataset_client.enable_cache(str(cache_path))
        segment1 = Segment("Segment1", client=dataset_client)
        for data in segment1:
            data.open()
            data.label.semantic_mask.open()
            data.label.instance_mask.open()

        segment_cache_path = (cache_path / dataset_client.dataset_id /
                              dataset_client.status.commit_id / "Segment1")
        semantic_mask_cache_path = segment_cache_path / "semantic_mask"
        instance_mask_cache_path = segment_cache_path / "instance_mask"

        for cache_dir, extension in (
            (segment_cache_path, "txt"),
            (semantic_mask_cache_path, "png"),
            (instance_mask_cache_path, "png"),
        ):
            assert set(cache_dir.glob(f"*.{extension}")) == set(
                cache_dir / f"hello{i}.{extension}"
                for i in range(_SEGMENT_LENGTH))

        gas_client.delete_dataset(dataset_name)
Example #10
def _get_unsupervised_labeled_data(image_path: str, root_path: str,
                                   resolution: str, segment_name: str) -> Data:
    data = Data(image_path)
    label = data.label
    mask_stem = os.path.splitext(os.path.basename(data.path))[0]
    mask_path = os.path.join(resolution, segment_name, f"{mask_stem}.png")

    label.instance_mask = InstanceMask(
        os.path.join(root_path, "Annotations_unsupervised", mask_path))
    return data
Example #11
def _get_cheetah_instance_mask(image_path: str, mask_subdir: str,
                               original_mask_subdir: str, _: int,
                               __: Callable[[str], str]) -> InstanceMask:
    filename = os.path.basename(image_path)
    new_filename = f"{os.path.splitext(filename)[0]}.png"
    mask_path = os.path.join(mask_subdir, new_filename)
    mask = _get_reformatted_mask(
        os.path.join(original_mask_subdir, "1", filename))
    alter_mask = np.array(
        Image.open(os.path.join(original_mask_subdir, "2",
                                new_filename)), )[:, :, 0]
    mask[alter_mask == 255] = 2

    Image.fromarray(mask).save(mask_path)
    return InstanceMask(mask_path)
Example #12
def _get_instance_mask(
    image_path: str,
    mask_dir: str,
    original_mask_dir: str,
    filename_reformatter: Callable[[str], str],
) -> InstanceMask:
    stem = os.path.splitext(os.path.basename(image_path))[0]
    mask_path = os.path.join(mask_dir, f"{stem}.png")
    mask = np.array(
        Image.open(os.path.join(original_mask_dir, filename_reformatter(stem))),
    )[:, :, 0]
    # reformat mask
    # from {background: 0, overlap: 1~254, target: 255}
    # to {background: 0, target: 1, overlap: 255}
    overlap = np.logical_and(mask > 0, mask < 255)
    mask[mask == 255] = 1
    mask[overlap] = 255
    Image.fromarray(mask).save(mask_path)
    return InstanceMask(mask_path)
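
A one-row worked example of the remapping described in the comment above; note the order of operations: 255 is rewritten to 1 only after the overlap positions have been recorded.

import numpy as np

mask = np.array([[0, 255, 120]], dtype=np.uint8)
overlap = np.logical_and(mask > 0, mask < 255)  # [[False, False, True]]
mask[mask == 255] = 1
mask[overlap] = 255
# mask is now [[0, 1, 255]]
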
Example #13
def CIHP(path: str) -> Dataset:
    """`CIHP <https://github.com/Engineering-Course/CIHP_PGN>`_ dataset.

    The file structure should be like::

        <path>
            Testing/
                Images/
                    0000002.jpg
                    ...
                test_id.txt
            Training/
                Images/
                    0000006.jpg
                    ...
                Category_ids/
                    0000006.png
                    ...
                Instance_ids/
                    0000006.png
                    ...
                train_id.txt
            Validation/
                Images/
                    0000001.jpg
                    ...
                Category_ids/
                    0000001.png
                    ...
                Instance_ids/
                    0000001.png
                    ...
                val_id.txt

    Arguments:
        path: The root directory of the dataset.

    Returns:
        Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.

    """
    root_path = os.path.join(os.path.abspath(os.path.expanduser(path)),
                             "instance-level_human_parsing")

    dataset = Dataset(DATASET_NAME)
    dataset.load_catalog(
        os.path.join(os.path.dirname(__file__), "catalog.json"))

    for segment_name, segment_path in _SEGMENTS_INFO.items():
        segment = dataset.create_segment(segment_name)
        segment_abspath = os.path.join(root_path, segment_path)
        image_path = os.path.join(segment_abspath, "Images")
        with open(os.path.join(segment_abspath, f"{segment_name}_id.txt"),
                  encoding="utf-8") as fp:
            if segment_name == "test":
                for stem in fp:
                    segment.append(
                        Data(os.path.join(image_path, f"{stem.rstrip()}.jpg")))
            else:
                category_path = os.path.join(segment_abspath, "Category_ids")
                instance_path = os.path.join(segment_abspath, "Instance_ids")
                for stem in fp:
                    stem = stem.rstrip()
                    data = Data(os.path.join(image_path, f"{stem}.jpg"))
                    label = data.label
                    png_filename = f"{stem}.png"
                    label.semantic_mask = SemanticMask(
                        os.path.join(category_path, png_filename))
                    label.instance_mask = InstanceMask(
                        os.path.join(instance_path, png_filename))
                    segment.append(data)
    return dataset
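
A hedged follow-up sketch, assuming a local copy of the dataset: for the non-test segments each sample carries file-based masks, and opening one with PIL yields per-pixel category or instance IDs.

import numpy as np
from PIL import Image

dataset = CIHP("path/to/cihp")  # placeholder path
data = dataset[0][0]            # assumes this segment is not the unlabeled test split
instance_ids = np.unique(np.array(Image.open(data.label.instance_mask.path)))
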
Example #14
def test_init(self):
    instance_mask = InstanceMask("hello.png")
    assert instance_mask.path == "hello.png"