コード例 #1
0
ファイル: bgr.py プロジェクト: cm107/common_utils
    def test(self):
        """Exercise BGR_Interval splitting and containment checks with sample colors."""
        # bgr0 falls inside the 0-100 reference interval on every channel;
        # bgr1's green channel (200) falls outside it.
        bgr0 = BGR(50, 25, 99)
        bgr1 = BGR(50, 200, 99)

        # Reference interval plus two candidate sub-intervals:
        # bgr_interval0 lies fully inside bgr_interval, while
        # bgr_interval1's green upper bound (125) extends past it.
        bgr_interval = BGR_Interval.from_bgr_pair(BGR(0, 0, 0), BGR(100, 100, 100))
        bgr_interval0 = BGR_Interval.from_bgr_pair(BGR(25, 25, 25), BGR(50, 50, 50))
        bgr_interval1 = BGR_Interval.from_bgr_pair(BGR(25, 25, 25), BGR(50, 125, 50))
        # Split the reference interval at bgr0 into a left and right part.
        left_bgr_interval, right_bgr_interval = bgr_interval.split_at(bgr=bgr0)

        # Log each query result; the logger color methods only affect
        # console output, not behavior.
        logger.cyan(f"bgr_interval: {bgr_interval}")
        logger.cyan(f"Split at {bgr0}")
        logger.yellow(f"Result: {left_bgr_interval}, {right_bgr_interval}")
        logger.yellow(f"bgr_interval.contains({bgr0}): {bgr_interval.contains(bgr0)}")
        logger.yellow(f"bgr_interval.contains({bgr1}): {bgr_interval.contains(bgr1)}")
        logger.yellow(f"bgr_interval.contains_detailed({bgr1}): {bgr_interval.contains_detailed(bgr1)}")
        logger.yellow(f"bgr_interval.contains_bgr_interval(left_bgr_interval): {bgr_interval.contains_bgr_interval(left_bgr_interval)}")
        logger.yellow(f"bgr_interval.contains_bgr_interval(right_bgr_interval): {bgr_interval.contains_bgr_interval(right_bgr_interval)}")
        logger.purple(f"bgr_interval.contains_bgr_interval_detailed(bgr_interval0): {bgr_interval.contains_bgr_interval_detailed(bgr_interval0)}")
        logger.purple(f"bgr_interval.contains_bgr_interval_detailed(bgr_interval1): {bgr_interval.contains_bgr_interval_detailed(bgr_interval1)}")
コード例 #2
0
from logger import logger
from annotation_utils.coco.structs import COCO_Dataset

# Load a COCO dataset from a relative working directory.
dataset = COCO_Dataset.load_from_path(
    json_path='bk_28_02_2020_11_18_30_coco-data/HSR-coco.json',
    img_dir='bk_28_02_2020_11_18_30_coco-data')
logger.purple(
    f'Flag0 len(dataset.images): {len(dataset.images)}, len(dataset.annotations): {len(dataset.annotations)}'
)
# Prune annotations by keypoint count (min_num_kpts=11).
dataset.prune_keypoints(min_num_kpts=11, verbose=True)
logger.purple(
    f'Flag1 len(dataset.images): {len(dataset.images)}, len(dataset.annotations): {len(dataset.annotations)}'
)
# Relocate the images and update the image paths stored in the dataset
# so it stays consistent with the new location.
dataset.move_images(dst_img_dir='test_img',
                    preserve_filenames=True,
                    update_img_paths=True,
                    overwrite=True,
                    show_pbar=True)
logger.purple(
    f'Flag2 len(dataset.images): {len(dataset.images)}, len(dataset.annotations): {len(dataset.annotations)}'
)
dataset.save_to_path(save_path='prune_test.json', overwrite=True)
# NOTE(review): kpt_idx_offset=-1 presumably shifts keypoint indices for
# display (1-based -> 0-based) — confirm against display_preview's docs.
dataset.display_preview(kpt_idx_offset=-1)
# coco_data_path = "/home/pasonatech/Desktop/10/10_9/crescent_blender_ram"

# BlenderProc-generated dataset
coco_data_path = "/home/pasonatech/blender_proc/BlenderProc-master/examples/crescent_test/collage_merged_img"

# Register the COCO-format dataset with detectron2 under the name "marker".
# NOTE(review): register_coco_instances, MetadataCatalog and DatasetCatalog
# appear to be imported outside this excerpt — confirm.
register_coco_instances(
    name = "marker",
    metadata = {},
    json_file = f'{coco_data_path}/coco_annotations.json',
    image_root = f'{coco_data_path}',
)

# Single class named '1'.
MetadataCatalog.get("marker").thing_classes = ['1']
abc_metadata_train = MetadataCatalog.get("marker")
logger.purple(abc_metadata_train)
dataset_dicts = DatasetCatalog.get("marker")
logger.blue(dataset_dicts)

# Fine-tuning setup
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
from detectron2.model_zoo import get_config_file, get_checkpoint_url

#model_config_path = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
model_config_path = "COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml"


# Start from the model-zoo config and point training at the registered set.
cfg = get_cfg()
cfg.merge_from_file(get_config_file(model_config_path))
cfg.DATASETS.TRAIN = ("marker",)
コード例 #4
0
    def to_dict0(self) -> dict:
        """Serialize this dataset-config collection to a compact dict.

        Factors the per-dataset image dirs and annotation paths into one
        shared collection_dir plus relative paths, and collapses each
        per-dataset list down to a single scalar when every dataset
        shares the same value.

        Returns:
            dict with keys 'collection_dir', 'dataset_names', and
            'dataset_specific' (sub-keys 'img_dir', 'ann_path',
            'ann_format').
        """
        # TODO: Work In Progress
        img_dir_list = [
            Path(config.img_dir).abs() for config in self.dataset_config_list
        ]
        ann_path_list = [
            Path(config.ann_path).abs() for config in self.dataset_config_list
        ]
        ann_format_list = [
            config.ann_format for config in self.dataset_config_list
        ]
        # Longest directory containing all image dirs / annotation paths;
        # the collection dir must contain both.
        img_container_dir = Path.get_longest_container_dir(img_dir_list)
        ann_container_dir = Path.get_longest_container_dir(ann_path_list)
        collection_dir = Path.get_longest_container_dir(
            [img_container_dir, ann_container_dir])
        # Dataset names are the image dirs made relative to collection_dir;
        # an image dir equal to collection_dir maps to the empty path.
        dataset_names = [
            img_dir.replace(f'{collection_dir.path_str}/', '') \
                if img_dir != collection_dir else Path('') \
                for img_dir in img_dir_list
        ]

        # One empty-Path placeholder per dataset name, to be filled in by
        # Path.tail2head below.
        # BUGFIX: was `[Path('') * len(dataset_names)]`, which multiplied
        # the Path object itself and produced a single-element list
        # instead of one placeholder per dataset.
        rel_img_dir_list = [Path('')] * len(dataset_names)

        logger.purple(f'Before dataset_names: {dataset_names}')
        logger.purple(f'Before rel_img_dir_list: {rel_img_dir_list}')

        # Keep moving shared tail components of dataset_names into
        # rel_img_dir_list until a pass moves nothing.
        while True:
            logger.blue('Flag0')
            tail_moved = Path.tail2head(dataset_names, rel_img_dir_list)

            if not tail_moved:
                break

        # Keep moving shared head components of dataset_names into
        # collection_dir until a pass moves nothing.
        while True:
            logger.blue('Flag1')
            head_moved = Path.head2tail(dataset_names, collection_dir)

            if not head_moved:
                break

        logger.purple(f'After dataset_names: {dataset_names}')
        logger.purple(f'After rel_img_dir_list: {rel_img_dir_list}')

        # Defensive: normalize any None slot to an empty relative dir.
        rel_img_dir_list = [
            rel_img_dir if rel_img_dir is not None else Path('')
            for rel_img_dir in rel_img_dir_list
        ]
        # Annotation paths relative to their dataset dir (or to the
        # collection dir when the dataset name is empty).
        rel_ann_path_list = [
            ann_path.replace(f'{collection_dir}/{dataset_name}/', '') \
                if dataset_name != Path('') else ann_path.replace(f'{collection_dir}/', '') \
                for ann_path, dataset_name in zip(ann_path_list, dataset_names)
        ]

        # '.' stands for "the collection dir itself".
        dataset_names = [
            dataset_name.path_str if dataset_name.path_str != '' else '.'
            for dataset_name in dataset_names
        ]
        # Collapse to a scalar when all datasets share one relative img dir
        # (dict.fromkeys preserves order while deduplicating).
        rel_img_dir = rel_img_dir_list[0].path_str if len(
            list(dict.fromkeys(rel_img_dir_list))) == 1 else [
                rel_img_dir.path_str for rel_img_dir in rel_img_dir_list
            ]
        if type(rel_img_dir) is str:
            rel_img_dir = rel_img_dir if rel_img_dir != '' else '.'
        elif type(rel_img_dir) is list:
            rel_img_dir = [
                dir_path if dir_path != '' else '.' for dir_path in rel_img_dir
            ]
        else:
            raise Exception
        # Same scalar-collapse rule for annotation paths and formats.
        rel_ann_path = rel_ann_path_list[0].path_str if len(
            list(dict.fromkeys(rel_ann_path_list))) == 1 else [
                rel_ann_path.path_str for rel_ann_path in rel_ann_path_list
            ]
        ann_format = ann_format_list[0] if len(
            list(dict.fromkeys(ann_format_list))) == 1 else ann_format_list

        return {
            'collection_dir': collection_dir.path_str,
            'dataset_names': dataset_names,
            'dataset_specific': {
                'img_dir': rel_img_dir,
                'ann_path': rel_ann_path,
                'ann_format': ann_format
            }
        }
コード例 #5
0
# logger.cyan(info1.contributor)

# logger.purple(f'info:\n{info}')
# logger.purple(f'info0:\n{info0}')

# logger.purple(f'info.to_dict():\n{info.to_dict()}')

# Build a COCO "info" section from a plain dict, log it, and save it.
info2 = COCO_Info.from_dict({
    'description': 'This is a test',
    'url': 'https://test/url.com',
    'version': '1.0',
    'year': '2020',
    'contributor': 'Clayton',
    'date_created': '2020/03/10'
})
logger.purple(f'info2:\n{info2}')

info2.save_to_path('info.json', overwrite=True)

# Combine several datasets per a YAML config, then inspect one
# annotation and replace its 3D keypoints with an empty list.
from annotation_utils.coco.structs import COCO_Dataset
dataset = COCO_Dataset.combine_from_config(
    config_path=
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/config/yaml/box_hsr_kpt_sim.yaml',
    img_sort_attr_name='file_name',
    show_pbar=True)
# dataset.display_preview(kpt_idx_offset=-1)
coco_ann = dataset.annotations[123]
logger.purple(coco_ann)
from common_utils.common_types.keypoint import Keypoint3D_List
# Presumably checks that to_dict() copes with empty 3D keypoints — confirm.
coco_ann.keypoints_3d = Keypoint3D_List()
logger.purple(f'coco_ann.to_dict():\n{coco_ann.to_dict()}')
コード例 #6
0
from logger import logger
from annotation_utils.coco.structs import COCO_Dataset
# Load a COCO dataset and print each category's label skeleton.
dataset = COCO_Dataset.load_from_path(
    json_path='/home/clayton/workspace/prj/data_keep/data/toyota/dataset/sim/20200228/28_02_2020_11_18_30_coco-data/HSR-coco.json',
    img_dir='/home/clayton/workspace/prj/data_keep/data/toyota/dataset/sim/20200228/28_02_2020_11_18_30_coco-data'
)
for coco_cat in dataset.categories:
    logger.purple(f'name: {coco_cat.name}')
    # NOTE(review): skeleton_idx_offset=1 presumably shifts keypoint
    # indices from 0-based to 1-based — confirm against the method's docs.
    label_skeleton = coco_cat.get_label_skeleton(skeleton_idx_offset=1)
    logger.cyan(f'label_skeleton: {label_skeleton}')
コード例 #7
0
 def load(self):
     """Load annotation data and shapes from self.annotation_path, logging progress."""
     logger.purple(f"Loading: {self.annotation_path}")
     # Two-phase load: raw data first, then the shape objects built from it.
     self.load_data()
     self.load_shapes()
     logger.info(f"LabelMe Annotation Loaded: {self.annotation_path}")
コード例 #8
0
from logger import logger
from annotation_utils.ndds.structs import ObjectSettings

# Load NDDS object settings and verify that serialization round-trips:
# from_dict(to_dict()) must compare equal to the original.
settings = ObjectSettings.load_from_path(
    '/home/clayton/workspace/prj/data_keep/data/ndds/HSR/_object_settings.json'
)
assert settings == ObjectSettings.from_dict(settings.to_dict())

logger.purple(settings)
コード例 #9
0
from logger import logger
from common_utils.common_types.segmentation import Segmentation, Polygon
import numpy as np

# OpenCV-style contours: each point is wrapped in an extra axis, i.e.
# shape (N, 1, 2). The last contour has only two points.
contours = [
    np.array([[[0, 0]], [[1, 0]], [[1, 2]], [[0, 1]]]),
    np.array([[[0, 0]], [[1, 0]], [[1, 2]], [[0, 1]]]),
    np.array([[[1, 0]], [[2, 0]], [[2, 2]], [[1, 1]]]),
    np.array([[[1, 1]], [[2, 1]], [[2, 3]], [[2, 10]], [[1, 2]]]),
    np.array([[[50, 100]], [[100, 200]]])
]

# seg = Segmentation.from_contour(contour_list=contours)
# NOTE(review): exclude_invalid_polygons=True presumably skips contours
# that cannot form a valid polygon (e.g. the 2-point one) — confirm.
seg = Segmentation.from_contour(contour_list=contours,
                                exclude_invalid_polygons=True)
logger.purple(seg)
コード例 #10
0
                category_id=dataset.categories.get_unique_category_from_name(
                    'category_b').id,
                image_id=dataset.images[i].id,
                bbox=BBox(xmin=0, ymin=0, xmax=100, ymax=100),
                id=len(dataset.annotations)))
    else:
        dataset.annotations.append(
            COCO_Annotation(
                category_id=dataset.categories.get_unique_category_from_name(
                    'category_b').id,
                image_id=dataset.images[i].id,
                bbox=BBox(xmin=0, ymin=0, xmax=100, ymax=100),
                id=len(dataset.annotations)))
        dataset.annotations.append(
            COCO_Annotation(
                category_id=dataset.categories.get_unique_category_from_name(
                    'category_c').id,
                image_id=dataset.images[i].id,
                bbox=BBox(xmin=0, ymin=0, xmax=100, ymax=100),
                id=len(dataset.annotations)))

# Handler lengths before category removal.
logger.purple('Before:')
dataset.print_handler_lengths()

# Remove the two categories (and, per verbose output, whatever the
# method prunes along with them) by name.
dataset.remove_categories_by_name(category_names=['category_b', 'category_c'],
                                  verbose=True)

logger.purple('After:')
dataset.print_handler_lengths()

dataset.save_to_path(save_path='remove_test.json', overwrite=True)
コード例 #11
0
        Polygon.from_list(
            points=[
                [1, 1], [2, 1], [2, 3], [2, 10], [1, 2]
            ],
            dimensionality=2,
            demarcation=True
        )
    ]
)
# Scale every polygon's points by a factor of 100, in place.
# NOTE(review): assumes `100 * val` is overloaded to scale the project's
# point type — confirm.
for poly in seg:
    new_points = []
    for val in poly.points:
        new_val = 100 * val
        new_points.append(new_val)
    poly.points = new_points

logger.purple(f'seg:\n{seg}')
logger.purple(f'seg.to_bbox():\n{seg.to_bbox()}')
logger.purple(f'seg.area():\n{seg.area()}')
logger.purple(f'seg.centroid():\n{seg.centroid()}')
logger.purple(f'seg.within(seg.to_bbox()): {seg.within(seg.to_bbox())}')

from common_utils.cv_drawing_utils import draw_segmentation
from streamer.cv_viewer import cv_simple_image_viewer
import numpy as np

# Draw the segmentation on a black canvas sized to its bounding box and
# open it in a simple viewer window.
seg_bbox = seg.to_bbox()
seg_bbox_h, seg_bbox_w = seg_bbox.shape()
blank_frame = np.zeros(shape=[int(seg_bbox_h), int(seg_bbox_w), 3])
vis = draw_segmentation(img=blank_frame, segmentation=seg)
quit_flag = cv_simple_image_viewer(img=vis, preview_width=1000)
コード例 #12
0
from logger import logger
from annotation_utils.coco.structs import COCO_License_Handler, COCO_License

# Build a handler with three licenses, then exercise its list-like API:
# append, copy, delete, item assignment, iteration, sort, shuffle,
# id lookup, dict serialization and save.
license0 = COCO_License(url='url_a', id=0, name='license_a')
license1 = COCO_License(url='url_b', id=1, name='license_b')
license2 = COCO_License(url='url_c', id=2, name='license_c')
license_handler = COCO_License_Handler([license0, license1, license2])

license_handler.append(COCO_License(url='url_d', id=3, name='license_d'))
logger.purple(license_handler.license_list)
# Mutate a copy so the original handler stays intact.
license_handler0 = license_handler.copy()
del license_handler0[1]
license_handler0[1] = COCO_License(url='url_x', id=99, name='license_x')
for coco_license in license_handler0:
    logger.cyan(coco_license)
logger.blue(len(license_handler0))
license_handler0.sort(attr_name='name')
for coco_license in license_handler0:
    logger.cyan(coco_license)

logger.info('Shuffle')
license_handler0.shuffle()
for coco_license in license_handler0:
    logger.cyan(coco_license)

# Lookup by license id (id=3 was appended above).
coco_license = license_handler0.get_obj_from_id(3)
logger.purple(f'coco_license: {coco_license}')

logger.purple(
    f'license_handler0.to_dict_list():\n{license_handler0.to_dict_list()}')
license_handler0.save_to_path('license_handler.json', overwrite=True)
コード例 #13
0
import time
from logger import logger
from common_utils.path_utils import find_shortest_common_rel_path

# Paths that all end in 'c/d.png' but diverge earlier, so the shortest
# common relative path should be exactly 'c/d.png'.
path_list = [
    '/path/to/dir/a/b/c/d.png', 'path/lskdjf/to/dir/a/b/c/d.png',
    'path/to/a/dir/a/b/c/d.png', 'lksjdfljksdlkfjlsdkfj/c/d.png'
]
# Time the lookup with a nanosecond-resolution clock.
time0 = time.time_ns()
result = find_shortest_common_rel_path(path_list)
time1 = time.time_ns()
assert result == 'c/d.png'
logger.cyan(result)
# FIX: corrected the typo 'ellapsed' -> 'elapsed' in the log message.
logger.purple(f'{time1-time0} ns elapsed')
コード例 #14
0
from annotation_utils.ndds.structs.settings import CameraConfig
from logger import logger

# Load an NDDS camera-settings file and check that serialization
# round-trips: from_dict(to_dict()) must compare equal.
config = CameraConfig.load_from_path(
    '/home/clayton/workspace/prj/data_keep/data/ndds/HSR/_camera_settings.json'
)
logger.purple(config)
assert config == CameraConfig.from_dict(config.to_dict())
コード例 #15
0
from logger import logger
from common_utils.common_types.point import Point2D_List
from annotation_utils.labelme.structs import LabelmeAnnotation, LabelmeShape

# Load a LabelMe annotation, append a rectangle shape, inspect every
# shape, then save under a new filename.
ann = LabelmeAnnotation.load_from_path(
    '/home/clayton/workspace/test/labelme_testing/orig_cat.json')
# NOTE(review): demarcation=True presumably means the list is already
# grouped into [x, y] pairs — confirm against Point2D_List.from_list.
ann.shapes.append(shape=LabelmeShape(
    label='test_bbox',
    points=Point2D_List.from_list([[50, 50], [100, 100]], demarcation=True),
    shape_type='rectangle'))

for shape in ann.shapes:
    logger.purple(f'shape.label: {shape.label}')
    logger.purple(f'shape.shape_type: {shape.shape_type}')
    logger.cyan(
        f'shape.points.to_numpy().shape: {shape.points.to_numpy().shape}')

ann.save_to_path('/home/clayton/workspace/test/labelme_testing/cat.json')
コード例 #16
0
from logger import logger
from annotation_utils.dataset.config import DatasetConfigCollectionHandler

# Load a dataset-config collection from JSON, then write it to YAML
# twice via a save -> reload -> save round-trip (test.yaml, test0.yaml).
handler = DatasetConfigCollectionHandler.load_from_path(
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/config/json/box_hsr_kpt_train.json'
)
logger.purple(handler.to_dict_list())
for collection in handler:
    for config in collection:
        logger.blue(config)
handler.save_to_path('test.yaml', overwrite=True)
handler0 = DatasetConfigCollectionHandler.load_from_path('test.yaml')
handler0.save_to_path('test0.yaml', overwrite=True)

# Compare the two generated YAML files line by line; a round-trip
# through the handler should reproduce the file exactly (modulo
# surrounding whitespace).
# FIX: the original opened both files without ever closing them, and the
# initial readline() results were immediately overwritten by the loop,
# so the first line of each file was consumed but never compared.
with open('test.yaml') as fp, open('test0.yaml') as fp0:
    for i, (line, line0) in enumerate(zip(fp, fp0)):
        logger.white(f'{i}: {line.strip()}')
        logger.white(f'{i}: {line0.strip()}')
        assert line.strip() == line0.strip()