Ejemplo n.º 1
0
    def get_image_and_ann():
        """Download one image, convert it to PNG, and write its three
        Cityscapes annotation files (polygons JSON, color mask, label mask).

        Relies on variables from the enclosing scope: image_dir_path, ann_dir,
        image_name, image_id, ann, name2id, app_logger, train_val_flag,
        base_image_name and the cityscapes_* suffix constants.
        """
        mkdir(image_dir_path)
        mkdir(ann_dir)
        image_path = os.path.join(image_dir_path, image_name)
        api.image.download_path(image_id, image_path)
        image_ext_to_png(image_path)

        mask_color, mask_label, poly_json = from_ann_to_cityscapes_mask(
            ann, name2id, app_logger, train_val_flag)

        # Cityscapes names annotation files after the image stem without the
        # '_leftImg8bit' suffix; compute the shared stem once instead of three
        # times (the commented-out legacy variant without .replace() is gone).
        base_name = get_file_name(base_image_name).replace('_leftImg8bit', '')
        dump_json_file(
            poly_json,
            os.path.join(ann_dir, base_name + cityscapes_polygons_suffix))
        write(
            os.path.join(ann_dir, base_name + cityscapes_color_suffix),
            mask_color)
        write(
            os.path.join(ann_dir, base_name + cityscapes_labels_suffix),
            mask_label)
Ejemplo n.º 2
0
 def _create(self):
     """Create the project directory and initialize it with an empty meta.

     Raises RuntimeError when the directory already exists.
     """
     if dir_exists(self.directory):
         message = "Can not create new project {!r}. Directory {!r} already exists".format(
             self.name, self.directory)
         raise RuntimeError(message)
     mkdir(self.directory)
     self.set_meta(ProjectMeta())
Ejemplo n.º 3
0
def save_project_as_pascal_voc_detection(save_path, project: Project):
    """Export *project* in Pascal VOC detection layout under *save_path*.

    For every dataset a JPEGImages / Annotations / ImageSets/Layout tree is
    created; images are copied (or re-encoded when the extension differs),
    one XML annotation per item is written, and per-tag image lists are saved.
    """
    for dataset in project.datasets:
        dataset_root = os.path.join(save_path, dataset.name)
        dataset_rel_path = os.path.relpath(dataset_root, save_path)

        images_dir = os.path.join(dataset_root, 'JPEGImages')
        anns_dir = os.path.join(dataset_root, 'Annotations')
        lists_dir = os.path.join(dataset_root, 'ImageSets/Layout')

        fs_utils.mkdir(dataset_root)
        # ImageSets holds the train/val/etc. lists.
        for subdir in ('ImageSets', 'ImageSets/Layout', 'Annotations',
                       'JPEGImages'):
            fs_utils.mkdir(os.path.join(dataset_root, subdir))

        samples_by_tags = defaultdict(list)  # TRAIN: [img_1, img2, ..]

        for item_name in dataset:
            img_path, ann_path = dataset.get_item_paths(item_name)
            stem = fs_utils.get_file_name(item_name)
            out_img_path = os.path.join(images_dir, stem + OUT_IMG_EXT)
            out_ann_path = os.path.join(anns_dir, stem + XML_EXT)

            # Copy as-is when the extension already matches; otherwise
            # re-encode through read/write.
            if item_name.endswith(OUT_IMG_EXT):
                fs_utils.copy_file(img_path, out_img_path)
            else:
                image_utils.write(out_img_path, image_utils.read(img_path))

            ann = Annotation.load_json_file(ann_path,
                                            project_meta=project.meta)

            # Collect image tags so the image lists can be generated later.
            for tag in ann.img_tags:
                samples_by_tags[tag.name].append((stem, len(ann.labels)))

            writer = pascal_voc_writer.Writer(path=dataset_rel_path,
                                              width=ann.img_size[1],
                                              height=ann.img_size[0])

            for label in ann.labels:
                bbox: Rectangle = label.geometry.to_bbox()
                writer.addObject(name=label.obj_class.name,
                                 xmin=bbox.left,
                                 ymin=bbox.top,
                                 xmax=bbox.right,
                                 ymax=bbox.bottom)
            writer.save(out_ann_path)

        save_images_lists(lists_dir, samples_by_tags)
Ejemplo n.º 4
0
 def _create(self):
     """Create the project directory (if missing) and write an empty meta.

     Raises RuntimeError when the directory exists and is not empty; an
     existing empty directory is reused silently.
     """
     if not dir_exists(self.directory):
         mkdir(self.directory)
     elif len(list_files_recursively(self.directory)) > 0:
         raise RuntimeError(
             "Cannot create new project {!r}. Directory {!r} already exists and is not empty"
             .format(self.name, self.directory))
     self.set_meta(ProjectMeta())
Ejemplo n.º 5
0
 def _create(self):
     '''
     Create a leaf directory and an empty meta.json file. Raises RuntimeError
     when the project directory already exists and is not empty.
     '''
     exists = dir_exists(self.directory)
     if exists and len(list_files_recursively(self.directory)) > 0:
         raise RuntimeError(
             "Cannot create new project {!r}. Directory {!r} already exists and is not empty"
             .format(self.name, self.directory))
     if not exists:
         mkdir(self.directory)
     self.set_meta(ProjectMeta())
Ejemplo n.º 6
0
    def __init__(self,
                 logger=None,
                 task_id=None,
                 server_address=None,
                 agent_token=None,
                 ignore_errors=False,
                 ignore_task_id=False):
        """Initialize the app session.

        Every argument left as None falls back to an environment value
        (TASK_ID, server address, agent token) via take_with_default.
        Sets up the public API client, session directory, agent API channel,
        and the asyncio loop with signal-driven shutdown.
        """
        self._ignore_task_id = ignore_task_id
        self.logger = take_with_default(logger, default_logger)
        self._ignore_errors = ignore_errors
        self.task_id = take_with_default(task_id, os.environ["TASK_ID"])
        self.server_address = take_with_default(server_address,
                                                os.environ[SERVER_ADDRESS])
        self.agent_token = take_with_default(agent_token,
                                             os.environ[AGENT_TOKEN])
        self.public_api = Api.from_env(ignore_task_id=self._ignore_task_id)
        self._app_url = self.public_api.app.get_url(self.task_id)
        self._session_dir = "/sessions/{}".format(self.task_id)
        # DEBUG_APP_DIR (when non-empty) overrides the session directory,
        # e.g. for local debugging outside the agent container.
        debug_app_dir = os.environ.get("DEBUG_APP_DIR", "")
        if debug_app_dir != "":
            self._session_dir = debug_app_dir
        mkdir(self.data_dir)

        # Low-level agent API channel; every request is tagged with the
        # task id via gRPC-style metadata.
        self.api = AgentAPI(token=self.agent_token,
                            server_address=self.server_address,
                            ext_logger=self.logger)
        self.api.add_to_metadata('x-task-id', str(self.task_id))

        self.callbacks = {}
        self.processing_queue = queue.Queue()  #(maxsize=self.QUEUE_MAX_SIZE)
        self.logger.debug('App is created',
                          extra={
                              "task_id": self.task_id,
                              "server_address": self.server_address
                          })

        self._ignore_stop_for_debug = False
        self._error = None
        self.stop_event = asyncio.Event()

        # Thread pool for blocking work plus an event loop that shuts down
        # gracefully on the usual termination signals.
        self.executor = concurrent.futures.ThreadPoolExecutor()
        self.loop = asyncio.get_event_loop()
        # May want to catch other signals too
        signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT,
                   signal.SIGQUIT)
        for s in signals:
            # Bind s as a default so each handler captures its own signal
            # (avoids the late-binding closure pitfall).
            self.loop.add_signal_handler(
                s, lambda s=s: asyncio.create_task(self._shutdown(signal=s)))
        # comment out the line below to see how unhandled exceptions behave
        self.loop.set_exception_handler(self.handle_exception)
Ejemplo n.º 7
0
 def _create(self):
     """Create the annotation and image directories for this dataset."""
     for directory in (self.ann_dir, self.img_dir):
         mkdir(directory)
Ejemplo n.º 8
0
from supervisely_lib.io.fs import mkdir

my_app = sly.AppService()
api: sly.Api = my_app.public_api

# Task/user/workspace context supplied by the agent environment.
task_id = os.environ["TASK_ID"]
# NOTE(review): user_id is passed to get_info_by_id as a string while the
# other ids below are wrapped in int() -- confirm the API accepts both.
user_id = os.environ["context.userId"]
team_id = int(os.environ['context.teamId'])
workspace_id = int(os.environ['context.workspaceId'])
project_id = int(os.environ['context.projectId'])

user = api.user.get_info_by_id(user_id)
project = api.project.get_info_by_id(project_id)
meta_json = api.project.get_meta(project_id)
meta = sly.ProjectMeta.from_json(meta_json)


# Working directories inside the app data dir.
storage_dir = os.path.join(my_app.data_dir, "storage_dir")
coco_base_dir = os.path.join(storage_dir, project.name)
sly_base_dir = os.path.join(storage_dir, "sly_base_dir")

# Second mkdir argument wipes previous content of storage_dir.
mkdir(storage_dir, True)
mkdir(coco_base_dir)
mkdir(sly_base_dir)


# Export mode flags (only object detection is currently enabled).
isObjectDetection = True
isStuffSegmentation = False
# isKeypointDetection = False
# isPanopticSegmentation = False
# isImageCaptioning = False
PROJECT_ID = int(os.environ["modal.state.slyProjectId"])

LOG_LEVEL = str(os.environ["LOG_LEVEL"])

OPTIONS = os.environ['modal.state.Options']
BATCH_SIZE = int(os.environ['modal.state.batchSize'])

# Modal state serializes the dataset list with single quotes; normalize to
# valid JSON before parsing.
SELECTED_DATASETS = json.loads(os.environ["modal.state.selectedDatasets"].replace("'", '"'))
ALL_DATASETS = os.getenv("modal.state.allDatasets").lower() in ('true', '1', 't')
if ALL_DATASETS:
    SELECTED_DATASETS = [dataset.name for dataset in api.dataset.get_list(PROJECT_ID)]

need_download_threshold = 0.15

# NOTE(review): "sly_base_sir" looks like a typo for "sly_base_dir"; the
# directory name is internal so behavior is unaffected -- confirm intent.
storage_dir = os.path.join(my_app.data_dir, "sly_base_sir")
mkdir(storage_dir, True)
video_dir = os.path.join(storage_dir, "video")
mkdir(video_dir)
img_dir = os.path.join(storage_dir, "images")
mkdir(img_dir)
# Validate the source project: it must exist and contain videos.
project = api.project.get_info_by_id(PROJECT_ID)
if project is None:
    # BUG FIX: the original formatted project.name here, which raises
    # AttributeError when project is None; report the known id instead.
    raise RuntimeError("Project id={!r} not found".format(PROJECT_ID))
if project.type != str(sly.ProjectType.VIDEOS):
    raise TypeError("Project type is {!r}, but have to be {!r}".format(project.type, sly.ProjectType.VIDEOS))

# Project meta (classes/tags) needed to deserialize video annotations.
meta_json = api.project.get_meta(project.id)
meta = sly.ProjectMeta.from_json(meta_json)

if "object_id" not in [tag.name for tag in meta.tag_metas]:
def convert_project(dest_dir, result_dir, app_logger):
    """Convert a downloaded Supervisely video project in *dest_dir* into a
    MOT-style layout under *result_dir*.

    For each video: writes a seqinfo.ini, per-class gt_<class>.txt ground
    truth (videos with figures go to the "train" split, the rest to "test"),
    and extracts every frame as a numbered image.
    """
    datasets_paths = glob(dest_dir + "/*/")
    if len(datasets_paths) == 0:
        g.logger.warn('There are no datasets in project')

    meta_json = sly.json.load_json_file(os.path.join(dest_dir, 'meta.json'))
    meta = sly.ProjectMeta.from_json(meta_json)
    for ds_path in datasets_paths:
        ds_name = ds_path.split('/')[-2]
        anns_paths = glob(ds_path + "ann" + "/*")
        progress = sly.Progress('Processing Video', len(anns_paths), app_logger)
        for ann_path in anns_paths:
            ann_json = sly.json.load_json_file(ann_path)
            ann = sly.VideoAnnotation.from_json(ann_json, meta)
            video_name = sly.io.fs.get_file_name(ann_path)
            video_path = os.path.join(ds_path, "video", video_name)
            video_info = sly.video.get_info(video_path)['streams'][0]

            curr_objs_geometry_types = [obj.obj_class.geometry_type for obj in ann.objects]
            if os.environ['modal.state.shapes'] == "false" and Rectangle not in curr_objs_geometry_types:
                g.logger.warn('Video {} does not contain figures with shape Rectangle'.format(video_name))
                continue

            # Annotated videos form the "train" split (with a gt dir), the
            # rest go to "test". The two mutually exclusive ifs of the
            # original are now an explicit if/else, and the shared per-video
            # directory is computed once.
            result_anns = None
            if len(ann.figures) > 0:
                video_dir = os.path.join(result_dir, ds_name, "train", get_file_name(video_name))
                result_anns = os.path.join(video_dir, g.ann_dir_name)
                mkdir(result_anns)
            else:
                video_dir = os.path.join(result_dir, ds_name, "test", get_file_name(video_name))
            result_images = os.path.join(video_dir, g.images_dir_name)
            seq_path = os.path.join(video_dir, g.seq_name)
            mkdir(result_images)

            with open(seq_path, 'a') as f:
                f.write('[Sequence]\n')
                f.write('name={}\n'.format(get_file_name(video_name)))
                f.write('imDir={}\n'.format(g.images_dir_name))
                f.write('frameRate={}\n'.format(round(1 / video_info['framesToTimecodes'][1])))
                f.write('seqLength={}\n'.format(video_info['framesCount']))
                f.write('imWidth={}\n'.format(video_info['width']))
                f.write('imHeight={}\n'.format(video_info['height']))
                f.write('imExt={}\n'.format(g.image_ext))

            # MOT object ids are 1-based indices over the video objects.
            id_to_video_obj = {}
            for idx, curr_video_obj in enumerate(ann.objects):
                id_to_video_obj[curr_video_obj] = idx + 1

            for frame_index, frame in enumerate(ann.frames):
                for figure in frame.figures:
                    if os.environ['modal.state.shapes'] == "false" and figure.video_object.obj_class.geometry_type != Rectangle:
                        continue

                    rectangle_geom = figure.geometry.to_bbox()
                    left = rectangle_geom.left
                    top = rectangle_geom.top
                    width = rectangle_geom.width
                    height = rectangle_geom.height
                    # conf flag drops to 0 when the object carries the
                    # "ignore" tag covering this frame.
                    conf_val = 1
                    for curr_tag in figure.video_object.tags:
                        if g.conf_tag_name == curr_tag.name and (
                                curr_tag.frame_range is None or frame_index in range(curr_tag.frame_range[0],
                                                                                     curr_tag.frame_range[1] + 1)):
                            conf_val = 0
                    curr_gt_data = '{},{},{},{},{},{},{},{},{},{}\n'.format(frame_index + 1,
                                                                            id_to_video_obj[figure.video_object],
                                                                            left, top, width - 1, height - 1,
                                                                            conf_val, -1, -1, -1)
                    filename = 'gt_{}.txt'.format(figure.parent_object.obj_class.name)
                    with open(os.path.join(result_anns, filename), 'a') as f:  # gt_path
                        f.write(curr_gt_data)
                if frame_index == ann.frames_count:
                    break

            # Extract all frames as zero-padded numbered images.
            vidcap = cv2.VideoCapture(video_path)
            try:
                success, image = vidcap.read()
                count = 1
                while success:
                    image_name = str(count).zfill(6) + g.image_ext
                    image_path = os.path.join(result_images, image_name)
                    cv2.imwrite(image_path, image)
                    success, image = vidcap.read()
                    count += 1
            finally:
                # BUG FIX: the original never released the capture handle,
                # leaking an OS-level file descriptor per video.
                vidcap.release()

            progress.iter_done_report()
Ejemplo n.º 11
0
 def _prepare_next_dir(self):
     """Advance the counter and create the next zero-padded output subdir."""
     self._idx += 1
     next_name = '{:08}'.format(self._idx)
     self._subdir = next_name
     self._odir = os.path.join(self._base_out_dir, next_name)
     sly_fs.mkdir(self._odir)
Ejemplo n.º 12
0
 def _create(self):
     '''
     Create leaf directories (and all intermediate ones) for items and
     annotations.
     '''
     for directory in (self.ann_dir, self.item_dir):
         mkdir(directory)
Ejemplo n.º 13
0
def export_coco(api: sly.Api, task_id, context, state, app_logger):
    """Export every dataset of the current project to COCO "instances" format.

    Downloads images into per-dataset folders, converts annotations via
    convert_geometry, and writes one instances_<dataset>.json per dataset
    into a shared 'annotations' directory, then stops the app.
    """
    ann_dir = os.path.join(g.coco_base_dir, 'annotations')
    mkdir(ann_dir)
    # The conversion meta is the same for every dataset/batch; build it once
    # (the original rebuilt it per batch).
    meta = convert_geometry.prepare_meta(g.meta)

    datasets = [ds for ds in api.dataset.get_list(g.project_id)]
    for dataset in datasets:
        coco_dataset_dir = os.path.join(g.coco_base_dir, dataset.name)
        mkdir(coco_dataset_dir)

        # BUG FIX: the accumulator must be created once per dataset, outside
        # the batch loop. The original re-created it inside the loop, so the
        # final dump kept only the last batch (and an empty dataset raised
        # NameError at dump time).
        data = dict(
            info=dict(
                description=None,
                url=None,
                version=1.0,
                year=dataset.created_at[:4],
                contributor=g.user.name,
                date_created=dataset.created_at,
            ),
            licenses=[dict(
                url=None,
                id=0,
                name=None,
            )],
            images=[],       # license, url, file_name, height, width, date_captured, id
            type="instances",
            annotations=[],  # segmentation, area, iscrowd, image_id, bbox, category_id, id
            categories=[],   # supercategory, id, name
        )

        images = api.image.get_list(dataset.id)
        for batch in sly.batched(images):
            image_ids = [image_info.id for image_info in batch]
            image_paths = [
                os.path.join(coco_dataset_dir, image_info.name)
                for image_info in batch
            ]
            api.image.download_paths(dataset.id, image_ids, image_paths)

            ann_infos = api.annotation.download_batch(dataset.id, image_ids)
            anns = [
                sly.Annotation.from_json(x.annotation, g.meta)
                for x in ann_infos
            ]
            new_anns = [
                convert_geometry.convert_annotation(ann, meta) for ann in anns
            ]

            for image_info, ann in zip(batch, new_anns):
                data["images"].append(
                    dict(
                        license=None,
                        url=image_info.full_storage_url,  # coco_url, flickr_url
                        # BUG FIX: COCO requires the key "file_name";
                        # the original wrote "filename".
                        file_name=image_info.name,
                        height=image_info.height,
                        width=image_info.width,
                        date_captured=image_info.created_at,
                        id=image_info.id,
                    ))

                for label in ann.labels:
                    # Flatten [[x, y], ...] exterior points to [x, y, x, y, ...].
                    exterior = label.geometry.to_json()["points"]["exterior"]
                    segmentation = [
                        coord for point in exterior for coord in point
                    ]

                    bbox_points = label.geometry.to_bbox().to_json(
                    )["points"]["exterior"]
                    x, y, max_x, max_y = [
                        coord for point in bbox_points for coord in point
                    ]
                    bbox = (x, y, max_x - x, max_y - y)

                    data["annotations"].append(
                        dict(
                            segmentation=[segmentation],
                            area=label.geometry.area,  # wrong?
                            iscrowd=0,
                            image_id=image_info.id,
                            bbox=bbox,
                            category_id=None,
                            id=None,  # label.id?
                        ))

                    # NOTE(review): a category entry is appended for every
                    # label, so duplicates accumulate and id stays None --
                    # consider building categories once from the project meta.
                    data["categories"].append(
                        dict(supercategory=None,
                             id=None,
                             name=label.obj_class.name))

        dump_json_file(data,
                       os.path.join(ann_dir, f"instances_{dataset.name}.json"))
    g.my_app.stop()
import supervisely_lib as sly
from supervisely_lib.io.fs import mkdir

my_app = sly.AppService()
api: sly.Api = my_app.public_api

# Task context supplied by the agent environment / modal state.
TEAM_ID = int(os.environ['context.teamId'])
WORKSPACE_ID = int(os.environ['context.workspaceId'])
PROJECT_ID = int(os.environ['modal.state.slyProjectId'])
TASK_ID = int(os.environ["TASK_ID"])

# MOT layout constants: image folder, ground-truth folder, sequence info file.
images_dir_name = 'img1'
ann_dir_name = 'gt'
image_ext = '.jpg'
seq_name = 'seqinfo.ini'
# Objects tagged with this name get confidence 0 in the gt files.
conf_tag_name = 'ignore_conf'
logger = sly.logger

project = api.project.get_info_by_id(PROJECT_ID)
project_name = project.name
archive_name = '{}_{}_{}.tar.gz'.format(TASK_ID, PROJECT_ID, project_name)
result_archive = os.path.join(my_app.data_dir, archive_name)

# Working directories for the exporter; storage_dir is wiped on start.
storage_dir = os.path.join(my_app.data_dir, "mot_exporter")
mot_base_dir = os.path.join(storage_dir, "mot_base_dir")
sly_base_dir = os.path.join(storage_dir, "supervisely")

mkdir(storage_dir, remove_content_if_exists=True)
mkdir(sly_base_dir)
mkdir(mot_base_dir)
Ejemplo n.º 15
0
 def _create(self):
     """Make the on-disk directories that hold annotations and items."""
     for path in (self.ann_dir, self.item_dir):
         mkdir(path)