Example #1
    def merge_metas(self, src_project_id, dst_project_id):
        if src_project_id == dst_project_id:
            return self.get_meta(src_project_id)

        src_meta = ProjectMeta.from_json(self.get_meta(src_project_id))
        dst_meta = ProjectMeta.from_json(self.get_meta(dst_project_id))

        new_dst_meta = src_meta.merge(dst_meta)
        new_dst_meta_json = new_dst_meta.to_json()
        self.update_meta(dst_project_id, new_dst_meta_json)

        return new_dst_meta_json
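A minimal usage sketch for this method, assuming it is exposed on `api.project` as in the Supervisely SDK; the address, token, and project ids below are placeholders:

import supervisely_lib as sly

# Hypothetical usage: the address, token, and project ids are placeholders.
api = sly.Api('https://app.supervise.ly', 'your-api-token')
merged_meta_json = api.project.merge_metas(src_project_id=111, dst_project_id=222)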
Example #2
    def run_inference(self):
        progress_report_thread = Thread(target=progress_report_thread_fn,
                                        args=(self._in_project,
                                              self._progress_report_queue),
                                        daemon=True)
        progress_report_thread.start()

        feed_status = populate_inference_requests_queue(
            self._in_project, self._inference_processes,
            self._inference_request_queue)
        for _ in self._inference_processes:
            self._inference_request_queue.put(None)

        out_meta_json = self._result_meta_queue.get()
        self._out_project.set_meta(ProjectMeta.from_json(out_meta_json))

        for p in self._inference_processes:
            p.join()

        if not feed_status or not all(p.exitcode == 0
                                      for p in self._inference_processes):
            raise RuntimeError(
                'One of the inference processes encountered an error.')

        self._progress_report_queue.put(None)
        progress_report_thread.join()
        report_inference_finished()
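The shutdown handshake above relies on None sentinels: one per worker process is enqueued, so each worker sees exactly one sentinel and exits its loop. A self-contained sketch of the same pattern, with illustrative names not taken from the snippet:

import multiprocessing

def worker(request_queue):
    # Consume items until the None sentinel arrives, then exit.
    while True:
        item = request_queue.get()
        if item is None:
            break
        print('processing', item)

if __name__ == '__main__':
    q = multiprocessing.Queue()
    workers = [multiprocessing.Process(target=worker, args=(q,)) for _ in range(2)]
    for p in workers:
        p.start()
    for item in range(5):
        q.put(item)
    for _ in workers:
        q.put(None)  # one sentinel per worker, mirroring the loop above
    for p in workers:
        p.join()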
Example #3
def _download_project_optimized(api: Api,
                                project_id,
                                project_dir,
                                datasets_whitelist=None,
                                cache=None,
                                progress_cb=None):
    project_info = api.project.get_info_by_id(project_id)
    project_id = project_info.id
    logger.info(
        "Annotations are not cached (always download latest version from server)")
    project_fs = Project(project_dir, OpenMode.CREATE)
    meta = ProjectMeta.from_json(api.project.get_meta(project_id))
    project_fs.set_meta(meta)
    for dataset_info in api.dataset.get_list(project_id):
        dataset_name = dataset_info.name
        dataset_id = dataset_info.id
        need_download = (datasets_whitelist is None
                         or dataset_id in datasets_whitelist)
        if need_download:
            dataset = project_fs.create_dataset(dataset_name)
            _download_dataset(api,
                              dataset,
                              dataset_id,
                              cache=cache,
                              progress_cb=progress_cb)
Example #4
def single_inference_process_fn(inference_initializer, inference_mode_config,
                                in_project_meta_json, request_queue,
                                response_queue):
    """Loads a separate model, processes requests from request_queue, results go to result_queue.

    None request signals the process to finish.
    """
    single_image_inference = inference_initializer()
    inference_mode = InferenceModeFactory.create(
        inference_mode_config, ProjectMeta.from_json(in_project_meta_json),
        single_image_inference)
    out_meta_json = inference_mode.out_meta.to_json()

    req = ''
    while req is not None:
        req = request_queue.get()
        if req is not None:
            in_img = sly_image.read(req.item_paths.img_path)
            in_ann = Annotation.load_json_file(req.item_paths.ann_path,
                                               inference_mode.out_meta)
            ann = inference_mode.infer_annotate(in_img, in_ann)
            resp = InferenceResponse(ds_name=req.ds_name,
                                     item_name=req.item_name,
                                     item_paths=req.item_paths,
                                     ann_json=ann.to_json(),
                                     meta_json=out_meta_json)
            response_queue.put(resp)
        request_queue.task_done()
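Since the loop calls request_queue.task_done(), the request queue is presumably a multiprocessing.JoinableQueue. A hedged sketch of how a parent process might wire this worker up; the initializer, mode config, and meta JSON below are hypothetical placeholders:

import multiprocessing

# Hypothetical placeholders standing in for the real initializer, mode config, and meta JSON.
def my_initializer():
    return None

mode_config = {}
in_meta_json = {}

request_queue = multiprocessing.JoinableQueue()
response_queue = multiprocessing.Queue()

worker = multiprocessing.Process(
    target=single_inference_process_fn,
    args=(my_initializer, mode_config, in_meta_json, request_queue, response_queue),
    daemon=True)
worker.start()
# ... enqueue InferenceRequest-like items here ...
request_queue.put(None)  # sentinel: tells the worker to finish
request_queue.join()     # returns once task_done() has been called for every item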
Example #5
def download_project(api, project_id, dest_dir, dataset_ids=None, log_progress=False, batch_size=10):
    dataset_ids = set(dataset_ids) if (dataset_ids is not None) else None
    project_fs = Project(dest_dir, OpenMode.CREATE)
    meta = ProjectMeta.from_json(api.project.get_meta(project_id))
    project_fs.set_meta(meta)

    for dataset_info in api.dataset.get_list(project_id):
        dataset_id = dataset_info.id
        if dataset_ids is not None and dataset_id not in dataset_ids:
            continue

        dataset_fs = project_fs.create_dataset(dataset_info.name)
        images = api.image.get_list(dataset_id)

        ds_progress = None
        if log_progress:
            ds_progress = Progress(
                'Downloading dataset: {!r}'.format(dataset_info.name), total_cnt=len(images))

        for batch in batched(images, batch_size):
            image_ids = [image_info.id for image_info in batch]
            image_names = [image_info.name for image_info in batch]

            # download images as raw bytes
            batch_imgs_bytes = api.image.download_bytes(dataset_id, image_ids)

            # download annotations in json format
            ann_infos = api.annotation.download_batch(dataset_id, image_ids)
            ann_jsons = [ann_info.annotation for ann_info in ann_infos]

            for name, img_bytes, ann in zip(image_names, batch_imgs_bytes, ann_jsons):
                dataset_fs.add_item_raw_bytes(name, img_bytes, ann)

            if log_progress:
                ds_progress.iters_done_report(len(batch))
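A typical call, assuming an authenticated Api instance; the ids and destination path are placeholders:

import supervisely_lib as sly

# Hypothetical usage: download two specific datasets with progress logging.
api = sly.Api('https://app.supervise.ly', 'your-api-token')
download_project(api, project_id=123, dest_dir='/tmp/my_project',
                 dataset_ids=[456, 789],  # or None to download every dataset
                 log_progress=True)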
Example #6
def download_video_project(api, project_id, dest_dir, dataset_ids=None, download_videos=True, log_progress=False):
    '''
    Download the video project with the given id to the destination directory.
    :param api: Api class object
    :param project_id: int
    :param dest_dir: str
    :param dataset_ids: list of integers
    :param download_videos: bool
    :param log_progress: bool
    '''
    LOG_BATCH_SIZE = 1

    key_id_map = KeyIdMap()

    project_fs = VideoProject(dest_dir, OpenMode.CREATE)

    meta = ProjectMeta.from_json(api.project.get_meta(project_id))
    project_fs.set_meta(meta)

    datasets_infos = []
    if dataset_ids is not None:
        for ds_id in dataset_ids:
            datasets_infos.append(api.dataset.get_info_by_id(ds_id))
    else:
        datasets_infos = api.dataset.get_list(project_id)

    for dataset in datasets_infos:
        dataset_fs = project_fs.create_dataset(dataset.name)
        videos = api.video.get_list(dataset.id)

        ds_progress = None
        if log_progress:
            ds_progress = Progress('Downloading dataset: {!r}'.format(dataset.name), total_cnt=len(videos))
        for batch in batched(videos, batch_size=LOG_BATCH_SIZE):
            video_ids = [video_info.id for video_info in batch]
            video_names = [video_info.name for video_info in batch]

            ann_jsons = api.video.annotation.download_bulk(dataset.id, video_ids)

            for video_id, video_name, ann_json in zip(video_ids, video_names, ann_jsons):
                if video_name != ann_json[ApiField.VIDEO_NAME]:
                    raise RuntimeError("Error in api.video.annotation.download_batch: broken order")

                video_file_path = dataset_fs.generate_item_path(video_name)
                if download_videos:
                    api.video.download_path(video_id, video_file_path)
                else:
                    touch(video_file_path)

                dataset_fs.add_item_file(video_name,
                                         video_file_path,
                                         ann=VideoAnnotation.from_json(ann_json, project_fs.meta, key_id_map),
                                         _validate_item=False)

            if log_progress:
                ds_progress.iters_done_report(len(batch))

    project_fs.set_key_id_map(key_id_map)
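With download_videos=False the loop only touch()es an empty placeholder file per video, so annotations can be fetched without the large video payloads. A hedged usage sketch; the ids and path are placeholders:

# Hypothetical usage (api as in the earlier sketch): fetch only annotations;
# video files become empty placeholders.
download_video_project(api, project_id=123, dest_dir='/tmp/videos_anns_only',
                       download_videos=False, log_progress=True)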
Example #7
    def merge_metas(self, src_project_id, dst_project_id):
        '''
        Merge metadata from the given source project into the given destination project.
        :param src_project_id: int
        :param dst_project_id: int
        :return: merged project meta information (json)
        '''
        if src_project_id == dst_project_id:
            return self.get_meta(src_project_id)

        src_meta = ProjectMeta.from_json(self.get_meta(src_project_id))
        dst_meta = ProjectMeta.from_json(self.get_meta(dst_project_id))

        new_dst_meta = src_meta.merge(dst_meta)
        new_dst_meta_json = new_dst_meta.to_json()
        self.update_meta(dst_project_id, new_dst_meta_json)

        return new_dst_meta_json
Example #8
    def _read(self):
        meta_json = load_json_file(self._get_project_meta_path())
        self._meta = ProjectMeta.from_json(meta_json)

        possible_datasets = get_subdirs(self.directory)
        for ds_name in possible_datasets:
            current_dataset = Dataset(os.path.join(self.directory, ds_name),
                                      OpenMode.READ)
            self._datasets = self._datasets.add(current_dataset)

        if self.total_items == 0:
            raise RuntimeError('Project is empty')
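_read() is presumably invoked when a project is opened in read mode. A hedged usage sketch; the directory path is a placeholder:

# Hypothetical usage: opening in READ mode triggers _read() internally.
project = Project('/path/to/project_dir', OpenMode.READ)
print(project.meta)
print(project.total_items)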
Example #9
    def _read(self):
        '''
        Read the project from the given project directory. Checks that item and annotation directories exist and that the datasets are not empty.
        Consistency checks: every image must have an annotation, and the correspondence must be one to one.
        '''
        meta_json = load_json_file(self._get_project_meta_path())
        self._meta = ProjectMeta.from_json(meta_json)

        possible_datasets = get_subdirs(self.directory)
        for ds_name in possible_datasets:
            current_dataset = self.dataset_class(os.path.join(self.directory, ds_name), OpenMode.READ)
            self._datasets = self._datasets.add(current_dataset)

        if self.total_items == 0:
            raise RuntimeError('Project is empty')
Example #10
def single_inference_process_fn(inference_initializer, inference_mode_config,
                                in_project_meta_json, request_queue,
                                result_meta_queue, progress_queue, project):
    """Loads a separate model, processes requests from request_queue, results go to result_queue.

    None request signals the process to finish.
    """
    single_image_inference = inference_initializer()
    inference_mode = InferenceModeFactory.create(
        inference_mode_config, ProjectMeta.from_json(in_project_meta_json),
        single_image_inference)

    project_meta_sent = False
    req = ''
    while req is not None:
        req = request_queue.get()
        if req is not None:
            # Send the resulting project meta to the parent process to make sure we only write the meta JSON once.
            if not project_meta_sent:
                try:
                    result_meta_queue.put(inference_mode.out_meta.to_json(),
                                          block=False)
                except queue.Full:
                    pass
            project_meta_sent = True

            in_ann = Annotation.load_json_file(req.item_paths.ann_path,
                                               inference_mode.out_meta)
            ann = inference_mode.infer_annotate_image_file(
                req.item_paths.img_path, in_ann)
            out_dataset = project.datasets.get(req.ds_name)
            out_dataset.add_item_file(req.item_name,
                                      req.item_paths.img_path,
                                      ann=ann,
                                      _validate_img=False,
                                      _use_hardlink=True)
            progress_queue.put(1)
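The queue.Full handling above only makes sense if result_meta_queue is bounded, so whichever worker publishes first wins and the rest silently skip re-sending the meta (the excerpt also presumes `import queue` at module level for the queue.Full exception). A sketch of the wiring under that assumption:

import multiprocessing

# Assumption: maxsize=1, so only the first worker's non-blocking put() succeeds;
# later workers hit queue.Full and skip re-sending the project meta.
result_meta_queue = multiprocessing.Queue(maxsize=1)
progress_queue = multiprocessing.Queue()
# The parent then reads the meta exactly once, e.g.:
# out_meta_json = result_meta_queue.get()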
Example #11
def result_writer_thread_fn(in_project, inference_result_queue):
    """Gets inference result annotations from the queue and writes them to the output dataset.

    None result signals the thread to finish.
    """

    out_project = None
    progress_bar = Progress('Model applying: ', in_project.total_items)
    resp = ''
    while resp is not None:
        resp = inference_result_queue.get()
        if resp is not None:
            if out_project is None:
                out_dir = os.path.join(TaskPaths.RESULTS_DIR, in_project.name)
                out_project = Project(out_dir, OpenMode.CREATE)
                out_project.set_meta(ProjectMeta.from_json(resp.meta_json))
            out_dataset = out_project.datasets.get(resp.ds_name)
            if out_dataset is None:
                out_dataset = out_project.create_dataset(resp.ds_name)
            out_dataset.add_item_file(resp.item_name,
                                      resp.item_paths.img_path,
                                      ann=resp.ann_json)
            progress_bar.iter_done_report()
        inference_result_queue.task_done()
Example #12
def _in_project_meta_from_msg(in_msg):
    pr_meta_json = in_msg.get('meta')
    return ProjectMeta.from_json(
        pr_meta_json) if pr_meta_json is not None else None
Example #13
def convert():
    task_settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    in_datasets = find_input_datasets()

    convert_options = task_settings['options']

    logo_file_name = "logo.png"

    pr = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR,
                     task_settings['res_names']['project']),
        sly.OpenMode.CREATE)
    # Set meta file to FSOCO template at end to avoid being set to default-initialized ProjectMeta
    meta_json = load_json_file("meta.json")
    pr.set_meta(ProjectMeta.from_json(meta_json))
    sly.logger.info('Set project meta file to FSOCO template.')
    for ds_name, img_paths in in_datasets.items():
        # Read watermark logo image
        logo_path = [
            path for path in img_paths if path.endswith(logo_file_name)
        ]
        if len(logo_path) != 1:
            sly.logger.error(
                "You either have no logo in the project directory or more than one."
            )
            sly.logger.info("Got following logo paths: {}".format(logo_path))
            return 1
        logo_path = logo_path.pop()
        logo_img = cv2.imread(logo_path)
        if logo_img is None:
            sly.logger.error(
                "Couldn't load logo image with path: {}. Please make sure your logo has the right file format."
                .format(logo_path))
            return 1
        # Filter out logo file to avoid adding it to the dataset
        img_paths = [
            path for path in img_paths if not path.endswith(logo_file_name)
        ]
        sly.logger.info(
            'Found {} files with supported image extensions in Dataset {!r}.'.
            format(len(img_paths), ds_name))
        ds = pr.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name),
                                len(img_paths))
        for img_path in img_paths:
            try:
                # Watermark with the last-modified time; fall back to creation time.
                watermark_date = time.ctime(os.path.getmtime(img_path))
                if not watermark_date:
                    watermark_date = time.ctime(os.path.getctime(img_path))
                item_name = os.path.basename(img_path)

                img = cv2.imread(img_path)
                if img is None:
                    sly.logger.error(
                        "Couldn't load image with path: {}. Please make sure your images can be read by cv2.imread()."
                        .format(img_path))
                    break

                img = watermark(img, logo_img, watermark_date)
                cv2.imwrite(img_path, img)

                ds.add_item_file(item_name, img_path, _use_hardlink=True)
            except Exception as e:
                exc_str = str(e)
                sly.logger.warn(
                    'Input sample skipped due to error: {}'.format(exc_str),
                    exc_info=True,
                    extra={
                        'exc_str': exc_str,
                        'dataset_name': ds_name,
                        'image_name': img_path,
                    })
            progress.iter_done_report()

    if pr.total_items == 0:
        raise RuntimeError(
            'Result project is empty! All input images have an unsupported format.'
        )
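The watermark() helper is not shown in this excerpt. A minimal sketch of what such a function might do, assuming the watermark(img, logo_img, date_str) call signature used above; the placement and blending choices are entirely illustrative:

import cv2

def watermark(img, logo_img, date_str):
    # Illustrative choice: scale the logo to roughly 15% of the image width.
    scale = 0.15 * img.shape[1] / logo_img.shape[1]
    logo = cv2.resize(logo_img, None, fx=scale, fy=scale)
    h, w = logo.shape[:2]
    # Blend the logo into the bottom-left corner.
    roi = img[-h:, :w]
    img[-h:, :w] = cv2.addWeighted(roi, 0.5, logo, 0.5, 0)
    # Stamp the date string in the bottom-right corner.
    cv2.putText(img, date_str, (max(0, img.shape[1] - 320), img.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
    return img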
Example #14
def download_pointcloud_project(api, project_id, dest_dir, dataset_ids=None, download_items=True, log_progress=False):
    LOG_BATCH_SIZE = 1

    key_id_map = KeyIdMap()

    project_fs = PointcloudProject(dest_dir, OpenMode.CREATE)

    meta = ProjectMeta.from_json(api.project.get_meta(project_id))
    project_fs.set_meta(meta)

    datasets_infos = []
    if dataset_ids is not None:
        for ds_id in dataset_ids:
            datasets_infos.append(api.dataset.get_info_by_id(ds_id))
    else:
        datasets_infos = api.dataset.get_list(project_id)

    for dataset in datasets_infos:
        dataset_fs = project_fs.create_dataset(dataset.name)
        pointclouds = api.pointcloud.get_list(dataset.id)

        ds_progress = None
        if log_progress:
            ds_progress = Progress('Downloading dataset: {!r}'.format(dataset.name), total_cnt=len(pointclouds))
        for batch in batched(pointclouds, batch_size=LOG_BATCH_SIZE):
            pointcloud_ids = [pointcloud_info.id for pointcloud_info in batch]
            pointcloud_names = [pointcloud_info.name for pointcloud_info in batch]

            ann_jsons = api.pointcloud.annotation.download_bulk(dataset.id, pointcloud_ids)

            for pointcloud_id, pointcloud_name, ann_json in zip(pointcloud_ids, pointcloud_names, ann_jsons):
                if pointcloud_name != ann_json[ApiField.NAME]:
                    raise RuntimeError("Error in api.video.annotation.download_batch: broken order")

                pointcloud_file_path = dataset_fs.generate_item_path(pointcloud_name)
                if download_items:
                    api.pointcloud.download_path(pointcloud_id, pointcloud_file_path)

                    related_images_path = dataset_fs.get_related_images_path(pointcloud_name)
                    related_images = api.pointcloud.get_list_related_images(pointcloud_id)
                    for rimage_info in related_images:
                        name = rimage_info[ApiField.NAME]
                        rimage_id = rimage_info[ApiField.ID]

                        path_img = os.path.join(related_images_path, name)
                        path_json = os.path.join(related_images_path, name + ".json")

                        api.pointcloud.download_related_image(rimage_id, path_img)
                        dump_json_file(rimage_info, path_json)

                else:
                    touch(pointcloud_file_path)

                dataset_fs.add_item_file(pointcloud_name,
                                         pointcloud_file_path,
                                         ann=PointcloudAnnotation.from_json(ann_json, project_fs.meta, key_id_map),
                                         _validate_item=False)

            if log_progress:
                ds_progress.iters_done_report(len(batch))

    project_fs.set_key_id_map(key_id_map)
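As with the video variant above, a hedged usage sketch (ids and path are placeholders). Note that related camera images and their JSON metadata are only fetched when download_items=True:

# Hypothetical usage (api as in the earlier sketch): download point clouds
# together with their related images and annotations.
download_pointcloud_project(api, project_id=123, dest_dir='/tmp/pcd_project',
                            dataset_ids=None, download_items=True, log_progress=True)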