Example #1
 def _load_task_model_config():
     raw_task_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)
     raw_task_config = maybe_convert_from_deploy_task_config(
         raw_task_config)
     task_config = maybe_convert_from_v1_inference_task_config(
         raw_task_config)
     return task_config[MODEL]
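
Every snippet below funnels file access through a load_json_file helper. For reference, a minimal sketch of what such a helper presumably does, assuming a plain UTF-8 JSON file on disk (the real utility may add error handling):

import json

def load_json_file(path):
    # Parse a JSON document from disk and return the resulting object.
    with open(path, encoding="utf-8") as f:
        return json.load(f)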
Example #2
    def __init__(self, schema_fpath):
        vtor_class = _extend_with_default(Draft4Validator)
        schemas = load_json_file(schema_fpath)
        # Detach common definitions from the named schemas and inline them into every schema.
        definitions = schemas.pop(self.DEFINITIONS, {})
        for name, schema in schemas.items():
            schema.setdefault(self.DEFINITIONS, {}).update(definitions)

        self._validators = {name: vtor_class(schema) for name, schema in schemas.items()}
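
_extend_with_default is not shown in Example #2. A plausible sketch, assuming it follows the well-known recipe from the jsonschema documentation for filling in schema defaults during validation:

from jsonschema import Draft4Validator, validators

def _extend_with_default(validator_class):
    # Wrap the "properties" validator so that defaults declared in the
    # schema are written into the instance being validated.
    validate_properties = validator_class.VALIDATORS["properties"]

    def set_defaults(validator, properties, instance, schema):
        for prop, subschema in properties.items():
            if "default" in subschema:
                instance.setdefault(prop, subschema["default"])
        yield from validate_properties(validator, properties, instance, schema)

    return validators.extend(validator_class, {"properties": set_defaults})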
Example #3
 def load_json(cls, path):
     '''
     Load json data from the given path and convert it into a dict with bidict values
     :param path: str
     :return: dict
     '''
     simple_dict = load_json_file(path)
     result = cls()
     for key_type, value_dict in simple_dict.items():
         for key_str, id_ in value_dict.items():
             result._add(key_type, uuid.UUID(key_str), id_)
     return result
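
For illustration, the file this loader expects would mirror the nested dict it rebuilds (entity type -> stringified UUID -> integer id). A hypothetical parsed input, with made-up keys and ids:

key_id_map_json = {
    "objects": {  # hypothetical entity type
        "00000000-0000-0000-0000-000000000001": 101,
        "00000000-0000-0000-0000-000000000002": 102,
    },
}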
Example #4
    def upload_paths(self,
                     video_ids,
                     ann_paths,
                     project_meta,
                     progress_cb=None):
        # all video_ids must belong to the same dataset

        for video_id, ann_path in zip(video_ids, ann_paths):
            ann_json = load_json_file(ann_path)
            ann = VideoAnnotation.from_json(ann_json, project_meta)

            # ignore any existing key_id_map because new objects will be created
            self.append(video_id, ann)
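
A hypothetical call for Example #4, assuming the method is exposed on the video annotation API object (the ids and paths are illustrative):

video_ids = [101, 102, 103]  # videos from the same dataset
ann_paths = ["ann/v1.json", "ann/v2.json", "ann/v3.json"]
api.video.annotation.upload_paths(video_ids, ann_paths, project_meta)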
Example #5
 def get_related_images(self, item_name):
     results = []
     path = self.get_related_images_path(item_name)
     if dir_exists(path):
         files = list_files(path, SUPPORTED_IMG_EXTS)
         for file in files:
             img_meta_path = os.path.join(
                 path,
                 get_file_name_with_ext(file) + ".json")
             img_meta = {}
             if file_exists(img_meta_path):
                 img_meta = load_json_file(img_meta_path)
                 if img_meta[ApiField.NAME] != get_file_name_with_ext(file):
                     raise RuntimeError(
                         'Wrong format: name field contains wrong image path'
                     )
             results.append((file, img_meta))
     return results
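
Judging by the ApiField accesses here and in Examples #11 and #12, the sidecar JSON next to each related image carries at least name and meta fields. An illustrative parsed sidecar, with hypothetical values:

# Parsed contents of a hypothetical <image>.json sidecar
img_meta = {
    "name": "cam_front.png",
    "meta": {"timestamp": 1638349200},
}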
Example #6
def process_random_object_image(api):
    project_fs = sly.Project(g.project_dir, sly.OpenMode.READ)
    datasets = list(project_fs.datasets)
    dataset = choice(datasets)

    img_dir = os.path.join(dataset.directory, "img")
    ann_dir = os.path.join(dataset.directory, "ann")
    image_path = choice(
        [os.path.join(img_dir, img_name) for img_name in os.listdir(img_dir)])
    ann_path = os.path.join(ann_dir,
                            f"{get_file_name_with_ext(image_path)}.json")
    ann_json = load_json_file(ann_path)

    img = sly.image.read(image_path)
    ann = sly.Annotation.from_json(ann_json, g.project_meta)

    img_file_info = api.file.upload(g.team_id, image_path, remote_preview_path)
    return img_file_info, img, ann
Example #7
    def _read(self):
        if not dir_exists(self.item_dir):
            raise FileNotFoundError('Item directory not found: {!r}'.format(
                self.item_dir))

        item_paths = sorted(
            list_files(self.item_dir, filter_fn=self._has_valid_ext))
        item_names = sorted([os.path.basename(path) for path in item_paths])

        map_file_path = self.get_frame_pointcloud_map_path()
        if os.path.isfile(map_file_path):
            self._frame_to_pc_map = load_json_file(map_file_path)
        else:
            self._frame_to_pc_map = {
                frame_index: item_names[frame_index]
                for frame_index in range(len(item_names))
            }

        self._pc_to_frame = {v: k for k, v in self._frame_to_pc_map.items()}
        self._item_to_ann = {
            name: self._pc_to_frame[name]
            for name in item_names
        }
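
The map file in Example #7 is presumably a JSON object from frame index to point-cloud file name. An illustrative parsed map (file names are hypothetical); note that keys loaded from JSON are strings, whereas the fallback branch above produces integer keys:

# Parsed contents of a hypothetical frame_pointcloud_map.json
frame_to_pc_map = {
    "0": "frame_0000.pcd",
    "1": "frame_0001.pcd",
}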
Example #8
 def get_template_path(self):
     if self._template_path is not None:
         return self._template_path
     config_path = os.path.join(self.repo_dir,
                                os.environ.get("CONFIG_DIR", ""),
                                'config.json')
     if file_exists(config_path):
         config = load_json_file(config_path)
         self._template_path = config.get('gui_template', None)
         if self._template_path is None:
             self.logger.info(
                 "there is no gui_template field in config.json")
         else:
             self._template_path = os.path.join(self.repo_dir,
                                                self._template_path)
             if not file_exists(self._template_path):
                 self._template_path = os.path.join(
                     os.path.dirname(sys.argv[0]), 'gui.html')
     if self._template_path is None:
         self._template_path = os.path.join(os.path.dirname(sys.argv[0]),
                                            'gui.html')
     if file_exists(self._template_path):
         return self._template_path
     return None
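
For Example #8 to resolve a template, config.json only needs a gui_template entry pointing at a path inside the repo. A hypothetical parsed config (the path value is illustrative):

# Parsed contents of a hypothetical <repo_dir>/<CONFIG_DIR>/config.json
config = {
    "gui_template": "src/gui.html",
}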
Example #9
 def load_config(self):
     self.config = deepcopy(ModelDeploy.config)
     new_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)
     logger.info('Input config', extra={CONFIG: new_config})
     update_recursively(self.config, new_config)
     logger.info('Full config', extra={CONFIG: self.config})
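
update_recursively is not shown in Example #9. A plausible sketch, assuming it deep-merges the new config into the defaults in place (nested dicts are merged, everything else is overwritten):

def update_recursively(dest, src):
    # Merge src into dest in place: recurse into nested dicts,
    # overwrite scalar and list values.
    for key, value in src.items():
        if isinstance(value, dict) and isinstance(dest.get(key), dict):
            update_recursively(dest[key], value)
        else:
            dest[key] = value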
Example #10
def create_img_infos(project_fs):
    tag_id_map = {
        tag["name"]: tag["id"]
        for tag in project_fs.meta.tag_metas.to_json()
    }
    images_infos = []
    for dataset_fs in project_fs:
        img_info_dir = os.path.join(dataset_fs.directory, "img_info")
        mkdir(img_info_dir)
        for idx, item_name in enumerate(os.listdir(dataset_fs.item_dir)):
            item_ext = get_file_ext(item_name).lstrip(".")
            item_path = os.path.join(dataset_fs.item_dir, item_name)
            item = sly.image.read(item_path)
            h, w = item.shape[:2]
            item_size = os.path.getsize(item_path)
            item_stat = os.stat(item_path)
            created_at = datetime.fromtimestamp(
                item_stat.st_ctime,
                tz=timezone.utc).strftime("%d-%m-%Y %H:%M:%S")
            modified_at = datetime.fromtimestamp(
                item_stat.st_mtime,
                tz=timezone.utc).strftime("%d-%m-%Y %H:%M:%S")

            item_ann_path = os.path.join(dataset_fs.ann_dir,
                                         f"{item_name}.json")
            ann_json = load_json_file(item_ann_path)
            ann = sly.Annotation.from_json(ann_json, project_fs.meta)
            tags = ann.img_tags
            tags_json = tags.to_json()
            labels_count = len(ann.labels)

            tags_img_info = []
            for tag in tags_json:
                tag_info = {
                    "entityId": None,
                    "tagId": tag_id_map[tag["name"]],
                    "id": None,
                    "labelerLogin": tag["labelerLogin"],
                    "createdAt": tag["createdAt"],
                    "updatedAt": tag["updatedAt"],
                    "name": tag["name"]
                }
                tags_img_info.append(tag_info)

            item_img_info = {
                "id": idx,
                "name": item_name,
                "link": "",
                "hash": "",
                "mime": f"image/{item_ext}",
                "ext": item_ext,
                "size": item_size,
                "width": w,
                "height": h,
                "labels_count": labels_count,
                "dataset_id": dataset_fs.name,
                "created_at": created_at,
                "updated_at": modified_at,
                "meta": {},
                "path_original": "",
                "full_storage_url": "",
                "tags": tags_img_info
            }
            save_path = os.path.join(img_info_dir, f"{item_name}.json")
            dump_json_file(item_img_info, save_path)
            images_infos.append(item_img_info)
    return images_infos
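
A hypothetical invocation of Example #10, reading a local project and materializing one img_info/<item_name>.json per image (project_dir is illustrative):

project_fs = sly.Project(project_dir, sly.OpenMode.READ)
images_infos = create_img_infos(project_fs)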
Example #11
def upload_pointcloud_episode_project(directory,
                                      api,
                                      workspace_id,
                                      project_name=None,
                                      log_progress=False):
    # STEP 0 — create project remotely
    project_locally = PointcloudEpisodeProject.read_single(directory)
    project_name = project_locally.name if project_name is None else project_name

    if api.project.exists(workspace_id, project_name):
        project_name = api.project.get_free_name(workspace_id, project_name)

    project_remotely = api.project.create(workspace_id, project_name,
                                          ProjectType.POINT_CLOUD_EPISODES)
    api.project.update_meta(project_remotely.id,
                            project_locally.meta.to_json())

    uploaded_objects = KeyIdMap()
    for dataset_locally in project_locally.datasets:
        ann_json_path = dataset_locally.get_ann_path()

        if os.path.isfile(ann_json_path):
            ann_json = load_json_file(ann_json_path)
            episode_annotation = PointcloudEpisodeAnnotation.from_json(
                ann_json, project_locally.meta)
        else:
            episode_annotation = PointcloudEpisodeAnnotation()

        dataset_remotely = api.dataset.create(
            project_remotely.id,
            dataset_locally.name,
            description=episode_annotation.description,
            change_name_if_conflict=True)

        # STEP 1 — upload episodes
        items_infos = {'names': [], 'paths': [], 'metas': []}

        for item_name in dataset_locally:
            item_path, related_images_dir = dataset_locally.get_item_paths(
                item_name)
            frame_idx = dataset_locally.get_frame_idx(item_name)

            item_meta = {"frame": frame_idx}

            items_infos['names'].append(item_name)
            items_infos['paths'].append(item_path)
            items_infos['metas'].append(item_meta)

        ds_progress = Progress(
            'Uploading pointclouds: {!r}'.format(dataset_remotely.name),
            total_cnt=len(dataset_locally)) if log_progress else None
        pcl_infos = api.pointcloud_episode.upload_paths(
            dataset_remotely.id,
            names=items_infos['names'],
            paths=items_infos['paths'],
            metas=items_infos['metas'],
            progress_cb=ds_progress.iters_done_report
            if log_progress else None)

        # STEP 2 — upload annotations
        frame_to_pcl_ids = {
            pcl_info.frame: pcl_info.id
            for pcl_info in pcl_infos
        }
        api.pointcloud_episode.annotation.append(dataset_remotely.id,
                                                 episode_annotation,
                                                 frame_to_pcl_ids,
                                                 uploaded_objects)

        # STEP 3 — upload photo context
        img_infos = {'img_paths': [], 'img_metas': []}

        # STEP 3.1 — upload images
        for pcl_info in pcl_infos:
            related_items = dataset_locally.get_related_images(pcl_info.name)
            images_paths_for_frame = [
                img_path for img_path, _ in related_items
            ]

            img_infos['img_paths'].extend(images_paths_for_frame)

        img_progress = Progress(
            'Uploading photo context: {!r}'.format(dataset_remotely.name),
            total_cnt=len(img_infos['img_paths'])) if log_progress else None

        images_hashes = api.pointcloud_episode.upload_related_images(
            img_infos['img_paths'],
            progress_cb=img_progress.iters_done_report
            if log_progress else None)

        # STEP 3.2 — upload images metas
        images_hashes_iterator = iter(images_hashes)
        for pcl_info in pcl_infos:
            related_items = dataset_locally.get_related_images(pcl_info.name)

            for _, meta_json in related_items:
                img_hash = next(images_hashes_iterator)
                img_infos['img_metas'].append({
                    ApiField.ENTITY_ID: pcl_info.id,
                    ApiField.NAME: meta_json[ApiField.NAME],
                    ApiField.HASH: img_hash,
                    ApiField.META: meta_json[ApiField.META]
                })

        api.pointcloud_episode.add_related_images(img_infos['img_metas'])

    return project_remotely.id, project_remotely.name
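
A hypothetical invocation of Example #11 (the directory and workspace id are illustrative):

project_id, project_name = upload_pointcloud_episode_project(
    "/data/episodes_project",  # local project directory
    api,
    workspace_id=7,
    log_progress=True)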
Example #12
def upload_pointcloud_project(directory,
                              api,
                              workspace_id,
                              project_name=None,
                              log_progress=False):
    project_fs = PointcloudProject.read_single(directory)
    if project_name is None:
        project_name = project_fs.name

    if api.project.exists(workspace_id, project_name):
        project_name = api.project.get_free_name(workspace_id, project_name)

    project = api.project.create(workspace_id, project_name,
                                 ProjectType.POINT_CLOUDS)
    api.project.update_meta(project.id, project_fs.meta.to_json())

    uploaded_objects = KeyIdMap()
    for dataset_fs in project_fs:
        dataset = api.dataset.create(project.id,
                                     dataset_fs.name,
                                     change_name_if_conflict=True)

        ds_progress = None
        if log_progress:
            ds_progress = Progress('Uploading dataset: {!r}'.format(
                dataset.name),
                                   total_cnt=len(dataset_fs))

        for item_name in dataset_fs:

            item_path, related_images_dir, ann_path = dataset_fs.get_item_paths(
                item_name)
            related_items = dataset_fs.get_related_images(item_name)

            # default to an empty meta; use the timestamp of the first
            # related image when one is present
            item_meta = {}
            try:
                _, meta = related_items[0]
                timestamp = meta[ApiField.META]['timestamp']
                if timestamp:
                    item_meta = {"timestamp": timestamp}
            except (KeyError, IndexError):
                pass

            pointcloud = api.pointcloud.upload_path(dataset.id, item_name,
                                                    item_path, item_meta)

            # validate_item_annotation
            ann_json = load_json_file(ann_path)
            ann = PointcloudAnnotation.from_json(ann_json, project_fs.meta)

            # ignore any existing key_id_map because new objects will be created
            api.pointcloud.annotation.append(pointcloud.id, ann,
                                             uploaded_objects)

            # upload related_images if exist
            if len(related_items) != 0:
                rimg_infos = []
                for img_path, meta_json in related_items:
                    img = api.pointcloud.upload_related_image(img_path)[0]
                    rimg_infos.append({
                        ApiField.ENTITY_ID: pointcloud.id,
                        ApiField.NAME: meta_json[ApiField.NAME],
                        ApiField.HASH: img,
                        ApiField.META: meta_json[ApiField.META]
                    })

                api.pointcloud.add_related_images(rimg_infos)
            if log_progress:
                ds_progress.iters_done_report(1)