def append_to_objects(self, entity_id, project_id, objects: KeyIndexedCollection, key_id_map: KeyIdMap):
    '''
    Attach the tags of the given annotation objects to their server-side objects.

    Every object must already have a server id registered in key_id_map;
    otherwise deserialization order was wrong and we raise.

    :param entity_id: id of the entity (video/pointcloud) the objects belong to
    :param project_id: id of the project (used to resolve tag-meta name -> id)
    :param objects: KeyIndexedCollection of annotation objects with tags
    :param key_id_map: KeyIdMap with object key -> id mapping; new tag ids are added to it
    :raises RuntimeError: if an object has no registered id, or on an internal
        keys/payload length mismatch
    '''
    tag_name_id_map = self.get_name_to_id_map(project_id)
    tags_to_add = []
    tags_keys = []
    # NOTE: loop variable renamed from `object` — do not shadow the builtin
    for obj in objects:
        obj_id = key_id_map.get_object_id(obj.key())
        if obj_id is None:
            raise RuntimeError(
                "Can not add tags to object: OBJECT_ID not found for key {}"
                .format(obj.key()))
        tags_json, cur_tags_keys = self._tags_to_json(
            obj.tags, tag_name_id_map=tag_name_id_map)
        for tag in tags_json:
            tag[ApiField.OBJECT_ID] = obj_id
            tags_to_add.append(tag)
        tags_keys.extend(cur_tags_keys)
    # sanity check: one key per uploaded tag payload
    if len(tags_keys) != len(tags_to_add):
        raise RuntimeError("SDK error: len(tags_keys) != len(tags_to_add)")
    if len(tags_keys) == 0:
        return
    ids = self.append_to_objects_json(entity_id, tags_to_add)
    KeyIdMap.add_tags_to(key_id_map, tags_keys, ids)
def from_json(cls, data, project_meta, key_id_map: KeyIdMap=None): ''' The function from_json convert videoannotation from json format to VideoAnnotation class object. :param data: input videoannotation in json format :param project_meta: ProjectMeta class object :param key_id_map: KeyIdMap class object :return: VideoAnnotation class object ''' #video_name = data[VIDEO_NAME] video_key = uuid.UUID(data[KEY]) if KEY in data else uuid.uuid4() if key_id_map is not None: key_id_map.add_video(video_key, data.get(VIDEO_ID, None)) img_size_dict = data[IMG_SIZE] img_height = img_size_dict[IMG_SIZE_HEIGHT] img_width = img_size_dict[IMG_SIZE_WIDTH] img_size = (img_height, img_width) description = data.get(DESCRIPTION, "") frames_count = data[FRAMES_COUNT] tags = VideoTagCollection.from_json(data[TAGS], project_meta.tag_metas, key_id_map) objects = VideoObjectCollection.from_json(data[OBJECTS], project_meta, key_id_map) frames = FrameCollection.from_json(data[FRAMES], objects, frames_count, key_id_map) return cls(img_size=img_size, frames_count=frames_count, objects=objects, frames=frames, tags=tags, description=description, key=video_key)
def from_json(cls, data, project_meta, key_id_map: KeyIdMap = None):
    '''
    Deserialize a PointcloudAnnotation from its json representation.

    :param data: input PointcloudAnnotation in json format
    :param project_meta: ProjectMeta class object
    :param key_id_map: optional KeyIdMap; when given, the annotation key is
        registered with the json's POINTCLOUD_ID (if any)
    :return: PointcloudAnnotation class object
    '''
    item_key = uuid.UUID(data[KEY]) if KEY in data else uuid.uuid4()
    if key_id_map is not None:
        # NOTE(review): pointcloud ids are stored via the video slot of
        # KeyIdMap (add_video) — mirrors to_json, which reads get_video_id
        key_id_map.add_video(item_key, data.get(POINTCLOUD_ID, None))
    description = data.get(DESCRIPTION, "")
    tags = VideoTagCollection.from_json(data[TAGS], project_meta.tag_metas, key_id_map)
    # objects must be parsed before figures: figures reference object keys
    objects = PointcloudObjectCollection.from_json(data[OBJECTS], project_meta, key_id_map)
    figures = []
    for figure_json in data.get(FIGURES, []):
        figure = PointcloudFigure.from_json(figure_json, objects, None, key_id_map)
        figures.append(figure)
    return cls(objects=objects, figures=figures, tags=tags, description=description, key=item_key)
def _read(self):
    '''
    Load the project from its directory via the base reader (which checks
    that item/annotation directories exist, datasets are not empty and the
    video<->annotation correspondence is one-to-one), then load the
    key->id mapping stored alongside the project.
    '''
    super(VideoProject, self)._read()
    mapping = KeyIdMap()
    mapping.load_json(self._get_key_id_map_path())
    self._key_id_map = mapping
def from_json(cls, data, project_meta: ProjectMeta, key_id_map: KeyIdMap = None):
    '''
    Deserialize a PointcloudObject from its json representation.

    The object's UUID key is resolved in priority order:
      1) a key already registered in key_id_map for the json's server id,
      2) the KEY field of the json itself,
      3) a freshly generated uuid4.
    A mismatch between (1) and (2) is treated as corruption and raises.

    :param data: input PointcloudObject in json format
    :param project_meta: ProjectMeta used to resolve the object class by name
    :param key_id_map: optional KeyIdMap used to look up / register the (key, id) pair
    :return: PointcloudObject class object
    :raises RuntimeError: if the class name is unknown in project_meta, or the
        registered key conflicts with the key stored in json
    '''
    obj_class_name = data[LabelJsonFields.OBJ_CLASS_NAME]
    obj_class = project_meta.get_obj_class(obj_class_name)
    if obj_class is None:
        raise RuntimeError(
            f'Failed to deserialize a object from JSON: class name {obj_class_name!r} '
            f'was not found in the given project meta.')
    object_id = data.get(ID, None)
    existing_key = None
    if object_id is not None and key_id_map is not None:
        existing_key = key_id_map.get_object_key(object_id)
    json_key = uuid.UUID(data[KEY]) if KEY in data else None
    # both sources claim a key for the same server id — they must agree
    if (existing_key is not None) and (json_key is not None) and (existing_key != json_key):
        raise RuntimeError(
            "Object id = {!r}: existing_key {!r} != json_key {!r}".format(
                object_id, existing_key, json_key))
    if existing_key is not None:
        key = existing_key
    elif json_key is not None:
        key = json_key
    else:
        key = uuid.uuid4()
    # register the pair only when it was not already known to the map
    if key_id_map is not None and existing_key is None:
        key_id_map.add_object(key, object_id)
    class_id = data.get(CLASS_ID, None)
    labeler_login = data.get(LABELER_LOGIN, None)
    updated_at = data.get(UPDATED_AT, None)
    created_at = data.get(CREATED_AT, None)
    return cls(obj_class=obj_class,
               key=key,
               tags=VideoTagCollection.from_json(
                   data[LabelJsonFields.TAGS], project_meta.tag_metas),
               class_id=class_id,
               labeler_login=labeler_login,
               updated_at=updated_at,
               created_at=created_at)
def append_to_entity(self, entity_id, project_id, tags: KeyIndexedCollection, key_id_map: KeyIdMap = None):
    '''
    Attach the given tags to an entity (video/pointcloud) on the server.

    :param entity_id: id of the target entity
    :param project_id: project id used to resolve tag-meta names to ids
    :param tags: KeyIndexedCollection of tags to upload
    :param key_id_map: optional KeyIdMap; the new tag ids are registered in it
    :return: list of created tag ids (empty when there is nothing to upload)
    '''
    if len(tags) == 0:
        return []
    json_tags, tag_keys = self._tags_to_json(tags, project_id=project_id)
    new_ids = self._append_json(entity_id, json_tags)
    KeyIdMap.add_tags_to(key_id_map, tag_keys, new_ids)
    return new_ids
def to_json(self, key_id_map: KeyIdMap = None):
    '''
    Serialize this video annotation to json.

    :param key_id_map: optional KeyIdMap; when it resolves an id for this
        annotation's key, VIDEO_ID is included in the result
    :return: video annotation in json format
    '''
    result = {}
    result[IMG_SIZE] = {
        IMG_SIZE_HEIGHT: int(self.img_size[0]),
        IMG_SIZE_WIDTH: int(self.img_size[1]),
    }
    result[DESCRIPTION] = self.description
    result[KEY] = self.key().hex
    result[TAGS] = self.tags.to_json(key_id_map)
    result[OBJECTS] = self.objects.to_json(key_id_map)
    result[FRAMES] = self.frames.to_json(key_id_map)
    result[FRAMES_COUNT] = self.frames_count
    if key_id_map is not None:
        video_id = key_id_map.get_video_id(self.key())
        if video_id is not None:
            result[VIDEO_ID] = video_id
    return result
def from_json(cls, data, tag_meta_collection, key_id_map: KeyIdMap = None):
    '''
    Deserialize a VideoTag from json.

    :param data: input VideoTag in json format
    :param tag_meta_collection: collection used by the base Tag deserializer
    :param key_id_map: optional KeyIdMap; when given, the tag key is
        registered with the json's server id (if any)
    :return: VideoTag class object
    '''
    base = super(VideoTag, cls).from_json(data, tag_meta_collection)
    tag_key = uuid.UUID(data[KEY]) if KEY in data else uuid.uuid4()
    if key_id_map is not None:
        key_id_map.add_tag(tag_key, data.get(ID, None))
    return cls(meta=base.meta,
               value=base.value,
               frame_range=data.get(FRAME_RANGE, None),
               key=tag_key,
               sly_id=base.sly_id,
               labeler_login=base.labeler_login,
               updated_at=base.updated_at,
               created_at=base.created_at)
def from_json(cls, data, project_meta, key_id_map: KeyIdMap = None):
    '''
    Deserialize a pointcloud-episode annotation from its json representation.

    :param data: input annotation in json format
    :param project_meta: ProjectMeta used to resolve tag metas and object classes
    :param key_id_map: optional KeyIdMap for key<->id registration
    :return: annotation class object
    '''
    item_key = uuid.UUID(data[KEY]) if KEY in data else uuid.uuid4()
    if key_id_map is not None:
        # NOTE(review): the episode key is stored via the video slot of
        # KeyIdMap and mapped to the DATASET_ID — mirrors to_json, which
        # writes ApiField.DATASET_ID back from get_video_id
        key_id_map.add_video(item_key, data.get(ApiField.DATASET_ID, None))
    description = data.get(DESCRIPTION, "")
    frames_count = data.get(FRAMES_COUNT, 0)
    tags = VideoTagCollection.from_json(data[TAGS], project_meta.tag_metas, key_id_map)
    # objects must be parsed before frames: frame figures reference object keys
    objects = PointcloudObjectCollection.from_json(data[OBJECTS], project_meta, key_id_map)
    frames = FrameCollection.from_json(data[FRAMES], objects, key_id_map=key_id_map)
    return cls(frames_count, objects, frames, tags, description, item_key)
def _append_bulk(self, entity_id, figures_json, figures_keys, key_id_map: KeyIdMap, field_name=ApiField.ENTITY_ID):
    '''
    Upload figures to the server in batches of 100 and record the returned
    ids in key_id_map, keyed by the figures' client-side UUID keys.

    :param entity_id: id of the entity the figures belong to
    :param figures_json: list of figure payloads (parallel to figures_keys)
    :param figures_keys: list of client-side figure keys
    :param key_id_map: KeyIdMap updated with the new (key, id) pairs
    :param field_name: api field under which entity_id is sent
    '''
    if len(figures_json) == 0:
        return
    key_batches = batched(figures_keys, batch_size=100)
    json_batches = batched(figures_json, batch_size=100)
    for batch_keys, batch_jsons in zip(key_batches, json_batches):
        payload = {field_name: entity_id, ApiField.FIGURES: batch_jsons}
        resp = self._api.post('figures.bulk.add', payload)
        # the response order matches the request order
        for figure_key, created in zip(batch_keys, resp.json()):
            key_id_map.add_figure(figure_key, created[ApiField.ID])
def from_json(cls, data, project_meta: ProjectMeta, key_id_map: KeyIdMap = None):
    '''
    Deserialize a VideoObject from json.

    :param data: input VideoObject in json format
    :param project_meta: ProjectMeta used to resolve the object class by name
    :param key_id_map: optional KeyIdMap; when given, the object key is
        registered with the json's server id (if any)
    :return: VideoObject class object
    :raises RuntimeError: if the class name is unknown in project_meta
    '''
    obj_class_name = data[LabelJsonFields.OBJ_CLASS_NAME]
    obj_class = project_meta.get_obj_class(obj_class_name)
    if obj_class is None:
        raise RuntimeError(
            f'Failed to deserialize a object from JSON: class name {obj_class_name!r} '
            f'was not found in the given project meta.')
    object_key = uuid.UUID(data[KEY]) if KEY in data else uuid.uuid4()
    if key_id_map is not None:
        key_id_map.add_object(object_key, data.get(ID, None))
    tags = VideoTagCollection.from_json(data[LabelJsonFields.TAGS], project_meta.tag_metas)
    return cls(obj_class=obj_class,
               key=object_key,
               tags=tags,
               class_id=data.get(CLASS_ID, None),
               labeler_login=data.get(LABELER_LOGIN, None),
               updated_at=data.get(UPDATED_AT, None),
               created_at=data.get(CREATED_AT, None))
def _append_bulk(self, tag_api, entity_id, project_id, dataset_id, objects, key_id_map: KeyIdMap = None, is_pointcloud=False):
    '''
    Create annotation objects on the server in a single bulk request and
    then attach their tags.

    :param tag_api: tag api used to upload the objects' tags afterwards
    :param entity_id: id of the entity; sent per object unless is_pointcloud
    :param project_id: project id (for class and tag name -> id resolution)
    :param dataset_id: dataset the objects are created in
    :param objects: collection of annotation objects to upload
    :param key_id_map: optional KeyIdMap; new object ids are registered in it
    :param is_pointcloud: when True, objects are linked to the dataset only
        and no ENTITY_ID is sent per object
    :return: list of created object ids
    '''
    if len(objects) == 0:
        return []
    objcls_name_id_map = self._api.object_class.get_name_to_id_map(project_id)
    items = []
    for obj in objects:
        new_obj = {ApiField.CLASS_ID: objcls_name_id_map[obj.obj_class.name]}
        if not is_pointcloud:
            # non-pointcloud objects are linked directly to the entity
            new_obj[ApiField.ENTITY_ID] = entity_id
        items.append(new_obj)
    response = self._api.post('annotation-objects.bulk.add',
                              {ApiField.DATASET_ID: dataset_id,
                               ApiField.ANNOTATION_OBJECTS: items})
    ids = [obj[ApiField.ID] for obj in response.json()]
    KeyIdMap.add_objects_to(key_id_map, [obj.key() for obj in objects], ids)
    # add tags to objects
    tag_api.append_to_objects(entity_id, project_id, objects, key_id_map)
    return ids
def to_json(self, key_id_map: KeyIdMap = None):
    '''
    Serialize this episode annotation to json.

    :param key_id_map: optional KeyIdMap; when it resolves an id for this
        annotation's key, DATASET_ID is included in the result
    :return: annotation in json format
    '''
    json_data = {
        DESCRIPTION: self.description,
        KEY: self.key().hex,
        TAGS: self.tags.to_json(key_id_map),
        OBJECTS: self.objects.to_json(key_id_map),
        FRAMES_COUNT: self.frames_count,
        FRAMES: self.frames.to_json(key_id_map),
    }
    if key_id_map is None:
        return json_data
    dataset_id = key_id_map.get_video_id(self.key())
    if dataset_id is not None:
        json_data[ApiField.DATASET_ID] = dataset_id
    return json_data
def to_json(self, key_id_map: KeyIdMap = None):
    '''
    Serialize this VideoObject to json.

    :param key_id_map: optional KeyIdMap; when it resolves an id for this
        object's key, the server id is included in the result
    :return: VideoObject in json format
    '''
    result = {KEY: self.key().hex}
    result[LabelJsonFields.OBJ_CLASS_NAME] = self.obj_class.name
    result[LabelJsonFields.TAGS] = self.tags.to_json(key_id_map)
    if key_id_map is not None:
        object_id = key_id_map.get_object_id(self.key())
        if object_id is not None:
            result[ID] = object_id
    # appends labeler_login / created_at / updated_at when available
    self._add_creation_info(result)
    return result
def append(self, pointcloud_id, ann: PointcloudAnnotation, key_id_map: KeyIdMap = None):
    '''
    Upload an annotation to the given pointcloud. Objects whose keys already
    have server ids in key_id_map are considered uploaded and are skipped.

    :param pointcloud_id: id of the target pointcloud
    :param ann: PointcloudAnnotation to upload
    :param key_id_map: optional KeyIdMap used to detect already-uploaded objects
    '''
    info = self._api.pointcloud.get_info_by_id(pointcloud_id)
    if key_id_map is None:
        new_objects = list(ann.objects)
    else:
        # keep only objects that have no server id yet
        new_objects = [obj for obj in ann.objects
                       if key_id_map.get_object_id(obj.key()) is None]
    self._append(self._api.pointcloud.tag,
                 self._api.pointcloud.object,
                 self._api.pointcloud.figure,
                 info.project_id,
                 info.dataset_id,
                 pointcloud_id,
                 ann.tags,
                 PointcloudObjectCollection(new_objects),
                 ann.figures,
                 key_id_map)
def to_json(self, key_id_map: KeyIdMap = None):
    '''
    Serialize this VideoTag to json.

    :param key_id_map: optional KeyIdMap; when it resolves an id for this
        tag's key, the server id is included in the result
    :return: VideoTag in json format
    '''
    result = super(VideoTag, self).to_json()
    if type(result) is str:
        # @TODO: case when tag has no value, super.to_json() returns tag name
        result = {TagJsonFields.TAG_NAME: result}
    if self.frame_range is not None:
        result[FRAME_RANGE] = self.frame_range
    result[KEY] = self.key().hex
    if key_id_map is None:
        return result
    tag_id = key_id_map.get_tag_id(self.key())
    if tag_id is not None:
        result[ID] = tag_id
    return result
def to_json(self, key_id_map: KeyIdMap = None):
    '''
    Serialize this PointcloudAnnotation to json.

    :param key_id_map: optional KeyIdMap; when it resolves an id for this
        annotation's key, POINTCLOUD_ID is included in the result
    :return: PointcloudAnnotation in json format
    '''
    result = {}
    result[DESCRIPTION] = self.description
    result[KEY] = self.key().hex
    result[TAGS] = self.tags.to_json(key_id_map)
    result[OBJECTS] = self.objects.to_json(key_id_map)
    result[FIGURES] = [fig.to_json(key_id_map) for fig in self.figures]
    if key_id_map is not None:
        pointcloud_id = key_id_map.get_video_id(self.key())
        if pointcloud_id is not None:
            result[POINTCLOUD_ID] = pointcloud_id
    return result
def append(self, dataset_id, ann: PointcloudEpisodeAnnotation, frame_to_pointcloud_ids, key_id_map: KeyIdMap = None):
    '''
    Upload an episode annotation: create its objects in the dataset and
    attach each frame's figures to the pointcloud mapped to that frame.

    :param dataset_id: id of the target dataset
    :param ann: PointcloudEpisodeAnnotation to upload
    :param frame_to_pointcloud_ids: dict frame index -> pointcloud id;
        frames without a mapping are skipped
    :param key_id_map: optional KeyIdMap; created internally when omitted
    '''
    if key_id_map is None:
        # create for internal purposes (to link figures and tags to objects)
        key_id_map = KeyIdMap()
    figures = []
    pointcloud_ids = []
    for i, frame in enumerate(ann.frames):
        # check the mapping once per frame (previously evaluated per figure)
        pointcloud_id = frame_to_pointcloud_ids.get(i)
        if pointcloud_id is None:  # skip unmapped frames
            continue
        for fig in frame.figures:
            figures.append(fig)
            pointcloud_ids.append(pointcloud_id)
    self._api.pointcloud_episode.object.append_to_dataset(
        dataset_id, ann.objects, key_id_map)
    self._api.pointcloud_episode.figure.append_to_dataset(
        dataset_id, figures, pointcloud_ids, key_id_map)
def download_pointcloud_project(api, project_id, dest_dir, dataset_ids=None, download_items=True, log_progress=False):
    '''
    Download a pointcloud project (meta, annotations, items and related
    photo-context images) into dest_dir.

    :param api: Api class object
    :param project_id: int
    :param dest_dir: destination directory (created in CREATE mode)
    :param dataset_ids: optional list of dataset ids to restrict the download
    :param download_items: when False, empty placeholder files are created
        and related images are skipped
    :param log_progress: bool
    '''
    LOG_BATCH_SIZE = 1
    key_id_map = KeyIdMap()
    project_fs = PointcloudProject(dest_dir, OpenMode.CREATE)
    meta = ProjectMeta.from_json(api.project.get_meta(project_id))
    project_fs.set_meta(meta)
    if dataset_ids is not None:
        datasets_infos = [api.dataset.get_info_by_id(ds_id) for ds_id in dataset_ids]
    else:
        datasets_infos = api.dataset.get_list(project_id)
    for dataset in datasets_infos:
        dataset_fs = project_fs.create_dataset(dataset.name)
        pointclouds = api.pointcloud.get_list(dataset.id)
        ds_progress = None
        if log_progress:
            ds_progress = Progress('Downloading dataset: {!r}'.format(dataset.name),
                                   total_cnt=len(pointclouds))
        for batch in batched(pointclouds, batch_size=LOG_BATCH_SIZE):
            pointcloud_ids = [pointcloud_info.id for pointcloud_info in batch]
            pointcloud_names = [pointcloud_info.name for pointcloud_info in batch]
            ann_jsons = api.pointcloud.annotation.download_bulk(dataset.id, pointcloud_ids)
            for pointcloud_id, pointcloud_name, ann_json in zip(pointcloud_ids, pointcloud_names, ann_jsons):
                # the server must return annotations in the requested order
                if pointcloud_name != ann_json[ApiField.NAME]:
                    raise RuntimeError(
                        "Error in api.video.annotation.download_batch: broken order")
                pointcloud_file_path = dataset_fs.generate_item_path(pointcloud_name)
                if download_items is True:
                    api.pointcloud.download_path(pointcloud_id, pointcloud_file_path)
                    related_images_path = dataset_fs.get_related_images_path(pointcloud_name)
                    related_images = api.pointcloud.get_list_related_images(pointcloud_id)
                    for rimage_info in related_images:
                        name = rimage_info[ApiField.NAME]
                        if not has_valid_ext(name):
                            new_name = get_file_name(name)  # to fix cases like .png.json
                            if has_valid_ext(new_name):
                                name = new_name
                                rimage_info[ApiField.NAME] = name
                            else:
                                # BUGFIX: message no longer embeds raw source
                                # indentation via a string line-continuation
                                raise RuntimeError(
                                    'Something wrong with photo context filenames. '
                                    'Please, contact support')
                        rimage_id = rimage_info[ApiField.ID]
                        path_img = os.path.join(related_images_path, name)
                        path_json = os.path.join(related_images_path, name + ".json")
                        api.pointcloud.download_related_image(rimage_id, path_img)
                        dump_json_file(rimage_info, path_json)
                else:
                    touch(pointcloud_file_path)
                dataset_fs.add_item_file(pointcloud_name,
                                         pointcloud_file_path,
                                         ann=PointcloudAnnotation.from_json(
                                             ann_json, project_fs.meta, key_id_map),
                                         _validate_item=False)
            # BUGFIX: previously called unconditionally and crashed with
            # AttributeError when log_progress=False (ds_progress is None)
            if ds_progress is not None:
                ds_progress.iters_done_report(len(batch))
    project_fs.set_key_id_map(key_id_map)
def _create(self):
    '''
    Create the project directory with an empty meta.json, then seed an
    empty key->id mapping on disk. The base implementation raises if the
    directory already exists and is not empty.
    '''
    super()._create()
    empty_map = KeyIdMap()
    self.set_key_id_map(empty_map)
def from_json(cls, data, objects: VideoObjectCollection, frame_index, key_id_map: KeyIdMap = None):
    '''
    Deserialize a VideoFigure from json.

    The parent object is resolved either by the OBJECT_KEY stored in json or,
    when only OBJECT_ID is present, by looking the key up in key_id_map.

    :param data: input VideoFigure in json format
    :param objects: VideoObjectCollection holding the figure's parent object
    :param frame_index: index of the frame the figure belongs to
    :param key_id_map: optional KeyIdMap; required when json carries only OBJECT_ID
    :return: VideoFigure class object
    :raises RuntimeError: when the parent object cannot be resolved
    '''
    object_id = data.get(OBJECT_ID, None)
    object_key = None
    if OBJECT_KEY in data:
        object_key = uuid.UUID(data[OBJECT_KEY])
    if object_id is None and object_key is None:
        raise RuntimeError(
            "Figure can not be deserialized from json: object_id or object_key are not found"
        )
    if object_key is None:
        if key_id_map is None:
            raise RuntimeError(
                "Figure can not be deserialized: key_id_map is None")
        object_key = key_id_map.get_object_key(object_id)
        if object_key is None:
            raise RuntimeError(
                "Object with id={!r} not found in key_id_map".format(object_id))
    # NOTE: renamed from `object` — do not shadow the builtin
    parent_object = objects.get(object_key)
    if parent_object is None:
        raise RuntimeError(
            "Figure can not be deserialized: corresponding object {!r} not found in ObjectsCollection"
            .format(object_key.hex))
    shape_str = data[ApiField.GEOMETRY_TYPE]
    geometry_json = data[ApiField.GEOMETRY]
    shape = GET_GEOMETRY_FROM_STR(shape_str)
    geometry = shape.from_json(geometry_json)
    key = uuid.UUID(data[KEY]) if KEY in data else uuid.uuid4()
    if key_id_map is not None:
        key_id_map.add_figure(key, data.get(ID, None))
    class_id = data.get(CLASS_ID, None)
    labeler_login = data.get(LABELER_LOGIN, None)
    updated_at = data.get(UPDATED_AT, None)
    created_at = data.get(CREATED_AT, None)
    return cls(parent_object, geometry, frame_index, key,
               class_id=class_id, labeler_login=labeler_login,
               updated_at=updated_at, created_at=created_at)
def download_video_project(api, project_id, dest_dir, dataset_ids=None, download_videos=True, log_progress=False):
    '''
    Download the video project with the given id into dest_dir.

    :param api: Api class object
    :param project_id: int
    :param dest_dir: str
    :param dataset_ids: optional list of dataset ids to restrict the download
    :param download_videos: when False, empty placeholder files are created
    :param log_progress: bool
    '''
    LOG_BATCH_SIZE = 1
    key_id_map = KeyIdMap()
    project_fs = VideoProject(dest_dir, OpenMode.CREATE)
    meta = ProjectMeta.from_json(api.project.get_meta(project_id))
    project_fs.set_meta(meta)
    if dataset_ids is not None:
        datasets_infos = [api.dataset.get_info_by_id(ds_id) for ds_id in dataset_ids]
    else:
        datasets_infos = api.dataset.get_list(project_id)
    for dataset in datasets_infos:
        dataset_fs = project_fs.create_dataset(dataset.name)
        videos = api.video.get_list(dataset.id)
        ds_progress = None
        if log_progress:
            ds_progress = Progress('Downloading dataset: {!r}'.format(dataset.name),
                                   total_cnt=len(videos))
        for batch in batched(videos, batch_size=LOG_BATCH_SIZE):
            video_ids = [video_info.id for video_info in batch]
            video_names = [video_info.name for video_info in batch]
            ann_jsons = api.video.annotation.download_bulk(dataset.id, video_ids)
            for video_id, video_name, ann_json in zip(video_ids, video_names, ann_jsons):
                # the server must return annotations in the requested order
                if video_name != ann_json[ApiField.VIDEO_NAME]:
                    raise RuntimeError(
                        "Error in api.video.annotation.download_batch: broken order")
                video_file_path = dataset_fs.generate_item_path(video_name)
                if download_videos is True:
                    api.video.download_path(video_id, video_file_path)
                else:
                    touch(video_file_path)
                dataset_fs.add_item_file(video_name,
                                         video_file_path,
                                         ann=VideoAnnotation.from_json(
                                             ann_json, project_fs.meta, key_id_map),
                                         _validate_item=False)
            # BUGFIX: previously called unconditionally and crashed with
            # AttributeError when log_progress=False (ds_progress is None)
            if ds_progress is not None:
                ds_progress.iters_done_report(len(batch))
    project_fs.set_key_id_map(key_id_map)
class VideoProject(Project):
    '''
    A VideoProject is a superfolder holding video datasets together with
    project meta information and a persisted key<->id mapping
    (key_id_map.json) linking local UUID keys to server-side ids.
    '''
    # dataset implementation used by the base Project machinery
    dataset_class = VideoDataset

    class DatasetDict(KeyIndexedCollection):
        # key-indexed collection of VideoDataset items
        item_type = VideoDataset

    def __init__(self, directory, mode: OpenMode):
        '''
        :param directory: path to the directory where the project will be
            saved or from which it will be loaded
        :param mode: OpenMode determining how to open the project
            (raises if the directory state does not match the mode)
        '''
        self._key_id_map: KeyIdMap = None
        super().__init__(directory, mode)

    def _read(self):
        '''
        Read the project from its directory. The base reader checks item and
        annotation directories exist and datasets are not empty; every video
        must have exactly one annotation. Afterwards the persisted
        key_id_map.json is loaded.
        '''
        super(VideoProject, self)._read()
        self._key_id_map = KeyIdMap()
        self._key_id_map.load_json(self._get_key_id_map_path())

    def _create(self):
        '''
        Create the project directory with an empty meta.json and an empty
        key_id_map.json. Raises if the directory exists and is not empty.
        '''
        super()._create()
        self.set_key_id_map(KeyIdMap())

    def _add_item_file_to_dataset(self, ds, item_name, item_paths, _validate_item, _use_hardlink):
        '''
        Add an item file to the dataset's items directory and its annotation
        to the annotations directory. Raises if item_name already exists in
        the dataset or has an unsupported extension.

        :param ds: VideoDataset class object
        :param item_name: str
        :param item_paths: ItemPaths object
        :param _validate_item: bool
        :param _use_hardlink: bool
        '''
        ds.add_item_file(item_name,
                         item_paths.item_path,
                         ann=item_paths.ann_path,
                         _validate_item=_validate_item,
                         _use_hardlink=_use_hardlink)

    @property
    def key_id_map(self):
        # currently loaded KeyIdMap (None until _read/_create has run)
        return self._key_id_map

    def set_key_id_map(self, new_map: KeyIdMap):
        '''
        Replace the project's KeyIdMap and persist it to key_id_map.json.

        :param new_map: KeyIdMap class object
        '''
        self._key_id_map = new_map
        self._key_id_map.dump_json(self._get_key_id_map_path())

    def _get_key_id_map_path(self):
        '''
        :return: str (full path to key_id_map.json)
        '''
        return os.path.join(self.directory, 'key_id_map.json')

    @classmethod
    def read_single(cls, dir):
        '''
        Read the single project contained in the given directory.
        Raises if the directory contains more than one subdirectory.

        :param dir: str
        :return: VideoProject class object
        '''
        return read_project_wrapper(dir, cls)
def download_pointcloud_episode_project(api, project_id, dest_dir, dataset_ids=None, download_pcd=True, download_realated_images=True, download_annotations=True, log_progress=False, batch_size=10):
    '''
    Download a pointcloud-episode project into dest_dir: meta, per-dataset
    episode annotation, frame->pointcloud map, pointcloud files and related
    photo-context images.

    :param api: Api class object
    :param project_id: int
    :param dest_dir: destination directory (created in CREATE mode)
    :param dataset_ids: optional list of dataset ids to restrict the download
    :param download_pcd: when False, empty placeholder files are created
    :param download_realated_images: download photo-context images
        (NOTE(review): parameter name is misspelled but is public API — keep)
    :param download_annotations: download episode annotation and frame map
    :param log_progress: bool
    :param batch_size: number of pointclouds processed per progress step
    '''
    key_id_map = KeyIdMap()
    project_fs = PointcloudEpisodeProject(dest_dir, OpenMode.CREATE)
    meta = ProjectMeta.from_json(api.project.get_meta(project_id))
    project_fs.set_meta(meta)
    datasets_infos = []
    if dataset_ids is not None:
        for ds_id in dataset_ids:
            datasets_infos.append(api.dataset.get_info_by_id(ds_id))
    else:
        datasets_infos = api.dataset.get_list(project_id)
    for dataset in datasets_infos:
        dataset_fs = project_fs.create_dataset(dataset.name)
        pointclouds = api.pointcloud_episode.get_list(dataset.id)
        if download_annotations:
            # Download annotation to project_path/dataset_path/annotation.json
            ann_json = api.pointcloud_episode.annotation.download(dataset.id)
            annotation = dataset_fs.annotation_class.from_json(
                ann_json, meta, key_id_map)
            dataset_fs.set_ann(annotation)
            # frames --> pointcloud mapping to project_path/dataset_path/frame_pointcloud_map.json
            frame_name_map = api.pointcloud_episode.get_frame_name_map(dataset.id)
            frame_pointcloud_map_path = dataset_fs.get_frame_pointcloud_map_path()
            dump_json_file(frame_name_map, frame_pointcloud_map_path)
        # Download data
        if log_progress:
            ds_progress = Progress('Downloading dataset: {!r}'.format(
                dataset.name), total_cnt=len(pointclouds))
        for batch in batched(pointclouds, batch_size=batch_size):
            pointcloud_ids = [pointcloud_info.id for pointcloud_info in batch]
            pointcloud_names = [
                pointcloud_info.name for pointcloud_info in batch
            ]
            for pointcloud_id, pointcloud_name in zip(pointcloud_ids, pointcloud_names):
                pointcloud_file_path = dataset_fs.generate_item_path(pointcloud_name)
                if download_pcd is True:
                    api.pointcloud_episode.download_path(
                        pointcloud_id, pointcloud_file_path)
                else:
                    # placeholder file so the local dataset structure is valid
                    touch(pointcloud_file_path)
                if download_realated_images:
                    related_images_path = dataset_fs.get_related_images_path(pointcloud_name)
                    related_images = api.pointcloud_episode.get_list_related_images(pointcloud_id)
                    for rimage_info in related_images:
                        name = rimage_info[ApiField.NAME]
                        rimage_id = rimage_info[ApiField.ID]
                        path_img = os.path.join(related_images_path, name)
                        path_json = os.path.join(related_images_path, name + ".json")
                        api.pointcloud_episode.download_related_image(rimage_id, path_img)
                        dump_json_file(rimage_info, path_json)
                dataset_fs.add_item_file(pointcloud_name,
                                         pointcloud_file_path,
                                         _validate_item=False)
            if log_progress:
                ds_progress.iters_done_report(len(batch))
    project_fs.set_key_id_map(key_id_map)
def upload_pointcloud_episode_project(directory, api, workspace_id, project_name=None, log_progress=False):
    '''
    Upload a local pointcloud-episode project to the server.

    Per dataset: upload pointclouds with their frame indices, then the
    episode annotation (figures mapped to pointclouds via frame index),
    then the photo-context images and their metas.

    :param directory: local project directory (single project expected)
    :param api: Api class object
    :param workspace_id: destination workspace id
    :param project_name: optional remote name; defaults to the local name,
        de-duplicated when it already exists in the workspace
    :param log_progress: bool
    :return: (remote project id, remote project name)
    '''
    # STEP 0 — create project remotely
    project_locally = PointcloudEpisodeProject.read_single(directory)
    project_name = project_locally.name if project_name is None else project_name
    if api.project.exists(workspace_id, project_name):
        project_name = api.project.get_free_name(workspace_id, project_name)
    project_remotely = api.project.create(workspace_id, project_name,
                                          ProjectType.POINT_CLOUD_EPISODES)
    api.project.update_meta(project_remotely.id, project_locally.meta.to_json())
    uploaded_objects = KeyIdMap()
    for dataset_locally in project_locally.datasets:
        ann_json_path = dataset_locally.get_ann_path()
        if os.path.isfile(ann_json_path):
            ann_json = load_json_file(ann_json_path)
            episode_annotation = PointcloudEpisodeAnnotation.from_json(
                ann_json, project_locally.meta)
        else:
            # no local annotation -> upload an empty one
            episode_annotation = PointcloudEpisodeAnnotation()
        dataset_remotely = api.dataset.create(
            project_remotely.id,
            dataset_locally.name,
            description=episode_annotation.description,
            change_name_if_conflict=True)
        # STEP 1 — upload episodes
        items_infos = {'names': [], 'paths': [], 'metas': []}
        for item_name in dataset_locally:
            item_path, related_images_dir = dataset_locally.get_item_paths(item_name)
            frame_idx = dataset_locally.get_frame_idx(item_name)
            # the frame index is carried in the item meta so the server can
            # reconstruct the episode order
            item_meta = {"frame": frame_idx}
            items_infos['names'].append(item_name)
            items_infos['paths'].append(item_path)
            items_infos['metas'].append(item_meta)
        ds_progress = Progress(
            'Uploading pointclouds: {!r}'.format(dataset_remotely.name),
            total_cnt=len(dataset_locally)) if log_progress else None
        pcl_infos = api.pointcloud_episode.upload_paths(
            dataset_remotely.id,
            names=items_infos['names'],
            paths=items_infos['paths'],
            metas=items_infos['metas'],
            progress_cb=ds_progress.iters_done_report if log_progress else None)
        # STEP 2 — upload annotations
        frame_to_pcl_ids = {
            pcl_info.frame: pcl_info.id
            for pcl_info in pcl_infos
        }
        api.pointcloud_episode.annotation.append(dataset_remotely.id,
                                                 episode_annotation,
                                                 frame_to_pcl_ids,
                                                 uploaded_objects)
        # STEP 3 — upload photo context
        img_infos = {'img_paths': [], 'img_metas': []}
        # STEP 3.1 — upload images
        for pcl_info in pcl_infos:
            related_items = dataset_locally.get_related_images(pcl_info.name)
            images_paths_for_frame = [
                img_path for img_path, _ in related_items
            ]
            img_infos['img_paths'].extend(images_paths_for_frame)
        img_progress = Progress(
            'Uploading photo context: {!r}'.format(dataset_remotely.name),
            total_cnt=len(img_infos['img_paths'])) if log_progress else None
        images_hashes = api.pointcloud_episode.upload_related_images(
            img_infos['img_paths'],
            progress_cb=img_progress.iters_done_report if log_progress else None)
        # STEP 3.2 — upload images metas
        # hashes come back in upload order, so consume them in the same
        # per-pointcloud iteration order used to collect the paths
        images_hashes_iterator = images_hashes.__iter__()
        for pcl_info in pcl_infos:
            related_items = dataset_locally.get_related_images(pcl_info.name)
            for _, meta_json in related_items:
                img_hash = next(images_hashes_iterator)
                img_infos['img_metas'].append({
                    ApiField.ENTITY_ID: pcl_info.id,
                    ApiField.NAME: meta_json[ApiField.NAME],
                    ApiField.HASH: img_hash,
                    ApiField.META: meta_json[ApiField.META]
                })
        api.pointcloud_episode.add_related_images(img_infos['img_metas'])
    return project_remotely.id, project_remotely.name
def upload_pointcloud_project(directory, api, workspace_id, project_name=None, log_progress=False):
    '''
    Upload a local pointcloud project to the server: meta, pointcloud files,
    annotations and related photo-context images.

    :param directory: local project directory (single project expected)
    :param api: Api class object
    :param workspace_id: destination workspace id
    :param project_name: optional remote name; defaults to the local name,
        de-duplicated when it already exists in the workspace
    :param log_progress: bool
    '''
    project_fs = PointcloudProject.read_single(directory)
    if project_name is None:
        project_name = project_fs.name
    if api.project.exists(workspace_id, project_name):
        project_name = api.project.get_free_name(workspace_id, project_name)
    project = api.project.create(workspace_id, project_name, ProjectType.POINT_CLOUDS)
    api.project.update_meta(project.id, project_fs.meta.to_json())
    uploaded_objects = KeyIdMap()
    for dataset_fs in project_fs:
        dataset = api.dataset.create(project.id, dataset_fs.name,
                                     change_name_if_conflict=True)
        ds_progress = None
        if log_progress:
            ds_progress = Progress('Uploading dataset: {!r}'.format(dataset.name),
                                   total_cnt=len(dataset_fs))
        for item_name in dataset_fs:
            item_path, related_images_dir, ann_path = dataset_fs.get_item_paths(item_name)
            related_items = dataset_fs.get_related_images(item_name)
            # BUGFIX: item_meta must always be bound — previously it was left
            # undefined when a timestamp was present but falsy, causing a
            # NameError at upload_path below
            item_meta = {}
            try:
                _, meta = related_items[0]
                timestamp = meta[ApiField.META]['timestamp']
                if timestamp:
                    item_meta = {"timestamp": timestamp}
            except (KeyError, IndexError):
                # no related image or no timestamp — upload without meta
                pass
            pointcloud = api.pointcloud.upload_path(dataset.id, item_name, item_path, item_meta)
            # validate_item_annotation
            ann_json = load_json_file(ann_path)
            ann = PointcloudAnnotation.from_json(ann_json, project_fs.meta)
            # ignore existing key_id_map because the new objects will be created
            api.pointcloud.annotation.append(pointcloud.id, ann, uploaded_objects)
            # upload related_images if exist
            if len(related_items) != 0:
                rimg_infos = []
                for img_path, meta_json in related_items:
                    img = api.pointcloud.upload_related_image(img_path)[0]
                    rimg_infos.append({
                        ApiField.ENTITY_ID: pointcloud.id,
                        ApiField.NAME: meta_json[ApiField.NAME],
                        ApiField.HASH: img,
                        ApiField.META: meta_json[ApiField.META]
                    })
                api.pointcloud.add_related_images(rimg_infos)
            if log_progress:
                ds_progress.iters_done_report(1)