def append_to_objects(self, entity_id, project_id, objects: KeyIndexedCollection, key_id_map: KeyIdMap):
    tag_name_id_map = self.get_name_to_id_map(project_id)

    tags_to_add = []
    tags_keys = []
    for object in objects:
        obj_id = key_id_map.get_object_id(object.key())
        if obj_id is None:
            raise RuntimeError("Can not add tags to object: OBJECT_ID not found for key {}".format(object.key()))
        tags_json, cur_tags_keys = self._tags_to_json(object.tags, tag_name_id_map=tag_name_id_map)
        for tag in tags_json:
            tag[ApiField.OBJECT_ID] = obj_id
            tags_to_add.append(tag)
        tags_keys.extend(cur_tags_keys)

    if len(tags_keys) != len(tags_to_add):
        raise RuntimeError("SDK error: len(tags_keys) != len(tags_to_add)")
    if len(tags_keys) == 0:
        return
    ids = self.append_to_objects_json(entity_id, tags_to_add)
    KeyIdMap.add_tags_to(key_id_map, tags_keys, ids)
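# Hedged usage sketch (illustration, not part of the SDK source above): calling
# append_to_objects on a tag-API instance after the objects themselves have been uploaded
# with the same KeyIdMap. "tag_api" stands for whichever TagApi subclass instance exposes
# this method in your code; entity_id, project_id and ann are assumed to be known already.
def add_object_tags(tag_api, entity_id, project_id, ann, key_id_map):
    # Works only if key_id_map already maps every object's key to its server id;
    # otherwise append_to_objects raises RuntimeError.
    tag_api.append_to_objects(entity_id, project_id, ann.objects, key_id_map)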
def to_json(self, key_id_map: KeyIdMap = None):
    data_json = {
        KEY: self.key().hex,
        LabelJsonFields.OBJ_CLASS_NAME: self.obj_class.name,
        LabelJsonFields.TAGS: self.tags.to_json(key_id_map)
    }

    if key_id_map is not None:
        item_id = key_id_map.get_object_id(self.key())
        if item_id is not None:
            data_json[ID] = item_id
    return data_json
def to_json(self, key_id_map: KeyIdMap = None):
    '''
    Convert the VideoObject to JSON format.
    :param key_id_map: KeyIdMap class object
    :return: VideoObject in JSON format
    '''
    data_json = {
        KEY: self.key().hex,
        LabelJsonFields.OBJ_CLASS_NAME: self.obj_class.name,
        LabelJsonFields.TAGS: self.tags.to_json(key_id_map)
    }

    if key_id_map is not None:
        item_id = key_id_map.get_object_id(self.key())
        if item_id is not None:
            data_json[ID] = item_id

    self._add_creation_info(data_json)
    return data_json
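# Hedged usage sketch (not from the source): serializing a VideoObject with and without a
# KeyIdMap. The import paths follow the supervisely package layout but may differ between
# SDK versions; treat them as assumptions.
import supervisely as sly
from supervisely.video_annotation.key_id_map import KeyIdMap

obj_class = sly.ObjClass('car', sly.Rectangle)
video_object = sly.VideoObject(obj_class)

print(video_object.to_json())            # KEY + class name + tags only
key_id_map = KeyIdMap()
print(video_object.to_json(key_id_map))  # ID is added only if the map knows this key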
def append(self, pointcloud_id, ann: PointcloudAnnotation, key_id_map: KeyIdMap = None):
    info = self._api.pointcloud.get_info_by_id(pointcloud_id)

    new_objects = []
    for object_3d in ann.objects:
        if key_id_map is not None and key_id_map.get_object_id(object_3d.key()) is not None:
            # object already uploaded
            continue
        new_objects.append(object_3d)

    self._append(self._api.pointcloud.tag,
                 self._api.pointcloud.object,
                 self._api.pointcloud.figure,
                 info.project_id, info.dataset_id, pointcloud_id,
                 ann.tags, PointcloudObjectCollection(new_objects), ann.figures,
                 key_id_map)
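# Hedged usage sketch (not from the source): copying an annotation from one point cloud to
# another via the append method above. The accessor api.pointcloud.annotation and the names
# src_pointcloud_id, dst_pointcloud_id, project_meta are assumptions for illustration.
api = sly.Api.from_env()
key_id_map = KeyIdMap()

ann_json = api.pointcloud.annotation.download(src_pointcloud_id)
ann = sly.PointcloudAnnotation.from_json(ann_json, project_meta, key_id_map)

# objects whose keys are already present in key_id_map are skipped by append
api.pointcloud.annotation.append(dst_pointcloud_id, ann, key_id_map)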
def turn_into_images_project(api: sly.Api, task_id, context, state, app_logger):
    # create the destination images project with the same meta as the source video project
    res_project_name = f"{g.project.name}(images)"
    dst_project = api.project.create(g.WORKSPACE_ID, res_project_name,
                                     type=sly.ProjectType.IMAGES,
                                     change_name_if_conflict=True)
    api.project.update_meta(dst_project.id, g.meta.to_json())

    key_id_map = KeyIdMap()
    for dataset_name in g.SELECTED_DATASETS:
        dataset = api.dataset.get_info_by_name(g.PROJECT_ID, dataset_name)
        dst_dataset = api.dataset.create(dst_project.id, dataset.name)
        videos = api.video.get_list(dataset.id)
        for batch in sly.batched(videos):
            for video_info in batch:
                general_time = time()

                ann_info = api.video.annotation.download(video_info.id)
                ann = sly.VideoAnnotation.from_json(ann_info, g.meta, key_id_map)

                if g.OPTIONS == "annotated" and len(ann.tags) == 0 and len(ann.frames) == 0:
                    g.my_app.logger.warn(
                        f"Video {video_info.name} annotation is empty in Dataset {dataset_name}")
                    continue

                need_download_video = f.need_download_video(video_info.frames_count, len(ann.frames))

                video_path = None
                if need_download_video or g.OPTIONS == "all":
                    local_time = time()
                    video_path = os.path.join(g.video_dir, video_info.name)
                    progress_cb = f.get_progress_cb("Downloading video",
                                                    int(video_info.file_meta['size']),
                                                    is_size=True)
                    api.video.download_path(video_info.id, video_path, progress_cb=progress_cb)
                    g.logger.info(
                        f'video {video_info.name} downloaded in {time() - local_time} seconds')

                # split video-level tags into per-video properties and per-frame tags
                # (see the sketch after this function for the idea behind f.convert_tags)
                frames_to_convert = []
                video_props = []
                video_frame_tags = defaultdict(list)
                f.convert_tags(ann.tags, video_props, video_frame_tags, frames_to_convert)

                # the same split for object tags, plus a tag that remembers the source object id
                object_frame_tags = defaultdict(lambda: defaultdict(list))
                object_props = defaultdict(list)
                for vobject in ann.objects:
                    f.convert_tags(vobject.tags, object_props[vobject.key()],
                                   object_frame_tags[vobject.key()], frames_to_convert)
                    vobject_id = key_id_map.get_object_id(vobject.key())
                    f.add_object_id_tag(vobject_id, object_props[vobject.key()])

                if g.OPTIONS == "annotated":
                    frames_to_convert.extend(list(ann.frames.keys()))
                    frames_to_convert = list(dict.fromkeys(frames_to_convert))
                    frames_to_convert.sort()
                else:
                    frames_to_convert = list(range(0, video_info.frames_count))

                progress = sly.Progress("Processing video frames: {!r}".format(video_info.name),
                                        len(frames_to_convert))

                # total_images_size = 0
                for batch_index, batch_frames in enumerate(
                        sly.batched(frames_to_convert, batch_size=g.BATCH_SIZE)):
                    metas = []
                    anns = []

                    # extract frames either from the downloaded video file or via the API
                    if need_download_video or g.OPTIONS == "all":
                        local_time = time()
                        images_names, images = f.get_frames_from_video(video_info.name, video_path, batch_frames)
                        g.logger.debug(
                            f'extracted {len(batch_frames)} frames in {time() - local_time} seconds')

                        """ too slow calculations, for extreme debug
                        images_size = f.calculate_batch_size(images) / (1024 * 1024)  # in MegaBytes
                        g.logger.debug(f'batch size: {images_size} MB')
                        g.logger.debug(f'mean item size: {images_size / len(images)} MB')
                        total_images_size += images_size
                        """
                    else:
                        images_names, images = f.get_frames_from_api(api, video_info.id, video_info.name, batch_frames)

                    for frame_index in batch_frames:
                        metas.append({
                            "video_id": video_info.id,
                            "video_name": video_info.name,
                            "frame_index": frame_index,
                            "video_dataset_id": video_info.dataset_id,
                            "video_dataset_name": dataset.name,
                            "video_project_id": g.project.id,
                            "video_project_name": g.project.name
                        })

                        # build image labels from the figures of this frame
                        labels = []
                        frame_annotation = ann.frames.get(frame_index)
                        if frame_annotation is not None:
                            for figure in frame_annotation.figures:
                                tags_to_assign = object_props[figure.parent_object.key()].copy()
                                tags_to_assign.extend(
                                    object_frame_tags[figure.parent_object.key()].get(frame_index, []).copy())
                                cur_label = sly.Label(figure.geometry, figure.parent_object.obj_class,
                                                      sly.TagCollection(tags_to_assign))
                                labels.append(cur_label)

                        img_tags = video_props.copy() + video_frame_tags.get(frame_index, []).copy()
                        anns.append(sly.Annotation(ann.img_size, labels=labels,
                                                   img_tags=sly.TagCollection(img_tags)))

                    if g.LOG_LEVEL == 'debug':
                        f.distort_frames(images)
                        g.logger.debug(f'{len(images)} frames distorted')

                    f.upload_frames(api, dst_dataset.id, images_names, images, anns, metas,
                                    f'{batch_index}/{int(len(frames_to_convert) / g.BATCH_SIZE)}')
                    progress.iters_done_report(len(images_names))

                # g.logger.debug(f'total images size for video: {total_images_size} MB')

                g.logger.info(
                    f'video {video_info.name} converted in {time() - general_time} seconds')

    g.my_app.stop()
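# Hedged sketch (not the app's actual helper): the idea behind f.convert_tags used above.
# A video tag without a frame_range applies to the whole video, so it becomes a property tag
# attached to every extracted frame; a tag with a frame_range becomes a per-frame tag for each
# index in that range. Names and exact behavior here are assumptions for illustration;
# frame_tags is expected to be a defaultdict(list), as in the caller.
def split_video_tags(video_tags, props, frame_tags, frames_to_convert):
    for tag in video_tags:
        if tag.frame_range is None:
            # global tag: keep it as a property applied to every converted frame
            props.append(sly.Tag(tag.meta, tag.value))
        else:
            start, end = tag.frame_range
            for frame_index in range(start, end + 1):
                frame_tags[frame_index].append(sly.Tag(tag.meta, tag.value))
                frames_to_convert.append(frame_index)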