def get_next_object(api, task_id, project_id):
    """Select the next untagged object, render its crop + context crop, and push them to the task UI.

    :param api: Supervisely API client.
    :param task_id: id of the running task whose UI data is updated.
    :param project_id: id of the project being tagged (used to locate cached files).
    """
    project_dir = os.path.join(sly.app.SHARED_DATA, "app_tagging", str(project_id))
    image_labels = sly_json.load_json_file(
        os.path.join(project_dir, "image_labels_pairs.json"))
    free_pairs = sly_json.load_json_file(
        os.path.join(project_dir, "free_pairs.json"))
    if len(free_pairs) == 0:
        # @TODO: show message to user
        sly.logger.info("labeling finished")
        # BUG FIX: the original fell through and evaluated free_pairs[0] below,
        # raising IndexError once every pair was labeled. Stop here instead.
        return

    item = image_labels[free_pairs[0]]
    image_id = item[0]
    ann_path = item[1]
    label_index = item[2]

    # Download the image once and reuse the local cache on later calls.
    image_path = os.path.join(project_dir, "images", "{}.png".format(image_id))
    if not sly.fs.file_exists(image_path):
        api.image.download_path(image_id, image_path)
    image = sly.image.read(image_path)

    meta_json = sly_json.load_json_file(os.path.join(project_dir, "meta.json"))
    meta = sly.ProjectMeta.from_json(meta_json)
    ann_json = sly_json.load_json_file(ann_path)
    ann = sly.Annotation.from_json(ann_json, meta)

    label = ann.labels[label_index]
    rect = label.geometry.to_bbox()
    # Tight crop of the object itself (reuse rect instead of recomputing the bbox).
    cropped_image = sly.image.crop(image, rect)

    # Context view: draw the object's contour on a copy and crop with padding.
    canvas = image.copy()
    label.draw_contour(canvas, thickness=3)
    pad = 150  # context margin (pixels) around the object's bounding box
    rect_context = sly.Rectangle(max(0, rect.top - pad),
                                 max(0, rect.left - pad),
                                 min(image.shape[0] - 1, rect.bottom + pad),
                                 min(image.shape[1] - 1, rect.right + pad))
    cropped_context = sly.image.crop(canvas, rect_context)

    cropped_url, cropped_context_url = pack_images(cropped_image, cropped_context)
    api.task.set_data(task_id, [[cropped_context_url], [cropped_url]], "data.objectToTag")
def convert():
    """Import the COCO stuff-thing maps dataset as a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    imgs_dir = sly.TaskPaths.DATA_DIR
    inst_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'stuffthingmaps_trainval2017')
    labels = os.path.join(sly.TaskPaths.DATA_DIR, 'labels.txt')
    number_class, pixel_color = read_colors(labels)

    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)

    for ds_name, sample_names in read_datasets(inst_dir).items():
        dataset = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))  # for logger
        ds_imgs_dir = os.path.join(imgs_dir, ds_name)
        ds_inst_dir = os.path.join(inst_dir, ds_name)
        for name in sample_names:
            img_path = os.path.join(ds_imgs_dir, name + '.jpg')
            mask_path = os.path.join(ds_inst_dir, name + '.png')
            # Only add samples whose image and mask both exist on disk.
            if all(p is None or os.path.isfile(p) for p in (img_path, mask_path)):
                ann = get_ann(img_path, mask_path, number_class, pixel_color)
                dataset.add_item_file(name, img_path, ann=ann)
                progress.iter_done_report()

    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def main():
    """Compute mAP between ground-truth and prediction projects per the task config.

    :raises RuntimeError: if required settings are missing or the confidence tag
        is absent from the predictions project's meta.
    """
    settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    sly.logger.info('Input settings:', extra={'config': settings})

    if IOU not in settings:
        raise RuntimeError(
            '"{}" field is missing. Please set Intersection over Union threshold'
            .format(IOU))
    if CONFIDENCE_TAG_NAME not in settings:
        raise RuntimeError(
            f'{CONFIDENCE_TAG_NAME!r} field is missing. Please set the tag name to read prediction confidence from.'
        )

    confidence_tag_name = settings[CONFIDENCE_TAG_NAME]
    confidence_threshold = settings.get(CONFIDENCE_THRESHOLD, 0.0)
    metric = MAPMetric(settings[CLASSES_MAPPING], settings[IOU],
                       confidence_tag_name=confidence_tag_name,
                       confidence_threshold=confidence_threshold)
    applier = sly.MetricProjectsApplier(metric, settings)

    # Input sanity checks.
    check_class_mapping(applier.project_gt, applier.project_pred, settings[CLASSES_MAPPING])
    if not applier.project_pred.meta.tag_metas.has_key(confidence_tag_name):
        # FIX: the original message was two sentences fused together
        # ("...cannot be found in the project with predictions X does not have that tag...").
        raise RuntimeError(
            f'Tag {confidence_tag_name!r} cannot be found in the project with predictions '
            f'{applier.project_pred.name!r}. Make sure you specify the correct '
            f'confidence tag name as a {CONFIDENCE_TAG_NAME!r} setting in the plugin config.'
        )
    applier.run_evaluation()
    metric.log_total_metrics()
def _load_task_model_config():
    """Read the task config, upgrade legacy formats, and return its MODEL section."""
    config = load_json_file(TaskPaths.TASK_CONFIG_PATH)
    # Apply both legacy-format converters in order before extracting MODEL.
    config = maybe_convert_from_deploy_task_config(config)
    config = maybe_convert_from_v1_inference_task_config(config)
    return config[MODEL]
def add_metadata_to_images(api, path_to_files, dataset_id, app_logger):
    """Attach JSON metadata files (named ``<image_name>.json``) to matching dataset images.

    :param api: Supervisely API client.
    :param path_to_files: directory containing per-image ``.json`` metadata files.
    :param dataset_id: id of the dataset whose images are updated.
    :param app_logger: logger used for progress and warnings.
    """
    meta_names = [sly.fs.get_file_name(fname) for fname in os.listdir(path_to_files)]
    images = api.image.get_list(dataset_id)
    image_names = [info.name for info in images]

    matches = list(set(meta_names) & set(image_names))
    if len(meta_names) != len(matches):
        app_logger.warn(
            '{} metadata files were given, {} matches image names in dataset'.
            format(len(meta_names), len(matches)))

    progress = sly.Progress('Uploading metadata to images', len(images), app_logger)
    for batch in sly.batched(images):
        for image_info in batch:
            if image_info.name not in meta_names:
                app_logger.warn(
                    'Metadata file for image {} was not found in directory {}'.
                    format(image_info.name, path_to_files))
                continue
            meta = load_json_file(
                os.path.join(path_to_files, image_info.name + '.json'))
            if RESOLVE == "merge":
                # On key clash the new value wins, and the old value is kept
                # under "<key>-original" so nothing is silently lost.
                merged = meta.copy()
                for key in meta.keys():
                    if key in image_info.meta:
                        merged[key + "-original"] = image_info.meta[key]
                meta = {**image_info.meta, **merged}
            api.image.update_meta(image_info.id, meta)
        progress.iters_done_report(len(batch))
def _read_colors(self):
    """Build the class-id -> ObjClass mapping from a colors file.

    Prefers the user-supplied colors file; falls back to the bundled
    Mapillary default next to this module.
    """
    if os.path.isfile(self.colors_file):
        sly.logger.info('Will try to read segmentation colors from provided file.')
        color_info = load_json_file(self.colors_file)
    else:
        sly.logger.info('Will use default Mapillary color mapping.')
        color_info = load_json_file(os.path.join(os.path.dirname(__file__), 'colors.json'))

    self._class_id_to_object_class = {}
    for color_id, entry in enumerate(color_info['labels']):
        self._class_id_to_object_class[color_id] = sly.ObjClass(
            name=entry['readable'], geometry_type=sly.Bitmap, color=entry['color'])

    class_names = [obj_class.name for obj_class in self._class_id_to_object_class.values()]
    sly.logger.info('Found {} class(es).'.format(len(self._class_id_to_object_class)),
                    extra={'classes': class_names})
    self._instance_id_to_obj_class = InstanceIdToObjClass(self._class_id_to_object_class)
def convert():
    """Import ICDAR ch4 train/test images with localization GT as a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)

    # Per-split (images subdir, ground-truth subdir) layout of the raw dataset.
    split_layout = {
        'train': ('ch4_training_images', 'ch4_training_localization_transcription_gt'),
        'test': ('ch4_test_images', 'Challenge4_Test_Task1_GT'),
    }
    for directory in ['train', 'test']:
        img_subdir, gt_subdir = split_layout[directory]
        imgs_dir = os.path.join(sly.TaskPaths.DATA_DIR, img_subdir)
        inst_dir = os.path.join(sly.TaskPaths.DATA_DIR, gt_subdir)

        for ds_name, sample_names in read_datasets(inst_dir, directory).items():
            ds = out_project.create_dataset(ds_name)
            progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))  # for logger
            for name in sample_names:
                img_path = os.path.join(imgs_dir, name + '.jpg')
                gt_path = os.path.join(inst_dir, 'gt_' + name + '.txt')
                # Only add samples whose image and GT file both exist.
                if all(p is None or os.path.isfile(p) for p in (img_path, gt_path)):
                    ds.add_item_file(name, img_path, ann=get_ann(img_path, gt_path))
                    progress.iter_done_report()

    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def convert():
    """Import the COCO-Text dataset (train2017 images + COCO_Text.json) as a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    imgs_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'train2017')
    ann_file = os.path.join(sly.TaskPaths.DATA_DIR, 'COCO_Text.json')
    src_datasets = read_datasets(ann_file)
    photo_to_coords_text = read_coords_text(ann_file)
    NAME_ZERO_PADDING = 12  # COCO image file names are zero-padded to 12 digits

    for ds_name, sample_names in src_datasets.items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))  # for logger
        for name in sample_names:
            img_path = os.path.join(imgs_dir, name.zfill(NAME_ZERO_PADDING) + '.jpg')
            if os.path.isfile(img_path):
                # Skip images that have no text annotations in the index.
                try:
                    coords_text = photo_to_coords_text[int(name)]
                except KeyError:
                    continue
                ds.add_item_file(name, img_path, ann=get_ann(img_path, coords_text))
                progress.iter_done_report()

    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def main():
    """Create a project from image-link lists (.txt files) found under DATA_DIR.

    Reads the task config, optionally appends to an existing project, processes
    every ``.txt`` file of links, and dumps the resulting project id.

    :raises RuntimeError: if no valid links were found in any file.
    """
    task_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)

    convert_options = task_config['options']
    normalize_url = True
    if convert_options is not None:
        normalize_url = convert_options.get('normalize_image_name', True)

    server_address = task_config['server_address']
    token = task_config['api_token']
    append_to_existing_project = task_config['append_to_existing_project']
    api = sly.Api(server_address, token)

    task_info = api.task.get_info_by_id(task_config['task_id'])

    # TODO migrate to passing workspace id via the task config.
    project_info = create_project(api, task_info["workspaceId"],
                                  task_config['res_names']['project'],
                                  append_to_existing_project)

    total_counter = 0
    for file_path in sly.fs.list_files_recursively(
            TaskPaths.DATA_DIR,
            filter_fn=lambda path: sly.fs.get_file_ext(path).lower() == '.txt'):
        total_counter += process_dataset_links(api, project_info, file_path,
                                               normalize_url=normalize_url)

    if total_counter == 0:
        # FIX: corrected grammar of the user-facing error ("links find" -> "links found").
        raise RuntimeError('Result project is empty! No valid links found in files.')

    dump_json_file({'project_id': project_info.id},
                   os.path.join(TaskPaths.RESULTS_DIR, 'project_info.json'))
def convert():
    """Import the Pratheepan skin-segmentation dataset as a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    imgs_root = os.path.join(sly.TaskPaths.DATA_DIR, 'Pratheepan_Dataset')
    masks_root = os.path.join(sly.TaskPaths.DATA_DIR, 'Ground_Truth')
    # Fixed palette of the dataset's ground-truth masks.
    default_classes_colors = {'background': [1, 1, 1], 'skin': [255, 255, 255]}

    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)

    for ds_name, sample_names in read_datasets(masks_root).items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))  # for logger
        ds_imgs = os.path.join(imgs_root, ds_name)
        ds_masks = os.path.join(masks_root, 'GroundT_' + ds_name)
        for name in sample_names:
            img_path = os.path.join(ds_imgs, name + '.jpg')
            mask_path = os.path.join(ds_masks, name + '.png')
            # Only add samples whose image and mask both exist on disk.
            if all(p is None or os.path.isfile(p) for p in (img_path, mask_path)):
                ann = get_ann(img_path, mask_path, default_classes_colors)
                ds.add_item_file(name, img_path, ann=ann)
                progress.iter_done_report()

    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def convert():
    """Import the Graz50 facade dataset as a Supervisely project."""
    sly.fs.clean_dir(sly.TaskPaths.RESULTS_DIR)
    settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    images_root = os.path.join(
        sly.TaskPaths.DATA_DIR,
        'graz50_facade_dataset/graz50_facade_dataset/images')
    labels_root = os.path.join(
        sly.TaskPaths.DATA_DIR,
        'graz50_facade_dataset/graz50_facade_dataset/labels_full')

    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)

    for ds_name, sample_names in read_datasets(labels_root).items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))  # for logger
        for name in sample_names:
            img_path = os.path.join(images_root, name + '.png')
            label_path = os.path.join(labels_root, name + '.png')
            # Only add samples whose image and label map both exist on disk.
            if all(p is None or os.path.isfile(p) for p in (img_path, label_path)):
                ann = get_ann(img_path, label_path, default_classes_colors)
                ds.add_item_file(name, img_path, ann=ann)
                progress.iter_done_report()

    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def load_ann(ann_fpath, classes_mapping, project_meta):
    """Load a Supervisely annotation and convert its labels into detection targets.

    :param ann_fpath: path to the annotation JSON file.
    :param classes_mapping: dict mapping class title -> integer class index.
    :param project_meta: ProjectMeta used to deserialize the annotation.
    :return: (gt_boxes, classes, num_boxes) — normalized [ymin, xmin, ymax, xmax]
        float32 boxes of shape [N, 4], int64 class ids, and an int32 box count.
    :raises RuntimeError: if a label's class is missing from classes_mapping.
    """
    ann = Annotation.from_json(load_json_file(ann_fpath), project_meta)
    # ann.normalize_figures()  # @TODO: enaaaable!
    (h, w) = ann.img_size

    boxes, names_utf8, class_ids = [], [], []
    for label in ann.labels:
        mask = np.zeros((h, w), dtype=np.uint8)  # default bkg
        class_idx = classes_mapping.get(label.obj_class.name, None)
        if class_idx is None:
            raise RuntimeError(
                'Missing class mapping (title to index). Class {}.'.format(
                    label.obj_class.name))
        label.geometry.draw(mask, 1)
        # Skip degenerate labels that rasterize to an empty mask.
        if np.sum(mask) > 0:
            xmin, ymin, xmax, ymax = get_bbox(mask)
            boxes.append([ymin / h, xmin / w, ymax / h, xmax / w])
            names_utf8.append(label.obj_class.name.encode('utf8'))  # class name per box (collected but not returned)
            class_ids.append(class_idx)  # integer class id per box

    num_boxes = len(boxes)
    gt_boxes = np.array(boxes).astype(np.float32)
    classes = np.array(class_ids, dtype=np.int64)
    if num_boxes == 0:
        # Keep the expected [0, 4] shape even when there are no boxes.
        gt_boxes = np.reshape(gt_boxes, [0, 4])
    return gt_boxes, classes, np.array([num_boxes]).astype(np.int32)[0]
def convert():
    """Import the Paris Art Deco facades dataset as a Supervisely project."""
    sly.fs.clean_dir(sly.TaskPaths.RESULTS_DIR)
    settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    images_root = os.path.join(sly.TaskPaths.DATA_DIR,
                               'ParisArtDecoFacadesDataset-master/images')
    labels_root = os.path.join(sly.TaskPaths.DATA_DIR,
                               'ParisArtDecoFacadesDataset-master/labels')
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    src_datasets = read_datasets(labels_root)

    # Pixel value -> class name / display color for the dataset's label files.
    number_class = {2: 'Door', 3: 'Shop', 4: 'Balcony', 5: 'Window',
                    6: 'Wall', 7: 'Sky', 8: 'Roof', 1: 'Unknown'}
    pixel_color = {2: (255, 255, 0), 3: (0, 128, 0), 4: (0, 0, 255),
                   5: (128, 255, 0), 6: (255, 0, 0), 7: (0, 255, 255),
                   8: (211, 211, 211), 1: (0, 0, 0)}

    for ds_name, sample_names in src_datasets.items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))  # for logger
        for name in sample_names:
            img_path = os.path.join(images_root, name + '.png')
            label_path = os.path.join(labels_root, name + '.txt')
            # Only add samples whose image and label file both exist.
            if all(p is None or os.path.isfile(p) for p in (img_path, label_path)):
                ann = get_ann(img_path, label_path, number_class, pixel_color)
                ds.add_item_file(name, img_path, ann=ann)
                progress.iter_done_report()

    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def convert():
    """Import PASCAL-Part annotations (.mat files) as a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    imgs_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'JPEGImages')
    parts_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'Annotations_Part')
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)

    for ds_name, sample_names in read_datasets(parts_dir).items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))  # for logger
        for name in sample_names:
            img_path = os.path.join(imgs_dir, name + '.jpg')
            mat_path = os.path.join(parts_dir, name + '.mat')
            # Only add samples whose image and .mat annotation both exist.
            if all(p is None or os.path.isfile(p) for p in (img_path, mat_path)):
                ds.add_item_file(name, img_path, ann=get_ann(img_path, mat_path))
                progress.iter_done_report()

    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def convert():
    """Import ADE20K instance annotations as a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    images_root = os.path.join(sly.TaskPaths.DATA_DIR, 'ADEChallengeData2016/images')
    anns_root = os.path.join(sly.TaskPaths.DATA_DIR, 'annotations_instance')
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    src_datasets = read_datasets(anns_root)

    # Forward and reverse lookup for the reserved background color.
    default_classes_colors = {'background': (10, 10, 10)}
    default_colors_classes = {(10, 10, 10): 'background'}

    for ds_name, sample_names in src_datasets.items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))  # for logger
        ds_imgs = os.path.join(images_root, ds_name)
        ds_anns = os.path.join(anns_root, ds_name)
        for name in sample_names:
            img_path = os.path.join(ds_imgs, name + '.jpg')
            mask_path = os.path.join(ds_anns, name + '.png')
            # Only add samples whose image and instance mask both exist.
            if all(p is None or os.path.isfile(p) for p in (img_path, mask_path)):
                ann = get_ann(img_path, mask_path,
                              default_classes_colors, default_colors_classes)
                ds.add_item_file(name, img_path, ann=ann)
                progress.iter_done_report()

    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def load_json(cls, path):
    """Build an instance from a JSON file of {key_type: {uuid_str: id}} mappings."""
    mapping = load_json_file(path)
    instance = cls()
    for key_type, ids_by_key in mapping.items():
        for key_str, value_id in ids_by_key.items():
            # Keys are serialized as UUID strings; restore them to UUID objects.
            instance._add(key_type, uuid.UUID(key_str), value_id)
    return instance
def convert():
    """Import the RANGE dataset (per-dataset d_images/d_masks dirs) as a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    all_dirs = os.path.join(sly.TaskPaths.DATA_DIR, 'RANGE')

    for ds_name, sample_names in read_datasets(all_dirs).items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))  # for logger
        # Per-dataset subdirectories are loop-invariant; compute them once.
        subdir = os.path.join(all_dirs, ds_name)
        photos_dir = os.path.join(subdir, 'd_images')
        masks_dir = os.path.join(subdir, 'd_masks')
        for name in sample_names:
            img_path = os.path.join(photos_dir, name + '.jpg')
            mat_path = os.path.join(masks_dir, name + '.mat')
            # Only add samples whose image and .mat mask both exist.
            if all(p is None or os.path.isfile(p) for p in (img_path, mat_path)):
                ann = get_ann(img_path, mat_path, number_class, pixel_color)
                ds.add_item_file(name, img_path, ann=ann)
                progress.iter_done_report()

    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def determine_task_inference_mode_config(default_inference_mode_config):
    """Merge the task's MODE section over the defaults and return the effective config."""
    task_config = maybe_convert_from_v1_inference_task_config(
        load_json_file(TaskPaths.TASK_CONFIG_PATH))
    logger.info('Input task config', extra={'config': task_config})
    # A missing MODE section falls back entirely to the provided defaults.
    result_config = get_effective_inference_mode_config(
        task_config.get(MODE, {}), default_inference_mode_config)
    logger.info('Full inference mode config', extra={'config': result_config})
    return result_config
def main():
    """Evaluate IoU between ground-truth and prediction projects per the settings file."""
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    sly.logger.info('Input settings:', extra={'config': settings})

    class_mapping = settings[CLASSES_MAPPING]
    metric = IoUMetric(class_mapping)
    applier = sly.MetricProjectsApplier(metric, settings)
    # Validate the mapping against both projects before running.
    check_class_mapping(applier.project_gt, applier.project_pred, class_mapping)
    applier.run_evaluation()
    metric.log_total_metrics()
def main():
    """Evaluate classification metrics between GT and prediction projects per the task config."""
    settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    sly.logger.info('Input settings:', extra={'config': settings})

    tags_mapping = settings[TAGS_MAPPING]
    metric = ClassificationMetrics(tags_mapping, settings[CONFIDENCE_THRESHOLD])
    applier = sly.MetricProjectsApplier(metric, settings)
    # Validate the tag mapping against both projects before running.
    check_tag_mapping(applier.project_gt, applier.project_pred, tags_mapping)
    applier.run_evaluation()
    metric.log_total_metrics()
def __init__(self):
    """Read DTL paths and settings; require the single "images_and_annotations" input dir."""
    paths = dtl_paths.DtlPaths()
    self.in_dir = paths.data_dir
    self.out_dir = paths.results_dir
    self.settings = load_json_file(paths.settings_path)
    if len(paths.project_dirs) != 1:
        raise RuntimeError(
            'Invalid data format. Input folder should contain only "images_and_annotations" dir'
        )
    self.data_dir = join(self.in_dir, 'images_and_annotations')
def get_task_api():
    """Load ../task_config.json and build an API client.

    :return: (task_id, api, task_info) tuple.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    task_info = sly_json.load_json_file(
        os.path.join(script_dir, "../task_config.json"))
    task_id = task_info["task_id"]
    api = sly.Api(task_info["server_address"], task_info["api_token"], retry_count=10)
    # api.add_additional_field('taskId', task_id)
    # api.add_header('x-task-id', str(task_id))
    return task_id, api, task_info
def get_related_images(self, item_name):
    """Return [(image_path, meta_dict), ...] for images related to the given item.

    The meta dict comes from a sibling "<image_file_name>.json" file when present,
    otherwise it is empty.
    """
    related = []
    rel_dir = self.get_related_images_path(item_name)
    if dir_exists(rel_dir):
        for img_path in list_files(rel_dir, SUPPORTED_IMG_EXTS):
            meta_path = os.path.join(rel_dir, get_file_name_with_ext(img_path) + ".json")
            meta = load_json_file(meta_path) if file_exists(meta_path) else {}
            related.append((img_path, meta))
    return related
def _read(self):
    """Load project meta and open every dataset subdirectory; raise if the project is empty."""
    self._meta = ProjectMeta.from_json(load_json_file(self._get_project_meta_path()))
    # Every immediate subdirectory is treated as a dataset.
    for ds_name in get_subdirs(self.directory):
        ds = Dataset(os.path.join(self.directory, ds_name), OpenMode.READ)
        self._datasets = self._datasets.add(ds)
    if self.total_items == 0:
        raise RuntimeError('Project is empty')
def __init__(self):
    """Resolve the PASCAL VOC directory layout, then read dataset lists and class colors."""
    self.settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    data_dir = sly.TaskPaths.DATA_DIR
    self.lists_dir = os.path.join(data_dir, 'ImageSets/Segmentation')
    self.imgs_dir = os.path.join(data_dir, 'JPEGImages')
    self.segm_dir = os.path.join(data_dir, 'SegmentationClass')
    self.inst_dir = os.path.join(data_dir, 'SegmentationObject')
    self.colors_file = os.path.join(data_dir, 'colors.txt')
    # Instance masks are optional; presence of the directory decides the mode.
    self.with_instances = os.path.isdir(self.inst_dir)
    mode = 'with' if self.with_instances else 'without'
    sly.logger.info('Will import data {} instance info.'.format(mode))
    self.obj_classes = sly.ObjClassCollection()
    self._read_datasets()
    self._read_colors()
def __init__(self):
    """Read DTL paths/settings, require a single input project folder, then derive classes."""
    paths = dtl_paths.DtlPaths()
    self.in_dir = paths.data_dir
    self.out_dir = paths.results_dir
    self.settings = load_json_file(paths.settings_path)
    if len(paths.project_dirs) > 1:
        raise RuntimeError('The project should consist of only one folder.')
    self.dataset_dir = paths.project_dirs[0]
    self._define_classes()
def load_json(cls, path):
    """Load a JSON mapping file and rebuild an instance from it.

    :param path: path to a JSON file of {key_type: {uuid_str: id}} mappings.
    :return: a new instance populated via ``_add``.
    """
    mapping = load_json_file(path)
    instance = cls()
    for key_type, ids_by_key in mapping.items():
        for key_str, value_id in ids_by_key.items():
            # Keys are serialized as UUID strings; restore them to UUID objects.
            instance._add(key_type, uuid.UUID(key_str), value_id)
    return instance
def __init__(self, schema_fpath):
    """Load named JSON schemas from a file and build a validator for each.

    :param schema_fpath: path to a JSON file of named schemas, optionally with a
        shared DEFINITIONS section.
    """
    validator_cls = _extend_with_default(Draft4Validator)
    schemas = load_json_file(schema_fpath)
    # Detach common definitions from the named schemas and inline them into
    # every schema so each validator is self-contained.
    shared_defs = schemas.pop(self.DEFINITIONS, {})
    for schema in schemas.values():
        schema.setdefault(self.DEFINITIONS, {}).update(shared_defs)
    self._validators = {name: validator_cls(schema) for name, schema in schemas.items()}
def read_datasets(ann_file):
    """Split COCO-Text image ids into 'train'/'val' lists, skipping the 'test' split.

    :param ann_file: path to the COCO_Text.json annotation file.
    :return: {'train': [id_str, ...], 'val': [id_str, ...]}
    :raises RuntimeError: if the annotation file does not exist.
    """
    if not os.path.isfile(ann_file):
        raise RuntimeError(
            'There is no file {}, but it is necessary'.format(ann_file))
    data = load_json_file(ann_file)
    src_datasets = {'train': [], 'val': []}
    for image_info in data['imgs'].values():
        split = image_info['set']
        if split != 'test':
            src_datasets[split].append(str(image_info['id']))
    return src_datasets
def get_task_api():
    """Load ../task_config.json and build an API client bound to this task.

    :return: (task_id, api, PROJECT_ID) tuple; PROJECT_ID is hard-coded.
    """
    PROJECT_ID = 489  # hard-coded target project id
    script_dir = os.path.dirname(os.path.realpath(__file__))
    task_info = sly_json.load_json_file(
        os.path.join(script_dir, "../task_config.json"))
    task_id = task_info["task_id"]
    api = sly.Api(task_info["server_address"], task_info["api_token"], retry_count=10)
    # Tag every request with the task id for server-side bookkeeping.
    api.add_additional_field('taskId', task_id)
    api.add_header('x-task-id', str(task_id))
    # context = api.task.get_data(task_id, sly.app.CONTEXT)
    # user_id = context["userId"]
    return task_id, api, PROJECT_ID