def get_project_meta(api: sly.Api, project_id, force=False):
    """Return (and cache) the ProjectMeta for ``project_id``.

    Ensures the UPC tag (``TAG_NAME``, any-string) and the error tag
    (``ERROR_TAG_NAME``, valueless, red) exist in the project meta,
    pushing the meta back to the server when either had to be added.
    Results are cached in the module-level ``metas`` dict; pass
    ``force=True`` to bypass the cache and re-fetch.
    """
    global metas
    if project_id not in metas or force is True:
        meta_json = api.project.get_meta(project_id)
        meta = sly.ProjectMeta.from_json(meta_json)
        update_meta = False
        upc_tag_meta = meta.get_tag_meta(TAG_NAME)
        if upc_tag_meta is None:
            meta = meta.add_tag_meta(
                sly.TagMeta(TAG_NAME, sly.TagValueType.ANY_STRING))
            update_meta = True
        error_tag_meta = meta.get_tag_meta(ERROR_TAG_NAME)
        if error_tag_meta is None:
            meta = meta.add_tag_meta(
                sly.TagMeta(ERROR_TAG_NAME, sly.TagValueType.NONE,
                            color=[255, 0, 0]))
            update_meta = True
        if update_meta is True:
            api.project.update_meta(project_id, meta.to_json())
            # get meta from server again to access tag_id (tag_meta_id)
            meta_json = api.project.get_meta(project_id)
            meta = sly.ProjectMeta.from_json(meta_json)
        # BUG FIX: was a bare acquire()/release() pair — an exception between
        # the two calls would leave the lock held forever. The context
        # manager releases it on every path.
        with metas_lock:
            metas[project_id] = meta
    return metas[project_id]
def process_meta(input_meta):
    """Derive a meta of ``<name>_bbox`` rectangle classes plus train/val tags.

    Every source object class yields a rectangle class named
    ``<name>_bbox`` with the same color. Returns the new meta together
    with the original-name -> bbox-name mapping.
    """
    classes_mapping = {}
    output_meta = sly.ProjectMeta(obj_classes=[],
                                  tag_metas=input_meta.tag_metas)
    for src_class in input_meta.obj_classes:
        bbox_name = '{}_bbox'.format(src_class.name)
        classes_mapping[src_class.name] = bbox_name
        output_meta = output_meta.add_obj_class(
            sly.ObjClass(bbox_name, sly.Rectangle, color=src_class.color))
    # Valueless split tags, added in train-then-val order.
    for split_name in ('train', 'val'):
        output_meta = output_meta.add_tag_meta(
            sly.TagMeta(split_name, sly.TagValueType.NONE))
    return output_meta, classes_mapping
def create_meta(config, side):
    """Build a ProjectMeta holding the any-geometry classes listed under
    ``config[side]`` plus the fixed case_id/validation/finished tag metas."""
    obj_classes = [
        sly.ObjClass(name, sly.AnyGeometry) for name in config[side]
    ]
    case_tag = sly.TagMeta("case_id", sly.TagValueType.ANY_STRING)
    validation_tag = sly.TagMeta("validation", sly.TagValueType.ONEOF_STRING,
                                 possible_values=["accepted", "rejected"])
    finished_tag = sly.TagMeta("finished", sly.TagValueType.NONE)
    meta = sly.ProjectMeta()
    meta = meta.add_obj_classes(obj_classes)
    meta = meta.add_tag_metas([case_tag, validation_tag, finished_tag])
    return meta
def construct_model_meta():
    """Load label URLs and ground-truth labels from the local JSON files
    into globals and build ``g.meta`` with one valueless tag meta per
    ground-truth label name."""
    g.labels_urls = sly.json.load_json_file(g.local_labels_urls_path)
    g.gt_labels = sly.json.load_json_file(g.local_gt_labels_path)
    # index -> label name, for reverse lookup of model outputs.
    g.gt_index_to_labels = {index: name for name, index in g.gt_labels.items()}
    # Only the label names matter here, so iterate the keys directly
    # (the original unpacked .items() and never used the index).
    tag_metas = [
        sly.TagMeta(name, sly.TagValueType.NONE) for name in g.gt_labels
    ]
    g.meta = sly.ProjectMeta(tag_metas=sly.TagMetaCollection(tag_metas))
def upload_project_meta(api, project_id, config_yaml_info):
    """Create a ProjectMeta from the YAML class names and colors plus
    valueless train/val tags, push it to the server, and return it."""
    yaml_names = config_yaml_info["names"]
    yaml_colors = config_yaml_info["colors"]
    classes = [
        sly.ObjClass(name=cls_name,
                     geometry_type=sly.Rectangle,
                     color=yaml_colors[cls_idx])
        for cls_idx, cls_name in enumerate(yaml_names)
    ]
    tags_arr = [
        sly.TagMeta(name=split, value_type=sly.TagValueType.NONE)
        for split in ("train", "val")
    ]
    project_meta = sly.ProjectMeta(
        obj_classes=sly.ObjClassCollection(items=classes),
        tag_metas=sly.TagMetaCollection(items=tags_arr))
    api.project.update_meta(project_id, project_meta.to_json())
    return project_meta
def get_tags_from_dicom_object(dicom_obj, requested_tags):
    """Collect ``(Tag, TagMeta)`` pairs for every requested DICOM attribute
    that is present (non-None) on ``dicom_obj``; missing attributes are
    silently skipped. Values are stringified into any-string tags."""
    results = []
    for tag_name in requested_tags:
        value = getattr(dicom_obj, tag_name, None)
        if value is None:
            continue
        meta = sly.TagMeta(tag_name, sly.TagValueType.ANY_STRING)
        results.append((sly.Tag(meta, str(value)), meta))
    return results
def _load_train_config(self):
    """Extend the base loader: create the numeric confidence tag meta,
    record the allowed input-size range and an index -> class-title
    reverse map."""
    confidence_name = self._config['confidence_tag_name']
    self.confidence_tag_meta = sly.TagMeta(confidence_name,
                                           sly.TagValueType.ANY_NUMBER)
    super()._load_train_config()
    size_cfg = self.train_config[SETTINGS]['input_size']
    self.input_size_limits = (size_cfg['min_dim'], size_cfg['max_dim'])
    self.idx_to_class_title = {
        idx: title for title, idx in self.class_title_to_idx.items()
    }
def process_meta(input_meta):
    """Copy classes referenced by ``classes_mapping`` into a fresh meta,
    then add false-positive/false-negative bitmap classes and a numeric
    IoU image-tag meta for every ground-truth class."""
    output_meta = sly.ProjectMeta(obj_classes=None,
                                  img_tag_metas=input_meta.img_tag_metas,
                                  objtag_metas=input_meta.obj_tags)
    # Keep a class if it appears as either a GT name or a mapped name.
    referenced = set(classes_mapping.keys()) | set(classes_mapping.values())
    for obj_class in input_meta.obj_classes:
        if obj_class.name in referenced:
            output_meta = output_meta.add_obj_class(obj_class)
    for gt_class in classes_mapping:
        for name_builder in (make_false_positive_name, make_false_negative_name):
            output_meta = output_meta.add_obj_class(
                sly.ObjClass(name_builder(gt_class), sly.Bitmap))
        output_meta = output_meta.add_img_tag_meta(
            sly.TagMeta(make_iou_tag_name(gt_class),
                        sly.TagValueType.ANY_NUMBER))
    return output_meta
def _generate_sample_annotation(self, orig_img_path, orig_ann_path,
                                train_val_tag):
    """Load the Cityscapes annotation for one sample and attach the
    train/val tag, registering the tag meta in ``self.tag_metas`` on
    first use.

    Raises:
        AnnotationConvertionException: on any failure, so the caller can
            skip this sample and continue with the next one.
    """
    try:
        tag_meta = sly.TagMeta(train_val_tag, sly.TagValueType.NONE)
        if not self.tag_metas.has_key(tag_meta.name):
            self.tag_metas = self.tag_metas.add(tag_meta)
        tag = sly.Tag(tag_meta)
        ann = self._load_cityscapes_annotation(orig_img_path, orig_ann_path)
        ann = ann.add_tag(tag)
        return ann
    except Exception as exc:
        # BUG FIX: chain the original exception so the real cause is not
        # lost when the conversion failure is logged or inspected.
        raise AnnotationConvertionException() from exc  # ok, may continue work with another sample
def add_tags_to_meta():
    """Ensure every CSV column has an any-string tag meta in ``RES_META``,
    applying the ``RESOLVE`` policy (skip / replace / raise) when a tag
    with the same name already exists."""
    global RES_META
    for column_name in CSV_COLUMNS:
        existing: sly.TagMeta = RES_META.get_tag_meta(column_name)
        if existing is None:
            RES_META = RES_META.add_tag_meta(
                sly.TagMeta(column_name,
                            value_type=sly.TagValueType.ANY_STRING))
            continue
        # Tag already present: resolve the conflict per policy.
        if RESOLVE == "skip":
            continue
        if RESOLVE == "replace" and existing.value_type != sly.TagValueType.ANY_STRING:
            raise TypeError(
                "Type of existing tag {!r} is not string".format(
                    existing.name))
        if RESOLVE == "raise":
            raise RuntimeError(
                "Tag {!r} already exists in project {!r}".format(
                    existing.name, PROJECT.name))
def construct_model_meta(model):
    """Build a ProjectMeta with one rectangle class per model class name
    and a numeric confidence tag meta.

    Colors come from ``model.module.colors`` or ``model.colors`` when
    present; otherwise a fresh distinct RGB color is generated per class.
    """
    names = model.module.names if hasattr(model, 'module') else model.names
    if hasattr(model, 'module') and hasattr(model.module, 'colors'):
        palette = model.module.colors
    elif hasattr(model, 'colors'):
        palette = model.colors
    else:
        palette = []
        for _ in range(len(names)):
            palette.append(sly.color.generate_rgb(exist_colors=palette))
    obj_classes = [
        sly.ObjClass(cls_name, sly.Rectangle, rgb)
        for cls_name, rgb in zip(names, palette)
    ]
    tag_metas = [sly.TagMeta(CONFIDENCE, sly.TagValueType.ANY_NUMBER)]
    return sly.ProjectMeta(obj_classes=sly.ObjClassCollection(obj_classes),
                           tag_metas=sly.TagMetaCollection(tag_metas))
def init_project(api: sly.Api, project_id):
    # Purpose: (re)build the local cache directory for a tagging project:
    # ensure the four product tag metas exist, dump meta + per-image
    # annotations to disk, and index every (image, label) pair for later
    # processing.
    project_dir = os.path.join(sly.app.SHARED_DATA, "app_tagging",
                               str(project_id))
    #@TODO: comment
    # NOTE(review): the directory is removed BEFORE the dir_exists check,
    # so the early-return branch below can never be taken — confirm whether
    # remove_dir was meant to be conditional or temporarily disabled.
    sly.fs.remove_dir(project_dir)
    if sly.fs.dir_exists(project_dir):
        return project_dir
    else:
        sly.fs.mkdir(project_dir)
    meta_json = api.project.get_meta(project_id)
    meta = sly.ProjectMeta.from_json(meta_json)
    # Ensure the four product-description tags exist (all ANY_STRING).
    product_id = meta.get_tag_meta("product_id")
    if product_id is None:
        meta = meta.add_tag_meta(
            sly.TagMeta("product_id", sly.TagValueType.ANY_STRING))
    category = meta.get_tag_meta("category")
    if category is None:
        meta = meta.add_tag_meta(
            sly.TagMeta("category", sly.TagValueType.ANY_STRING))
    brand = meta.get_tag_meta("brand")
    if brand is None:
        meta = meta.add_tag_meta(
            sly.TagMeta("brand", sly.TagValueType.ANY_STRING))
    item_name = meta.get_tag_meta("item_name")
    if item_name is None:
        meta = meta.add_tag_meta(
            sly.TagMeta("item_name", sly.TagValueType.ANY_STRING))
    # Push the meta back only when at least one tag was missing.
    if product_id is None or category is None or brand is None or item_name is None:
        api.project.update_meta(project_id, meta.to_json())
    sly_json.dump_json_file(meta.to_json(),
                            os.path.join(project_dir, "meta.json"))
    image_label_pairs = []
    for dataset in api.dataset.get_list(project_id):
        images = api.image.get_list(dataset.id)
        image_ids = [image.id for image in images]
        for batch in sly.batched(image_ids):
            annotations = api.annotation.download_batch(dataset.id, batch)
            for ann_info in annotations:
                # Cache the raw annotation JSON under <dataset_id>/<image_id>.
                ann_path = os.path.join(project_dir, str(dataset.id),
                                        str(ann_info.image_id) + sly.ANN_EXT)
                sly.fs.ensure_base_path(ann_path)
                sly_json.dump_json_file(ann_info.annotation, ann_path)
                # NOTE(review): this appends to image_ids while
                # sly.batched(image_ids) is being iterated — if batched()
                # lazily walks the same list, ids already present get
                # re-added and re-processed; confirm this is intentional.
                image_ids.append(ann_info.image_id)
                ann = sly.Annotation.from_json(ann_info.annotation, meta)
                label_indices = list(range(0, len(ann.labels)))
                # One (image_id, ann_path, label_index) triple per label.
                image_label_pairs.extend(
                    list(
                        zip([ann_info.image_id] * len(label_indices),
                            [ann_path] * len(label_indices),
                            label_indices)))
    sly_json.dump_json_file(
        image_label_pairs,
        os.path.join(project_dir, "image_labels_pairs.json"))
    # Initially every pair index is "free" (not yet handled).
    sly_json.dump_json_file(list(range(len(image_label_pairs))),
                            os.path.join(project_dir, "free_pairs.json"))
    return project_dir
# Dataset selection from the modal state; the value arrives with single
# quotes, so normalize to valid JSON before parsing.
SELECTED_DATASETS = json.loads(
    os.environ["modal.state.selectedDatasets"].replace("'", '"'))
ALL_DATASETS = os.getenv("modal.state.allDatasets").lower() in ('true', '1', 't')
if ALL_DATASETS:
    SELECTED_DATASETS = [
        dataset.name for dataset in api.dataset.get_list(PROJECT_ID)
    ]

need_download_threshold = 0.15

# Local working directories for downloaded videos and extracted frames.
storage_dir = os.path.join(my_app.data_dir, "sly_base_sir")
mkdir(storage_dir, True)
video_dir = os.path.join(storage_dir, "video")
mkdir(video_dir)
img_dir = os.path.join(storage_dir, "images")
mkdir(img_dir)

project = api.project.get_info_by_id(PROJECT_ID)
if project is None:
    # BUG FIX: the original formatted project.name here, but project is
    # None on this branch, so it raised AttributeError instead of the
    # intended RuntimeError. Report the id we looked up with.
    raise RuntimeError("Project id={!r} not found".format(PROJECT_ID))
if project.type != str(sly.ProjectType.VIDEOS):
    raise TypeError("Project type is {!r}, but have to be {!r}".format(
        project.type, sly.ProjectType.VIDEOS))

meta_json = api.project.get_meta(project.id)
meta = sly.ProjectMeta.from_json(meta_json)
# Ensure an object-level numeric "object_id" tag meta exists locally.
if "object_id" not in [tag.name for tag in meta.tag_metas]:
    vobj_id_tag_meta = sly.TagMeta(
        name="object_id",
        value_type=sly.TagValueType.ANY_NUMBER,
        applicable_to=sly.TagApplicableTo.OBJECTS_ONLY)
    meta = meta.add_tag_meta(vobj_id_tag_meta)

if OPTIONS == "annotated" and len(meta.obj_classes) == 0 and len(meta.tag_metas) == 0:
    raise ValueError(
        "Nothing to convert, there are no tags and classes in project {!r}".format(
            project.name))
import supervisely_lib as sly
from tqdm import tqdm
import random
import os

# Source/destination project configuration for the DTL segmentation demo.
team_name = 'max'
workspace_name = 'test_dtl_segmentation'
src_project_name = 'lemons_annotated'
dst_project_name = 'lemons_annotated_segmentation'
# Fraction of samples tagged for validation; each image is multiplied 5x.
validation_portion = 0.05
image_multiplier = 5

# Background class and the valueless train/val split tags for the output.
class_bg = sly.ObjClass('bg', sly.Rectangle)
tag_meta_train = sly.TagMeta('train', sly.TagValueType.NONE)
tag_meta_val = sly.TagMeta('val', sly.TagValueType.NONE)

# Connect to the Supervisely instance with credentials from the env.
address = os.environ['SERVER_ADDRESS']
token = os.environ['API_TOKEN']
print("Server address: ", address)
print("Your API token: ", token)
api = sly.Api(address, token)

# Resolve the working team/workspace context by name.
team = api.team.get_info_by_name(team_name)
workspace = api.workspace.get_info_by_name(team.id, workspace_name)
print("Current context: Team {!r}, Workspace {!r}".format(
    team.name, workspace.name))
def transform(api: sly.Api, task_id, context, state, app_logger):
    """Import the movie-poster CSV as a tagged image project.

    Creates a project/dataset, derives one valueless tag meta per genre
    found in the CSV, downloads each poster image, and uploads images plus
    genre-tag annotations in batches of 50. Posters that fail to download
    are logged and skipped (best-effort).
    """
    storage_dir = my_app.data_dir
    project = api.project.create(WORKSPACE_ID, PROJECT_NAME,
                                 change_name_if_conflict=True)
    dataset = api.dataset.create(project.id, DATASET_NAME,
                                 change_name_if_conflict=True)
    local_file = os.path.join(storage_dir,
                              sly.fs.get_file_name_with_ext(INPUT_FILE))
    api.file.download(TEAM_ID, INPUT_FILE, local_file)

    # Collect all rows and the union of genre names in one pass.
    tag_names = set()
    movies_info = []
    with open(local_file, encoding="ISO-8859-1") as f:
        reader = csv.DictReader(f)
        for row in reader:
            movies_info.append(row)
            tag_names.update(parse_genres(row["Genre"]))

    tags_arr = [
        sly.TagMeta(name=tag_name, value_type=sly.TagValueType.NONE)
        for tag_name in tag_names
    ]
    project_meta = sly.ProjectMeta(tag_metas=sly.TagMetaCollection(
        items=tags_arr))
    api.project.update_meta(project.id, project_meta.to_json())

    movies_info_len = len(movies_info)
    movies_info_len_digits = len(str(movies_info_len))
    batch_size = 50
    progress = sly.Progress('Uploading images', movies_info_len, app_logger)
    for batch_idx, batch in enumerate(
            sly._utils.batched(movies_info, batch_size)):
        image_paths = []
        image_names = []
        image_metas = []
        csv_rows = []
        for idx, csv_row in enumerate(batch):
            image_url = csv_row["Poster"]
            cur_img_ext = os.path.splitext(image_url)[1]
            # Zero-pad the global index so names sort lexicographically.
            cur_img_idx = str(batch_idx * batch_size + idx + 1).rjust(
                movies_info_len_digits, '0')
            image_name = f"{cur_img_idx}{cur_img_ext}"
            local_path = os.path.join(storage_dir, image_name)
            try:
                download_file(image_url, local_path, app_logger,
                              batch_idx * batch_size + idx, movies_info_len)
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Best-effort skip is kept
                # but restricted to ordinary exceptions; the message also
                # had an unbalanced parenthesis.
                app_logger.warn(
                    f"Couldn't download image: (row={batch_idx*batch_size+idx}, url={image_url})"
                )
                continue
            csv_rows.append(csv_row)
            image_paths.append(local_path)
            image_names.append(image_name)
            image_metas.append({
                "Title": csv_row["Title"],
                "imdbId": csv_row["imdbId"],
                "IMDB Score": csv_row["IMDB Score"],
                "Imdb Link": csv_row["Imdb Link"].replace('title/tt',
                                                          'title/tt0')
            })

        images = api.image.upload_paths(dataset.id, image_names, image_paths,
                                        metas=image_metas)

        # Build one annotation per successfully uploaded image that has
        # at least one genre tag.
        cur_anns = []
        for image, csv_row in zip(images, csv_rows):
            tags_arr = []
            image_tags = parse_genres(csv_row["Genre"])
            if len(image_tags) == 0:
                continue
            for image_tag in image_tags:
                tag_meta = project_meta.get_tag_meta(image_tag)
                tags_arr.append(sly.Tag(tag_meta))
            tags_arr = sly.TagCollection(items=tags_arr)
            ann = sly.Annotation(img_size=(image.height, image.width),
                                 img_tags=tags_arr)
            cur_anns.append((image.id, ann))
        if len(cur_anns) > 0:
            img_ids = [img_id for img_id, ann in cur_anns]
            anns = [ann for img_id, ann in cur_anns]
            api.annotation.upload_anns(img_ids, anns)
        progress.iters_done_report(len(batch))

    api.task.set_output_project(task_id, project.id, project.name)
    my_app.stop()
# Fail fast if the project lookup (performed earlier) returned nothing.
if PROJECT_INFO is None:
    raise RuntimeError(f"Project id={PROJECT_ID} not found")
META = sly.ProjectMeta.from_json(app.public_api.project.get_meta(PROJECT_ID))

ALL_IMAGES_INFO = {}  # image id -> image info
IMAGE_PATH = {}  # image id -> local path
PRODUCTS = defaultdict(lambda: defaultdict(list))  # tag name (i.e. product-id) -> image id -> list of labels

# for debug
vis_dir = os.path.join(app.data_dir, "vis_images")
sly.fs.mkdir(vis_dir)
sly.fs.clean_dir(vis_dir)  # good for debug

# Output class and the colored split tags used for the result project.
RESULT_CLASS = sly.ObjClass("product", sly.Bitmap, [0, 0, 255])
TRAIN_TAG = sly.TagMeta("train", sly.TagValueType.NONE, color=[0, 255, 0])
VAL_TAG = sly.TagMeta("val", sly.TagValueType.NONE, color=[255, 255, 0])
PRODUCT_TAGS = sly.TagMetaCollection()


def validate_project_meta():
    """Raise ValueError unless META contains at least one class whose
    geometry is polygon or bitmap (the only types this app can process)."""
    global META
    if len(META.obj_classes) == 0:
        raise ValueError("Project should have at least one class")
    cnt_valid_classes = 0
    for obj_class in META.obj_classes:
        obj_class: sly.ObjClass
        if obj_class.geometry_type in [sly.Polygon, sly.Bitmap]:
            cnt_valid_classes += 1
    if cnt_valid_classes == 0:
        raise ValueError("Project should have at least one class of type polygon or bitmap")
# Upload every local image by hash and register it in the dataset.
for img_path in img_paths:
    img_hash = api.image.upload_path(img_path)
    image_info = api.image.add(ds.id, sly.fs.get_file_name(img_path), img_hash)
    print('Image (id={}, name={}) has been sucessfully added'.format(image_info.id, image_info.name))
print("Number of images in created projects: ", api.project.get_images_count(project.id))

# Define object classes (one per geometry type, each with a fixed color).
class_person = sly.ObjClass('person', sly.Rectangle, color=[255, 0, 0])
class_car = sly.ObjClass('car', sly.Polygon, color=[0, 255, 0])
class_road = sly.ObjClass('road', sly.Bitmap, color=[0, 0, 255])
obj_class_collection = sly.ObjClassCollection([class_person, class_car, class_road])

# Define tags for images.
tagmeta_weather = sly.TagMeta(name='weather',
                              value_type=sly.TagValueType.ONEOF_STRING,
                              possible_values=['rain', 'sun', 'cloud'],
                              color=[153, 0, 153])
tagmeta_annotate = sly.TagMeta('to_annotation', sly.TagValueType.NONE)

# Define tags for objects.
tagmeta_vehicle_type = sly.TagMeta('vehicle_type',
                                   sly.TagValueType.ONEOF_STRING,
                                   ['sedan', 'suv', 'hatchback'])
tagmeta_confidence = sly.TagMeta('confidence', sly.TagValueType.ANY_NUMBER)
tagmeta_collection = sly.TagMetaCollection(
    [tagmeta_weather, tagmeta_annotate, tagmeta_vehicle_type, tagmeta_confidence])

# Combine everything into a project meta and push it to the server.
meta = sly.ProjectMeta(obj_class_collection, tagmeta_collection)
print(meta)
api.project.update_meta(project.id, meta.to_json())
def _load_train_config(self):
    """Create the numeric confidence tag meta from the model config,
    then delegate the rest of the loading to the base class."""
    self.confidence_tag_meta = sly.TagMeta(self._config['confidence_tag_name'],
                                           sly.TagValueType.ANY_NUMBER)
    super()._load_train_config()
def import_cityscapes(api: sly.Api, task_id, context, state, app_logger):
    # Purpose: download a Cityscapes archive (directory or single tar),
    # extract it, convert every *_polygons.json annotation into Supervisely
    # format, and upload images + annotations into a new project.
    tag_metas = sly.TagMetaCollection()
    obj_classes = sly.ObjClassCollection()
    dataset_names = []
    storage_dir = my_app.data_dir
    # Resolve the archive path and extraction layout depending on whether
    # the input is a team-files directory or a single file.
    if INPUT_DIR:
        cur_files_path = INPUT_DIR
        extract_dir = os.path.join(
            storage_dir, str(Path(cur_files_path).parent).lstrip("/"))
        input_dir = os.path.join(extract_dir, Path(cur_files_path).name)
        archive_path = os.path.join(
            storage_dir,
            cur_files_path + ".tar")  # cur_files_path.split("/")[-2] + ".tar"
        project_name = Path(cur_files_path).name
    else:
        cur_files_path = INPUT_FILE
        extract_dir = os.path.join(storage_dir, get_file_name(cur_files_path))
        archive_path = os.path.join(storage_dir,
                                    get_file_name_with_ext(cur_files_path))
        project_name = get_file_name(INPUT_FILE)
        input_dir = os.path.join(storage_dir,
                                 get_file_name(cur_files_path))  # extract_dir
    api.file.download(TEAM_ID, cur_files_path, archive_path)
    if tarfile.is_tarfile(archive_path):
        with tarfile.open(archive_path) as archive:
            # NOTE(review): extractall on an untrusted archive is vulnerable
            # to path traversal — consider validating member paths first.
            archive.extractall(extract_dir)
    else:
        # NOTE(review): .format() has no {} placeholder in this message, so
        # the file name is silently dropped from the error text.
        raise Exception("No such file".format(INPUT_FILE))
    new_project = api.project.create(WORKSPACE_ID, project_name,
                                     change_name_if_conflict=True)
    # Split folder names directly under gtFine (e.g. train/val/test).
    tags_template = os.path.join(input_dir, "gtFine", "*")
    tags_paths = glob.glob(tags_template)
    tags = [os.path.basename(tag_path) for tag_path in tags_paths]
    # Decide whether the train split must be re-sampled locally.
    # NOTE(review): both branches assign True, so the elif is redundant —
    # confirm whether the second condition was meant to differ.
    if train_tag in tags and val_tag not in tags:
        split_train = True
    elif trainval_tag in tags and val_tag not in tags:
        split_train = True
    else:
        split_train = False
    search_fine = os.path.join(input_dir, "gtFine", "*", "*",
                               "*_gt*_polygons.json")
    files_fine = glob.glob(search_fine)
    files_fine.sort()
    search_imgs = os.path.join(input_dir, "leftImg8bit", "*", "*",
                               "*_leftImg8bit" + IMAGE_EXT)
    files_imgs = glob.glob(search_imgs)
    files_imgs.sort()
    if len(files_fine) == 0 or len(files_imgs) == 0:
        raise Exception('Input cityscapes format not correct')
    samples_count = len(files_fine)
    progress = sly.Progress('Importing images', samples_count)
    images_pathes_for_compare = []
    # Per-dataset accumulators, keyed by city/dataset name.
    images_pathes = {}
    images_names = {}
    anns_data = {}
    ds_name_to_id = {}
    if samples_count > 2:
        random_train_indexes = get_split_idxs(samples_count, samplePercent)
    for idx, orig_ann_path in enumerate(files_fine):
        parent_dir, json_filename = os.path.split(
            os.path.abspath(orig_ann_path))
        dataset_name = os.path.basename(parent_dir)
        if dataset_name not in dataset_names:
            # First time this dataset is seen: create it remotely and
            # initialise its accumulators.
            dataset_names.append(dataset_name)
            ds = api.dataset.create(new_project.id, dataset_name,
                                    change_name_if_conflict=True)
            ds_name_to_id[dataset_name] = ds.id
            images_pathes[dataset_name] = []
            images_names[dataset_name] = []
            anns_data[dataset_name] = []
        orig_img_path = json_path_to_image_path(orig_ann_path)
        images_pathes_for_compare.append(orig_img_path)
        if not file_exists(orig_img_path):
            logger.warn(
                'Image for annotation {} not found is dataset {}'.format(
                    orig_ann_path.split('/')[-1], dataset_name))
            continue
        images_pathes[dataset_name].append(orig_img_path)
        images_names[dataset_name].append(
            sly.io.fs.get_file_name_with_ext(orig_img_path))
        # The split name is the directory two levels above the annotation.
        tag_path = os.path.split(parent_dir)[0]
        train_val_tag = os.path.basename(tag_path)
        if split_train is True and samples_count > 2:
            # Re-assign train/val membership by the sampled indexes.
            if (train_val_tag == train_tag) or (train_val_tag == trainval_tag):
                if idx in random_train_indexes:
                    train_val_tag = train_tag
                else:
                    train_val_tag = val_tag
        # tag_meta = sly.TagMeta(train_val_tag, sly.TagValueType.NONE)
        tag_meta = sly.TagMeta('split', sly.TagValueType.ANY_STRING)
        if not tag_metas.has_key(tag_meta.name):
            tag_metas = tag_metas.add(tag_meta)
        # tag = sly.Tag(tag_meta)
        tag = sly.Tag(meta=tag_meta, value=train_val_tag)
        # NOTE(review): json.load(open(...)) leaves the file handle to the
        # garbage collector; a `with` block would close it deterministically.
        json_data = json.load(open(orig_ann_path))
        ann = sly.Annotation.from_img_path(orig_img_path)
        for obj in json_data['objects']:
            class_name = obj['label']
            if class_name == 'out of roi':
                # Convention here: first 5 points form the exterior, the
                # remainder a single interior hole — TODO confirm against
                # the Cityscapes 'out of roi' annotation spec.
                polygon = obj['polygon'][:5]
                interiors = [obj['polygon'][5:]]
            else:
                polygon = obj['polygon']
                if len(polygon) < 3:
                    logger.warn(
                        'Polygon must contain at least 3 points in ann {}, obj_class {}'
                        .format(orig_ann_path, class_name))
                    continue
                interiors = []
            interiors = [convert_points(interior) for interior in interiors]
            polygon = sly.Polygon(convert_points(polygon), interiors)
            if city_classes_to_colors.get(class_name, None):
                obj_class = sly.ObjClass(
                    name=class_name,
                    geometry_type=sly.Polygon,
                    color=city_classes_to_colors[class_name])
            else:
                # Unknown class: generate a fresh distinct color and
                # remember it so later classes avoid it.
                new_color = generate_rgb(city_colors)
                city_colors.append(new_color)
                obj_class = sly.ObjClass(name=class_name,
                                         geometry_type=sly.Polygon,
                                         color=new_color)
            ann = ann.add_label(sly.Label(polygon, obj_class))
            if not obj_classes.has_key(class_name):
                obj_classes = obj_classes.add(obj_class)
        ann = ann.add_tag(tag)
        anns_data[dataset_name].append(ann)
        progress.iter_done_report()
    out_meta = sly.ProjectMeta(obj_classes=obj_classes, tag_metas=tag_metas)
    api.project.update_meta(new_project.id, out_meta.to_json())
    # Upload images and their annotations dataset by dataset.
    for ds_name, ds_id in ds_name_to_id.items():
        dst_image_infos = api.image.upload_paths(ds_id, images_names[ds_name],
                                                 images_pathes[ds_name])
        dst_image_ids = [img_info.id for img_info in dst_image_infos]
        api.annotation.upload_anns(dst_image_ids, anns_data[ds_name])
    stat_dct = {
        'samples': samples_count,
        'src_ann_cnt': len(files_fine),
        'src_img_cnt': len(files_imgs)
    }
    logger.info('Found img/ann pairs.', extra=stat_dct)
    # Report source images that never matched an annotation file.
    images_without_anns = set(files_imgs) - set(images_pathes_for_compare)
    if len(images_without_anns) > 0:
        logger.warn('Found source images without corresponding annotations:')
        for im_path in images_without_anns:
            logger.warn('Annotation not found {}'.format(im_path))
    logger.info('Found classes.',
                extra={
                    'cnt': len(obj_classes),
                    'classes':
                    sorted([obj_class.name for obj_class in obj_classes])
                })
    logger.info('Created tags.',
                extra={
                    'cnt': len(out_meta.tag_metas),
                    'tags':
                    sorted([tag_meta.name for tag_meta in out_meta.tag_metas])
                })
    my_app.stop()
#### End settings. #### # Download remote project src_project_info = api.project.get_info_by_name(WORKSPACE_ID, src_project_name) src_project_dir = os.path.join(sly.TaskPaths.DATA_DIR, src_project_name) sly.logger.info('DOWNLOAD_PROJECT', extra={'title': src_project_name}) sly.download_project(api, src_project_info.id, src_project_dir, log_progress=True) sly.logger.info('Project {!r} has been successfully downloaded. Starting to process.'.format(src_project_name)) src_project = sly.Project(directory=src_project_dir, mode=sly.OpenMode.READ) dst_project_dir = os.path.join(sly.TaskPaths.OUT_PROJECTS_DIR, dst_project_name) dst_project = sly.Project(directory=dst_project_dir, mode=sly.OpenMode.CREATE) tag_meta_train = sly.TagMeta(train_tag_name, sly.TagValueType.NONE) tag_meta_val = sly.TagMeta(val_tag_name, sly.TagValueType.NONE) bbox_class_mapping = { obj_class.name: ( obj_class if (obj_class.geometry_type == sly.Rectangle) else sly.ObjClass(obj_class.name + '_bbox', sly.Rectangle, color=obj_class.color)) for obj_class in src_project.meta.obj_classes} dst_meta = src_project.meta.clone( obj_classes=sly.ObjClassCollection(bbox_class_mapping.values()), tag_metas=src_project.meta.tag_metas.add_items([tag_meta_train, tag_meta_val])) dst_project.set_meta(dst_meta) crop_side_fraction = (min_crop_side_fraction, max_crop_side_fraction)
def _load_train_config(self):
    """Create the numeric confidence tag meta named by the
    CONFIDENCE_TAG_NAME config key, then delegate to the base loader."""
    self.confidence_tag_meta = sly.TagMeta(self._config[CONFIDENCE_TAG_NAME],
                                           sly.TagValueType.ANY_NUMBER)
    super()._load_train_config()
TEAM_ID = int(os.environ['context.teamId']) WORKSPACE_ID = int(os.environ['context.workspaceId']) PROJECT_ID = int(os.environ['modal.state.slyProjectId']) my_app = sly.AppService() PROJECT = None TOTAL_IMAGES_COUNT = None META_ORIGINAL: sly.ProjectMeta = None META_RESULT: sly.ProjectMeta = None TRAIN_NAME = 'train' TRAIN_COLOR = [0, 255, 0] #RGB VAL_NAME = 'val' VAL_COLOR = [255, 128, 0] TRAIN_TAG_META = sly.TagMeta(TRAIN_NAME, sly.TagValueType.NONE, color=TRAIN_COLOR) VAL_TAG_META = sly.TagMeta(VAL_NAME, sly.TagValueType.NONE, color=VAL_COLOR) def sample_images(api, datasets, train_images_count): all_images = [] for dataset in datasets: images = api.image.get_list(dataset.id) all_images.extend(images) cnt_images = len(all_images) shuffled_images = all_images.copy() random.shuffle(shuffled_images) train_images = shuffled_images[:int(train_images_count)] val_images = shuffled_images[int(train_images_count):]