Example #1
def get_tags_from_dicom_object(dicom_obj, requested_tags):
    results = []
    for tag_name in requested_tags:
        tag_value = getattr(dicom_obj, tag_name, None)
        if tag_value is not None:
            tag_meta = sly.TagMeta(tag_name, sly.TagValueType.ANY_STRING)
            tag = sly.Tag(tag_meta, str(tag_value))
            results.append((tag, tag_meta))
    return results
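
A quick, hedged usage sketch for the helper above: the pydicom import, the file name, the attribute names, and the SDK import alias are illustrative assumptions, not part of the original snippet.

import pydicom
import supervisely_lib as sly

dicom_obj = pydicom.dcmread("study.dcm")  # hypothetical DICOM file
pairs = get_tags_from_dicom_object(dicom_obj, ["PatientID", "Modality"])

# Split the (tag, tag_meta) pairs: metas go into the project meta,
# tags go onto an image annotation.
tags = [tag for tag, _ in pairs]
tag_metas = [tag_meta for _, tag_meta in pairs]

meta = sly.ProjectMeta(tag_metas=sly.TagMetaCollection(items=tag_metas))
ann = sly.Annotation(img_size=(512, 512), img_tags=sly.TagCollection(items=tags))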
Example #2
def _generate_sample_annotation(self, orig_img_path, orig_ann_path, train_val_tag):
    try:
        tag_meta = sly.TagMeta(train_val_tag, sly.TagValueType.NONE)
        if not self.tag_metas.has_key(tag_meta.name):
            self.tag_metas = self.tag_metas.add(tag_meta)
        tag = sly.Tag(tag_meta)
        ann = self._load_cityscapes_annotation(orig_img_path, orig_ann_path)
        ann = ann.add_tag(tag)
        return ann
    except Exception:
        raise AnnotationConvertionException()  # ok, the caller may continue with another sample
def convert_tags(tags, prop_container, frame_container, frame_indices=None):
    for video_tag in tags:
        tag = sly.Tag(video_tag.meta,
                      value=video_tag.value,
                      labeler_login=video_tag.labeler_login)
        if video_tag.frame_range is None:
            prop_container.append(tag)
        else:
            for frame_index in range(video_tag.frame_range[0],
                                     video_tag.frame_range[1] + 1):
                frame_container[frame_index].append(tag)
                if frame_indices is not None:
                    frame_indices.append(frame_index)
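
A minimal calling sketch, assuming video_ann is an existing sly.VideoAnnotation whose tags may or may not carry frame ranges; the container names below are illustrative.

from collections import defaultdict

property_tags = []              # video-level tags (no frame range)
frame_tags = defaultdict(list)  # frame index -> tags active on that frame
tagged_frames = []

convert_tags(video_ann.tags, property_tags, frame_tags, frame_indices=tagged_frames)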
def assign_csv_row_as_tags(image_id, image_name, res_ann, row):
    new_tags = []
    for k, v in row.items():
        tag_meta = RES_META.get_tag_meta(k)
        if tag_meta is None:
            raise RuntimeError(
                "Tag {!r} not found in resulting project {!r}".format(
                    k, RES_PROJECT.name))
        existing_tag = res_ann.img_tags.get(k)
        if existing_tag is None:
            new_tags.append(sly.Tag(tag_meta, value=v))
        else:
            if RESOLVE == "skip":
                continue
            elif RESOLVE == "raise":
                raise KeyError("Image {!r} (id={}): tag {!r} exists".format(
                    image_name, image_id, k))
            elif RESOLVE == "replace":
                res_ann = res_ann.delete_tag_by_name(k)
                new_tags.append(sly.Tag(tag_meta, value=v))

    res_ann = res_ann.add_tags(new_tags)
    return res_ann
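
A hedged sketch of how this helper might be driven from a CSV file; the column layout, the name_to_image_info / name_to_annotation lookups, and the upload step are assumptions about the surrounding script (RES_META, RES_PROJECT, and RESOLVE are its module-level globals).

import csv

with open("image_tags.csv") as f:                      # hypothetical CSV: one row per image
    for row in csv.DictReader(f):
        image_name = row.pop("image_name")             # assumed key column
        image_info = name_to_image_info[image_name]    # lookups assumed to be built earlier
        ann = name_to_annotation[image_name]
        ann = assign_csv_row_as_tags(image_info.id, image_name, ann, row)
        api.annotation.upload_ann(image_info.id, ann)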
Example #5
def yolo_preds_to_sly_rects(detections, idx_to_class, confidence_tag_meta):
    labels = []
    for classId, confidence, box in detections:
        xmin = box[0] - box[2] / 2
        ymin = box[1] - box[3] / 2
        xmax = box[0] + box[2] / 2
        ymax = box[1] + box[3] / 2
        rect = sly.Rectangle(round(ymin), round(xmin), round(ymax), round(xmax))

        label = sly.Label(rect, idx_to_class[classId])

        confidence_tag = sly.Tag(confidence_tag_meta, value=round(float(confidence), 4))
        label = label.add_tag(confidence_tag)
        labels.append(label)
    return labels
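
Downstream, the returned labels are typically wrapped into an annotation sized to the source image; in this sketch image (an RGB numpy array), detections, idx_to_class, and confidence_tag_meta are assumed to come from the surrounding YOLO code.

labels = yolo_preds_to_sly_rects(detections, idx_to_class, confidence_tag_meta)
ann = sly.Annotation(img_size=image.shape[:2], labels=labels)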
Example #6
def inference(model, half, device, imgsz, stride, image: np.ndarray, meta: sly.ProjectMeta,
              conf_thres=0.25, iou_thres=0.45, augment=False, agnostic_nms=False,
              debug_visualization=False) -> dict:
    names = model.module.names if hasattr(model, 'module') else model.names

    img0 = image # RGB
    # Padded resize
    img = letterbox(img0, new_shape=imgsz, stride=stride)[0]
    img = img.transpose(2, 0, 1)  # to 3x416x416
    img = np.ascontiguousarray(img)

    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)

    inf_out = model(img, augment=augment)[0]

    # Apply NMS
    labels = []
    output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, agnostic=agnostic_nms)
    for i, det in enumerate(output):
        if det is not None and len(det):
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()

            for *xyxy, conf, cls in reversed(det):
                top, left, bottom, right = int(xyxy[1]), int(xyxy[0]), int(xyxy[3]), int(xyxy[2])
                rect = sly.Rectangle(top, left, bottom, right)
                obj_class = meta.get_obj_class(names[int(cls)])
                tag = sly.Tag(meta.get_tag_meta(CONFIDENCE), round(float(conf), 4))
                label = sly.Label(rect, obj_class, sly.TagCollection([tag]))
                labels.append(label)

    height, width = img0.shape[:2]
    ann = sly.Annotation(img_size=(height, width), labels=labels)

    if debug_visualization is True:
        # visualize for debug purposes
        vis = np.copy(img0)
        ann.draw_contour(vis, thickness=2)
        sly.image.write("vis.jpg", vis)

    return ann.to_json()
Example #7
def visualize_dets(img_, output_, save_path_, names_, meta_):
    labels = []
    for i, det in enumerate(output_):
        if det is not None and len(det):
            for *xyxy, conf, cls in reversed(det):
                left, top, right, bottom = int(xyxy[0]), int(xyxy[1]), int(
                    xyxy[2]), int(xyxy[3])
                rect = sly.Rectangle(top, left, bottom, right)
                obj_class = meta_.get_obj_class(names_[int(cls)])
                tag = sly.Tag(meta_.get_tag_meta("confidence"),
                              round(float(conf), 4))
                label = sly.Label(rect, obj_class, sly.TagCollection([tag]))
                labels.append(label)

    width, height = img_.size
    ann = sly.Annotation(img_size=(height, width), labels=labels)

    vis = np.copy(img_)
    ann.draw_contour(vis, thickness=2)
    sly.image.write(save_path_, vis)
    return vis
def process(img, ann):
    original = (img, ann)
    flipped = sly.aug.fliplr(*original)

    crops = []
    for cur_img, cur_ann in [original, flipped]:
        for i in range(image_multiplier):
            res_img, res_ann = sly.aug.random_crop_fraction(
                cur_img, cur_ann, (70, 90), (70, 90))
            crops.append((res_img, res_ann))

    results = []
    for cur_img, cur_ann in [original, flipped, *crops]:
        bg_label = sly.Label(sly.Rectangle.from_array(cur_img), class_bg)
        cur_ann = cur_ann.add_label(bg_label)
        tag = sly.Tag(tag_meta_val if random.random() <= validation_portion
                      else tag_meta_train)
        cur_ann = cur_ann.add_tag(tag)
        results.append((cur_img, cur_ann))

    return results
Example #9
if total_images <= 1:
    raise RuntimeError('Need at least 2 images in a project to prepare a training set (at least 1 each for training '
                       'and validation).')
is_train_image = sly_dataset.partition_train_val(total_images, validation_fraction)

# Iterate over datasets and items.
image_idx = 0
for dataset in src_project:
    sly.logger.info('Dataset processing', extra={'dataset_name': dataset.name})
    dst_dataset = dst_project.create_dataset(dataset.name)

    for item_name in dataset:
        item_paths = dataset.get_item_paths(item_name)
        img = sly.image.read(item_paths.img_path)
        ann = sly.Annotation.load_json_file(item_paths.ann_path, src_project.meta)

        # Decide whether this image and its crops should go to a train or validation fold.
        tag = sly.Tag(tag_meta_train) if is_train_image[image_idx] else sly.Tag(tag_meta_val)
        ann = ann.add_tag(tag)

        # Convert all the objects to bounding boxes for detection.
        bbox_labels = [
            label.clone(obj_class=bbox_class_mapping[label.obj_class.name], geometry=label.geometry.to_bbox())
            for label in ann.labels]
        ann = ann.clone(labels=bbox_labels)

        augmented_items = sly.aug.flip_add_random_crops(
            img, ann, crops_per_image, crop_side_fraction, crop_side_fraction)
        aug_imgs, aug_anns = zip(*augmented_items)

        names = sly.generate_names(item_name, len(augmented_items))
        for aug_name, aug_img, aug_ann in zip(names, aug_imgs, aug_anns):
            dst_dataset.add_item_np(item_name=aug_name, img=aug_img, ann=aug_ann)
def process_coco_dir(input_dir, project, project_meta, api, config_yaml_info,
                     app_logger):
    for dataset_type, dataset_path in config_yaml_info["datasets"]:
        tag_meta = project_meta.get_tag_meta(dataset_type)
        dataset_name = os.path.basename(dataset_path)

        images_list = sorted(
            sly.fs.list_files(dataset_path,
                              valid_extensions=sly.image.SUPPORTED_IMG_EXTS))
        if len(images_list) == 0:
            raise Exception(
                "Dataset: {!r} is empty. Check {!r} directory in project folder"
                .format(dataset_name, dataset_path))

        dataset = api.dataset.create(project.id,
                                     dataset_name,
                                     change_name_if_conflict=True)
        progress = sly.Progress("Processing {} dataset".format(dataset_name),
                                len(images_list), sly.logger)
        for batch in sly._utils.batched(images_list):
            cur_img_names = []
            cur_img_paths = []
            cur_anns = []

            for image_file_name in batch:
                image_name = os.path.basename(image_file_name)
                cur_img_names.append(image_name)
                cur_img_paths.append(image_file_name)
                ann_file_name = os.path.join(
                    input_dir, "labels", dataset_name,
                    "{}.txt".format(os.path.splitext(image_name)[0]))
                curr_img = sly.image.read(image_file_name)
                height, width = curr_img.shape[:2]

                labels_arr = []
                if os.path.isfile(ann_file_name):
                    with open(ann_file_name, "r") as f:
                        for idx, line in enumerate(f):
                            try:
                                label = parse_line(line, width, height,
                                                   project_meta,
                                                   config_yaml_info)
                                labels_arr.append(label)
                            except Exception as e:
                                app_logger.warn(
                                    e, {
                                        "filename": ann_file_name,
                                        "line": line,
                                        "line_num": idx
                                    })

                tags_arr = sly.TagCollection(items=[sly.Tag(tag_meta)])
                ann = sly.Annotation(img_size=(height, width),
                                     labels=labels_arr,
                                     img_tags=tags_arr)
                cur_anns.append(ann)

            img_infos = api.image.upload_paths(dataset.id, cur_img_names,
                                               cur_img_paths)
            img_ids = [x.id for x in img_infos]

            api.annotation.upload_anns(img_ids, cur_anns)
            progress.iters_done_report(len(batch))
def add_object_id_tag(vobject_id, prop_container):
    vobj_id_tag = sly.Tag(g.vobj_id_tag_meta, value=vobject_id)
    prop_container.append(vobj_id_tag)
def transform(api: sly.Api, task_id, context, state, app_logger):
    storage_dir = my_app.data_dir

    project = api.project.create(WORKSPACE_ID,
                                 PROJECT_NAME,
                                 change_name_if_conflict=True)
    dataset = api.dataset.create(project.id,
                                 DATASET_NAME,
                                 change_name_if_conflict=True)

    local_file = os.path.join(storage_dir,
                              sly.fs.get_file_name_with_ext(INPUT_FILE))
    api.file.download(TEAM_ID, INPUT_FILE, local_file)

    tag_names = set()
    movies_info = []
    with open(local_file, encoding="ISO-8859-1") as f:
        reader = csv.DictReader(f)
        for row in reader:
            movies_info.append(row)
            tag_names.update(parse_genres(row["Genre"]))

    tags_arr = [
        sly.TagMeta(name=tag_name, value_type=sly.TagValueType.NONE)
        for tag_name in tag_names
    ]
    project_meta = sly.ProjectMeta(tag_metas=sly.TagMetaCollection(
        items=tags_arr))
    api.project.update_meta(project.id, project_meta.to_json())
    movies_info_len = len(movies_info)
    movies_info_len_digits = len(str(movies_info_len))
    batch_size = 50

    progress = sly.Progress('Uploading images', movies_info_len, app_logger)
    for batch_idx, batch in enumerate(
            sly._utils.batched(movies_info, batch_size)):
        image_paths = []
        image_names = []
        image_metas = []
        csv_rows = []
        for idx, csv_row in enumerate(batch):
            image_url = csv_row["Poster"]
            cur_img_ext = os.path.splitext(image_url)[1]
            cur_img_idx = str(batch_idx * batch_size + idx + 1).rjust(
                movies_info_len_digits, '0')
            image_name = f"{cur_img_idx}{cur_img_ext}"
            local_path = os.path.join(storage_dir, image_name)

            try:
                download_file(image_url, local_path, app_logger,
                              batch_idx * batch_size + idx, movies_info_len)
            except Exception:
                app_logger.warn(
                    f"Couldn't download image (row={batch_idx * batch_size + idx}, url={image_url})"
                )
                continue

            csv_rows.append(csv_row)
            image_paths.append(local_path)
            image_names.append(image_name)
            image_metas.append({
                "Title": csv_row["Title"],
                "imdbId": csv_row["imdbId"],
                "IMDB Score": csv_row["IMDB Score"],
                "Imdb Link": csv_row["Imdb Link"].replace('title/tt', 'title/tt0')
            })

        images = api.image.upload_paths(dataset.id,
                                        image_names,
                                        image_paths,
                                        metas=image_metas)
        cur_anns = []
        for image, csv_row in zip(images, csv_rows):
            tags_arr = []
            image_tags = parse_genres(csv_row["Genre"])
            if len(image_tags) == 0:
                continue

            for image_tag in image_tags:
                tag_meta = project_meta.get_tag_meta(image_tag)
                tags_arr.append(sly.Tag(tag_meta))

            tags_arr = sly.TagCollection(items=tags_arr)
            ann = sly.Annotation(img_size=(image.height, image.width),
                                 img_tags=tags_arr)
            cur_anns.append((image.id, ann))

        if len(cur_anns) > 0:
            img_ids = [img_id for img_id, ann in cur_anns]
            anns = [ann for img_id, ann in cur_anns]
            api.annotation.upload_anns(img_ids, anns)

        progress.iters_done_report(len(batch))
    api.task.set_output_project(task_id, project.id, project.name)
    my_app.stop()
def import_cityscapes(api: sly.Api, task_id, context, state, app_logger):
    tag_metas = sly.TagMetaCollection()
    obj_classes = sly.ObjClassCollection()
    dataset_names = []

    storage_dir = my_app.data_dir
    if INPUT_DIR:
        cur_files_path = INPUT_DIR
        extract_dir = os.path.join(
            storage_dir,
            str(Path(cur_files_path).parent).lstrip("/"))
        input_dir = os.path.join(extract_dir, Path(cur_files_path).name)
        archive_path = os.path.join(
            storage_dir,
            cur_files_path + ".tar")  # cur_files_path.split("/")[-2] + ".tar"
        project_name = Path(cur_files_path).name
    else:
        cur_files_path = INPUT_FILE
        extract_dir = os.path.join(storage_dir, get_file_name(cur_files_path))
        archive_path = os.path.join(storage_dir,
                                    get_file_name_with_ext(cur_files_path))
        project_name = get_file_name(INPUT_FILE)
        input_dir = os.path.join(storage_dir,
                                 get_file_name(cur_files_path))  # extract_dir
    api.file.download(TEAM_ID, cur_files_path, archive_path)
    if tarfile.is_tarfile(archive_path):
        with tarfile.open(archive_path) as archive:
            archive.extractall(extract_dir)
    else:
        raise Exception("No such file: {}".format(INPUT_FILE))
    new_project = api.project.create(WORKSPACE_ID,
                                     project_name,
                                     change_name_if_conflict=True)
    tags_template = os.path.join(input_dir, "gtFine", "*")
    tags_paths = glob.glob(tags_template)
    tags = [os.path.basename(tag_path) for tag_path in tags_paths]
    if train_tag in tags and val_tag not in tags:
        split_train = True
    elif trainval_tag in tags and val_tag not in tags:
        split_train = True
    else:
        split_train = False
    search_fine = os.path.join(input_dir, "gtFine", "*", "*",
                               "*_gt*_polygons.json")
    files_fine = glob.glob(search_fine)
    files_fine.sort()
    search_imgs = os.path.join(input_dir, "leftImg8bit", "*", "*",
                               "*_leftImg8bit" + IMAGE_EXT)
    files_imgs = glob.glob(search_imgs)
    files_imgs.sort()
    if len(files_fine) == 0 or len(files_imgs) == 0:
        raise Exception('Input cityscapes format not correct')
    samples_count = len(files_fine)
    progress = sly.Progress('Importing images', samples_count)
    images_pathes_for_compare = []
    images_pathes = {}
    images_names = {}
    anns_data = {}
    ds_name_to_id = {}

    if samples_count > 2:
        random_train_indexes = get_split_idxs(samples_count, samplePercent)

    for idx, orig_ann_path in enumerate(files_fine):
        parent_dir, json_filename = os.path.split(
            os.path.abspath(orig_ann_path))
        dataset_name = os.path.basename(parent_dir)
        if dataset_name not in dataset_names:
            dataset_names.append(dataset_name)
            ds = api.dataset.create(new_project.id,
                                    dataset_name,
                                    change_name_if_conflict=True)
            ds_name_to_id[dataset_name] = ds.id
            images_pathes[dataset_name] = []
            images_names[dataset_name] = []
            anns_data[dataset_name] = []
        orig_img_path = json_path_to_image_path(orig_ann_path)
        images_pathes_for_compare.append(orig_img_path)
        if not file_exists(orig_img_path):
            logger.warn(
                'Image for annotation {} not found in dataset {}'.format(
                    orig_ann_path.split('/')[-1], dataset_name))
            continue
        images_pathes[dataset_name].append(orig_img_path)
        images_names[dataset_name].append(
            sly.io.fs.get_file_name_with_ext(orig_img_path))
        tag_path = os.path.split(parent_dir)[0]
        train_val_tag = os.path.basename(tag_path)
        if split_train is True and samples_count > 2:
            if (train_val_tag == train_tag) or (train_val_tag == trainval_tag):
                if idx in random_train_indexes:
                    train_val_tag = train_tag
                else:
                    train_val_tag = val_tag

        # tag_meta = sly.TagMeta(train_val_tag, sly.TagValueType.NONE)
        tag_meta = sly.TagMeta('split', sly.TagValueType.ANY_STRING)
        if not tag_metas.has_key(tag_meta.name):
            tag_metas = tag_metas.add(tag_meta)
        # tag = sly.Tag(tag_meta)
        tag = sly.Tag(meta=tag_meta, value=train_val_tag)
        json_data = json.load(open(orig_ann_path))
        ann = sly.Annotation.from_img_path(orig_img_path)
        for obj in json_data['objects']:
            class_name = obj['label']
            if class_name == 'out of roi':
                polygon = obj['polygon'][:5]
                interiors = [obj['polygon'][5:]]
            else:
                polygon = obj['polygon']
                if len(polygon) < 3:
                    logger.warn(
                        'Polygon must contain at least 3 points in ann {}, obj_class {}'
                        .format(orig_ann_path, class_name))
                    continue
                interiors = []
            interiors = [convert_points(interior) for interior in interiors]
            polygon = sly.Polygon(convert_points(polygon), interiors)
            if city_classes_to_colors.get(class_name, None):
                obj_class = sly.ObjClass(
                    name=class_name,
                    geometry_type=sly.Polygon,
                    color=city_classes_to_colors[class_name])
            else:
                new_color = generate_rgb(city_colors)
                city_colors.append(new_color)
                obj_class = sly.ObjClass(name=class_name,
                                         geometry_type=sly.Polygon,
                                         color=new_color)
            ann = ann.add_label(sly.Label(polygon, obj_class))
            if not obj_classes.has_key(class_name):
                obj_classes = obj_classes.add(obj_class)
        ann = ann.add_tag(tag)
        anns_data[dataset_name].append(ann)
        progress.iter_done_report()
    out_meta = sly.ProjectMeta(obj_classes=obj_classes, tag_metas=tag_metas)
    api.project.update_meta(new_project.id, out_meta.to_json())

    for ds_name, ds_id in ds_name_to_id.items():
        dst_image_infos = api.image.upload_paths(ds_id, images_names[ds_name],
                                                 images_pathes[ds_name])
        dst_image_ids = [img_info.id for img_info in dst_image_infos]
        api.annotation.upload_anns(dst_image_ids, anns_data[ds_name])

    stat_dct = {
        'samples': samples_count,
        'src_ann_cnt': len(files_fine),
        'src_img_cnt': len(files_imgs)
    }
    logger.info('Found img/ann pairs.', extra=stat_dct)
    images_without_anns = set(files_imgs) - set(images_pathes_for_compare)
    if len(images_without_anns) > 0:
        logger.warn('Found source images without corresponding annotations:')
        for im_path in images_without_anns:
            logger.warn('Annotation not found for image {}'.format(im_path))

    logger.info('Found classes.',
                extra={
                    'cnt': len(obj_classes),
                    'classes': sorted([obj_class.name for obj_class in obj_classes])
                })
    logger.info('Created tags.',
                extra={
                    'cnt': len(out_meta.tag_metas),
                    'tags': sorted([tag_meta.name for tag_meta in out_meta.tag_metas])
                })
    my_app.stop()
Example #14
total_images = sum(ds_info.images_count for ds_info in src_dataset_infos)

if total_images <= 1:
    raise RuntimeError('Need at least 2 images in a project to prepare a training set (at least 1 each for training '
                       'and validation).')
is_train_image = partition_train_val(total_images, validation_fraction)

batch_start_idx = 0
for src_dataset in src_dataset_infos:
    dst_dataset = api.dataset.create(dst_project.id, src_dataset.name, src_dataset.description)
    images = api.image.get_list(src_dataset.id)
    ds_progress = sly.Progress(
        'Tagging dataset: {!r}/{!r}'.format(src_project.name, src_dataset.name), total_cnt=len(images))
    for batch in sly.batched(images):
        image_ids = [image_info.id for image_info in batch]
        image_names = [image_info.name for image_info in batch]

        ann_infos = api.annotation.download_batch(src_dataset.id, image_ids)
        src_anns = [sly.Annotation.from_json(ann_info.annotation, dst_meta) for ann_info in ann_infos]
        anns_tagged = [ann.add_tag(sly.Tag(tag_meta_train) if is_train_image[image_idx] else sly.Tag(tag_meta_val))
                       for image_idx, ann in enumerate(src_anns, start=batch_start_idx)]
        anns_tagged_jsons = [ann.to_json() for ann in anns_tagged]

        dst_images = api.image.upload_ids(dst_dataset.id, image_names, image_ids)
        dst_image_ids = [dst_img_info.id for dst_img_info in dst_images]
        api.annotation.upload_jsons(dst_image_ids, anns_tagged_jsons)

        ds_progress.iters_done_report(len(batch))
        batch_start_idx += len(batch)

sly.logger.info('Project {!r} train/val tagging done. Result project: {!r}'.format(src_project.name, dst_project_name))
def generate(api: sly.Api, task_id, context, state, app_logger):
    global PRODUCT_TAGS
    products_count = len(PRODUCTS.keys())
    train_count = state["trainCount"]
    val_count = state["valCount"]
    total_count = products_count * (train_count + val_count)

    augs_settings = yaml.safe_load(state["augs"])
    augs.init_fg_augs(augs_settings)

    PRODUCT_TAGS = PRODUCT_TAGS.add_items([TRAIN_TAG, VAL_TAG])
    res_meta = sly.ProjectMeta(
        obj_classes=sly.ObjClassCollection([RESULT_CLASS]),
        tag_metas=PRODUCT_TAGS
    )
    res_project = api.project.create(WORKSPACE_ID, state["outputProjectName"], change_name_if_conflict=True)
    api.project.update_meta(res_project.id, res_meta.to_json())

    progress = sly.Progress("Generating", total_count)
    for product_id in PRODUCTS.keys():
        dataset = api.dataset.create(res_project.id, str(product_id))

        tag_meta = PRODUCT_TAGS.get(product_id)
        if tag_meta is None:
            raise ValueError(f"TagMeta {product_id} not found")

        # cache images for one product
        images = {}
        for image_id in PRODUCTS[product_id].keys():
            images[image_id] = sly.image.read(IMAGE_PATH[image_id])

        name_index = 0
        for batch in sly.batched([TRAIN_TAG] * train_count + [VAL_TAG] * val_count, batch_size=10):
            final_images = []
            final_anns = []
            final_names = []
            for tag in batch:
                image_id = random.choice(list(PRODUCTS[product_id].keys()))
                img = images[image_id]
                ann = random.choice(list(PRODUCTS[product_id][image_id]))

                label_image = None
                label_mask = None
                label_preview = None
                retry_count = 5
                for retry_idx in range(retry_count):
                    try:
                        label_image, label_mask, label_preview = \
                            try_generate_example(
                                augs_settings,
                                augs,
                                preview=True,
                                product_id=product_id,
                                img=img,
                                ann=ann
                            )
                        break
                    except Exception as e:
                        if retry_idx == retry_count - 1:
                            raise e
                        continue

                res_ann = sly.Annotation(label_image.shape[:2],
                                         labels=[label_preview],
                                         img_tags=sly.TagCollection([tag, sly.Tag(tag_meta)]))
                final_images.append(label_image)
                final_anns.append(res_ann)
                final_names.append("{:05d}.jpg".format(name_index))
                name_index += 1

            new_images = api.image.upload_nps(dataset.id, final_names, final_images)
            new_image_ids = [image_info.id for image_info in new_images]
            api.annotation.upload_anns(new_image_ids, final_anns)
            progress.iters_done_report(len(batch))
            refresh_progress(api, task_id, progress)
    refresh_progress(api, task_id, progress)
    res_project = api.project.get_info_by_id(res_project.id)
    fields = [
        {"field": "data.started", "payload": False},
        {"field": "data.resProjectId", "payload": res_project.id},
        {"field": "data.resProjectName", "payload": res_project.name},
        {"field": "data.resProjectPreviewUrl",
         "payload": api.image.preview_url(res_project.reference_image_url, 100, 100)},
    ]
    api.task.set_fields(task_id, fields)
    api.task.set_output_project(task_id, res_project.id, res_project.name)
    app.stop()
Example #16
item = image_labels[free_pairs[0]]
image_id = item[0]
ann_path = item[1]
label_index = item[2]

meta_json = sly_json.load_json_file(os.path.join(project_dir, "meta.json"))
meta = sly.ProjectMeta.from_json(meta_json)

ann_json = sly_json.load_json_file(ann_path)
ann = sly.Annotation.from_json(ann_json, meta)

product_id_tm = meta.get_tag_meta("product_id")

labels = ann.labels
new_label = labels[label_index].add_tags([
    sly.Tag(product_id_tm, "unknown"),
])

labels[label_index] = new_label
ann = ann.clone(labels=labels)

api.annotation.upload_ann(image_id, ann)
sly_json.dump_json_file(ann.to_json(), ann_path)

free_pairs.pop(0)
sly_json.dump_json_file(free_pairs, os.path.join(project_dir,
                                                 "free_pairs.json"))

utils.get_next_object(api, task_id, project_id)

api.task.set_data(task_id, False, "state.tagging")
Example #17
ann_json = sly_json.load_json_file(ann_path)
ann = sly.Annotation.from_json(ann_json, meta)

product_id_tm = meta.get_tag_meta("product_id")
category_tm = meta.get_tag_meta("category")
brand_tm = meta.get_tag_meta("brand")
item_name_tm = meta.get_tag_meta("item_name")

selectedImageIndex = api.task.get_data(task_id, "state.selectedImageIndex")
product = sly_json.load_json_file(os.path.join(
    project_dir, "products.json"))[selectedImageIndex]
print(product)

labels = ann.labels
new_label = labels[label_index].add_tags([
    sly.Tag(product_id_tm, product["Id"]),
    sly.Tag(category_tm, product["Category"]),
    sly.Tag(brand_tm, product["Brand"]),
    sly.Tag(item_name_tm, product["Item"]),
])

labels[label_index] = new_label
ann = ann.clone(labels=labels)

api.annotation.upload_ann(image_id, ann)
sly_json.dump_json_file(ann.to_json(), ann_path)

free_pairs.pop(0)
sly_json.dump_json_file(free_pairs, os.path.join(project_dir,
                                                 "free_pairs.json"))