Example #1
def postprocess(api: sly.Api, project_id, ann: sly.Annotation,
                project_meta: sly.ProjectMeta, model_meta: sly.ProjectMeta,
                state):
    keep_classes = ui.get_keep_classes(state)  # @TODO: for debug ['dog'] #
    keep_tags = ui.get_keep_tags(state)
    res_project_meta, class_mapping, tag_meta_mapping = \
        merge_metas(project_meta, model_meta, keep_classes, keep_tags, state["suffix"])

    image_tags = []
    for tag in ann.img_tags:
        if tag.meta.name not in keep_tags:
            continue
        image_tags.append(tag.clone(meta=tag_meta_mapping[tag.meta.name]))

    new_labels = []
    for label in ann.labels:
        if label.obj_class.name not in keep_classes:
            continue
        label_tags = []
        for tag in label.tags:
            if tag.meta.name not in keep_tags:
                continue
            label_tags.append(tag.clone(meta=tag_meta_mapping[tag.meta.name]))
        new_label = label.clone(obj_class=class_mapping[label.obj_class.name],
                                tags=sly.TagCollection(label_tags))
        new_labels.append(new_label)

    res_ann = ann.clone(labels=new_labels,
                        img_tags=sly.TagCollection(image_tags))
    return res_ann, res_project_meta
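
`ui` and `merge_metas` are helpers from the surrounding app, so this function cannot run on its own. Below is a minimal usage sketch, assuming a project and image already exist on the server and that `state` only needs the keys read above; the ids, the `model_meta` stub, and the `state` contents are placeholders, not part of the example.

import supervisely_lib as sly

api = sly.Api.from_env()
project_id = 123          # placeholder project id
image_id = 456            # placeholder image id

project_meta = sly.ProjectMeta.from_json(api.project.get_meta(project_id))
ann_json = api.annotation.download(image_id).annotation
ann = sly.Annotation.from_json(ann_json, project_meta)

model_meta = sly.ProjectMeta()            # normally taken from the served model
state = {"suffix": "_model"}              # plus whatever ui.get_keep_classes/get_keep_tags read

res_ann, res_project_meta = postprocess(api, project_id, ann, project_meta, model_meta, state)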
Example #2
def inference(model, half, device, imgsz, stride, image: np.ndarray, meta: sly.ProjectMeta,
              conf_thres=0.25, iou_thres=0.45, augment=False, agnostic_nms=False,
              debug_visualization=False) -> dict:
    names = model.module.names if hasattr(model, 'module') else model.names

    img0 = image # RGB
    # Padded resize
    img = letterbox(img0, new_shape=imgsz, stride=stride)[0]
    img = img.transpose(2, 0, 1)  # HWC to CHW
    img = np.ascontiguousarray(img)

    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)

    inf_out = model(img, augment=augment)[0]

    # Apply NMS
    labels = []
    output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, agnostic=agnostic_nms)
    for i, det in enumerate(output):
        if det is not None and len(det):
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()

            for *xyxy, conf, cls in reversed(det):
                top, left, bottom, right = int(xyxy[1]), int(xyxy[0]), int(xyxy[3]), int(xyxy[2])
                rect = sly.Rectangle(top, left, bottom, right)
                obj_class = meta.get_obj_class(names[int(cls)])
                tag = sly.Tag(meta.get_tag_meta(CONFIDENCE), round(float(conf), 4))
                label = sly.Label(rect, obj_class, sly.TagCollection([tag]))
                labels.append(label)

    height, width = img0.shape[:2]
    ann = sly.Annotation(img_size=(height, width), labels=labels)

    if debug_visualization:
        # visualize for debug purposes
        vis = np.copy(img0)
        ann.draw_contour(vis, thickness=2)
        sly.image.write("vis.jpg", vis)

    return ann.to_json()
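
A hedged call sketch for the function above: the YOLOv5 `model`, `device`, and `model_meta` (a project meta containing the confidence tag) are assumed to be prepared by the surrounding serving code and are only placeholders here.

# Placeholders: `model`, `device`, `model_meta` come from the app's model-loading code.
image = sly.image.read("demo.jpg")          # RGB numpy array (HWC)
ann_json = inference(model, half=False, device=device, imgsz=640, stride=32,
                     image=image, meta=model_meta,
                     conf_thres=0.25, iou_thres=0.45, debug_visualization=True)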
Example #3
def visualize_dets(img_, output_, save_path_, names_, meta_):
    labels = []
    for i, det in enumerate(output_):
        if det is not None and len(det):
            for *xyxy, conf, cls in reversed(det):
                left, top, right, bottom = (int(xyxy[0]), int(xyxy[1]),
                                            int(xyxy[2]), int(xyxy[3]))
                rect = sly.Rectangle(top, left, bottom, right)
                obj_class = meta_.get_obj_class(names_[int(cls)])
                tag = sly.Tag(meta_.get_tag_meta("confidence"),
                              round(float(conf), 4))
                label = sly.Label(rect, obj_class, sly.TagCollection([tag]))
                labels.append(label)

    width, height = img_.size  # img_ is expected to be a PIL.Image, so .size is (width, height)
    ann = sly.Annotation(img_size=(height, width), labels=labels)

    vis = np.copy(img_)
    ann.draw_contour(vis, thickness=2)
    sly.image.write(save_path_, vis)
    return vis
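
This helper expects a PIL image (it reads `img_.size`) and detections in that image's coordinate space. A hedged usage sketch reusing the names from Example #2; the PIL conversion and the output path are assumptions.

from PIL import Image

# `output`, `names`, and `meta` are the NMS result, class names, and project meta from Example #2;
# `img0` is the original RGB numpy image whose coordinates the boxes were scaled to.
visualize_dets(Image.fromarray(img0), output, "/tmp/vis.jpg", names, meta)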
color_tag_meta = meta.get_tag_meta('vehicle_colour')
print(color_tag_meta.sly_id)

dataset = api.dataset.create(project.id,
                             "test_dataset",
                             change_name_if_conflict=True)

local_path = "/my_data/car.mp4"

# metadata is optional
video_metadata = {"field1": "value1", "field2": "value2"}

# smart upload: if the video has already been uploaded to the server, it is added to the dataset by hash, without a direct upload
video_infos = api.video.upload_paths(dataset.id, ["car.mp4"], [local_path],
                                     metas=[video_metadata])
video_info = video_infos[0]

print(video_info)
print("uploaded video id: ", video_info.id)

tags_to_assign = [
    VideoTag(color_tag_meta, value="red", frame_range=[3, 17]),
    VideoTag(color_tag_meta, value="orange", frame_range=[22, 30]),
]
api.video.tag.append_to_entity(video_info.id,
                               project.id,
                               tags=sly.TagCollection(tags_to_assign))

# see screenshot with result
# https://i.imgur.com/eVtfY1k.png
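
To double-check the result programmatically instead of the screenshot, the video annotation can be downloaded back. This is only a hedged sketch; it assumes `meta` is the project meta that already contains the 'vehicle_colour' tag meta.

from supervisely_lib.video_annotation.key_id_map import KeyIdMap

ann_json = api.video.annotation.download(video_info.id)
video_ann = sly.VideoAnnotation.from_json(ann_json, meta, KeyIdMap())
for vtag in video_ann.tags:
    print(vtag.meta.name, vtag.value, vtag.frame_range)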
def generate(api: sly.Api, task_id, context, state, app_logger):
    global PRODUCT_TAGS
    products_count = len(PRODUCTS.keys())
    train_count = state["trainCount"]
    val_count = state["valCount"]
    total_count = products_count * (train_count + val_count)

    augs_settings = yaml.safe_load(state["augs"])
    augs.init_fg_augs(augs_settings)

    PRODUCT_TAGS = PRODUCT_TAGS.add_items([TRAIN_TAG, VAL_TAG])
    res_meta = sly.ProjectMeta(
        obj_classes=sly.ObjClassCollection([RESULT_CLASS]),
        tag_metas=PRODUCT_TAGS
    )
    res_project = api.project.create(WORKSPACE_ID, state["outputProjectName"], change_name_if_conflict=True)
    api.project.update_meta(res_project.id, res_meta.to_json())

    progress = sly.Progress("Generating", total_count)
    for product_id in PRODUCTS.keys():
        dataset = api.dataset.create(res_project.id, str(product_id))

        tag_meta = PRODUCT_TAGS.get(product_id)
        if tag_meta is None:
            raise ValueError(f"TagMeta {product_id} not found")

        # cache images for one product
        images = {}
        for image_id in PRODUCTS[product_id].keys():
            images[image_id] = sly.image.read(IMAGE_PATH[image_id])

        name_index = 0
        for batch in sly.batched([TRAIN_TAG] * train_count + [VAL_TAG] * val_count, batch_size=10):
            final_images = []
            final_anns = []
            final_names = []
            for tag in batch:
                image_id = random.choice(list(PRODUCTS[product_id].keys()))
                img = images[image_id]
                ann = random.choice(list(PRODUCTS[product_id][image_id]))

                label_image = None
                label_mask = None
                label_preview = None
                retry_count = 5
                for retry_idx in range(retry_count):
                    try:
                        label_image, label_mask, label_preview = \
                            try_generate_example(
                                augs_settings,
                                augs,
                                preview=True,
                                product_id=product_id,
                                img=img,
                                ann=ann
                            )
                        break
                    except Exception as e:
                        if retry_idx == retry_count - 1:
                            raise e
                        continue

                res_ann = sly.Annotation(label_image.shape[:2],
                                         labels=[label_preview],
                                         img_tags=sly.TagCollection([tag, sly.Tag(tag_meta)]))
                final_images.append(label_image)
                final_anns.append(res_ann)
                final_names.append("{:05d}.jpg".format(name_index))
                name_index += 1

            new_images = api.image.upload_nps(dataset.id, final_names, final_images)
            new_image_ids = [image_info.id for image_info in new_images]
            api.annotation.upload_anns(new_image_ids, final_anns)
            progress.iters_done_report(len(batch))
            refresh_progress(api, task_id, progress)
    refresh_progress(api, task_id, progress)
    res_project = api.project.get_info_by_id(res_project.id)
    fields = [
        {"field": "data.started", "payload": False},
        {"field": "data.resProjectId", "payload": res_project.id},
        {"field": "data.resProjectName", "payload": res_project.name},
        {"field": "data.resProjectPreviewUrl",
         "payload": api.image.preview_url(res_project.reference_image_url, 100, 100)},
    ]
    api.task.set_fields(task_id, fields)
    api.task.set_output_project(task_id, res_project.id, res_project.name)
    app.stop()
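
The handlers in this and the following snippets follow the legacy Supervisely app pattern: each function is registered as a task callback and receives `(api, task_id, context, state, app_logger)`. A minimal wiring sketch under that assumption; the command name and the initial `data`/`state` payloads are made up for illustration.

import supervisely_lib as sly

my_app = sly.AppService()

@my_app.callback("generate")            # assumed command name
@sly.timeit
def generate(api: sly.Api, task_id, context, state, app_logger):
    ...  # body as shown in the handler above

def main():
    data = {"started": False}
    state = {"trainCount": 5, "valCount": 1, "augs": "", "outputProjectName": "synthetic"}
    my_app.run(data=data, state=state)

if __name__ == "__main__":
    sly.main_wrapper("main", main)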
def turn_into_images_project(api: sly.Api, task_id, context, state,
                             app_logger):
    res_project_name = f"{g.project.name}(images)"
    dst_project = api.project.create(g.WORKSPACE_ID,
                                     res_project_name,
                                     type=sly.ProjectType.IMAGES,
                                     change_name_if_conflict=True)
    api.project.update_meta(dst_project.id, g.meta.to_json())

    key_id_map = KeyIdMap()
    for dataset_name in g.SELECTED_DATASETS:
        dataset = api.dataset.get_info_by_name(g.PROJECT_ID, dataset_name)
        dst_dataset = api.dataset.create(dst_project.id, dataset.name)
        videos = api.video.get_list(dataset.id)
        for batch in sly.batched(videos):
            for video_info in batch:
                general_time = time()
                ann_info = api.video.annotation.download(video_info.id)
                ann = sly.VideoAnnotation.from_json(ann_info, g.meta,
                                                    key_id_map)
                if g.OPTIONS == "annotated" and len(ann.tags) == 0 and len(
                        ann.frames) == 0:
                    g.my_app.logger.warn(
                        f"Video {video_info.name} annotation is empty in Dataset {dataset_name}"
                    )
                    continue

                need_download_video = f.need_download_video(
                    video_info.frames_count, len(ann.frames))
                video_path = None
                if need_download_video or g.OPTIONS == "all":
                    local_time = time()
                    video_path = os.path.join(g.video_dir, video_info.name)

                    progress_cb = f.get_progress_cb(
                        "Downloading video",
                        int(video_info.file_meta['size']),
                        is_size=True)
                    api.video.download_path(video_info.id,
                                            video_path,
                                            progress_cb=progress_cb)
                    g.logger.info(
                        f'video {video_info.name} downloaded in {time() - local_time} seconds'
                    )

                frames_to_convert = []
                video_props = []
                video_frame_tags = defaultdict(list)
                f.convert_tags(ann.tags, video_props, video_frame_tags,
                               frames_to_convert)
                object_frame_tags = defaultdict(lambda: defaultdict(list))
                object_props = defaultdict(list)
                for vobject in ann.objects:
                    f.convert_tags(vobject.tags, object_props[vobject.key()],
                                   object_frame_tags[vobject.key()],
                                   frames_to_convert)
                    vobject_id = key_id_map.get_object_id(vobject.key())
                    f.add_object_id_tag(vobject_id,
                                        object_props[vobject.key()])
                if g.OPTIONS == "annotated":
                    frames_to_convert.extend(list(ann.frames.keys()))
                    frames_to_convert = list(dict.fromkeys(frames_to_convert))
                    frames_to_convert.sort()
                else:
                    frames_to_convert = list(range(0, video_info.frames_count))

                progress = sly.Progress(
                    "Processing video frames: {!r}".format(video_info.name),
                    len(frames_to_convert))

                # total_images_size = 0
                for batch_index, batch_frames in enumerate(
                        sly.batched(frames_to_convert,
                                    batch_size=g.BATCH_SIZE)):
                    metas = []
                    anns = []
                    if need_download_video or g.OPTIONS == "all":
                        local_time = time()
                        images_names, images = f.get_frames_from_video(
                            video_info.name, video_path, batch_frames)

                        g.logger.debug(
                            f'extracted {len(batch_frames)} frames in {time() - local_time} seconds'
                        )
                        """
                        too slow calculations, for extreme debug
                        
                        
                        images_size = f.calculate_batch_size(images) / (1024 * 1024)  # in MegaBytes
                        
                        g.logger.debug(f'batch size: {images_size} MB')
                        g.logger.debug(f'mean item size: {images_size / len(images)} MB')

                        total_images_size += images_size
                        """

                    else:
                        images_names, images = f.get_frames_from_api(
                            api, video_info.id, video_info.name, batch_frames)
                    for frame_index in batch_frames:
                        metas.append({
                            "video_id": video_info.id,
                            "video_name": video_info.name,
                            "frame_index": frame_index,
                            "video_dataset_id": video_info.dataset_id,
                            "video_dataset_name": dataset.name,
                            "video_project_id": g.project.id,
                            "video_project_name": g.project.name
                        })

                        labels = []
                        frame_annotation = ann.frames.get(frame_index)
                        if frame_annotation is not None:
                            for figure in frame_annotation.figures:
                                tags_to_assign = object_props[
                                    figure.parent_object.key()].copy()
                                tags_to_assign.extend(object_frame_tags[
                                    figure.parent_object.key()].get(
                                        frame_index, []).copy())
                                cur_label = sly.Label(
                                    figure.geometry,
                                    figure.parent_object.obj_class,
                                    sly.TagCollection(tags_to_assign))
                                labels.append(cur_label)

                        img_tags = video_props.copy() + video_frame_tags.get(
                            frame_index, []).copy()
                        anns.append(
                            sly.Annotation(
                                ann.img_size,
                                labels=labels,
                                img_tags=sly.TagCollection(img_tags)))

                    if g.LOG_LEVEL == 'debug':
                        f.distort_frames(images)
                        g.logger.debug(f'{len(images)} frames distorted')

                    f.upload_frames(
                        api, dst_dataset.id, images_names, images, anns, metas,
                        f'{batch_index}/{int(len(frames_to_convert) / g.BATCH_SIZE)}'
                    )
                    progress.iters_done_report(len(images_names))

                # g.logger.debug(f'total images size for video: {total_images_size} MB')
                g.logger.info(
                    f'video {video_info.name} converted in {time() - general_time} seconds'
                )
    g.my_app.stop()
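
All of these handlers lean on `sly.batched` to walk long lists in fixed-size chunks. A tiny standalone illustration of what it yields:

import supervisely_lib as sly

items = list(range(7))
for chunk in sly.batched(items, batch_size=3):
    print(chunk)    # [0, 1, 2], then [3, 4, 5], then [6]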
def create_trainset(api: sly.Api, task_id, context, state, app_logger):
    api.task.set_field(task_id, "data.started", True)

    project = api.project.get_info_by_id(PROJECT_ID)
    meta = sly.ProjectMeta.from_json(api.project.get_meta(project.id))
    res_meta = aug_project_meta(meta, state)

    result_project_name = state["resultProjectName"]
    if not result_project_name:
        result_project_name = _get_res_project_name(api, project)
    new_project = api.project.create(WORKSPACE_ID, result_project_name,
                                     description="for SmartTool",
                                     change_name_if_conflict=True)
    api.project.update_meta(new_project.id, res_meta.to_json())

    datasets = api.dataset.get_list(PROJECT_ID)
    ds_images_train, ds_images_val, total_images_count = sample_images(api, datasets, state)

    train_tag = res_meta.get_tag_meta("train")
    val_tag = res_meta.get_tag_meta("val")
    splitted_images = []

    for dataset_id, images in ds_images_train.items():
        splitted_images.append((dataset_id, images, train_tag))
    for dataset_id, images in ds_images_val.items():
        splitted_images.append((dataset_id, images, val_tag))

    progress = sly.Progress("Augmentations", total_images_count)

    _created_datasets = {}
    current_progress = 0
    for dataset_id, images, tag in splitted_images:
        dataset = api.dataset.get_info_by_id(dataset_id)

        if dataset.name not in _created_datasets:
            new_dataset = api.dataset.create(new_project.id, dataset.name)
            _created_datasets[dataset.name] = new_dataset
        new_dataset = _created_datasets[dataset.name]

        used_names = []
        for batch in sly.batched(images):
            image_ids = [image_info.id for image_info in batch]
            image_names = [image_info.name for image_info in batch]
            images_np = api.image.download_nps(dataset.id, image_ids)
            ann_infos = api.annotation.download_batch(dataset.id, image_ids)
            new_annotations = []
            new_images = []
            new_images_names = []
            for image_np, image_name, ann_info in zip(images_np, image_names, ann_infos):
                used_names.append(image_name)
                ann = sly.Annotation.from_json(ann_info.annotation, meta)
                ann = ann.clone(img_tags=sly.TagCollection([tag]))

                imgs_anns = aug_img_ann(image_np, ann, res_meta, state)
                if len(imgs_anns) == 0:
                    continue

                for (aug_img, aug_ann) in imgs_anns:
                    new_images.append(aug_img)
                    name = sly._utils.generate_free_name(used_names, image_name, with_ext=True)
                    new_images_names.append(name)
                    new_annotations.append(aug_ann)
                    used_names.append(name)

            new_image_infos = api.image.upload_nps(new_dataset.id, new_images_names, new_images)
            image_ids = [img_info.id for img_info in new_image_infos]
            api.annotation.upload_anns(image_ids, new_annotations)
            progress.iters_done_report(len(batch))
            current_progress += len(batch)
            api.task.set_field(task_id, "data.progress", int(current_progress * 100 / total_images_count))

    # to get correct "reference_image_url"
    res_project = api.project.get_info_by_id(new_project.id)
    fields = [
        {"field": "data.resultProject", "payload": res_project.name},
        {"field": "data.resultProjectId", "payload": res_project.id},
        {"field": "data.resultProjectPreviewUrl",
         "payload": api.image.preview_url(res_project.reference_image_url, 100, 100)},
        {"field": "data.finished", "payload": True}
    ]
    api.task.set_fields(task_id, fields)
    api.task.set_output_project(task_id, res_project.id, res_project.name)

    my_app.stop()
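
`aug_project_meta`, `sample_images`, and `aug_img_ann` are app-specific helpers; the only requirement visible above is that the resulting meta contains "train" and "val" tag metas. A hedged sketch of adding such tag metas to a ProjectMeta (not necessarily how `aug_project_meta` does it):

train_tm = sly.TagMeta("train", sly.TagValueType.NONE)
val_tm = sly.TagMeta("val", sly.TagValueType.NONE)
res_meta = meta.add_tag_metas([train_tm, val_tm])   # ProjectMeta is immutable, so keep the returned copy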
def transform(api: sly.Api, task_id, context, state, app_logger):
    storage_dir = my_app.data_dir

    project = api.project.create(WORKSPACE_ID,
                                 PROJECT_NAME,
                                 change_name_if_conflict=True)
    dataset = api.dataset.create(project.id,
                                 DATASET_NAME,
                                 change_name_if_conflict=True)

    local_file = os.path.join(storage_dir,
                              sly.fs.get_file_name_with_ext(INPUT_FILE))
    api.file.download(TEAM_ID, INPUT_FILE, local_file)

    tag_names = set()
    movies_info = []
    with open(local_file, encoding="ISO-8859-1") as f:
        reader = csv.DictReader(f)
        for row in reader:
            movies_info.append(row)
            tag_names.update(parse_genres(row["Genre"]))

    tags_arr = [
        sly.TagMeta(name=tag_name, value_type=sly.TagValueType.NONE)
        for tag_name in tag_names
    ]
    project_meta = sly.ProjectMeta(tag_metas=sly.TagMetaCollection(
        items=tags_arr))
    api.project.update_meta(project.id, project_meta.to_json())
    movies_info_len = len(movies_info)
    movies_info_len_digits = len(str(movies_info_len))
    batch_size = 50

    progress = sly.Progress('Uploading images', movies_info_len, app_logger)
    for batch_idx, batch in enumerate(
            sly._utils.batched(movies_info, batch_size)):
        image_paths = []
        image_names = []
        image_metas = []
        csv_rows = []
        for idx, csv_row in enumerate(batch):
            image_url = csv_row["Poster"]
            cur_img_ext = os.path.splitext(image_url)[1]
            cur_img_idx = str(batch_idx * batch_size + idx + 1).rjust(
                movies_info_len_digits, '0')
            image_name = f"{cur_img_idx}{cur_img_ext}"
            local_path = os.path.join(storage_dir, image_name)

            try:
                download_file(image_url, local_path, app_logger,
                              batch_idx * batch_size + idx, movies_info_len)
            except Exception:
                app_logger.warn(
                    f"Couldn't download image (row={batch_idx * batch_size + idx}, url={image_url})"
                )
                continue

            csv_rows.append(csv_row)
            image_paths.append(local_path)
            image_names.append(image_name)
            image_metas.append({
                "Title": csv_row["Title"],
                "imdbId": csv_row["imdbId"],
                "IMDB Score": csv_row["IMDB Score"],
                "Imdb Link": csv_row["Imdb Link"].replace('title/tt', 'title/tt0')
            })

        images = api.image.upload_paths(dataset.id,
                                        image_names,
                                        image_paths,
                                        metas=image_metas)
        cur_anns = []
        for image, csv_row in zip(images, csv_rows):
            tags_arr = []
            image_tags = parse_genres(csv_row["Genre"])
            if len(image_tags) == 0:
                continue

            for image_tag in image_tags:
                tag_meta = project_meta.get_tag_meta(image_tag)
                tags_arr.append(sly.Tag(tag_meta))

            tags_arr = sly.TagCollection(items=tags_arr)
            ann = sly.Annotation(img_size=(image.height, image.width),
                                 img_tags=tags_arr)
            cur_anns.append((image.id, ann))

        if len(cur_anns) > 0:
            img_ids = [img_id for img_id, ann in cur_anns]
            anns = [ann for img_id, ann in cur_anns]
            api.annotation.upload_anns(img_ids, anns)

        progress.iters_done_report(len(batch))
    api.task.set_output_project(task_id, project.id, project.name)
    my_app.stop()
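
`parse_genres` and `download_file` are defined elsewhere in the app. Purely as an illustration (the pipe-separated layout of the `Genre` column is an assumption about the CSV, not something shown above), a minimal `parse_genres` could look like this:

def parse_genres(genre_field: str):
    # Hypothetical helper: turn "Action|Adventure|Sci-Fi" into ["Action", "Adventure", "Sci-Fi"].
    return [genre.strip() for genre in genre_field.split("|") if genre.strip()]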
def process_coco_dir(input_dir, project, project_meta, api, config_yaml_info,
                     app_logger):
    for dataset_type, dataset_path in config_yaml_info["datasets"]:
        tag_meta = project_meta.get_tag_meta(dataset_type)
        dataset_name = os.path.basename(dataset_path)

        images_list = sorted(
            sly.fs.list_files(dataset_path,
                              valid_extensions=sly.image.SUPPORTED_IMG_EXTS))
        if len(images_list) == 0:
            raise Exception(
                "Dataset: {!r} is empty. Check {!r} directory in project folder"
                .format(dataset_name, dataset_path))

        dataset = api.dataset.create(project.id,
                                     dataset_name,
                                     change_name_if_conflict=True)
        progress = sly.Progress("Processing {} dataset".format(dataset_name),
                                len(images_list), sly.logger)
        for batch in sly._utils.batched(images_list):
            cur_img_names = []
            cur_img_paths = []
            cur_anns = []

            for image_file_name in batch:
                image_name = os.path.basename(image_file_name)
                cur_img_names.append(image_name)
                cur_img_paths.append(image_file_name)
                ann_file_name = os.path.join(
                    input_dir, "labels", dataset_name,
                    "{}.txt".format(os.path.splitext(image_name)[0]))
                curr_img = sly.image.read(image_file_name)
                height, width = curr_img.shape[:2]

                labels_arr = []
                if os.path.isfile(ann_file_name):
                    with open(ann_file_name, "r") as f:
                        for idx, line in enumerate(f):
                            try:
                                label = parse_line(line, width, height,
                                                   project_meta,
                                                   config_yaml_info)
                                labels_arr.append(label)
                            except Exception as e:
                                app_logger.warn(
                                    e, {
                                        "filename": ann_file_name,
                                        "line": line,
                                        "line_num": idx
                                    })

                tags_arr = sly.TagCollection(items=[sly.Tag(tag_meta)])
                ann = sly.Annotation(img_size=(height, width),
                                     labels=labels_arr,
                                     img_tags=tags_arr)
                cur_anns.append(ann)

            img_infos = api.image.upload_paths(dataset.id, cur_img_names,
                                               cur_img_paths)
            img_ids = [x.id for x in img_infos]

            api.annotation.upload_anns(img_ids, cur_anns)
            progress.iters_done_report(len(batch))
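
`parse_line` is another helper from this converter. As a hedged sketch, assuming the label files follow the standard YOLO format `class_id cx cy w h` with coordinates normalized to the image size and that `config_yaml_info["names"]` maps class ids to class names, a minimal parser might look like:

def parse_yolo_line(line, img_width, img_height, project_meta, config_yaml_info):
    # Hypothetical parser (not the app's parse_line): converts one YOLO txt line into a sly.Label.
    class_id, cx, cy, w, h = (float(v) for v in line.split())
    class_name = config_yaml_info["names"][int(class_id)]      # assumed config key
    obj_class = project_meta.get_obj_class(class_name)
    left = (cx - w / 2) * img_width
    right = (cx + w / 2) * img_width
    top = (cy - h / 2) * img_height
    bottom = (cy + h / 2) * img_height
    rect = sly.Rectangle(top=round(top), left=round(left),
                         bottom=round(bottom), right=round(right))
    return sly.Label(rect, obj_class)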