Example #1
        def chunk_generator():
            progress = sly.ProgressCounter('Upload annotations',
                                           len(img_ids),
                                           ext_logger=self.logger)

            for batch_some in sly.batched(
                    list(zip(img_ids, img_names, ann_paths)),
                    constants.BATCH_SIZE_UPLOAD_ANNOTATIONS):
                for img_id, img_name, ann_path in batch_some:
                    proto_img = api_proto.Image(id=img_id,
                                                title=img_name,
                                                project_id=project_id)
                    freader = ChunkedFileReader(ann_path,
                                                constants.NETW_CHUNK_SIZE)
                    for chunk_bytes in freader:
                        current_chunk = api_proto.Chunk(
                            buffer=chunk_bytes, total_size=freader.file_size)
                        yield api_proto.ChunkImage(chunk=current_chunk,
                                                   image=proto_img)
                    self.logger.trace('annotation is uploaded',
                                      extra={
                                          'img_name': img_name,
                                          'img_path': ann_path
                                      })
                    progress.iter_done_report()
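All of the snippets on this page lean on sly.batched, which walks a list in fixed-size chunks. A minimal sketch of the idea, assuming it simply yields consecutive slices (the real helper in supervisely_lib may differ in details such as the default batch size):

def batched_sketch(seq, batch_size=50):
    # yield consecutive slices of `seq`, each holding at most `batch_size` items
    for i in range(0, len(seq), batch_size):
        yield seq[i:i + batch_size]

# e.g.: for batch_some in batched_sketch(list(zip(img_ids, img_names, ann_paths)), 100): ...

Example #2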
def add_metadata_to_images(api, path_to_files, dataset_id, app_logger):
    path_to_images = [
        sly.fs.get_file_name(json_name)
        for json_name in os.listdir(path_to_files)
    ]
    images = api.image.get_list(dataset_id)
    image_names = [image_info.name for image_info in images]
    matches = list(set(path_to_images) & set(image_names))
    if len(path_to_images) != len(matches):
        app_logger.warn(
            '{} metadata files were given, {} match image names in the dataset'.format(
                len(path_to_images), len(matches)))

    progress = sly.Progress('Uploading metadata to images', len(images),
                            app_logger)
    for batch in sly.batched(images):
        for image_info in batch:
            if image_info.name not in path_to_images:
                app_logger.warn(
                    'Metadata file for image {} was not found in directory {}'.
                    format(image_info.name, path_to_files))
                continue

            meta = load_json_file(
                os.path.join(path_to_files, image_info.name + '.json'))
            if RESOLVE == "merge":
                meta_copy = meta.copy()
                for key in meta.keys():
                    if key in image_info.meta:
                        meta_copy[key + "-original"] = image_info.meta[key]

                meta = {**image_info.meta, **meta_copy}

            api.image.update_meta(image_info.id, meta)
        progress.iters_done_report(len(batch))
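When RESOLVE == "merge", keys that already exist in the image meta keep their old value under a "-original" suffix while the new value wins. A small worked illustration of the branch above (values are invented for the example):

image_meta = {"camera": "A", "city": "Berlin"}  # image_info.meta already stored on the image
meta = {"camera": "B", "weather": "sunny"}      # metadata loaded from the JSON file

meta_copy = meta.copy()
for key in meta.keys():
    if key in image_meta:
        meta_copy[key + "-original"] = image_meta[key]
merged = {**image_meta, **meta_copy}
# merged == {"camera": "B", "city": "Berlin", "weather": "sunny", "camera-original": "A"}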
Example #3
    def _download_annotations(self, pr_writer, image_id_to_ds):
        progress = sly.ProgressCounter('Download annotations',
                                       len(image_id_to_ds),
                                       ext_logger=self.logger)

        for batch_img_ids in sly.batched(
                list(image_id_to_ds.keys()),
                constants.BATCH_SIZE_DOWNLOAD_ANNOTATIONS):
            for chunk in self.api.get_stream_with_data(
                    'DownloadAnnotations', api_proto.ChunkImage,
                    api_proto.ImageArray(images=batch_img_ids)):
                img_id = chunk.image.id
                ds_name = image_id_to_ds[img_id]
                self.logger.trace('download_annotations',
                                  extra={'img_id': img_id})
                fh = ChunkedFileWriter(file_path=pr_writer.get_ann_path(
                    ds_name, chunk.image.title))
                fh.write(chunk.chunk)
                progress.iter_done_report()
                if not fh.close_and_check():
                    self.logger.warning('ann was skipped while downloading',
                                        extra={
                                            'img_id': img_id,
                                            'ann_path': fh.file_path
                                        })
Example #4
def upload_pred_vis():
    submitted = False
    for i in range(5):
        paths = [
            x for x in Path(g.local_artifacts_dir).glob('test*.jpg')
            if x.exists()
        ]
        if len(paths) % 2 != 0:
            # wait while the thread in the YOLOv5 script produces the visualization: test batch + prediction
            time.sleep(3)
            continue
        _paths = [str(path) for path in paths]
        _paths.sort()
        sync_bindings = []
        for batch in sly.batched(_paths, 2):
            sync_bindings.append([
                sly.fs.get_file_name_with_ext(batch[0]),
                sly.fs.get_file_name_with_ext(batch[1])
            ])
        api.task.set_field(task_id, "data.syncBindings", sync_bindings)
        _upload_data_vis("data.predVis", paths, 2)
        submitted = True
        break

    if not submitted:
        sly.logger.warn(
            "Test batch visualizations (labels + predictions) are not ready, see them in the artifacts "
            "directory after training")
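Example #5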
def _assign_tag(task_id, api: sly.Api, split, tag_metas, new_project, created_datasets, progress):
    for dataset_id, images in split.items():
        dataset = api.dataset.get_info_by_id(dataset_id)
        if dataset.name not in created_datasets:
            new_dataset = api.dataset.create(new_project.id, dataset.name)
            created_datasets[dataset.name] = new_dataset
        new_dataset = created_datasets[dataset.name]

        for batch in sly.batched(images):
            image_ids = [image_info.id for image_info in batch]
            image_names = [image_info.name for image_info in batch]
            ann_infos = api.annotation.download_batch(dataset.id, image_ids)
            new_annotations = []

            for ann_info in ann_infos:
                ann_json = ann_info.annotation
                new_ann = sly.Annotation.from_json(ann_json, META_ORIGINAL)
                for tag_meta in tag_metas:
                    new_ann = new_ann.add_tag(sly.Tag(tag_meta))
                new_annotations.append(new_ann)

            new_images = api.image.upload_ids(new_dataset.id, image_names, image_ids)
            new_image_ids = [image_info.id for image_info in new_images]
            api.annotation.upload_anns(new_image_ids, new_annotations)

            progress.iters_done_report(len(batch))
            progress_percent = int(progress.current * 100 / progress.total)
            api.task.set_field(task_id, "data.progress", progress_percent)
            fields = [
                {"field": "data.progressCurrent", "payload": progress.current},
                {"field": "data.progressTotal", "payload": progress.total},
                {"field": "data.progress", "payload": progress_percent},
            ]
            api.task.set_fields(task_id, fields)
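Example #6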
def create_foreground(api: sly.Api, task_id, context, state, app_logger):
    global meta
    project_info = api.project.get_info_by_id(project_id)
    meta = create_classes(api)

    progress = sly.Progress("Processing", project_info.items_count)
    for dataset in api.dataset.get_list(project_id):
        images_infos = api.image.get_list(dataset.id)
        for batch_infos in sly.batched(images_infos, 20):
            ids = []
            names = []
            local_paths = []
            for info in batch_infos:
                ids.append(info.id)
                names.append(info.name)
                local_paths.append(os.path.join(app.data_dir, info.name))

            api.image.download_paths(dataset.id, ids, local_paths)
            anns_infos = api.annotation.download_batch(dataset.id, ids)
            anns = [
                sly.Annotation.from_json(info.annotation, meta)
                for info in anns_infos
            ]

            res_ids = []
            res_anns = []
            for img_id, img_name, img_path, ann in zip(ids, names, local_paths,
                                                       anns):
                img = sly.image.read(img_path, remove_alpha_channel=False)
                if len(img.shape) == 3:
                    if img.shape[2] != 4:
                        sly.logger.warn(
                            f"Image {img_name} (id={img_id}) does not have alpha channel, will be skipped"
                        )
                        continue
                else:
                    sly.logger.warn(
                        f"Image {img_name} (id={img_id}) does not have alpha channel, will be skipped"
                    )
                    continue

                fg, fuzzy = get_masks(img)

                new_ann = ann.add_label(fg)
                if fuzzy is not None:
                    new_ann = new_ann.add_label(fuzzy)

                res_ids.append(img_id)
                res_anns.append(new_ann)

            api.annotation.upload_anns(res_ids, res_anns)
            for img_path in local_paths:
                sly.fs.silent_remove(img_path)
            progress.iters_done_report(len(batch_infos))

    api.task.set_output_project(task_id, project_id)
    app.stop()
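get_masks is defined elsewhere in the app; a hypothetical sketch of what such a helper could look like, assuming the split is driven by the alpha channel and that fg_class and fuzzy_class are obj classes taken from the meta created by create_classes (illustrative only, not the app's actual implementation):

def get_masks_sketch(img, fg_class, fuzzy_class):
    # img is an RGBA array: fully opaque pixels form the foreground mask,
    # partially transparent pixels form the optional "fuzzy" mask
    alpha = img[:, :, 3]
    fg = sly.Label(sly.Bitmap(alpha == 255), fg_class)  # assumes at least one opaque pixel
    fuzzy_mask = (alpha > 0) & (alpha < 255)
    fuzzy = sly.Label(sly.Bitmap(fuzzy_mask), fuzzy_class) if fuzzy_mask.any() else None
    return fg, fuzzy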
Example #7
    def get_dataset_images_hashes(self, dataset_id):
        image_array = self.api.simple_request('GetDatasetImages', sly.api_proto.ImageArray, sly.api_proto.Id(id=dataset_id))
        img_hashes = []

        for batch_img_ids in sly.batched(list(image_array.images), constants.BATCH_SIZE_GET_IMAGES_INFO()):
            images_info_proto = self.api.simple_request('GetImagesInfo', sly.api_proto.ImagesInfo,
                                                        sly.api_proto.ImageArray(images=batch_img_ids))
            img_hashes.extend([(info.hash, info.ext) for info in images_info_proto.infos])
        return img_hashes
Example #8
def copy_project(api: sly.Api, task_id, context, state, app_logger):
    project2 = api2.project.get_info_by_id(PROJECT_ID2)
    if project2 is None:
        raise RuntimeError(
            f"Project with id={PROJECT_ID2} not found on remote Supervisely instance"
        )
    if project2.type != str(sly.ProjectType.IMAGES):
        raise TypeError(
            "This version supports only images projects. Please submit a feature request to the Supervisely dev "
            "team to add support for other project types (videos, 3d, dicom, ...)"
        )

    meta2_json = api2.project.get_meta(project2.id)

    project = api.project.create(WORKSPACE_ID,
                                 project2.name,
                                 project2.type,
                                 project2.description,
                                 change_name_if_conflict=True)
    api.project.update_meta(project.id, meta2_json)

    progress = sly.Progress("Import",
                            api2.project.get_images_count(project2.id))
    for dataset2 in api2.dataset.get_list(project2.id):
        dataset = api.dataset.create(project.id, dataset2.name,
                                     dataset2.description)
        images2 = api2.image.get_list(dataset2.id)
        for batch2 in sly.batched(images2, batch_size=10):
            ids2 = []
            names = []
            paths = []
            metas = []
            for image_info2 in batch2:
                ids2.append(image_info2.id)
                names.append(image_info2.name)
                paths.append(os.path.join(my_app.data_dir, image_info2.name))
                metas.append(image_info2.meta)

            api2.image.download_paths(dataset2.id, ids2, paths)
            anns2 = api2.annotation.download_batch(dataset2.id, ids2)
            anns2 = [ann2.annotation for ann2 in anns2]

            batch = api.image.upload_paths(dataset.id,
                                           names,
                                           paths,
                                           metas=metas)
            ids = [image_info.id for image_info in batch]
            api.annotation.upload_jsons(ids, anns2)

            for p in paths:
                sly.fs.silent_remove(p)

            progress.iters_done_report(len(batch2))

    api.task.set_output_project(task_id, project.id, project.name)
    my_app.stop()
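This example copies a project between two Supervisely instances, so it assumes two clients: api2 reads from the remote (source) instance and api writes to the local (destination) one. A minimal sketch of how they might be created (environment variable names for the second instance are illustrative):

import os
import supervisely_lib as sly

api = sly.Api.from_env()  # destination instance, from the standard SERVER_ADDRESS / API_TOKEN env vars
api2 = sly.Api(os.environ["REMOTE_SERVER_ADDRESS"], os.environ["REMOTE_API_TOKEN"])  # source instance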
Example #9
def apply_model(api: sly.Api, task_id, context, state, app_logger):
    def _update_progress(progress):
        fields = [
            {"field": "data.progress", "payload": int(progress.current * 100 / progress.total)},
            {"field": "data.progressCurrent", "payload": progress.current},
            {"field": "data.progressTotal", "payload": progress.total},
        ]
        api.task.set_fields(task_id, fields)

    try:
        inf_setting = yaml.safe_load(state["settings"])
    except Exception as e:
        inf_setting = {}
        app_logger.warn(repr(e))

    global project_meta
    res_project_meta = project_meta.clone()
    res_project = api.project.create(workspace_id, state["resProjectName"], change_name_if_conflict=True)
    api.project.update_meta(res_project.id, res_project_meta.to_json())

    progress = sly.Progress("Inference", len(input_images), need_info_log=True)

    for dataset in input_datasets:
        res_dataset = api.dataset.create(res_project.id, dataset.name, dataset.description)
        images = api.image.get_list(dataset.id)

        for batch in sly.batched(images, batch_size=10):
            image_ids, res_names, res_metas = [], [], []

            for image_info in batch:
                image_ids.append(image_info.id)
                res_names.append(image_info.name)
                res_metas.append(image_info.meta)
            _, res_anns, final_project_meta = apply_model_to_images(api, state, dataset.id, image_ids, inf_setting)
            if res_project_meta != final_project_meta:
                res_project_meta = final_project_meta
                api.project.update_meta(res_project.id, res_project_meta.to_json())

            res_images_infos = api.image.upload_ids(res_dataset.id, res_names, image_ids, metas=res_metas)
            res_ids = [image_info.id for image_info in res_images_infos]
            api.annotation.upload_anns(res_ids, res_anns)
            progress.iters_done_report(len(res_ids))
            if progress.need_report():
                _update_progress(progress)

    res_project = api.project.get_info_by_id(res_project.id)  # to refresh reference_image_url
    fields = [
        {"field": "data.resProjectId", "payload": res_project.id},
        {"field": "data.resProjectName", "payload": res_project.name},
        {"field": "data.resProjectPreviewUrl", "payload": api.image.preview_url(res_project.reference_image_url, 100, 100)},
    ]
    api.task.set_fields(task_id, fields)
    api.task.set_output_project(task_id, res_project.id, res_project.name)
    my_app.stop()
Example #10
    def _download_images(self, pr_writer, image_id_to_ds):
        # get full info (hash, title, ext)
        img_infos = []
        for batch_img_ids in sly.batched(list(image_id_to_ds.keys()), constants.BATCH_SIZE_GET_IMAGES_INFO):
            images_info_proto = self.api.simple_request('GetImagesInfo',
                                                        api_proto.ImagesInfo,
                                                        api_proto.ImageArray(images=batch_img_ids))
            img_infos.extend(images_info_proto.infos)

        written_hashes = set(self._get_images_from_agent_storage(pr_writer, image_id_to_ds, img_infos))
        img_infos_to_download = [x for x in img_infos if x.hash not in written_hashes]
        self._download_images_from_remote(pr_writer, image_id_to_ds, img_infos_to_download)
Example #11
    def _download_images_from_remote(self, pr_writer, image_id_to_ds, img_infos):
        if len(img_infos) == 0:
            return

        infos_with_paths = [(info, pr_writer.get_img_path(image_id_to_ds[info.id], info.title, info.ext))
                            for info in img_infos]
        hash2path = {x[0].hash: x[1] for x in infos_with_paths}  # for unique hashes
        unique_hashes = list(hash2path.keys())

        ready_paths = []
        ready_hashes = []
        progress = sly.ProgressCounter('Download remote images', len(unique_hashes), ext_logger=self.logger)

        def close_fh(fh):
            fpath = fh.file_path
            if fh.close_and_check():
                ready_paths.append(fpath)
                ready_hashes.append(img_hash)
                progress.iter_done_report()
            else:
                self.logger.warning('file was skipped while downloading',
                                    extra={'img_path': fpath, 'img_hash': img_hash})

        # download by unique hashes
        for batch_img_hashes in sly.batched(unique_hashes, constants.BATCH_SIZE_DOWNLOAD_IMAGES):
            file_handler = None
            img_hash = None
            for chunk in self.api.get_stream_with_data('DownloadImages',
                                                       api_proto.ChunkImage,
                                                       api_proto.ImagesHashes(images_hashes=batch_img_hashes)):
                if chunk.image.hash:  # non-empty hash means beginning of new image
                    if file_handler is not None:
                        close_fh(file_handler)
                    img_hash = chunk.image.hash
                    self.logger.trace('download_images', extra={'img_hash': img_hash})
                    dst_fpath = hash2path[img_hash]
                    file_handler = ChunkedFileWriter(file_path=dst_fpath)

                file_handler.write(chunk.chunk)

            close_fh(file_handler)  # must be not None

        # process non-unique hashes
        for info, dst_path in infos_with_paths:
            origin_path = hash2path[info.hash]
            if (origin_path != dst_path) and osp.isfile(origin_path):
                sly.ensure_base_path(dst_path)
                sly.copy_file(origin_path, dst_path)

        self._write_images_to_agent_storage(ready_paths, ready_hashes)
Example #12
def init(api: sly.Api, task_id, context, state, app_logger):
    global project_info, meta

    project_info = api.project.get_info_by_id(project_id)
    meta = sly.ProjectMeta.from_json(api.project.get_meta(project_id))

    same_tags = defaultdict(list)

    progress = sly.Progress("Initializing image examples",
                            project_info.items_count,
                            need_info_log=True)
    for dataset in api.dataset.get_list(project_id):
        images_infos = api.image.get_list(dataset.id)
        for batch in sly.batched(images_infos):
            img_ids = [info.id for info in batch]
            ann_infos = api.annotation.download_batch(dataset.id, img_ids)
            anns = [
                sly.Annotation.from_json(ann_info.annotation, meta)
                for ann_info in ann_infos
            ]
            for img_info, ann in zip(batch, anns):
                for tag in ann.img_tags:
                    tag: sly.Tag
                    same_tags[tag.get_compact_str()].append({
                        "url": img_info.full_storage_url,
                        "figures": [],
                        "tag": {
                            **tag.to_json(),
                            "color": sly.color.rgb2hex(meta.get_tag_meta(tag.name).color)
                        },
                        "_tag": tag
                    })
            progress.iters_done_report(len(batch))

    index = 0
    for key, cards in same_tags.items():
        for card in cards:
            tag = card.pop('_tag', None)
            gallery["content"]["annotations"][str(index)] = card
            gallery["content"]["layout"][index % CNT_GRID_COLUMNS].append(
                str(index))
            gallery2tag[str(index)] = tag
            index += 1

    api.task.set_field(task_id, "data.gallery", gallery)
    if len(gallery2tag) > 0:
        api.task.set_field(task_id, "state.selectedItem", '0')

    sly.logger.info("Initialization finished")
Example #13
    def upload_images_to_remote(self, fpaths, infos):
        progress = sly.ProgressCounter('Upload images', len(fpaths), ext_logger=self.logger)
        for batch_paths_infos in sly.batched(list(zip(fpaths, infos)), constants.BATCH_SIZE_UPLOAD_IMAGES):
            def chunk_generator():
                for fpath, proto_img_info in batch_paths_infos:
                    self.logger.trace('image upload start', extra={'img_path': fpath})
                    freader = ChunkedFileReader(fpath, constants.NETW_CHUNK_SIZE)
                    for chunk_bytes in freader:
                        current_chunk = api_proto.Chunk(buffer=chunk_bytes, total_size=freader.file_size)
                        yield api_proto.ChunkImage(chunk=current_chunk, image=proto_img_info)

                    self.logger.trace('image uploaded', extra={'img_path': fpath})
                    progress.iter_done_report()

            self.api.put_stream_with_data('UploadImages', api_proto.Empty, chunk_generator())
            self.logger.debug('Batch of images has been sent.', extra={'batch_len': len(batch_paths_infos)})
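Example #14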
def inference_model_batch(model, images_nps, topn=5):
    """Inference image(s) with the classifier.

    Args:
        model (nn.Module): The loaded classifier.
        img (str/ndarray): The image filename or loaded image.

    Returns:
        result (list of dict): The classification results that contains
            `class_name`, `pred_label` and `pred_score`.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    if cfg.data.test.pipeline[0]['type'] == 'LoadImageFromFile':
        cfg.data.test.pipeline.pop(0)

    test_pipeline = Compose(cfg.data.test.pipeline)

    with torch.no_grad():

        inference_results = []
        for images_batch in sly.batched(images_nps, g.batch_size):
            data = [dict(img=img) for img in images_batch]

            data = [test_pipeline(row) for row in data]
            data = collate(data, samples_per_gpu=1)

            if next(model.parameters()).is_cuda:
                # scatter to specified GPU
                data = scatter(data, [device])[0]

            batch_scores = np.asarray(model(return_loss=False, **data))
            batch_top_indexes = batch_scores.argsort(axis=1)[:, -topn:][:, ::-1]

            for scores, top_indexes in zip(batch_scores, batch_top_indexes):
                inference_results.append({
                    'label': top_indexes.astype(int).tolist(),
                    'score': scores[top_indexes].astype(float).tolist(),
                    'class': np.asarray(model.CLASSES)[top_indexes].tolist()
                })

    return inference_results
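A hedged usage sketch for the function above, assuming the classifier was built with mmclassification's init_model and that g.batch_size is configured elsewhere (config, checkpoint and image paths are illustrative):

from mmcls.apis import init_model
import supervisely_lib as sly

model = init_model('configs/resnet/resnet50_b32x8_imagenet.py', 'latest.pth', device='cuda:0')
images_nps = [sly.image.read(p) for p in ['img_0001.jpg', 'img_0002.jpg']]

results = inference_model_batch(model, images_nps, topn=5)
for res in results:
    print(res['class'][0], res['score'][0])  # best class name and its score for each image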
Example #15
def cache_annotations(api: sly.Api, task_id, context, state, app_logger):
    progress = sly.Progress("Cache annotations", project_info.items_count)
    for dataset in api.dataset.get_list(project_id):
        images = api.image.get_list(dataset.id)
        for batch in sly.batched(images):
            image_ids = [image_info.id for image_info in batch]
            ann_infos = api.annotation.download_batch(dataset.id, image_ids)
            for image_id, image_info, ann_info in zip(image_ids, batch,
                                                      ann_infos):
                ann = sly.Annotation.from_json(ann_info.annotation, meta)
                anns[image_id] = ann
                images_info[image_id] = image_info
                for label in ann.labels:
                    labels[label.obj_class.name][image_id].append(label)
            progress.iters_done_report(len(batch))

    progress = sly.Progress("App is ready", 1)
    progress.iter_done_report()
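The caches filled here (anns, images_info, labels) are module-level globals; based on how they are indexed above, they are presumably initialized like this:

from collections import defaultdict

anns = {}                                        # image_id -> sly.Annotation
images_info = {}                                 # image_id -> ImageInfo
labels = defaultdict(lambda: defaultdict(list))  # class name -> image_id -> list of labels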
Example #16
        def chunk_generator():
            progress = sly.ProgressCounter('Upload images',
                                           len(fpaths),
                                           ext_logger=self.logger)

            for batch_paths_infos in sly.batched(
                    list(zip(fpaths, infos)),
                    constants.BATCH_SIZE_UPLOAD_IMAGES):
                for fpath, proto_img_info in batch_paths_infos:
                    self.logger.trace('image upload start',
                                      extra={'img_path': fpath})

                    freader = ChunkedFileReader(fpath,
                                                constants.NETW_CHUNK_SIZE)
                    for chunk_bytes in freader:
                        current_chunk = api_proto.Chunk(
                            buffer=chunk_bytes, total_size=freader.file_size)
                        yield api_proto.ChunkImage(chunk=current_chunk,
                                                   image=proto_img_info)

                    self.logger.trace('image uploaded',
                                      extra={'img_path': fpath})
                    progress.iter_done_report()
Example #17
def create_reference_file(api: sly.Api, task_id, context, state, app_logger):
    global PROJECT, JSON_PATH_REMOTE

    PROJECT = api.project.get_info_by_id(PROJECT_ID)
    read_and_validate_project_meta()

    file_remote = "/reference_items/{}_{}.json".format(PROJECT.id,
                                                       PROJECT.name)
    app_logger.info("Remote file path: {!r}".format(file_remote))
    if api.file.exists(TEAM_ID, file_remote):
        raise FileExistsError(
            "File {!r} already exists in Team Files. Make sure you want to replace it. "
            "Please, remove it manually and run the app again.".format(
                file_remote))

    result = {
        "project_id": PROJECT.id,
        "project_name": PROJECT.name,
        "project_url": api.project.url(PROJECT_ID),
        "reference_tag_name": TAG_NAME,
        "key_image_field": KEY_IMAGE_FIELD,
        "all_keys": [],
        "references": defaultdict(list)
    }

    progress = sly.Progress("Processing",
                            PROJECT.images_count,
                            ext_logger=app_logger)
    for dataset in api.dataset.get_list(PROJECT.id):
        ds_images = api.image.get_list(dataset.id)
        for batch in sly.batched(ds_images):
            image_ids = [image_info.id for image_info in batch]
            image_names = [image_info.name for image_info in batch]

            ann_infos = api.annotation.download_batch(dataset.id, image_ids)
            anns = [
                sly.Annotation.from_json(ann_info.annotation, META)
                for ann_info in ann_infos
            ]

            for image_info, image_id, image_name, ann in zip(
                    batch, image_ids, image_names, anns):
                for label in ann.labels:
                    tag = label.tags.get(TAG_NAME)
                    if tag is None:
                        continue

                    key_tag = image_info.meta.get(KEY_IMAGE_FIELD)
                    if key_tag is None:
                        app_logger.warn(
                            "Object has reference tag, but image doesn't have key field. Object is skipped",
                            extra={
                                "figure_id": label.geometry.sly_id,
                                "image_id": image_id,
                                "image_name": image_name,
                                "dataset_name": dataset.name
                            })
                        continue

                    rect: sly.Rectangle = label.geometry.to_bbox()
                    reference = {
                        "image_id": image_id,
                        "image_name": image_name,
                        "dataset_name": dataset.name,
                        "image_preview_url": api.image.url(TEAM_ID, WORKSPACE_ID, PROJECT.id,
                                                           dataset.id, image_id),
                        "image_url": image_info.full_storage_url,
                        KEY_IMAGE_FIELD: key_tag,
                        "bbox": [rect.top, rect.left, rect.bottom, rect.right],
                        "geometry": label.geometry.to_json()
                    }
                    result["references"][key_tag].append(reference)
            progress.iters_done_report(len(batch))

    result["all_keys"] = list(result["references"].keys())
    file_local = os.path.join(my_app.data_dir, file_remote.lstrip("/"))
    app_logger.info("Local file path: {!r}".format(file_local))
    sly.fs.ensure_base_path(file_local)
    sly.json.dump_json_file(result, file_local)
    file_info = api.file.upload(TEAM_ID, file_local, file_remote)
    api.task._set_custom_output(task_id,
                                file_info.id,
                                sly.fs.get_file_name_with_ext(file_remote),
                                description="JSON with reference items",
                                icon="zmdi zmdi-collection-text")
    #zmdi-receipt
    #zmdi-ungroup
    #zmdi-collection-text
    app_logger.info("Local file successfully uploaded to team files")

    my_app.stop()
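Example #18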
def start_import(api: sly.Api, task_id, context, state, app_logger):
    fields = [
        {"field": "data.destinationError", "payload": ""},
        {"field": "data.uploadError", "payload": ""},
        {"field": "data.uploadStarted", "payload": True},
        {"field": "data.uploadedCount", "payload": 0},
        {"field": "data.totalCount", "payload": 0},
        {"field": "data.uploadProgress", "payload": 0},
        {"field": "data.uploadDsName", "payload": ""},
        {"field": "data.uploadedDsCount", "payload": 0},
        {"field": "data.totalDsCount", "payload": 0},
        {"field": "data.uploadDsProgress", "payload": 0},
    ]
    api.app.set_fields(task_id, fields)

    remote_dir = state["remoteDir"]
    listing_flags = state["listingFlags"]

    workspace_name = state["workspaceName"]
    project_name = state["projectName"] #slugify(state["projectName"], lowercase=False, save_order=True)
    if project_name == "":
        _show_error(api, task_id, "data.destinationError", "Project name is not defined", app_logger)
        return

    #@TODO: will be added in future releases
    add_to_existing_project = False #state["addToExisting"]

    existing_meta = None
    try:
        workspace = api.workspace.get_info_by_name(TEAM_ID, workspace_name)
        if workspace is None:
            workspace = api.workspace.create(TEAM_ID, workspace_name)
            app_logger.info("Workspace {!r} is created".format(workspace.name))
        else:
            app_logger.info("Workspace {!r} already exists".format(workspace.name))

        project = api.project.get_info_by_name(workspace.id, project_name)
        if project is None:
            project = api.project.create(workspace.id, project_name)
            app_logger.info("Project {!r} is created".format(project.name))
        else:
            _show_error(api, task_id, "data.destinationError", "Project {!r} already exists".format(project.name), app_logger)
            return
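            # NOTE: the branch below is unreachable while the early return above is in place
            # (the addToExisting option is disabled, see the @TODO)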

            if add_to_existing_project is False:
                app_logger.warn("Project {!r} already exists. Allow add to existing project or change the name of "
                                "destination project. We recommend to upload to new project. Thus the existing project "
                                "will be safe. New name will be generated".format(project.name))
                project = api.project.create(workspace.id, project_name, change_name_if_conflict=True)
            else:
                existing_meta_json = api.project.get_meta(project.id)
                existing_meta = sly.ProjectMeta.from_json(existing_meta_json)

        update_res_project_icon = None
        fields = [
            {"field": "data.resultProject", "payload": project.name},
            {"field": "data.resultProjectId", "payload": project.id},
            # {"field": "data.resultProjectPreviewUrl", "payload": 0},
        ]
        api.app.set_fields(task_id, fields)

        resp = requests.get(urljoin(remote_dir, 'meta.json'))
        meta_json = resp.json()
        meta = sly.ProjectMeta.from_json(meta_json)
        if existing_meta is not None:
            meta = existing_meta.merge(meta)

        api.project.update_meta(project.id, meta.to_json())

        datasets_to_upload = []
        for ds_info, flags in zip(listing, listing_flags):
            dataset_name = ds_info['name']
            if flags["selected"] is False:
                app_logger.info("Folder {!r} is not selected, it will be skipped".format(dataset_name))
                continue
            if flags["disabled"] is True:
                app_logger.info("File {!r} is skipped".format(dataset_name))
                continue
            datasets_to_upload.append(dataset_name)

        api.task.set_field(task_id, "data.totalDsCount", len(datasets_to_upload))
        for index, dataset_name in enumerate(datasets_to_upload):
            dataset = api.dataset.get_info_by_name(project.id, dataset_name)
            if dataset is None:
                dataset = api.dataset.create(project.id, dataset_name)
                app_logger.info("Dataset {!r} is created".format(dataset.name))
            else:
                app_logger.warn("Dataset {!r} already exists. Uploading is skipped".format(dataset.name))
                _increment_ds_progress(task_id, api, index + 1, len(datasets_to_upload))
                continue

            #img_dir = reduce(urljoin, [remote_dir, dataset_name, 'img'])
            #ann_dir = reduce(urljoin, [remote_dir, dataset_name, 'ann'])
            img_dir = os.path.join(remote_dir, dataset_name, 'img/')
            ann_dir = os.path.join(remote_dir, dataset_name, 'ann/')

            cwd, img_listing = htmllistparse.fetch_listing(img_dir, timeout=30)

            uploaded_to_dataset = 0
            fields = [
                {"field": "data.totalCount", "payload": len(img_listing)},
                {"field": "data.uploadDsName", "payload": dataset.name},
            ]
            api.app.set_fields(task_id, fields)

            task_progress = sly.Progress("Uploading dataset {!r}".format(dataset.name), len(img_listing))
            for batch in sly.batched(img_listing, batch_size=50):
                try:
                    names = []
                    image_urls_batch = []
                    annotations_batch = []

                    for file_entry in batch:
                        name = file_entry.name
                        try:
                            img_url = urljoin(img_dir, name) #'https://i.imgur.com/uFYNj9Z.jpg'
                            ann_url = urljoin(ann_dir, name + sly.ANN_EXT)

                            resp = requests.get(ann_url)
                            if resp.status_code == 404:
                                ann_url = urljoin(ann_dir, sly.fs.get_file_name(name) + sly.ANN_EXT)
                                resp = requests.get(ann_url)

                            resp.raise_for_status()
                            ann_json = resp.json()

                            ann = sly.Annotation.from_json(ann_json, meta)
                        except Exception as e:
                            app_logger.warn("Image {!r} and annotation {!r} are skipped due to error: {}"
                                            .format(img_url, ann_url, repr(e)))
                            continue

                        names.append(name)
                        image_urls_batch.append(img_url)
                        annotations_batch.append(ann)

                    img_infos = api.image.upload_links(dataset.id, names, image_urls_batch)
                    uploaded_ids = [img_info.id for img_info in img_infos]
                    api.annotation.upload_anns(uploaded_ids, annotations_batch)
                    uploaded_to_dataset += len(uploaded_ids)
                except Exception as e:
                    app_logger.warn("Batch ({} items) of images is skipped due to error: {}"
                                    .format(len(batch), repr(e)))
                finally:
                    task_progress.iters_done_report(len(batch))
                    _increment_task_progress(task_id, api, task_progress)

                    #only once + to check the image urls are loaded correctly
                    if update_res_project_icon is None:
                        pinfo = api.project.get_info_by_id(project.id)
                        if pinfo.reference_image_url is None:
                            raise RuntimeError("Preview image is not accessible. Check that image URLs are public.")
                        update_res_project_icon = api.image.preview_url(pinfo.reference_image_url, 100, 100)
                        api.task.set_field(task_id, "data.resultProjectPreviewUrl", update_res_project_icon)

            _increment_ds_progress(task_id, api, index + 1, len(datasets_to_upload))
            app_logger.info("Dataset {!r} is uploaded: {} images with annotations"
                            .format(dataset.name, uploaded_to_dataset))

    except Exception as e:
        app_logger.error(repr(e))
        api.task.set_field(task_id, "data.uploadError", repr(e))

    api.task.set_output_project(task_id, project.id, project.name)
    my_app.stop()
Example #19
def main():
    api = sly.Api.from_env()

    # read source project
    src_project = api.project.get_info_by_id(PROJECT_ID)

    if src_project.type != str(sly.ProjectType.IMAGES):
        raise RuntimeError("Project {!r} has type {!r}. App works only with type {!r}"
                           .format(src_project.name, src_project.type, sly.ProjectType.IMAGES))

    src_project_meta_json = api.project.get_meta(src_project.id)
    src_project_meta = sly.ProjectMeta.from_json(src_project_meta_json)

    # create destination project
    DST_PROJECT_NAME = "{} (rasterized)".format(src_project.name)

    dst_project = api.project.create(WORKSPACE_ID, DST_PROJECT_NAME, description="rasterized", change_name_if_conflict=True)
    sly.logger.info('Destination project is created.', extra={'project_id': dst_project.id, 'project_name': dst_project.name})

    # mapping polygons -> bitmaps
    new_classes_lst = []
    for cls in src_project_meta.obj_classes:
        if need_convert(cls.geometry_type):
            new_class = cls.clone(geometry_type=sly.Bitmap)
        else:
            new_class = cls.clone()
        new_classes_lst.append(new_class)
    dst_classes = sly.ObjClassCollection(new_classes_lst)

    # create destination meta
    dst_project_meta = src_project_meta.clone(obj_classes=dst_classes)
    api.project.update_meta(dst_project.id, dst_project_meta.to_json())

    def convert_to_nonoverlapping(src_ann: sly.Annotation) -> sly.Annotation:
        common_img = np.zeros(src_ann.img_size, np.int32)  # size is (h, w)
        for idx, lbl in enumerate(src_ann.labels, start=1):
            if need_convert(lbl.obj_class.geometry_type):
                if allow_render_non_spatial_for_any_shape(lbl):
                    lbl.draw(common_img, color=idx)
                else:
                    sly.logger.warn(
                        "Object of class {!r} (class shape: {!r}) has a non-spatial shape {!r}. It will not be rendered."
                        .format(lbl.obj_class.name,
                                lbl.obj_class.geometry_type.geometry_name(),
                                lbl.geometry.geometry_name()))

        new_labels = []
        for idx, lbl in enumerate(src_ann.labels, start=1):
            new_cls = dst_project_meta.obj_classes.get(lbl.obj_class.name)
            if not need_convert(lbl.obj_class.geometry_type):
                new_lbl = lbl.clone(obj_class=new_cls)
                new_labels.append(new_lbl)
            else:
                if not allow_render_non_spatial_for_any_shape(lbl):
                    continue
                mask = common_img == idx
                if np.any(mask):  # figure may be entirely covered by others
                    g = lbl.geometry
                    new_bmp = sly.Bitmap(data=mask,
                                         labeler_login=g.labeler_login,
                                         updated_at=g.updated_at,
                                         created_at=g.created_at)
                    new_lbl = lbl.clone(geometry=new_bmp, obj_class=new_cls)
                    new_labels.append(new_lbl)

        return src_ann.clone(labels=new_labels)

    for ds_info in api.dataset.get_list(src_project.id):
        ds_progress = sly.Progress('Processing dataset: {!r}/{!r}'.format(src_project.name, ds_info.name),
                                   total_cnt=ds_info.images_count)
        dst_dataset = api.dataset.create(dst_project.id, ds_info.name)
        img_infos_all = api.image.get_list(ds_info.id)

        for img_infos in sly.batched(img_infos_all):
            img_names, img_ids, img_metas = zip(*((x.name, x.id, x.meta) for x in img_infos))

            ann_infos = api.annotation.download_batch(ds_info.id, img_ids)
            anns = [sly.Annotation.from_json(x.annotation, src_project_meta) for x in ann_infos]

            new_anns = [convert_to_nonoverlapping(ann) for ann in anns]

            new_img_infos = api.image.upload_ids(dst_dataset.id, img_names, img_ids, metas=img_metas)
            new_img_ids = [x.id for x in new_img_infos]
            api.annotation.upload_anns(new_img_ids, new_anns)

            ds_progress.iters_done_report(len(img_infos))

    api.task.set_output_project(task_id, dst_project.id, dst_project.name)
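Example #20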
def calc(api: sly.Api, task_id, context, state, app_logger):
    global progress, sum_class_area_per_image, sum_class_count_per_image, count_images_with_class

    workspace = api.workspace.get_info_by_id(WORKSPACE_ID)
    project = None
    datasets = None
    if DATASET_ID is not None:
        dataset = api.dataset.get_info_by_id(DATASET_ID)
        datasets = [dataset]

    project = api.project.get_info_by_id(PROJECT_ID)
    if datasets is None:
        datasets = api.dataset.get_list(PROJECT_ID)
    ds_images, sample_count = sample_images(api, datasets)

    fields = [
        {
            "field": "data.projectName",
            "payload": project.name
        },
        {
            "field": "data.projectId",
            "payload": project.id
        },
        {
            "field": "data.projectPreviewUrl",
            "payload": api.image.preview_url(project.reference_image_url, 100,
                                             100)
        },
        {
            "field": "data.samplePercent",
            "payload": SAMPLE_PERCENT
        },
        {
            "field": "data.sampleCount",
            "payload": sample_count
        },
    ]
    api.task.set_fields(task_id, fields)

    meta_json = api.project.get_meta(project.id)
    meta = sly.ProjectMeta.from_json(meta_json)
    colors_warning = meta.obj_classes.validate_classes_colors()

    # list classes
    class_names = ["unlabeled"]
    class_colors = [[0, 0, 0]]
    class_indices_colors = [[0, 0, 0]]
    _name_to_index = {}
    table_columns = [
        "image id", "image", "dataset", "height", "width", "channels",
        "unlabeled"
    ]
    for idx, obj_class in enumerate(meta.obj_classes):
        class_names.append(obj_class.name)
        class_colors.append(obj_class.color)
        class_index = idx + 1
        class_indices_colors.append([class_index, class_index, class_index])
        _name_to_index[obj_class.name] = class_index
        table_columns.append(get_col_name_area(obj_class.name,
                                               obj_class.color))
        table_columns.append(
            get_col_name_count(obj_class.name, obj_class.color))

    sum_class_area_per_image = [0] * len(class_names)
    sum_class_count_per_image = [0] * len(class_names)
    count_images_with_class = [0] * len(class_names)
    #count_images_with_class[0] = 1  # for unlabeled

    api.task.set_field(task_id, "data.table.columns", table_columns)

    all_stats = []
    task_progress = sly.Progress("Stats", sample_count, app_logger)
    for dataset_id, images in ds_images.items():
        dataset = api.dataset.get_info_by_id(dataset_id)
        for batch in sly.batched(images, batch_size=BATCH_SIZE):
            batch_stats = []

            image_ids = [image_info.id for image_info in batch]
            ann_infos = api.annotation.download_batch(dataset_id, image_ids)
            ann_jsons = [ann_info.annotation for ann_info in ann_infos]

            for info, ann_json in zip(batch, ann_jsons):
                ann = sly.Annotation.from_json(ann_json, meta)

                render_idx_rgb = np.zeros(ann.img_size + (3, ), dtype=np.uint8)
                render_idx_rgb[:] = BG_COLOR
                ann.draw_class_idx_rgb(render_idx_rgb, _name_to_index)
                stat_area = sly.Annotation.stat_area(render_idx_rgb,
                                                     class_names,
                                                     class_indices_colors)
                stat_count = ann.stat_class_count(class_names)

                if stat_area["unlabeled"] > 0:
                    stat_count["unlabeled"] = 1

                table_row = []
                table_row.append(info.id)
                table_row.append(
                    '<a href="{0}" rel="noopener noreferrer" target="_blank">{1}</a>'
                    .format(
                        api.image.url(TEAM_ID, WORKSPACE_ID, project.id,
                                      dataset.id, info.id), info.name))
                table_row.append(dataset.name)
                area_unl = stat_area["unlabeled"] if not np.isnan(
                    stat_area["unlabeled"]) else 0
                table_row.extend([
                    stat_area["height"], stat_area["width"],
                    stat_area["channels"],
                    round(area_unl, 2)
                ])
                resolutions_count["{}x{}x{}".format(
                    stat_area["height"], stat_area["width"],
                    stat_area["channels"])] += 1
                for idx, class_name in enumerate(class_names):
                    cur_area = 0 if np.isnan(stat_area[class_name]) else stat_area[class_name]
                    cur_count = 0 if np.isnan(stat_count[class_name]) else stat_count[class_name]
                    sum_class_area_per_image[idx] += cur_area
                    sum_class_count_per_image[idx] += cur_count
                    count_images_with_class[idx] += 1 if stat_count[class_name] > 0 else 0
                    if class_name == "unlabeled":
                        continue
                    table_row.append(round(cur_area, 2))
                    table_row.append(round(cur_count, 2))

                if len(table_row) != len(table_columns):
                    raise RuntimeError("Values for some columns are missed")
                batch_stats.append(table_row)

            all_stats.extend(batch_stats)
            progress += len(batch_stats)

            fields = [{
                "field": "data.progress",
                "payload": int(progress * 100 / sample_count)
            }, {
                "field": "data.table.data",
                "payload": batch_stats,
                "append": True
            }]
            api.task.set_fields(task_id, fields)
            task_progress.iters_done_report(len(batch_stats))

    # average nonzero class area per image
    with np.errstate(divide='ignore'):
        avg_nonzero_area = np.divide(sum_class_area_per_image,
                                     count_images_with_class)
        avg_nonzero_count = np.divide(sum_class_count_per_image,
                                      count_images_with_class)

    avg_nonzero_area = np.where(np.isnan(avg_nonzero_area), None,
                                avg_nonzero_area)
    avg_nonzero_count = np.where(np.isnan(avg_nonzero_count), None,
                                 avg_nonzero_count)

    fig = go.Figure(data=[
        go.Bar(name='Area %',
               x=class_names,
               y=avg_nonzero_area,
               yaxis='y',
               offsetgroup=1),
        go.Bar(name='Count',
               x=class_names,
               y=avg_nonzero_count,
               yaxis='y2',
               offsetgroup=2)
    ],
                    layout={
                        'yaxis': {
                            'title': 'Area'
                        },
                        'yaxis2': {
                            'title': 'Count',
                            'overlaying': 'y',
                            'side': 'right'
                        }
                    })
    # Change the bar mode
    fig.update_layout(barmode='group')  # , legend_orientation="h")

    # images count with/without classes
    images_with_count = []
    images_without_count = []
    images_with_count_text = []
    images_without_count_text = []
    for idx, class_name in enumerate(class_names):
        #if class_name == "unlabeled":
        #    continue
        with_count = count_images_with_class[idx]  # - 1 if class_name == "unlabeled" else count_images_with_class[idx]
        without_count = sample_count - with_count
        images_with_count.append(with_count)
        images_without_count.append(without_count)
        images_with_count_text.append("{} ({:.2f} %)".format(
            with_count, with_count * 100 / sample_count))
        images_without_count_text.append("{} ({:.2f} %)".format(
            without_count, without_count * 100 / sample_count))

    if len(class_names) != len(images_with_count) or len(class_names) != len(images_with_count_text) or \
        len(class_names) != len(images_without_count) or len(class_names) != len(images_without_count_text):
        raise RuntimeError("Class names are inconsistent with images counting")

    fig_with_without_count = go.Figure(data=[
        go.Bar(name='# of images that have class',
               x=class_names,
               y=images_with_count,
               text=images_with_count_text),
        go.Bar(name='# of images that do not have class',
               x=class_names,
               y=images_without_count,
               text=images_without_count_text)
    ], )
    fig_with_without_count.update_layout(
        barmode='stack')  # , legend_orientation="h")

    # barchart resolution
    resolution_labels = []
    resolution_values = []
    resolution_percent = []
    for label, value in sorted(resolutions_count.items(),
                               key=lambda item: item[1],
                               reverse=True):
        resolution_labels.append(label)
        resolution_values.append(value)
    if len(resolution_labels) > 10:
        resolution_labels = resolution_labels[:10]
        resolution_labels.append("other")
        other_value = sum(resolution_values[10:])
        resolution_values = resolution_values[:10]
        resolution_values.append(other_value)
    resolution_percent = [
        round(v * 100 / sample_count) for v in resolution_values
    ]

    #df_resolution = pd.DataFrame({'resolution': resolution_labels, 'count': resolution_values, 'percent': resolution_percent})
    pie_resolution = go.Figure(
        data=[go.Pie(labels=resolution_labels, values=resolution_values)])
    #pie_resolution = px.pie(df_resolution, names='resolution', values='count')

    # @TODO: hotfix - pie chart do not refreshes automatically
    fig.update_layout(autosize=False, height=450)
    fig_with_without_count.update_layout(autosize=False, height=450)
    pie_resolution.update_layout(autosize=False, height=450)

    # overview table
    overviewTable = {"columns": overview_columns, "data": []}
    _overview_data = []
    for idx, (class_name,
              class_color) in enumerate(zip(class_names, class_colors)):
        row = [
            idx,
            color_text(class_name, class_color),
            count_images_with_class[
                idx],  # - 1 if class_name == "unlabeled" else count_images_with_class[idx],
            sum_class_count_per_image[idx],
            None if avg_nonzero_area[idx] is None else round(
                avg_nonzero_area[idx], 2),
            None if avg_nonzero_count[idx] is None else round(
                avg_nonzero_count[idx], 2)
        ]
        _overview_data.append(row)
    overviewTable["data"] = _overview_data

    # save report to file *.lnk (link to report)
    report_name = "{}.lnk".format(project.name)
    local_path = os.path.join(my_app.data_dir, report_name)
    sly.fs.ensure_base_path(local_path)
    with open(local_path, "w") as text_file:
        print(my_app.app_url, file=text_file)
    remote_path = "/reports/classes_stats/{}/{}/{}".format(
        USER_LOGIN, workspace.name, report_name)
    remote_path = api.file.get_free_name(TEAM_ID, remote_path)
    report_name = sly.fs.get_file_name_with_ext(remote_path)
    file_info = api.file.upload(TEAM_ID, local_path, remote_path)
    report_url = api.file.get_url(file_info.id)

    fields = [
        {
            "field": "data.overviewTable",
            "payload": overviewTable
        },
        {
            "field": "data.avgAreaCount",
            "payload": json.loads(fig.to_json())
        },
        {
            "field": "data.imageWithClassCount",
            "payload": json.loads(fig_with_without_count.to_json())
        },
        {
            "field": "data.resolutionsCount",
            "payload": json.loads(pie_resolution.to_json())
        },
        {
            "field": "data.loading0",
            "payload": False
        },
        {
            "field": "data.loading1",
            "payload": False
        },
        {
            "field": "data.loading2",
            "payload": False
        },
        {
            "field": "data.loading3",
            "payload": False
        },
        {
            "field": "state.showDialog",
            "payload": True
        },
        {
            "field": "data.savePath",
            "payload": remote_path
        },
        {
            "field": "data.reportName",
            "payload": report_name
        },
        {
            "field": "data.reportUrl",
            "payload": report_url
        },
    ]
    api.task.set_fields(task_id, fields)
    api.task.set_output_report(task_id, file_info.id, report_name)
    my_app.stop()
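Example #21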
def video_stats(api: sly.Api, task_id, context, state, app_logger):

    project_info = api.project.get_info_by_id(PROJECT_ID)
    key_id_map = KeyIdMap()
    if project_info is None:
        raise RuntimeError("Project with ID {!r} not found".format(PROJECT_ID))
    if project_info.type != str(sly.ProjectType.VIDEOS):
        raise TypeError("Project type is {!r}, but have to be {!r}".format(
            project_info.type, sly.ProjectType.VIDEOS))

    meta_json = api.project.get_meta(project_info.id)
    meta = sly.ProjectMeta.from_json(meta_json)

    if len(meta.obj_classes) == 0 and CLASSES in stat_type:
        app_logger.warn("Project {!r} has no classes".format(project_info.name))

    if len(meta.tag_metas) == 0 and TAGS in stat_type:
        app_logger.warn("Project {!r} has no tags".format(project_info.name))

    if len(meta.obj_classes) == 0 and len(meta.tag_metas) == 0:
        app_logger.warn("Project {!r} has no classes and no tags".format(project_info.name))
        my_app.stop()

    if CLASSES in stat_type:
        classes = []
        counter = {}
        classes_id = []
        for idx, curr_class in enumerate(meta.obj_classes):
            classes.append(curr_class.name)
            classes_id.append(idx)
            counter[curr_class.name] = 0

        columns_classes = [
            FIRST_STRING, CLASS_NAME, 'total_objects', 'total_figures',
            'total_frames'
        ]
        data = {
            FIRST_STRING: classes_id,
            CLASS_NAME: classes,
            'total_objects': [0] * len(classes),
            'total_figures': [0] * len(classes),
            'total_frames': [0] * len(classes)
        }

    if TAGS in stat_type:
        columns = [FIRST_STRING, TAG_COLOMN]
        columns_for_values = [FIRST_STRING, TAG_COLOMN, TAG_VALUE_COLOMN]
        columns_frame_tag = [FIRST_STRING, TAG_COLOMN]  # ===========frame_tags=======
        columns_frame_tag_values = [FIRST_STRING, TAG_COLOMN, TAG_VALUE_COLOMN]  # ===========frame_tags=======
        columns_object_tag = [FIRST_STRING, TAG_COLOMN]  # ===========object_tags=======
        columns_object_tag_values = [FIRST_STRING, TAG_COLOMN, TAG_VALUE_COLOMN]  # ===========object_tags=======

        columns.extend([TOTAL])
        columns_for_values.extend([TOTAL])
        columns_frame_tag.extend([TOTAL, TOTAL + COUNT_SUFFIX])  # ===========frame_tags=======
        columns_frame_tag_values.extend([TOTAL])  # ===========frame_tags=======
        columns_object_tag.extend([TOTAL])  # ===========object_tags=======
        columns_object_tag_values.extend([TOTAL])  # ===========object_tags=======

        datasets_counts = []
        datasets_values_counts = []
        datasets_frame_tag_counts = []  # ===========frame_tags=======
        datasets_frame_tag_values_counts = []  # ===========frame_tags=======
        datasets_object_tag_counts = []  # ===========object_tags=======
        datasets_object_tag_values_counts = []  # ===========object_tags=======

    for dataset in api.dataset.get_list(PROJECT_ID):

        if CLASSES in stat_type:
            columns_classes.extend([
                dataset.name + OBJECTS, dataset.name + FIGURES,
                dataset.name + FRAMES
            ])
            classes_counter = copy.deepcopy(counter)
            figures_counter = copy.deepcopy(counter)
            frames_counter = copy.deepcopy(counter)
            data[dataset.name + OBJECTS] = []
            data[dataset.name + FIGURES] = []
            data[dataset.name + FRAMES] = []
            videos = api.video.get_list(dataset.id)
            progress_classes = sly.Progress("Processing video classes ...",
                                            len(videos), app_logger)

        if TAGS in stat_type:
            columns.extend([dataset.name])
            ds_property_tags = defaultdict(int)

            columns_for_values.extend([dataset.name])
            ds_property_tags_values = defaultdict(lambda: defaultdict(int))

            # ===========frame_tags=========================================
            columns_frame_tag.extend(
                [dataset.name, dataset.name + COUNT_SUFFIX])
            ds_frame_tags = defaultdict(int)
            ds_frame_tags_counter = defaultdict(int)

            columns_frame_tag_values.extend([dataset.name])
            ds_frame_tags_values = defaultdict(lambda: defaultdict(int))
            ds_frame_tags_values_counter = defaultdict(
                lambda: defaultdict(int))
            # ===========frame_tags=========================================

            # ===========object_tags=========================================
            columns_object_tag.extend([dataset.name])
            ds_object_tags = defaultdict(int)

            columns_object_tag_values.extend([dataset.name])
            ds_object_tags_values = defaultdict(lambda: defaultdict(int))
            # ===========object_tags=========================================

            videos = api.video.get_list(dataset.id)
            progress_tags = sly.Progress("Processing video tags ...",
                                         len(videos), app_logger)

        for batch in sly.batched(videos, batch_size=10):
            for video_info in batch:

                ann_info = api.video.annotation.download(video_info.id)
                ann = sly.VideoAnnotation.from_json(ann_info, meta, key_id_map)

                if CLASSES in stat_type:
                    classes_counter, figures_counter, frames_counter = items_counter(
                        ann, classes_counter, figures_counter, frames_counter)
                    progress_classes.iter_done_report()

                if TAGS in stat_type:
                    process_video_annotation(ann, ds_property_tags)
                    process_video_annotation_tags_values(
                        ann, ds_property_tags_values)

                    # ===========frame_tags=======
                    process_video_ann_frame_tags(ann, ds_frame_tags,
                                                 ds_frame_tags_counter)
                    process_video_ann_frame_tags_vals(ann, ds_frame_tags_values)

                    # ===========object_tags=======
                    process_video_ann_object_tags(ann, ds_object_tags)
                    process_video_ann_object_tags_vals(ann, ds_object_tags_values)

                    progress_tags.iter_done_report()

        if CLASSES in stat_type:
            data = data_counter(data, dataset, classes, classes_counter,
                                figures_counter, frames_counter)

        if TAGS in stat_type:
            datasets_counts.append((dataset.name, ds_property_tags))
            datasets_values_counts.append(
                (dataset.name, ds_property_tags_values))
            # ===========frame_tags=======
            datasets_frame_tag_counts.append((dataset.name, ds_frame_tags))
            datasets_frame_tag_values_counts.append(
                (dataset.name, ds_frame_tags_values))
            # ===========object_tags=======
            datasets_object_tag_counts.append((dataset.name, ds_object_tags))
            datasets_object_tag_values_counts.append(
                (dataset.name, ds_object_tags_values))

    if CLASSES in stat_type:
        classes.append(TOTAL)
        data[FIRST_STRING].append(len(data[FIRST_STRING]))
        for key, val in data.items():
            if key == CLASS_NAME or key == FIRST_STRING:
                continue
            data[key].append(sum(val))
        df_classes = pd.DataFrame(data, columns=columns_classes, index=classes)
        print(df_classes)

    if TAGS in stat_type:
        # =========property_tags===============================================================
        df = get_pd_tag_stat(meta, datasets_counts, columns)
        print('Total video tags stats')
        print(df)
        # =========property_tags_values=========================================================
        df_values = get_pd_tag_values_stat(datasets_values_counts,
                                           columns_for_values)
        print('Total video tags values stats')
        print(df_values)

        # =========frame_tag=====================================================================
        data_frame_tags = []
        for idx, tag_meta in enumerate(meta.tag_metas):
            name = tag_meta.name
            row_frame_tags = [idx, name]
            row_frame_tags.extend([0, 0])
            for ds_name, ds_frame_tags in datasets_frame_tag_counts:
                row_frame_tags.extend(
                    [ds_frame_tags[name], ds_frame_tags_counter[name]])
                row_frame_tags[2] += ds_frame_tags[name]
                row_frame_tags[3] += ds_frame_tags_counter[name]
            data_frame_tags.append(row_frame_tags)

        df_frame_tags = pd.DataFrame(data_frame_tags,
                                     columns=columns_frame_tag)
        total_row = list(df_frame_tags.sum(axis=0))
        total_row[0] = len(df_frame_tags)
        total_row[1] = TOTAL
        df_frame_tags.loc[len(df_frame_tags)] = total_row
        print('Total frame tags stats')
        print(df_frame_tags)

        # =========frame_tags_values=============================================================
        df_frame_tags_values = get_pd_tag_values_stat(
            datasets_frame_tag_values_counts, columns_frame_tag_values)
        print('Total frame tags values stats')
        print(df_frame_tags_values)

        # ==========object_tag================================================================
        df_object_tags = get_pd_tag_stat(meta, datasets_object_tag_counts,
                                         columns_object_tag)
        print('Total object tags stats')
        print(df_object_tags)
        # =========object_tags_values=========================================================
        df_object_values = get_pd_tag_values_stat(
            datasets_object_tag_values_counts, columns_object_tag_values)
        print('Total object tags values stats')
        print(df_object_values)

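    # The report itself is rendered by the app; only a small .lnk file pointing to
    # the app URL is uploaded to Team Files and registered as the task output.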
    report_name = "{}_{}.lnk".format(PROJECT_ID, project_info.name)
    local_path = os.path.join(my_app.data_dir, report_name)
    sly.fs.ensure_base_path(local_path)
    with open(local_path, "w") as text_file:
        print(my_app.app_url, file=text_file)
    remote_path = "/reports/video_stat/{}".format(report_name)
    remote_path = api.file.get_free_name(TEAM_ID, remote_path)
    report_name = sly.fs.get_file_name_with_ext(remote_path)
    file_info = api.file.upload(TEAM_ID, local_path, remote_path)
    report_url = api.file.get_url(file_info.id)

    fields = [
        {
            "field": "data.loading",
            "payload": False
        },
        {
            "field": "data.classesTable",
            "payload": json.loads(df_classes.to_json(orient="split"))
        },
        {
            "field": "data.tagsTable",
            "payload": json.loads(df.to_json(orient="split"))
        },
        {
            "field": "data.savePath",
            "payload": remote_path
        },
        {
            "field": "data.reportName",
            "payload": report_name
        },
        {
            "field": "data.reportUrl",
            "payload": report_url
        },
    ]

    api.task.set_fields(task_id, fields)
    api.task.set_output_report(task_id, file_info.id, report_name)
    my_app.stop()
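
# The helper items_counter used above is not part of this listing. A minimal
# sketch of what it is assumed to do (count objects, figures and annotated
# frames per class for a single sly.VideoAnnotation) could look like this:
def items_counter(ann, classes_counter, figures_counter, frames_counter):
    # One object is one tracked instance; it may have figures on many frames.
    for obj in ann.objects:
        classes_counter[obj.obj_class.name] += 1
    for frame in ann.frames:
        classes_on_frame = set()
        for figure in frame.figures:
            class_name = figure.video_object.obj_class.name
            figures_counter[class_name] += 1
            classes_on_frame.add(class_name)
        # A frame is counted once per class that appears on it.
        for class_name in classes_on_frame:
            frames_counter[class_name] += 1
    return classes_counter, figures_counter, frames_counter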
Example #22
def export_coco(api: sly.Api, task_id, context, state, app_logger):
    datasets = api.dataset.get_list(g.project_id)
    for dataset in datasets:
        coco_dataset_dir = os.path.join(g.coco_base_dir, dataset.name)
        mkdir(coco_dataset_dir)
        ann_dir = os.path.join(g.coco_base_dir, 'annotations')
        mkdir(ann_dir)

        images = api.image.get_list(dataset.id)

        # The COCO skeleton is created once per dataset (not per batch) so that
        # annotations from every batch are accumulated before the final dump below.
        data = dict(
            info=dict(
                description=None,
                url=None,
                version=1.0,
                year=dataset.created_at[:4],
                contributor=g.user.name,
                date_created=dataset.created_at,
            ),
            licenses=[dict(
                url=None,
                id=0,
                name=None,
            )],
            images=[
                # license, url, file_name, height, width, date_captured, id
            ],
            type="instances",
            annotations=[
                # segmentation, area, iscrowd, image_id, bbox, category_id, id
            ],
            categories=[
                # supercategory, id, name
            ],
        )

        meta = convert_geometry.prepare_meta(g.meta)

        for batch in sly.batched(images):
            image_ids = [image_info.id for image_info in batch]
            image_names = [image_info.name for image_info in batch]
            image_paths = [
                os.path.join(coco_dataset_dir, image_info.name)
                for image_info in batch
            ]
            api.image.download_paths(dataset.id, image_ids, image_paths)

            ann_infos = api.annotation.download_batch(dataset.id, image_ids)
            anns = [
                sly.Annotation.from_json(x.annotation, g.meta)
                for x in ann_infos
            ]
            new_anns = [
                convert_geometry.convert_annotation(ann, meta) for ann in anns
            ]

            for image_info, ann in zip(batch, new_anns):
                data["images"].append(
                    dict(
                        license=None,
                        url=image_info.full_storage_url,  # coco_url, flickr_url
                        file_name=image_info.name,  # COCO expects "file_name"
                        height=image_info.height,
                        width=image_info.width,
                        date_captured=image_info.created_at,
                        id=image_info.id,
                    ))

                for label in ann.labels:
                    segmentation = label.geometry.to_json()["points"]["exterior"]
                    segmentation = [
                        coord for sublist in segmentation for coord in sublist
                    ]

                    bbox = label.geometry.to_bbox().to_json()["points"]["exterior"]
                    bbox = [coord for sublist in bbox for coord in sublist]
                    x, y, max_x, max_y = bbox
                    width = max_x - x
                    height = max_y - y
                    bbox = (x, y, width, height)

                    data["annotations"].append(
                        dict(
                            segmentation=[segmentation],
                            area=label.geometry.area,  # wrong?
                            iscrowd=0,
                            image_id=image_info.id,
                            bbox=bbox,
                            category_id=None,
                            id=None,  # label.id?
                        ))

                    data["categories"].append(
                        dict(supercategory=None,
                             id=None,
                             name=label.obj_class.name))

        dump_json_file(data,
                       os.path.join(ann_dir, f"instances_{dataset.name}.json"))
    g.my_app.stop()
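
# A hypothetical extension, not part of the example above: the snippet leaves
# "categories" entries and "category_id" as None placeholders. Assuming g.meta
# is the sly.ProjectMeta already used for the annotations, the category table
# can be built once and reused per label, e.g.:
def build_coco_categories(meta):
    # COCO category ids conventionally start at 1.
    categories = []
    name_to_id = {}
    for idx, obj_class in enumerate(meta.obj_classes, start=1):
        name_to_id[obj_class.name] = idx
        categories.append(dict(supercategory=None, id=idx, name=obj_class.name))
    return categories, name_to_id
# With name_to_id at hand, category_id=name_to_id[label.obj_class.name] would
# replace the None placeholder in data["annotations"].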
Example #23
dst_project = api.project.create(workspace.id,
                                 dst_project_name,
                                 change_name_if_conflict=True)
if dst_project.name != dst_project_name:
    sly.logger.warn(
        "Project with name={!r} already exists. Project is saved with autogenerated name {!r}"
        .format(dst_project_name, dst_project.name))
api.project.update_meta(dst_project.id, meta.to_json())

progress = sly.Progress("Splitting images",
                        api.project.get_images_count(src_project.id))
for src_dataset in api.dataset.get_list(src_project.id):
    dst_dataset = api.dataset.create(dst_project.id, src_dataset.name)
    images = api.image.get_list(src_dataset.id)
    for batch in sly.batched(images, batch_size=BATCH_SIZE):
        image_ids = [image_info.id for image_info in batch]  # debug [281152]
        image_names = [image_info.name for image_info in batch]
        ann_infos = api.annotation.download_batch(src_dataset.id, image_ids)
        images = api.image.download_nps(src_dataset.id, image_ids)

        for ann_info, image, image_name in zip(ann_infos, images, image_names):
            window_index = 0
            ann = sly.Annotation.from_json(ann_info.annotation, meta)

            crop_names = []
            crop_images = []
            crop_anns = []
            slider = SlidingWindowsFuzzy([window_height, window_width],
                                         [overlap_y, overlap_x],
                                         SW_BORDER_STRATEGY)
Example #24
def download_project_objects(api: sly.Api, task_id, context, state,
                             app_logger):
    try:
        if not dir_exists(g.project_dir):
            mkdir(g.project_dir)
            project_meta_path = os.path.join(g.project_dir, "meta.json")
            g.project_meta = convert_object_tags(g.project_meta)
            project_meta_json = g.project_meta.to_json()
            dump_json_file(project_meta_json, project_meta_path)
            datasets = api.dataset.get_list(g.project_id)
            for dataset in datasets:
                ds_dir = os.path.join(g.project_dir, dataset.name)
                img_dir = os.path.join(ds_dir, "img")
                ann_dir = os.path.join(ds_dir, "ann")

                mkdir(ds_dir)
                mkdir(img_dir)
                mkdir(ann_dir)
                images_infos = api.image.get_list(dataset.id)
                download_progress = get_progress_cb(
                    progress_index, "Download project",
                    g.project_info.items_count * 2)
                for batch in sly.batched(images_infos):
                    image_ids = [image_info.id for image_info in batch]
                    image_names = [image_info.name for image_info in batch]
                    ann_infos = api.annotation.download_batch(
                        dataset.id, image_ids, progress_cb=download_progress)

                    image_nps = api.image.download_nps(
                        dataset.id, image_ids, progress_cb=download_progress)
                    anns = [
                        sly.Annotation.from_json(ann_info.annotation,
                                                 g.project_meta)
                        for ann_info in ann_infos
                    ]
                    selected_classes = get_selected_classes_from_ui(
                        state["classesSelected"])
                    crops = crop_and_resize_objects(image_nps, anns, state,
                                                    selected_classes,
                                                    image_names)
                    crop_nps, crop_anns, crop_names = unpack_crops(
                        crops, image_names)
                    crop_anns = copy_tags(crop_anns)
                    write_images(crop_nps, crop_names, img_dir)
                    dump_anns(crop_anns, crop_names, ann_dir)

            reset_progress(progress_index)

        global project_fs
        project_fs = sly.Project(g.project_dir, sly.OpenMode.READ)
        g.images_infos = create_img_infos(project_fs)
    except Exception as e:
        reset_progress(progress_index)
        raise e

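    # Propose a default 80/20 train/val split over all objects for the next UI step.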
    items_count = g.project_stats["objects"]["total"]["objectsInDataset"]
    train_percent = 80
    train_count = int(items_count / 100 * train_percent)
    random_split = {
        "count": {
            "total": items_count,
            "train": train_count,
            "val": items_count - train_count
        },
        "percent": {
            "total": 100,
            "train": train_percent,
            "val": 100 - train_percent
        },
        "shareImagesBetweenSplits": False,
        "sliderDisabled": False,
    }

    fields = [
        {
            "field": "data.done1",
            "payload": True
        },
        {
            "field": "state.collapsed2",
            "payload": False
        },
        {
            "field": "state.disabled2",
            "payload": False
        },
        {
            "field": "state.activeStep",
            "payload": 2
        },
        {
            "field": "data.totalImagesCount",
            "payload": items_count
        },
        {
            "field": "state.randomSplit",
            "payload": random_split
        },
    ]
    g.api.app.set_fields(g.task_id, fields)
Example #25
def do(**kwargs):
    api = sly.Api.from_env()

    src_project = api.project.get_info_by_id(PROJECT_ID)
    if src_project.type != str(sly.ProjectType.IMAGES):
        raise Exception(
            "Project {!r} has type {!r}. App works only with type {!r}".format(
                src_project.name, src_project.type, sly.ProjectType.IMAGES))

    src_project_meta_json = api.project.get_meta(src_project.id)
    src_project_meta = sly.ProjectMeta.from_json(src_project_meta_json)

    # check that project has anyshape classes
    find_anyshape = False
    new_classes_lst = []
    for cls in src_project_meta.obj_classes:
        if cls.geometry_type == sly.AnyGeometry:
            find_anyshape = True
            continue
        new_classes_lst.append(cls.clone())
    dst_classes = sly.ObjClassCollection(new_classes_lst)
    if find_anyshape is False:
        raise Exception(
            "Project {!r} doesn't have classes with shape \"Any\"".format(
                src_project.name))

    # create destination project
    dst_name = src_project.name if _SUFFIX in src_project.name else src_project.name + _SUFFIX
    dst_project = api.project.create(WORKSPACE_ID,
                                     dst_name,
                                     description=_SUFFIX,
                                     change_name_if_conflict=True)
    sly.logger.info('Destination project is created.',
                    extra={
                        'project_id': dst_project.id,
                        'project_name': dst_project.name
                    })

    dst_project_meta = src_project_meta.clone(obj_classes=dst_classes)
    api.project.update_meta(dst_project.id, dst_project_meta.to_json())

    def convert_annotation(src_ann, dst_project_meta):
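        # Rebind every label whose class has geometry "Any" to a per-geometry class
        # ("<class>_<geometry>"), creating that class on the fly and pushing the
        # updated meta to the destination project.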
        new_labels = []
        for idx, lbl in enumerate(src_ann.labels):
            lbl: sly.Label
            if lbl.obj_class.geometry_type == sly.AnyGeometry:
                actual_geometry = type(lbl.geometry)

                new_class_name = "{}_{}".format(
                    lbl.obj_class.name, actual_geometry.geometry_name())
                new_class = dst_project_meta.get_obj_class(new_class_name)
                if new_class is None:
                    new_class = sly.ObjClass(name=new_class_name,
                                             geometry_type=actual_geometry,
                                             color=sly.color.random_rgb())
                    dst_project_meta = dst_project_meta.add_obj_class(
                        new_class)
                    api.project.update_meta(dst_project.id,
                                            dst_project_meta.to_json())

                new_labels.append(lbl.clone(obj_class=new_class))
            else:
                new_labels.append(lbl)
        return src_ann.clone(labels=new_labels), dst_project_meta

    for ds_info in api.dataset.get_list(src_project.id):
        ds_progress = sly.Progress('Dataset: {!r}'.format(ds_info.name),
                                   total_cnt=ds_info.images_count)
        dst_dataset = api.dataset.create(dst_project.id, ds_info.name)
        img_infos_all = api.image.get_list(ds_info.id)

        for img_infos in sly.batched(img_infos_all):
            img_names, img_ids, img_metas = zip(*((x.name, x.id, x.meta)
                                                  for x in img_infos))

            ann_infos = api.annotation.download_batch(ds_info.id, img_ids)
            anns = [
                sly.Annotation.from_json(x.annotation, src_project_meta)
                for x in ann_infos
            ]

            new_anns = []
            for ann in anns:
                new_ann, dst_project_meta = convert_annotation(
                    ann, dst_project_meta)
                new_anns.append(new_ann)

            new_img_infos = api.image.upload_ids(dst_dataset.id,
                                                 img_names,
                                                 img_ids,
                                                 metas=img_metas)
            new_img_ids = [x.id for x in new_img_infos]
            api.annotation.upload_anns(new_img_ids, new_anns)

            ds_progress.iters_done_report(len(img_infos))

    api.task.set_output_project(task_id, dst_project.id, dst_project.name)
    my_app.stop()
def main():
    global PROJECT, RES_PROJECT, RES_PROJECT_NAME

    PROJECT = api.project.get_info_by_id(PROJECT_ID)
    if RES_PROJECT_NAME == "":
        RES_PROJECT_NAME = PROJECT.name

    read_and_validate_project_meta()
    read_csv_and_create_index()

    RES_PROJECT = api.project.create(WORKSPACE_ID,
                                     RES_PROJECT_NAME,
                                     change_name_if_conflict=True)
    my_app.logger.info("Result Project is created (name={!r}; id={})".format(
        RES_PROJECT.name, RES_PROJECT.id))

    if ASSIGN_AS == "tags":
        add_tags_to_meta()

    api.project.update_meta(RES_PROJECT.id, RES_META.to_json())

    progress = sly.Progress("Processing",
                            PROJECT.images_count,
                            ext_logger=my_app.logger)
    for dataset in api.dataset.get_list(PROJECT.id):
        res_dataset = api.dataset.create(RES_PROJECT.id, dataset.name)
        ds_images = api.image.get_list(dataset.id)
        for batch in sly.batched(ds_images):
            image_ids = [image_info.id for image_info in batch]
            image_names = [image_info.name for image_info in batch]
            image_metas = [image_info.meta for image_info in batch]

            ann_infos = api.annotation.download_batch(dataset.id, image_ids)
            anns = [
                sly.Annotation.from_json(ann_info.annotation, META)
                for ann_info in ann_infos
            ]

            original_ids = []
            res_image_names = []
            res_anns = []
            res_metas = []

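            # Match each image to a CSV row through the value of its IMAGE_TAG_NAME
            # tag and attach that row either as tags or as image metadata.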
            for image_id, image_name, image_meta, ann in zip(
                    image_ids, image_names, image_metas, anns):
                tag: sly.Tag = ann.img_tags.get(IMAGE_TAG_NAME)
                if tag is None:
                    my_app.logger.warn(
                        "Image {!r} in dataset {!r} doesn't have tag {!r}. Image is skipped"
                        .format(image_name, dataset.name, IMAGE_TAG_NAME))
                    progress.iter_done_report()
                    continue

                csv_row = CSV_INDEX.get(str(tag.value), None)
                if csv_row is None:
                    my_app.logger.warn(
                        "Match not found (id={}, name={!r}, dataset={!r}, tag_value={!r}). Image is skipped"
                        .format(image_id, image_name, dataset.name,
                                str(tag.value)))
                    progress.iter_done_report()
                    continue

                res_ann = ann.clone()
                res_meta = image_meta.copy()

                if ASSIGN_AS == "tags":
                    res_ann = assign_csv_row_as_tags(image_id, image_name,
                                                     res_ann, csv_row)
                else:  # metadata
                    res_meta = assign_csv_row_as_metadata(
                        image_id, image_name, image_meta, csv_row)

                original_ids.append(image_id)
                res_image_names.append(image_name)
                res_anns.append(res_ann)
                res_metas.append(res_meta)

            res_image_infos = api.image.upload_ids(res_dataset.id,
                                                   res_image_names,
                                                   original_ids,
                                                   metas=res_metas)
            res_image_ids = [image_info.id for image_info in res_image_infos]
            api.annotation.upload_anns(res_image_ids, res_anns)
            progress.iters_done_report(len(res_image_ids))

    api.task.set_output_project(my_app.task_id, RES_PROJECT.id,
                                RES_PROJECT.name)
Example #27
def add_images_to_project():
    sly.fs.ensure_base_path(sly.TaskPaths.RESULTS_DIR)

    task_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)

    task_id = task_config['task_id']
    append_to_existing_project = task_config['append_to_existing_project']
    server_address = task_config['server_address']
    token = task_config['api_token']

    convert_options = task_config.get('options', {})
    normalize_exif = convert_options.get('normalize_exif', True)
    remove_alpha_channel = convert_options.get('remove_alpha_channel', True)
    need_download = normalize_exif or remove_alpha_channel

    api = sly.Api(server_address, token, retry_count=5)

    task_info = api.task.get_info_by_id(task_id)
    api.add_additional_field('taskId', task_id)
    api.add_header('x-task-id', str(task_id))

    workspace_id = task_info["workspaceId"]
    project_name = task_config.get('project_name', None)
    if project_name is None:
        project_name = task_config["res_names"]["project"]

    files_list = api.task.get_import_files_list(task_id)
    if len(files_list) == 0:
        raise RuntimeError("There are no import files")

    project_info = None
    if append_to_existing_project is True:
        project_info = api.project.get_info_by_name(
            workspace_id,
            project_name,
            expected_type=sly.ProjectType.IMAGES,
            raise_error=True)
    else:
        project_info = api.project.create(workspace_id,
                                          project_name,
                                          type=sly.ProjectType.IMAGES,
                                          change_name_if_conflict=True)

    dataset_to_item = defaultdict(dict)
    for dataset in api.dataset.get_list(project_info.id):
        images = api.image.get_list(dataset.id)
        for image_info in images:
            dataset_to_item[dataset.name][image_info.name] = None

    for file_info in files_list:
        original_path = file_info["filename"]
        try:
            sly.image.validate_ext(original_path)
            item_hash = file_info["hash"]
            ds_name = get_dataset_name(original_path)
            item_name = sly.fs.get_file_name_with_ext(original_path)

            if item_name in dataset_to_item[ds_name]:
                temp_name = sly.fs.get_file_name(original_path)
                temp_ext = sly.fs.get_file_ext(original_path)
                new_item_name = "{}_{}{}".format(temp_name, sly.rand_str(5),
                                                 temp_ext)
                sly.logger.warning(
                    "Name {!r} already exists in dataset {!r}: renamed to {!r}"
                    .format(item_name, ds_name, new_item_name))
                item_name = new_item_name
            dataset_to_item[ds_name][item_name] = item_hash
        except Exception as e:
            sly.logger.warning(
                "File skipped {!r}: error occurred during processing {!r}".
                format(original_path, str(e)))

    for ds_name, ds_items in dataset_to_item.items():
        ds_info = api.dataset.get_or_create(project_info.id, ds_name)

        names = []  # list(ds_items.keys())
        hashes = []  # list(ds_items.values())
        for name, item_hash in ds_items.items():
            if item_hash is None:
                # existing image => skip
                continue
            names.append(name)
            hashes.append(item_hash)

        paths = [
            os.path.join(sly.TaskPaths.RESULTS_DIR,
                         h.replace("/", "a") + sly.image.DEFAULT_IMG_EXT)
            for h in hashes
        ]
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(ds_items))

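        # names, hashes and paths are parallel lists, so batching each of them with
        # the same batch size keeps the triples aligned.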
        for batch_names, batch_hashes, batch_paths in zip(
                sly.batched(names, 10), sly.batched(hashes, 10),
                sly.batched(paths, 10)):
            if need_download is True:
                res_batch_names = []
                res_batch_paths = []
                api.image.download_paths_by_hashes(batch_hashes, batch_paths)
                for name, path in zip(batch_names, batch_paths):
                    try:
                        img = sly.image.read(path, remove_alpha_channel)
                        sly.image.write(path, img, remove_alpha_channel)
                        res_batch_names.append(name)
                        res_batch_paths.append(path)
                    except Exception as e:
                        sly.logger.warning("Skip image {!r}: {}".format(
                            name, str(e)),
                                           extra={'file_path': path})
                api.image.upload_paths(ds_info.id, res_batch_names,
                                       res_batch_paths)

                for path in res_batch_paths:
                    sly.fs.silent_remove(path)
                #sly.fs.clean_dir(sly.TaskPaths.RESULTS_DIR)
                progress.iters_done_report(len(batch_names))
            else:
                api.image.upload_hashes(ds_info.id,
                                        batch_names,
                                        batch_hashes,
                                        progress_cb=progress.iters_done_report)

    if project_info is not None:
        sly.logger.info('PROJECT_CREATED',
                        extra={
                            'event_type': sly.EventType.PROJECT_CREATED,
                            'project_id': project_info.id
                        })
    else:
        temp_str = "Project"
        if append_to_existing_project is True:
            temp_str = "Dataset"
        raise RuntimeError("{} wasn't created: 0 files were added".format(temp_str))
    pass
def from_sly_to_pascal(api: sly.Api, task_id, context, state, app_logger):
    global PASCAL_CONTOUR_THICKNESS, TRAIN_VAL_SPLIT_COEF

    project_info = api.project.get_info_by_id(PROJECT_ID)
    meta_json = api.project.get_meta(PROJECT_ID)
    meta = sly.ProjectMeta.from_json(meta_json)
    app_logger.info("Palette has been created")

    full_archive_name = str(
        project_info.id) + '_' + project_info.name + ARCHIVE_NAME_ENDING
    full_result_dir_name = str(
        project_info.id) + '_' + project_info.name + RESULT_DIR_NAME_ENDING

    result_archive = os.path.join(my_app.data_dir, full_archive_name)
    result_dir = os.path.join(my_app.data_dir, full_result_dir_name)
    result_subdir = os.path.join(result_dir, RESULT_SUBDIR_NAME)

    result_ann_dir = os.path.join(result_subdir, ann_dir_name)
    result_images_dir = os.path.join(result_subdir, images_dir_name)
    result_class_dir_name = os.path.join(result_subdir, ann_class_dir_name)
    result_obj_dir = os.path.join(result_subdir, ann_obj_dir_name)
    result_imgsets_dir = os.path.join(result_subdir, trainval_sets_dir_name)

    sly.fs.mkdir(result_ann_dir)
    sly.fs.mkdir(result_imgsets_dir)
    sly.fs.mkdir(result_images_dir)
    sly.fs.mkdir(result_class_dir_name)
    sly.fs.mkdir(result_obj_dir)

    app_logger.info("Pascal VOC directories have been created")

    images_stats = []
    classes_colors = {}

    datasets = api.dataset.get_list(PROJECT_ID)
    dataset_names = ['trainval', 'val', 'train']
    progress = sly.Progress('Preparing images for export',
                            api.project.get_images_count(PROJECT_ID),
                            app_logger)
    for dataset in datasets:
        if dataset.name in dataset_names:
            is_trainval = 1
        else:
            is_trainval = 0

        images = api.image.get_list(dataset.id)
        for batch in sly.batched(images):
            image_ids = [image_info.id for image_info in batch]
            image_paths = [
                os.path.join(result_images_dir, image_info.name)
                for image_info in batch
            ]

            api.image.download_paths(dataset.id, image_ids, image_paths)
            ann_infos = api.annotation.download_batch(dataset.id, image_ids)
            for image_info, ann_info in zip(batch, ann_infos):
                img_title, img_ext = os.path.splitext(image_info.name)
                cur_img_filename = image_info.name

                if is_trainval == 1:
                    cur_img_stats = {
                        'classes': set(),
                        'dataset': dataset.name,
                        'name': img_title
                    }
                    images_stats.append(cur_img_stats)
                else:
                    cur_img_stats = {
                        'classes': set(),
                        'dataset': None,
                        'name': img_title
                    }
                    images_stats.append(cur_img_stats)

                if img_ext not in VALID_IMG_EXT:
                    orig_image_path = os.path.join(result_images_dir,
                                                   cur_img_filename)

                    jpg_image = img_title + ".jpg"
                    jpg_image_path = os.path.join(result_images_dir, jpg_image)

                    im = sly.image.read(orig_image_path)
                    sly.image.write(jpg_image_path, im)
                    sly.fs.silent_remove(orig_image_path)

                ann = sly.Annotation.from_json(ann_info.annotation, meta)
                tag = find_first_tag(ann.img_tags, SPLIT_TAGS)
                if tag is not None:
                    cur_img_stats['dataset'] = tag.meta.name

                valid_labels = []
                for label in ann.labels:
                    if type(label.geometry) in SUPPORTED_GEOMETRY_TYPES:
                        valid_labels.append(label)
                    else:
                        app_logger.warn(
                            f"Label has unsupported geometry type ({type(label.geometry)}) and will be skipped."
                        )

                ann = ann.clone(labels=valid_labels)
                ann_to_xml(project_info, image_info, cur_img_filename,
                           result_ann_dir, ann)
                for label in ann.labels:
                    cur_img_stats['classes'].add(label.obj_class.name)
                    classes_colors[label.obj_class.name] = tuple(
                        label.obj_class.color)

                fake_contour_th = 0
                if PASCAL_CONTOUR_THICKNESS != 0:
                    fake_contour_th = 2 * PASCAL_CONTOUR_THICKNESS + 1

                from_ann_to_instance_mask(
                    ann,
                    os.path.join(result_class_dir_name,
                                 img_title + pascal_ann_ext), fake_contour_th)
                from_ann_to_class_mask(
                    ann,
                    os.path.join(result_obj_dir, img_title + pascal_ann_ext),
                    fake_contour_th)

                progress.iter_done_report()

    classes_colors = OrderedDict(sorted(classes_colors.items(), key=lambda t: t[0]))

    with open(os.path.join(result_subdir, "colors.txt"), "w") as cc:
        if PASCAL_CONTOUR_THICKNESS != 0:
            cc.write(
                f"neutral {pascal_contour_color[0]} {pascal_contour_color[1]} {pascal_contour_color[2]}\n"
            )

        for k in classes_colors.keys():
            if k == 'neutral':
                continue

            cc.write(
                f"{k} {classes_colors[k][0]} {classes_colors[k][1]} {classes_colors[k][2]}\n"
            )

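    # Images without an explicit split tag are divided by TRAIN_VAL_SPLIT_COEF:
    # the first part is tagged as train, the remainder as val.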
    imgs_to_split = [i for i in images_stats if i['dataset'] is None]
    train_len = int(len(imgs_to_split) * TRAIN_VAL_SPLIT_COEF)

    for img_stat in imgs_to_split[:train_len]:
        img_stat['dataset'] = TRAIN_TAG_NAME
    for img_stat in imgs_to_split[train_len:]:
        img_stat['dataset'] = VAL_TAG_NAME

    write_segm_set(is_trainval, images_stats, result_imgsets_dir)
    write_main_set(is_trainval, images_stats, meta, result_imgsets_dir)

    sly.fs.archive_directory(result_dir, result_archive)
    app_logger.info("Result directory is archived")

    upload_progress = []
    remote_archive_path = "/ApplicationsData/Export-to-Pascal-VOC/{}/{}".format(
        task_id, full_archive_name)

    def _print_progress(monitor, upload_progress):
        if len(upload_progress) == 0:
            upload_progress.append(
                sly.Progress(message="Upload {!r}".format(full_archive_name),
                             total_cnt=monitor.len,
                             ext_logger=app_logger,
                             is_size=True))
        upload_progress[0].set_current_value(monitor.bytes_read)

    file_info = api.file.upload(TEAM_ID, result_archive, remote_archive_path,
                                lambda m: _print_progress(m, upload_progress))
    app_logger.info("Uploaded to Team-Files: {!r}".format(
        file_info.full_storage_url))
    api.task.set_output_archive(task_id,
                                file_info.id,
                                full_archive_name,
                                file_url=file_info.full_storage_url)

    my_app.stop()
Example #29
sly.io.json.dump_json_file(meta_json, os.path.join(dest_dir, 'meta.json'))

total_images = 0
src_dataset_infos = (
    [api.dataset.get_info_by_id(ds_id) for ds_id in src_dataset_ids] if (src_dataset_ids is not None)
    else api.dataset.get_list(project.id))

for dataset in src_dataset_infos:
    ann_dir = os.path.join(dest_dir, dataset.name, 'ann')
    sly.fs.mkdir(ann_dir)

    images = api.image.get_list(dataset.id)
    ds_progress = sly.Progress(
        'Downloading annotations for: {!r}/{!r}'.format(src_project_name, dataset.name),
        total_cnt=len(images))
    for batch in sly.batched(images):
        image_ids = [image_info.id for image_info in batch]
        image_names = [image_info.name for image_info in batch]

        #download annotations in json format
        ann_infos = api.annotation.download_batch(dataset.id, image_ids)
        ann_jsons = [ann_info.annotation for ann_info in ann_infos]

        for image_name, ann_info in zip(image_names, ann_infos):
            sly.io.json.dump_json_file(ann_info.annotation, os.path.join(ann_dir, image_name + '.json'))
        ds_progress.iters_done_report(len(batch))
        total_images += len(batch)

sly.logger.info('Project {!r} has been successfully downloaded'.format(src_project_name))
sly.logger.info('Total number of images: {!r}'.format(total_images))
def upload_and_reset(dataset_id, names, images, anns, progress):
    if len(names) > 0:
        new_image_infos = api.image.upload_nps(dataset_id, names, images)
        new_image_ids = [img_info.id for img_info in new_image_infos]
        api.annotation.upload_anns(new_image_ids, anns)
        progress.iters_done_report(len(names))
    del names[:]
    del images[:]
    del anns[:]


for dataset in api.dataset.get_list(project.id):
    dst_dataset = api.dataset.create(dst_project.id, dataset.name)
    videos = api.video.get_list(dataset.id)
    for batch in sly.batched(videos):
        for video_info in batch:
            name = sly.fs.get_file_name(video_info.name)
            ann_info = api.video.annotation.download(video_info.id)
            ann = sly.VideoAnnotation.from_json(ann_info, meta, key_id_map)

            progress = sly.Progress("Video: {!r}".format(video_info.name),
                                    len(ann.frames))
            image_names = []
            frame_images = []
            dst_anns = []
            for frame in ann.frames:
                image_names.append('{}_frame_{:05d}.png'.format(
                    name, frame.index))
                frame_images.append(
                    api.video.frame.download_np(video_info.id, frame.index))