Code Example #1
    def init_api(self):
        self.api = sly.AgentAPI(constants.TOKEN(), constants.SERVER_ADDRESS(), self.logger, constants.TIMEOUT_CONFIG_PATH())

        if 'user_api_key' in self.info:
            self.public_api = sly.Api(constants.SERVER_ADDRESS(), self.info['user_api_key'])
            self.public_api.add_additional_field('taskId', self.info['task_id'])
            self.public_api_context = self.public_api.task.get_context(self.info['task_id'])
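Examples #1 and #3 (below) run inside the Supervisely agent and construct two clients: a low-level sly.AgentAPI for agent-to-server calls and a public sly.Api for the regular REST endpoints. A recurring pattern across these examples is tagging the public client with the task id via add_additional_field / add_header, which are then carried on subsequent requests. A minimal sketch with placeholder values:

import supervisely_lib as sly

# Placeholder server address and token; in the examples these come from
# constants, task_config.json, or environment variables.
api = sly.Api('https://app.supervise.ly', 'YOUR_API_TOKEN')
api.add_additional_field('taskId', 123)  # extra field merged into request payloads
api.add_header('x-task-id', '123')       # extra HTTP header sent with requests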
Code Example #2
File: main.py  Project: todkang/supervisely
def main():
    task_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)

    convert_options = task_config['options']
    normalize_url = True
    if convert_options is not None:
        normalize_url = convert_options.get('normalize_image_name', True)

    server_address = task_config['server_address']
    token = task_config['api_token']
    append_to_existing_project = task_config['append_to_existing_project']

    api = sly.Api(server_address, token)
    task_info = api.task.get_info_by_id(task_config['task_id'])
    # TODO migrate to passing workspace id via the task config.
    project_info = create_project(api, task_info["workspaceId"],
                                  task_config['res_names']['project'],
                                  append_to_existing_project)

    total_counter = 0
    for file_path in sly.fs.list_files_recursively(
            TaskPaths.DATA_DIR,
            filter_fn=lambda path: sly.fs.get_file_ext(path).lower() == '.txt'):
        total_counter += process_dataset_links(api,
                                               project_info,
                                               file_path,
                                               normalize_url=normalize_url)

    if total_counter == 0:
        raise RuntimeError(
            'Result project is empty! No valid links found in files.')

    dump_json_file({'project_id': project_info.id},
                   os.path.join(TaskPaths.RESULTS_DIR, 'project_info.json'))
Code Example #3
    def init_api(self):
        self.api = sly.AgentAPI(constants.TOKEN(), constants.SERVER_ADDRESS(), self.logger, constants.TIMEOUT_CONFIG_PATH())

        if self._user_api_key is not None:
            self.public_api = sly.Api(constants.SERVER_ADDRESS(), self._user_api_key, external_logger=self.logger,
                                      retry_count=constants.PUBLIC_API_RETRY_LIMIT())
            task_id = self.info['task_id']
            self.public_api.add_additional_field('taskId', task_id)
            self.public_api.add_header('x-task-id', str(task_id))
            self.public_api_context = self.public_api.task.get_context(task_id)
Code Example #4
def get_task_api():
    SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
    task_info = sly_json.load_json_file(
        os.path.join(SCRIPT_DIR, "../task_config.json"))
    task_id = task_info["task_id"]
    server_address = task_info["server_address"]
    api_token = task_info["api_token"]
    api = sly.Api(server_address, api_token, retry_count=10)
    #api.add_additional_field('taskId', task_id)
    #api.add_header('x-task-id', str(task_id))
    return task_id, api, task_info
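Typical usage of the helper above (a sketch; it assumes task_config.json sits one directory above the script, as in the snippet):

task_id, api, task_info = get_task_api()
print(task_id, task_info['server_address'])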
Code Example #5
File: utils.py  Project: DeepSystems/app_tagging_old
def get_task_api():
    PROJECT_ID = 489
    SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
    task_info = sly_json.load_json_file(
        os.path.join(SCRIPT_DIR, "../task_config.json"))
    task_id = task_info["task_id"]
    server_address = task_info["server_address"]
    api_token = task_info["api_token"]
    api = sly.Api(server_address, api_token, retry_count=10)
    api.add_additional_field('taskId', task_id)
    api.add_header('x-task-id', str(task_id))
    # context = api.task.get_data(task_id, sly.app.CONTEXT)
    # user_id = context["userId"]
    return task_id, api, PROJECT_ID
Code Example #6
File: main.py  Project: supervisely/supervisely
def add_pointclouds_to_project():
    task_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)

    task_id = task_config['task_id']
    append_to_existing_project = task_config['append_to_existing_project']

    server_address = task_config['server_address']
    token = task_config['api_token']

    #instance_type = task_config.get("instance_type", sly.ENTERPRISE)

    api = sly.Api(server_address, token)

    task_info = api.task.get_info_by_id(task_id)
    api.add_additional_field('taskId', task_id)
    api.add_header('x-task-id', str(task_id))

    workspace_id = task_info["workspaceId"]
    project_name = task_config.get('project_name')
    if project_name is None:
        project_name = task_config["res_names"]["project"]

    project_info = None
    if append_to_existing_project is True:
        project_info = api.project.get_info_by_name(workspace_id, project_name, expected_type=sly.ProjectType.POINT_CLOUDS, raise_error=True)

    files_list = api.task.get_import_files_list(task_id)

    #find related images
    related_items_info = {} #item_dir->item_name_processed->[img or json info]
    related_items = {}
    for file_info in files_list:
        original_path = file_info["filename"]
        if 'related_images' in original_path:
            related_items[original_path] = file_info
            item_dir = original_path.split('/related_images')[0]
            item_name_processed = os.path.basename(os.path.dirname(original_path))

            if item_dir not in related_items_info:
                related_items_info[item_dir] = {}
            if item_name_processed not in related_items_info[item_dir]:
                related_items_info[item_dir][item_name_processed] = []
            related_items_info[item_dir][item_name_processed].append(file_info)

    added_items = []
    for file_info in files_list:
        ds_info = None
        original_path = file_info["filename"]
        if original_path in related_items:
            continue

        try:
            file_name = sly.fs.get_file_name_with_ext(original_path)
            ext = sly.fs.get_file_ext(original_path)

            hash = file_info["hash"]

            if is_valid_ext(ext):
                if project_info is None:
                    project_info = api.project.create(workspace_id, project_name, type=sly.ProjectType.POINT_CLOUDS, change_name_if_conflict=True)
                if ds_info is None:
                    ds_name = get_dataset_name(original_path)
                    ds_info = api.dataset.get_or_create(project_info.id, ds_name)
                item_name = api.pointcloud.get_free_name(ds_info.id, file_name)
                item_info = api.pointcloud.upload_hash(ds_info.id, item_name, hash)
                added_items.append((ds_info, item_info, original_path))
        except Exception as e:
            sly.logger.warning("File skipped {!r}: error occurred during processing {!r}".format(original_path, str(e)))

    # add related images for all added items
    for ds_info, item_info, import_path in added_items:
        item_dir = os.path.dirname(import_path)
        item_import_name = sly.fs.get_file_name_with_ext(import_path)
        item_context_dir = item_import_name.replace(".", "_")

        if item_dir not in related_items_info:
            continue
        if item_context_dir not in related_items_info[item_dir]:
            continue

        files = related_items_info[item_dir][item_context_dir]
        temp_dir = os.path.join(sly.TaskPaths.DATA_DIR, item_context_dir)
        sly.fs.mkdir(temp_dir)
        context_img_to_hash = {}
        for file_import_info in files:
            original_path = file_import_info["filename"]
            save_name = sly.fs.get_file_name_with_ext(original_path)
            api.task.download_import_file(task_id, original_path, os.path.join(temp_dir, save_name))
            context_img_to_hash[os.path.join(temp_dir, save_name)] = file_import_info

        related_items = []
        files = sly.fs.list_files(temp_dir, sly.image.SUPPORTED_IMG_EXTS)
        for file in files:
            img_meta_path = os.path.join(temp_dir, sly.fs.get_file_name_with_ext(file) + ".json")
            img_meta = {}
            if sly.fs.file_exists(img_meta_path):
                img_meta = load_json_file(img_meta_path)
                if not sly.image.has_valid_ext(img_meta[ApiField.NAME]):
                    raise RuntimeError('Wrong format: name field contains path with unsupported extension')
                if img_meta[ApiField.NAME] != sly.fs.get_file_name_with_ext(file):
                    raise RuntimeError('Wrong format: name field contains wrong image path')
            related_items.append((file, img_meta, context_img_to_hash[file]['hash']))

        if len(related_items) != 0:
            rimg_infos = []
            for img_path, meta_json, hash in related_items:
                rimg_infos.append({ApiField.ENTITY_ID: item_info.id,
                                   ApiField.NAME: meta_json.get(ApiField.NAME, sly.fs.get_file_name_with_ext(img_path)),
                                   ApiField.HASH: hash,
                                   ApiField.META: meta_json.get(ApiField.META, {}) })
            api.pointcloud.add_related_images(rimg_infos)

        sly.fs.remove_dir(temp_dir)

    if project_info is not None:
        sly.logger.info('PROJECT_CREATED', extra={'event_type': sly.EventType.PROJECT_CREATED, 'project_id': project_info.id})
    else:
        temp_str = "Project"
        if append_to_existing_project is True:
            temp_str = "Dataset"
        raise RuntimeError("{} wasn't created: 0 files with supported formats were found. Supported formats: {!r}"
                           .format(temp_str, ALLOWED_POINTCLOUD_EXTENSIONS))
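The path handling in Example #6 implies the following import layout (reconstructed from the code above; file names are illustrative): each point cloud sits next to a related_images directory containing one subdirectory per item, named after the item file with dots replaced by underscores, which holds the context images plus optional per-image JSON metadata.

<dataset_dir>/
    scene_001.pcd
    related_images/
        scene_001_pcd/
            cam_front.png
            cam_front.png.json    # optional meta; its "name" field must match the image file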
Code Example #7
import json
import os
import supervisely_lib as sly

WORKSPACE_ID = int('%%WORKSPACE_ID%%')
src_project_name = '%%IN_PROJECT_NAME%%'
src_dataset_ids = %%DATASET_IDS:None%%

api = sly.Api(server_address=os.environ['SERVER_ADDRESS'], token=os.environ['API_TOKEN'])

#### End settings. ####

project = api.project.get_info_by_name(WORKSPACE_ID, src_project_name)
if project is None:
    raise RuntimeError('Project {!r} not found'.format(src_project_name))

dest_dir = os.path.join(sly.TaskPaths.OUT_ARTIFACTS_DIR, src_project_name)
sly.fs.mkdir(dest_dir)

meta_json = api.project.get_meta(project.id)
sly.io.json.dump_json_file(meta_json, os.path.join(dest_dir, 'meta.json'))

total_images = 0
src_dataset_infos = (
    [api.dataset.get_info_by_id(ds_id) for ds_id in src_dataset_ids] if (src_dataset_ids is not None)
    else api.dataset.get_list(project.id))

for dataset in src_dataset_infos:
    ann_dir = os.path.join(dest_dir, dataset.name, 'ann')
    sly.fs.mkdir(ann_dir)
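The snippet is truncated here. Given the meta.json dump and the per-dataset ann directories, the original presumably continues by writing out each image's annotation JSON inside the per-dataset loop. A hedged sketch of that continuation, assuming api.annotation.get_list returns entries with image_name and annotation fields:

    for ann_info in api.annotation.get_list(dataset.id):
        ann_path = os.path.join(ann_dir, ann_info.image_name + '.json')
        sly.io.json.dump_json_file(ann_info.annotation, ann_path)
        total_images += 1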
Code Example #8
def add_pointclouds_to_project():
    task_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)

    task_id = task_config['task_id']
    append_to_existing_project = task_config['append_to_existing_project']

    server_address = task_config['server_address']
    token = task_config['api_token']

    #instance_type = task_config.get("instance_type", sly.ENTERPRISE)

    api = sly.Api(server_address, token)

    task_info = api.task.get_info_by_id(task_id)
    api.add_additional_field('taskId', task_id)
    api.add_header('x-task-id', str(task_id))

    workspace_id = task_info["workspaceId"]
    project_name = task_config.get('project_name')
    if project_name is None:
        project_name = task_config["res_names"]["project"]

    project_info = None
    if append_to_existing_project is True:
        project_info = api.project.get_info_by_name(workspace_id, project_name, expected_type=sly.ProjectType.POINT_CLOUDS, raise_error=True)

    files_list = api.task.get_import_files_list(task_id)

    #find related images
    related_items_info = {} #item_dir->item_name_processed->[img or json info]
    related_items = {}
    for file_info in files_list:
        original_path = file_info["filename"]
        if 'related_images' in original_path:
            related_items[original_path] = file_info
            item_dir = original_path.split('/related_images')[0]
            item_name_processed = os.path.basename(os.path.dirname(original_path))

            if item_dir not in related_items_info:
                related_items_info[item_dir] = {}
            if item_name_processed not in related_items_info[item_dir]:
                related_items_info[item_dir][item_name_processed] = []
            related_items_info[item_dir][item_name_processed].append(file_info)

    project_dir = os.path.join(sly.TaskPaths.DATA_DIR, project_name)

    progress = sly.Progress('Processing', len(files_list), sly.logger)
    added_items = []
    for file_info in files_list:
        ds_info = None
        original_path = file_info["filename"]
        if original_path in related_items:
            progress.iter_done_report()
            continue

        try:
            file_name = sly.fs.get_file_name_with_ext(original_path)
            ext = sly.fs.get_file_ext(original_path)

            if ext == '.ply':
                if project_info is None:
                    project_info = api.project.create(workspace_id, project_name, type=sly.ProjectType.POINT_CLOUDS, change_name_if_conflict=True)
                if ds_info is None:
                    ds_name = get_dataset_name(original_path)
                    ds_info = api.dataset.get_or_create(project_info.id, ds_name)

                save_path = project_dir + original_path

                api.task.download_import_file(task_id, original_path, save_path)
                #points = pcl.load(save_path)
                points = o3d.io.read_point_cloud(save_path)

                new_file_name = sly.fs.get_file_name(file_name) + ".pcd"
                pcd_save_path = os.path.join(os.path.dirname(save_path), new_file_name)
                #pcl.save(points, pcd_save_path)
                # https://stackoverflow.com/questions/51350493/convertion-of-ply-format-to-pcd-format/62488893#62488893
                # https://stackoverflow.com/questions/61774682/converting-from-ply-to-pcd-format
                o3d.io.write_point_cloud(pcd_save_path, points, write_ascii=True)

                item_name = api.pointcloud.get_free_name(ds_info.id, new_file_name)
                item_info = api.pointcloud.upload_path(ds_info.id, item_name, pcd_save_path)

                added_items.append((ds_info, item_info, original_path))
            else:
                raise ValueError("Plugin supports only *.ply files.")

        except Exception as e:
            sly.logger.warning("File skipped {!r}: error occurred during processing - {!r}".format(original_path, str(e)))

        progress.iter_done_report()

    # add related images for all added items
    for ds_info, item_info, import_path in added_items:
        item_dir = os.path.dirname(import_path)
        item_import_name = sly.fs.get_file_name_with_ext(import_path)
        item_context_dir = item_import_name.replace(".", "_")

        if item_dir not in related_items_info:
            continue
        if item_context_dir not in related_items_info[item_dir]:
            continue

        files = related_items_info[item_dir][item_context_dir]
        temp_dir = os.path.join(sly.TaskPaths.DATA_DIR, item_context_dir)
        sly.fs.mkdir(temp_dir)
        context_img_to_hash = {}
        for file_import_info in files:
            original_path = file_import_info["filename"]
            save_name = sly.fs.get_file_name_with_ext(original_path)
            api.task.download_import_file(task_id, original_path, os.path.join(temp_dir, save_name))
            context_img_to_hash[os.path.join(temp_dir, save_name)] = file_import_info

        related_items = []
        files = sly.fs.list_files(temp_dir, sly.image.SUPPORTED_IMG_EXTS)
        for file in files:
            img_meta_path = os.path.join(temp_dir, sly.fs.get_file_name_with_ext(file) + ".json")
            img_meta = {}
            if sly.fs.file_exists(img_meta_path):
                img_meta = load_json_file(img_meta_path)
            related_items.append((file, img_meta, context_img_to_hash[file]['hash']))

        if len(related_items) != 0:
            rimg_infos = []
            for img_path, meta_json, hash in related_items:
                rimg_infos.append({ApiField.ENTITY_ID: item_info.id,
                                   ApiField.NAME: meta_json.get(ApiField.NAME, sly.fs.get_file_name_with_ext(img_path)),
                                   ApiField.HASH: hash,
                                   ApiField.META: meta_json.get(ApiField.META, {}) })
            api.pointcloud.add_related_images(rimg_infos)

        sly.fs.remove_dir(temp_dir)

    if project_info is not None:
        sly.logger.info('PROJECT_CREATED', extra={'event_type': sly.EventType.PROJECT_CREATED, 'project_id': project_info.id})
    else:
        temp_str = "Project"
        if append_to_existing_project is True:
            temp_str = "Dataset"
        raise RuntimeError("{} wasn't created: 0 files with supported formats were found. Supported formats: {!r}"
                           .format(temp_str, ALLOWED_POINTCLOUD_EXTENSIONS))
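Example #8 differs from Example #6 mainly in converting .ply uploads to .pcd with Open3D before uploading by path rather than by hash. The conversion step in isolation (same calls as above; paths are illustrative):

import open3d as o3d

points = o3d.io.read_point_cloud('scene.ply')
o3d.io.write_point_cloud('scene.pcd', points, write_ascii=True)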
Code Example #9
File: main.py  Project: wangjirui/supervisely
def add_videos_to_project():
    task_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)

    task_id = task_config['task_id']
    append_to_existing_project = task_config['append_to_existing_project']

    server_address = task_config['server_address']
    token = task_config['api_token']

    instance_type = task_config.get("instance_type", sly.ENTERPRISE)

    api = sly.Api(server_address, token)

    task_info = api.task.get_info_by_id(task_id)
    api.add_additional_field('taskId', task_id)
    api.add_header('x-task-id', str(task_id))

    workspace_id = task_info["workspaceId"]
    project_name = task_config.get('project_name')
    if project_name is None:
        project_name = task_config["res_names"]["project"]

    project_info = None
    if append_to_existing_project is True:
        project_info = api.project.get_info_by_name(
            workspace_id,
            project_name,
            expected_type=sly.ProjectType.VIDEOS,
            raise_error=True)

    files_list = api.task.get_import_files_list(task_id)
    for file_info in files_list:
        original_path = file_info["filename"]
        try:
            file_name = sly.fs.get_file_name_with_ext(original_path)
            all_streams = file_info["meta"]["streams"]
            hash = file_info["hash"]
            ds_info = None

            video_streams = get_video_streams(all_streams)
            for stream_info in video_streams:
                stream_index = stream_info["index"]

                if instance_type == sly.COMMUNITY:
                    if _check_video_requires_processing(
                            file_info, stream_info) is True:
                        warn_video_requires_processing(file_name)
                        continue

                if project_info is None:
                    project_info = api.project.create(
                        workspace_id,
                        project_name,
                        type=sly.ProjectType.VIDEOS,
                        change_name_if_conflict=True)

                if ds_info is None:
                    ds_name = get_dataset_name(original_path)
                    ds_info = api.dataset.get_or_create(
                        project_info.id, ds_name)

                item_name = file_name
                info = api.video.get_info_by_name(ds_info.id, item_name)
                if info is not None:
                    item_name = gen_video_stream_name(file_name, stream_index)
                    sly.logger.warning(
                        "Name {!r} already exists in dataset {!r}: renamed to {!r}"
                        .format(file_name, ds_info.name, item_name))

                _ = api.video.upload_hash(ds_info.id, item_name, hash,
                                          stream_index)
        except Exception as e:
            sly.logger.warning(
                "File skipped {!r}: error occurred during processing {!r}".
                format(original_path, str(e)))

    if project_info is not None:
        sly.logger.info('PROJECT_CREATED',
                        extra={
                            'event_type': sly.EventType.PROJECT_CREATED,
                            'project_id': project_info.id
                        })
    else:
        temp_str = "Project"
        if append_to_existing_project is True:
            temp_str = "Dataset"
        raise RuntimeError(
            "{} wasn't created: no files with supported codecs ({}) and containers ({}) were found. This is a limitation of Community Edition (CE)."
            .format(temp_str, _SUPPORTED_CODECS, _SUPPORTED_CONTAINERS))
Code Example #10
File: main.py  Project: wangjirui/supervisely
def add_images_to_project():
    sly.fs.ensure_base_path(sly.TaskPaths.RESULTS_DIR)

    task_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)

    task_id = task_config['task_id']
    append_to_existing_project = task_config['append_to_existing_project']
    server_address = task_config['server_address']
    token = task_config['api_token']

    convert_options = task_config.get('options', {})
    normalize_exif = convert_options.get('normalize_exif', True)
    remove_alpha_channel = convert_options.get('remove_alpha_channel', True)
    need_download = normalize_exif or remove_alpha_channel

    api = sly.Api(server_address, token, retry_count=5)

    task_info = api.task.get_info_by_id(task_id)
    api.add_additional_field('taskId', task_id)
    api.add_header('x-task-id', str(task_id))

    workspace_id = task_info["workspaceId"]
    project_name = task_config.get('project_name', None)
    if project_name is None:
        project_name = task_config["res_names"]["project"]

    files_list = api.task.get_import_files_list(task_id)
    if len(files_list) == 0:
        raise RuntimeError("There are no import files")

    project_info = None
    if append_to_existing_project is True:
        project_info = api.project.get_info_by_name(
            workspace_id,
            project_name,
            expected_type=sly.ProjectType.IMAGES,
            raise_error=True)
    else:
        project_info = api.project.create(workspace_id,
                                          project_name,
                                          type=sly.ProjectType.IMAGES,
                                          change_name_if_conflict=True)

    dataset_to_item = defaultdict(dict)
    for dataset in api.dataset.get_list(project_info.id):
        images = api.image.get_list(dataset.id)
        for image_info in images:
            dataset_to_item[dataset.name][image_info.name] = None

    for file_info in files_list:
        original_path = file_info["filename"]
        try:
            sly.image.validate_ext(original_path)
            item_hash = file_info["hash"]
            ds_name = get_dataset_name(original_path)
            item_name = sly.fs.get_file_name_with_ext(original_path)

            if item_name in dataset_to_item[ds_name]:
                temp_name = sly.fs.get_file_name(original_path)
                temp_ext = sly.fs.get_file_ext(original_path)
                new_item_name = "{}_{}{}".format(temp_name, sly.rand_str(5),
                                                 temp_ext)
                sly.logger.warning(
                    "Name {!r} already exists in dataset {!r}: renamed to {!r}"
                    .format(item_name, ds_name, new_item_name))
                item_name = new_item_name
            dataset_to_item[ds_name][item_name] = item_hash
        except Exception as e:
            sly.logger.warning(
                "File skipped {!r}: error occurred during processing {!r}".
                format(original_path, str(e)))

    for ds_name, ds_items in dataset_to_item.items():
        ds_info = api.dataset.get_or_create(project_info.id, ds_name)

        names = []  # list(ds_items.keys())
        hashes = []  #list(ds_items.values())
        for name, hash in ds_items.items():
            if hash is None:
                #existing image => skip
                continue
            else:
                names.append(name)
                hashes.append(hash)

        paths = [
            os.path.join(sly.TaskPaths.RESULTS_DIR,
                         h.replace("/", "a") + sly.image.DEFAULT_IMG_EXT)
            for h in hashes
        ]
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(ds_items))

        for batch_names, batch_hashes, batch_paths in zip(
                sly.batched(names, 10), sly.batched(hashes, 10),
                sly.batched(paths, 10)):
            if need_download is True:
                res_batch_names = []
                res_batch_paths = []
                api.image.download_paths_by_hashes(batch_hashes, batch_paths)
                for name, path in zip(batch_names, batch_paths):
                    try:
                        img = sly.image.read(path, remove_alpha_channel)
                        sly.image.write(path, img, remove_alpha_channel)
                        res_batch_names.append(name)
                        res_batch_paths.append(path)
                    except Exception as e:
                        sly.logger.warning("Skip image {!r}: {}".format(
                            name, str(e)),
                                           extra={'file_path': path})
                api.image.upload_paths(ds_info.id, res_batch_names,
                                       res_batch_paths)

                for path in res_batch_paths:
                    sly.fs.silent_remove(path)
                #sly.fs.clean_dir(sly.TaskPaths.RESULTS_DIR)
                progress.iters_done_report(len(batch_names))
            else:
                api.image.upload_hashes(ds_info.id,
                                        batch_names,
                                        batch_hashes,
                                        progress_cb=progress.iters_done_report)

    if project_info is not None:
        sly.logger.info('PROJECT_CREATED',
                        extra={
                            'event_type': sly.EventType.PROJECT_CREATED,
                            'project_id': project_info.id
                        })
    else:
        temp_str = "Project"
        if append_to_existing_project is True:
            temp_str = "Dataset"
        raise RuntimeError("{} wasn't created: 0 files were added")
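Example #10 uploads in fixed-size chunks via sly.batched, which yields consecutive slices of a sequence:

>>> list(sly.batched([1, 2, 3, 4, 5], 2))
[[1, 2], [3, 4], [5]]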
Code Example #11
img_url = 'http://192.168.1.69:5555/h5un6l2bnaz1vj8a9qgms4-public/assets/projects/images/V/N/iQ/wOqv967pfMoMcJQ5Zo8j666wkABscaSR0f1N8lfKgc1eG98GVI1qxTv0UPiKsbsTFuCSEVhJpU6tCaYkUD0eJo69xzy3dwCRnvHaAwKFwZdr0jwfYrqQn0uf6PeN.png'

import requests
import numpy as np
import supervisely_lib as sly
import time

response = requests.get(img_url)
img = sly.image.read_bytes(response.content)
print(img.shape)

#@TODO: what if model has already deployed ???
#@TODO: why do we need workspace_id to deploy model???

api = sly.Api(address, token)
team_id = api.team.get_id_by_name(team_name)
workspace_id = api.workspace.get_id_by_name(workspace_name, team_id)
agent_id = api.agent.get_id_by_name(agent_name, team_id)

model_info = api.model.get_info_by_name(model_name, workspace_id)
plugin_id = model_info['pluginId']

plugin_info = api.plugin.get_info_by_id(plugin_id, team_id)
plugin_version = plugin_info['defaultVersion']

#@TODO: add option to send only image_hash or image_id, not image data

tasks_ids = api.model.get_deploy_tasks(model_info['id'])
if len(tasks_ids) == 0:
    task_id = api.task.deploy_model(agent_id, model_info['id'], workspace_id,
Code Example #12
def add_videos_to_project():
    task_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)

    task_id = task_config['task_id']
    append_to_existing_project = task_config['append_to_existing_project']
    if append_to_existing_project is True:
        raise RuntimeError("Appending to existing project is not supported by this version")

    server_address = task_config['server_address']
    token = task_config['api_token']

    instance_type = task_config.get("instance_type", sly.ENTERPRISE)

    api = sly.Api(server_address, token)

    task_info = api.task.get_info_by_id(task_id)
    api.add_additional_field('taskId', task_id)
    api.add_header('x-task-id', str(task_id))

    workspace_id = task_info["workspaceId"]
    project_name = task_config.get('project_name')
    if project_name is None:
        project_name = task_config["res_names"]["project"]

    project_dir = os.path.join(sly.TaskPaths.DATA_DIR, project_name)
    path_info_map = {}

    files_list = api.task.get_import_files_list(task_id)
    for file_info in files_list:
        original_path = file_info["filename"]
        ext = sly.fs.get_file_ext(original_path)
        save_path = project_dir + original_path
        path_info_map[save_path] = file_info
        if ext == '.json':
            api.task.download_import_file(task_id, original_path, save_path)
        else:
            sly.video.validate_ext(ext)
            #@TODO: validate streams for community
            sly.fs.touch(save_path)

    # the file structure (without the original video files) is now in place;
    # validate the project structure
    project_fs = sly.VideoProject.read_single(sly.TaskPaths.DATA_DIR)

    project = api.project.create(workspace_id, project_name, type=sly.ProjectType.VIDEOS, change_name_if_conflict=True)
    api.project.update_meta(project.id, project_fs.meta.to_json())
    sly.logger.info("Project {!r} [id={!r}] has been created".format(project.name, project.id))

    for dataset_fs in project_fs:
        dataset = api.dataset.get_info_by_name(project.id, dataset_fs.name)
        if dataset is None:
            dataset = api.dataset.create(project.id, dataset_fs.name)
            sly.logger.info("dataset {!r} [id={!r}] has been created".format(dataset.name, dataset.id))

        for item_name in dataset_fs:
            item_path, ann_path = dataset_fs.get_item_paths(item_name)

            file_info = path_info_map[item_path]
            video_streams = get_video_streams(file_info["meta"]["streams"])
            if len(video_streams) != 1:
                sly.logger.warn(("Video {!r} has {} video streams. Import Videos in Supervisely format supports only"
                                 "videos with a single video stream. And annotation file has to be provided"
                                 "for each stream. Item will be skipped.")
                                .format(item_path, len(video_streams)))
                continue

            if instance_type == sly.COMMUNITY:
                if _check_video_requires_processing(file_info, video_streams[0]) is True:
                    warn_video_requires_processing(item_path)
                    continue

            item_hash = file_info["hash"]
            item_meta = {}

            video = api.video.upload_hash(dataset.id, item_name, item_hash, item_meta)

            #validate_item_annotation
            ann_json = sly.io.json.load_json_file(ann_path)
            ann = sly.VideoAnnotation.from_json(ann_json, project_fs.meta)

            # ignore existing key_id_map because the new objects will be created
            api.video.annotation.append(video.id, ann)

    sly.logger.info('PROJECT_CREATED', extra={'event_type': sly.EventType.PROJECT_CREATED, 'project_id': project.id})
Code Example #13
def add_pointclouds_to_project():
    task_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)

    task_id = task_config['task_id']
    append_to_existing_project = task_config['append_to_existing_project']
    if append_to_existing_project is True:
        raise RuntimeError(
            "Appending to existing project is not supported by this version")

    server_address = task_config['server_address']
    token = task_config['api_token']

    instance_type = task_config.get("instance_type", sly.ENTERPRISE)

    api = sly.Api(server_address, token)

    task_info = api.task.get_info_by_id(task_id)
    api.add_additional_field('taskId', task_id)
    api.add_header('x-task-id', str(task_id))

    workspace_id = task_info["workspaceId"]
    project_name = task_config.get('project_name')
    if project_name is None:
        project_name = task_config["res_names"]["project"]

    project_dir = os.path.join(sly.TaskPaths.DATA_DIR, project_name)
    path_info_map = {}

    files_list = api.task.get_import_files_list(task_id)
    for file_info in files_list:
        original_path = file_info["filename"]
        ext = sly.fs.get_file_ext(original_path)
        save_path = project_dir + original_path
        path_info_map[save_path] = file_info
        if ext == '.json':
            api.task.download_import_file(task_id, original_path, save_path)
        else:
            if sly.PointcloudDataset.related_images_dir_name in save_path:
                sly.image.validate_ext(save_path)
            else:
                sly.pointcloud.validate_ext(ext)
            sly.fs.touch(save_path)

    # the file structure (without the original pointcloud files) is now in place;
    # validate the project structure

    project_fs = sly.PointcloudProject.read_single(sly.TaskPaths.DATA_DIR)

    project = api.project.create(workspace_id,
                                 project_name,
                                 type=sly.ProjectType.POINT_CLOUDS,
                                 change_name_if_conflict=True)
    api.project.update_meta(project.id, project_fs.meta.to_json())
    sly.logger.info("Project {!r} [id={!r}] has been created".format(
        project.name, project.id))

    uploaded_objects = KeyIdMap()
    for dataset_fs in project_fs:
        dataset = api.dataset.get_info_by_name(project.id, dataset_fs.name)
        if dataset is None:
            dataset = api.dataset.create(project.id, dataset_fs.name)
            sly.logger.info("dataset {!r} [id={!r}] has been created".format(
                dataset.name, dataset.id))

        for item_name in dataset_fs:
            item_path, related_images_dir, ann_path = dataset_fs.get_item_paths(
                item_name)

            file_info = path_info_map[item_path]
            item_hash = file_info[ApiField.HASH]
            item_meta = {}

            pointcloud = api.pointcloud.upload_hash(dataset.id, item_name,
                                                    item_hash, item_meta)

            #validate_item_annotation
            ann_json = sly.io.json.load_json_file(ann_path)
            ann = sly.PointcloudAnnotation.from_json(ann_json, project_fs.meta)

            # ignore existing key_id_map because the new objects will be created
            api.pointcloud.annotation.append(pointcloud.id, ann,
                                             uploaded_objects)

            #upload related_images if exist
            related_items = dataset_fs.get_related_images(item_name)
            if len(related_items) != 0:
                rimg_infos = []
                for img_path, meta_json in related_items:
                    rimg_infos.append({
                        ApiField.ENTITY_ID: pointcloud.id,
                        ApiField.NAME: meta_json[ApiField.NAME],
                        ApiField.HASH: path_info_map[img_path][ApiField.HASH],
                        ApiField.META: meta_json[ApiField.META],
                    })
                api.pointcloud.add_related_images(rimg_infos)

    sly.logger.info('PROJECT_CREATED',
                    extra={
                        'event_type': sly.EventType.PROJECT_CREATED,
                        'project_id': project.id
                    })
Code Example #14
import os
import supervisely_lib as sly

TEAM_ID = int(os.environ['context.teamId'])
WORKSPACE_ID = int(os.environ['context.workspaceId'])

SERVER_ADDRESS2 = os.environ["modal.state.serverAddress"]
if SERVER_ADDRESS2 == "":
    raise ValueError("Remote server address is not defined")
API_TOKEN2 = os.environ["modal.state.apiToken"]
if API_TOKEN2 == "":
    raise ValueError("Remote API token is not defined")

my_app = sly.AppService()
api2 = sly.Api(SERVER_ADDRESS2, API_TOKEN2)
PROJECT_ID2 = int(os.environ['modal.state.projectId'])


@my_app.callback("copy_project")
@sly.timeit
def copy_project(api: sly.Api, task_id, context, state, app_logger):
    project2 = api2.project.get_info_by_id(PROJECT_ID2)
    if project2 is None:
        raise RuntimeError(
            f"Project with id={PROJECT_ID2} not found on remote Supervisely instance"
        )
    if project2.type != str(sly.ProjectType.IMAGES):
        raise TypeError(
            "This version supports only images projects. Please submit a feature request to the Supervisely dev "
            "team to add support for other types of projects (videos, 3d, dicom, ...)"
        )