Example #1
def find_input_datasets():
    root_files_paths = set(
        fs.list_files(TaskPaths.DATA_DIR, filter_fn=sly.image.has_valid_ext))
    files_paths = set(
        fs.list_files_recursively(TaskPaths.DATA_DIR,
                                  filter_fn=sly.image.has_valid_ext))
    files_paths = files_paths - root_files_paths

    if len(root_files_paths) + len(files_paths) == 0:
        raise RuntimeError(
            f'Input directory is empty! Supported formats list: {sly.image.SUPPORTED_IMG_EXTS}.'
        )

    datasets = defaultdict(list)
    for path in files_paths:
        ds_name = os.path.relpath(os.path.dirname(path),
                                  TaskPaths.DATA_DIR).replace(os.sep, '__')
        datasets[ds_name].append(path)

    default_ds_name = (
        DEFAULT_DS_NAME + '_' +
        sly.rand_str(8)) if DEFAULT_DS_NAME in datasets else DEFAULT_DS_NAME
    for path in root_files_paths:
        datasets[default_ds_name].append(path)

    return datasets
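
A standalone sketch of the dataset-name flattening performed above: each file's parent directory, taken relative to the input root, becomes the dataset name, with path separators collapsed to '__' (the paths below are hypothetical).

import os

DATA_DIR = '/sly_task_data/data'  # hypothetical stand-in for TaskPaths.DATA_DIR
path = os.path.join(DATA_DIR, 'ds1', 'train', 'img_001.jpg')
ds_name = os.path.relpath(os.path.dirname(path), DATA_DIR).replace(os.sep, '__')
print(ds_name)  # -> 'ds1__train'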
Example #2
def gen_video_stream_name(file_name, stream_index):
    '''
    Generate a unique name for a video stream from the given file name and stream index.
    :param file_name: str
    :param stream_index: int
    :return: str
    '''
    return "{}_stream_{}_{}{}".format(get_file_name(file_name), stream_index,
                                      rand_str(5), get_file_ext(file_name))
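
For reference, a minimal sketch of the kind of name this helper yields, reproduced with the sly.fs equivalents of get_file_name/get_file_ext (the 5-character suffix is random, so the printed value is only illustrative):

import supervisely_lib as sly

file_name = 'video.mp4'
print("{}_stream_{}_{}{}".format(
    sly.fs.get_file_name(file_name), 2, sly.rand_str(5),
    sly.fs.get_file_ext(file_name)))  # e.g. 'video_stream_2_a1b2c.mp4'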
Example #3
    def task_main_func(self):
        if constants.TOKEN() != self.info['config']['access_token']:
            raise RuntimeError('Current token != new token')

        docker_inspect_cmd = "curl -s --unix-socket /var/run/docker.sock http:/containers/$(hostname)/json"
        docker_img_info = subprocess.Popen(
            [docker_inspect_cmd],
            shell=True,
            executable="/bin/bash",
            stdout=subprocess.PIPE).communicate()[0].decode("utf-8")
        docker_img_info = json.loads(docker_img_info)
        #docker_image = docker_img_info["Config"]["Image"]
        #cur_version = docker_img_info["Config"]["Labels"]["VERSION"]
        cur_container_id = docker_img_info["Config"]["Hostname"]
        #cur_container_name = docker_img_info["Name"].split("/")[1]
        cur_volumes = docker_img_info["HostConfig"]["Binds"]
        cur_envs = docker_img_info["Config"]["Env"]

        if docker_img_info["Config"]["Labels"].get(
                "com.docker.compose.project", None) == "supervisely":
            raise RuntimeError(
                'Docker container was started from docker-compose. Please, use docker-compose to upgrade.'
            )

        self._docker_pull(self.info['docker_image'])

        new_volumes = {}
        for vol in cur_volumes:
            parts = vol.split(":")
            src = parts[0]
            dst = parts[1]
            new_volumes[src] = {'bind': dst, 'mode': 'rw'}

        cur_envs.append("REMOVE_OLD_AGENT={}".format(cur_container_id))

        container = self._docker_api.containers.run(
            self.info['docker_image'],
            runtime=self.info['config']['docker_runtime'],
            detach=True,
            name='supervisely-agent-{}-{}'.format(constants.TOKEN(),
                                                  sly.rand_str(5)),
            remove=False,
            restart_policy={"Name": "unless-stopped"},
            volumes=new_volumes,
            environment=cur_envs,
            stdin_open=False,
            tty=False)
        container.reload()
        self.logger.debug('After spawning. Container status: {}'.format(
            str(container.status)))
        self.logger.info('Docker container is spawned',
                         extra={
                             'container_id': container.id,
                             'container_name': container.name
                         })
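
The Binds-to-volumes conversion above can be exercised in isolation; note that it hardcodes mode 'rw' and drops any ':ro'/':rw' suffix a bind string may carry (the bind below is hypothetical):

cur_volumes = ['/home/user/agent_data:/sly_agent_data:rw']
new_volumes = {}
for vol in cur_volumes:
    parts = vol.split(":")
    new_volumes[parts[0]] = {'bind': parts[1], 'mode': 'rw'}  # parts[2], if any, is ignored
print(new_volumes)  # {'/home/user/agent_data': {'bind': '/sly_agent_data', 'mode': 'rw'}}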
Example #4
def inference_image_id(api: sly.Api, task_id, context, state, app_logger):
    app_logger.debug("Input data", extra={"state": state})
    image_id = state["image_id"]
    image_info = api.image.get_info_by_id(image_id)
    image_path = os.path.join(my_app.data_dir,
                              sly.rand_str(10) + image_info.name)
    api.image.download_path(image_id, image_path)
    ann_json = inference_image_path(image_path, context, state, app_logger)
    sly.fs.silent_remove(image_path)
    request_id = context["request_id"]
    my_app.send_response(request_id, data=ann_json)
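
The random prefix keeps the temporary download path unique when concurrent requests target images sharing one name; the same trick in isolation (the data_dir value is hypothetical):

import os
import supervisely_lib as sly

data_dir = '/app_data'  # hypothetical stand-in for my_app.data_dir
image_name = 'photo.jpg'
print(os.path.join(data_dir, sly.rand_str(10) + image_name))
# e.g. '/app_data/aB3dE5fG7hphoto.jpg'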
Example #5
    def upload_archive(self, task_id, dir_to_archive, archive_name):
        self.logger.info("PACK_TO_ARCHIVE ...")
        archive_name = archive_name if len(archive_name) > 0 else sly.rand_str(30)
        local_tar_path = os.path.join(constants.AGENT_TMP_DIR(), archive_name + '.tar')
        sly.fs.archive_directory(dir_to_archive, local_tar_path)

        try:
            self.upload_tar_file(task_id, local_tar_path)
        finally:
            sly.fs.silent_remove(local_tar_path)

        self.logger.info('ARCHIVE_UPLOADED', extra={'archive_name': archive_name})
Example #6
    def get_free_name(self, img_desc):
        name = img_desc.get_img_name()
        new_name = name
        names_in_ds = self.existing_names.get(img_desc.get_res_ds_name(), set())

        if name in names_in_ds:
            new_name = name + '_' + sly.rand_str(10)

        names_in_ds.add(new_name)
        self.existing_names[img_desc.get_res_ds_name()] = names_in_ds

        return new_name
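
The same keep-names-unique idea without the img_desc wrapper, as a minimal sketch (dataset and image names are hypothetical):

import supervisely_lib as sly

existing_names = {'ds1': {'img_001'}}
name = 'img_001'
names_in_ds = existing_names.setdefault('ds1', set())
new_name = name + '_' + sly.rand_str(10) if name in names_in_ds else name
names_in_ds.add(new_name)
print(new_name)  # e.g. 'img_001_aB3dE5fG7h'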
Example #7
    def upload_archive(self, task_id, dir_to_archive, archive_name):
        self.logger.info("PACK_TO_ARCHIVE ...")
        archive_name = archive_name if len(archive_name) > 0 else sly.rand_str(30)
        local_tar_path = os.path.join(constants.AGENT_TMP_DIR(), archive_name + '.tar')
        sly.fs.archive_directory(dir_to_archive, local_tar_path)

        size_mb = sly.fs.get_file_size(local_tar_path) / 1024.0 / 1024
        progress = sly.Progress("Upload archive", size_mb, ext_logger=self.logger)
        try:
            self.public_api.task.upload_dtl_archive(task_id, local_tar_path, progress.set_current_value)
        finally:
            sly.fs.silent_remove(local_tar_path)

        self.logger.info('ARCHIVE_UPLOADED', extra={'archive_name': archive_name})
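
A minimal sketch of the sly.Progress reporting used here, assuming the upload callback is invoked with megabytes transferred so far (which is why the total is expressed in megabytes):

import supervisely_lib as sly

progress = sly.Progress("Upload archive", 100)
progress.set_current_value(25)  # logs progress as 25 of 100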
Example #8
def inference_image_url(api: sly.Api, task_id, context, state, app_logger):
    app_logger.debug("Input data", extra={"state": state})

    image_url = state["image_url"]
    ext = sly.fs.get_file_ext(image_url)
    if ext == "":
        ext = ".jpg"
    local_image_path = os.path.join(my_app.data_dir, sly.rand_str(15) + ext)

    sly.fs.download(image_url, local_image_path)
    ann_json = inference_image_path(local_image_path, context, state,
                                    app_logger)
    sly.fs.silent_remove(local_image_path)

    request_id = context["request_id"]
    my_app.send_response(request_id, data=ann_json)
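
sly.fs.get_file_ext returns an empty string when the URL path carries no extension, hence the '.jpg' fallback; a quick check (the URL is hypothetical):

import supervisely_lib as sly

ext = sly.fs.get_file_ext('https://example.com/picture')
print(ext or '.jpg')  # -> '.jpg'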
Example #9
    def upload_nn(self, nn_id, nn_hash):
        local_service_log = {'nn_id': nn_id, 'nn_hash': nn_hash}

        storage_nn_dir = self.storage.nns.check_storage_object(nn_hash)
        if storage_nn_dir is None:
            self.logger.critical('NN_NOT_FOUND', extra=local_service_log)
            raise RuntimeError('NN_NOT_FOUND')
        local_tar_path = os.path.join(constants.AGENT_TMP_DIR(), sly.rand_str(30) + '.tar')
        sly.fs.archive_directory(storage_nn_dir, local_tar_path)

        size_mb = sly.fs.get_file_size(local_tar_path) / 1024.0 / 1024
        progress = sly.Progress("Upload NN weights", size_mb, ext_logger=self.logger)
        try:
            self.public_api.model.upload(nn_hash, local_tar_path, progress.set_current_value)
        finally:
            sly.fs.silent_remove(local_tar_path)

        self.logger.info('ARCHIVE_UPLOADED')
        self.logger.info('NN_UPLOADED', extra=local_service_log)
Example #10
def inference_batch_ids(api: sly.Api, task_id, context, state, app_logger):
    app_logger.debug("Input data", extra={"state": state})
    ids = state["batch_ids"]
    infos = api.image.get_info_by_id_batch(ids)
    paths = []
    for info in infos:
        paths.append(
            os.path.join(my_app.data_dir,
                         sly.rand_str(10) + info.name))
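    # download_paths takes the dataset id of the first image info, so all
    # requested ids are assumed to belong to the same dataset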
    api.image.download_paths(infos[0].dataset_id, ids, paths)

    results = []
    for image_path in paths:
        ann_json = inference_image_path(image_path, context, state, app_logger)
        results.append(ann_json)
        sly.fs.silent_remove(image_path)

    request_id = context["request_id"]
    my_app.send_response(request_id, data=results)
Example #11
def main():
    task_id = int(os.getenv("TASK_ID"))
    api = sly.Api.from_env()
    api.add_additional_field('taskId', task_id)
    api.add_header('x-task-id', str(task_id))

    task_context = api.task.get_context(task_id)
    team_id = task_context["team"]["id"]
    workspace_id = task_context["workspace"]["id"]

    with open('/workdir/src/gui.html', 'r') as file:
        gui_template = file.read()

    #@TODO: filter non-image project
    #@TODO: replace to id
    projects = utils.read_projects(api, workspace_id)

    table = []
    for i in range(40):
        table.append({"name": sly.rand_str(5), "my_value": i})

    data = const.DATA_DEFAULTS
    data["projects"] = projects
    data["taskId"] = task_id
    data[const.TABLE] = table

    payload = {}
    payload["template"] = gui_template
    payload[const.STATE] = const.STATE_DEFAULTS
    #@TODO: for debug
    payload[const.STATE][const.PROJECT_INDEX] = len(data["projects"]) - 1
    payload[const.DATA] = data

    #http://192.168.1.42/apps/sessions/54
    #"http://192.168.1.42/apps/2/sessions/54"
    jresp = api.task.set_data(task_id, payload)

    sly.logger.info("APP_STARTED")

    while True:
        time.sleep(5)
Example #12
def convert():
    with open(sly.TaskPaths.SETTINGS_PATH, "r") as settings_file:
        task_settings = json.load(settings_file)
    in_datasets = find_input_datasets()

    pr = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR,
                     task_settings['res_names']['project']),
        sly.OpenMode.CREATE)
    for ds_name, ds_path in in_datasets:
        img_paths = sly.fs.list_files(ds_path, sly.image.SUPPORTED_IMG_EXTS)
        sly.logger.info('Dataset {!r} contains {} image(s).'.format(
            ds_name, len(img_paths)))
        ds = pr.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name),
                                len(img_paths))
        for img_path in img_paths:
            item_name = sly.fs.get_file_name(img_path)
            if ds.item_exists(item_name):
                item_name = item_name + '_' + sly.rand_str(5)
            ds.add_item_file(item_name, img_path)
            progress.iter_done_report()
Example #13
def convert():
    task_settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    in_datasets = find_input_datasets()

    pr = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR,
                     task_settings['res_names']['project']),
        sly.OpenMode.CREATE)
    for ds_name, ds_path in in_datasets:
        img_paths = sly.fs.list_files(ds_path,
                                      filter_fn=sly.image.has_valid_ext)
        sly.logger.info(
            'Found {} files with supported image extensions in Dataset {!r}.'.
            format(len(img_paths), ds_name))
        ds = pr.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name),
                                len(img_paths))
        for img_path in img_paths:
            try:
                item_name = os.path.basename(img_path)
                if ds.item_exists(item_name):
                    item_name_noext, item_ext = os.path.splitext(item_name)
                    item_name = item_name_noext + '_' + sly.rand_str(
                        5) + item_ext
                ds.add_item_file(item_name, img_path)
            except Exception as e:
                exc_str = str(e)
                sly.logger.warning(
                    'Input sample skipped due to error: {}'.format(exc_str),
                    exc_info=True,
                    extra={
                        'exc_str': exc_str,
                        'dataset_name': ds_name,
                        'image_name': img_path,
                    })
            progress.iter_done_report()
Example #14
import supervisely_lib as sly

project_id = 75563
dataset_id = 317675

image_path = '/workdir/src/examples/d.jpg'
image_name = sly.fs.get_file_name_with_ext(image_path)

api = sly.Api.from_env()

if api.image.get_info_by_name(dataset_id, image_name) is not None:
    print('image (name={!r}) already exists in dataset (id={!r})'.format(image_name, dataset_id))
    new_name = '{}_{}{}'.format(sly.fs.get_file_name(image_name), sly.rand_str(5), sly.fs.get_file_ext(image_name))
    print('new name: ', new_name)
    image_name = new_name

meta = {"xxx": 777, "yyy": 999}
uploaded_image_info = api.image.upload_path(dataset_id, image_name, image_path, meta)
print("!!! uploaded_image_info", uploaded_image_info)
print("[FOR DEBUG] Image meta on server: ", uploaded_image_info.meta)

image_info = api.image.get_info_by_name(dataset_id, image_name)
print("Image meta on server: ", image_info.meta)
Example #15
def add_images_to_project():
    sly.fs.ensure_base_path(sly.TaskPaths.RESULTS_DIR)

    task_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)

    task_id = task_config['task_id']
    append_to_existing_project = task_config['append_to_existing_project']
    server_address = task_config['server_address']
    token = task_config['api_token']

    convert_options = task_config.get('options', {})
    normalize_exif = convert_options.get('normalize_exif', True)
    remove_alpha_channel = convert_options.get('remove_alpha_channel', True)
    need_download = normalize_exif or remove_alpha_channel

    api = sly.Api(server_address, token, retry_count=5)

    task_info = api.task.get_info_by_id(task_id)
    api.add_additional_field('taskId', task_id)
    api.add_header('x-task-id', str(task_id))

    workspace_id = task_info["workspaceId"]
    project_name = task_config.get('project_name', None)
    if project_name is None:
        project_name = task_config["res_names"]["project"]

    files_list = api.task.get_import_files_list(task_id)
    if len(files_list) == 0:
        raise RuntimeError("There are no import files")

    project_info = None
    if append_to_existing_project is True:
        project_info = api.project.get_info_by_name(
            workspace_id,
            project_name,
            expected_type=sly.ProjectType.IMAGES,
            raise_error=True)
    else:
        project_info = api.project.create(workspace_id,
                                          project_name,
                                          type=sly.ProjectType.IMAGES,
                                          change_name_if_conflict=True)

    dataset_to_item = defaultdict(dict)
    for dataset in api.dataset.get_list(project_info.id):
        images = api.image.get_list(dataset.id)
        for image_info in images:
            dataset_to_item[dataset.name][image_info.name] = None

    for file_info in files_list:
        original_path = file_info["filename"]
        try:
            sly.image.validate_ext(original_path)
            item_hash = file_info["hash"]
            ds_name = get_dataset_name(original_path)
            item_name = sly.fs.get_file_name_with_ext(original_path)

            if item_name in dataset_to_item[ds_name]:
                temp_name = sly.fs.get_file_name(original_path)
                temp_ext = sly.fs.get_file_ext(original_path)
                new_item_name = "{}_{}{}".format(temp_name, sly.rand_str(5),
                                                 temp_ext)
                sly.logger.warning(
                    "Name {!r} already exists in dataset {!r}: renamed to {!r}"
                    .format(item_name, ds_name, new_item_name))
                item_name = new_item_name
            dataset_to_item[ds_name][item_name] = item_hash
        except Exception as e:
            sly.logger.warning(
                "File skipped {!r}: error occurred during processing {!r}".
                format(original_path, str(e)))

    for ds_name, ds_items in dataset_to_item.items():
        ds_info = api.dataset.get_or_create(project_info.id, ds_name)

        names = []  # list(ds_items.keys())
        hashes = []  #list(ds_items.values())
        for name, hash in ds_items.items():
            if hash is None:
                #existing image => skip
                continue
            else:
                names.append(name)
                hashes.append(hash)

        paths = [
            os.path.join(sly.TaskPaths.RESULTS_DIR,
                         h.replace("/", "a") + sly.image.DEFAULT_IMG_EXT)
            for h in hashes
        ]
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(ds_items))

        for batch_names, batch_hashes, batch_paths in zip(
                sly.batched(names, 10), sly.batched(hashes, 10),
                sly.batched(paths, 10)):
            if need_download is True:
                res_batch_names = []
                res_batch_paths = []
                api.image.download_paths_by_hashes(batch_hashes, batch_paths)
                for name, path in zip(batch_names, batch_paths):
                    try:
                        img = sly.image.read(path, remove_alpha_channel)
                        sly.image.write(path, img, remove_alpha_channel)
                        res_batch_names.append(name)
                        res_batch_paths.append(path)
                    except Exception as e:
                        sly.logger.warning("Skip image {!r}: {}".format(
                            name, str(e)),
                                           extra={'file_path': path})
                api.image.upload_paths(ds_info.id, res_batch_names,
                                       res_batch_paths)

                for path in res_batch_paths:
                    sly.fs.silent_remove(path)
                #sly.fs.clean_dir(sly.TaskPaths.RESULTS_DIR)
                progress.iters_done_report(len(batch_names))
            else:
                api.image.upload_hashes(ds_info.id,
                                        batch_names,
                                        batch_hashes,
                                        progress_cb=progress.iters_done_report)

    if project_info is not None:
        sly.logger.info('PROJECT_CREATED',
                        extra={
                            'event_type': sly.EventType.PROJECT_CREATED,
                            'project_id': project_info.id
                        })
    else:
        temp_str = "Project"
        if append_to_existing_project is True:
            temp_str = "Dataset"
        raise RuntimeError("{} wasn't created: 0 files were added")
    pass
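
sly.batched, used above, splits a sequence into fixed-size chunks; zipping the three batched lists keeps names, hashes, and paths aligned within each batch. A minimal sketch:

import supervisely_lib as sly

for batch in sly.batched(list(range(7)), batch_size=3):
    print(batch)  # [0, 1, 2], then [3, 4, 5], then [6]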
Example #17
def generate_random_string(api: sly.Api, task_id, context, state):
    new_str = sly.rand_str(10)
    api.app.set_vars(task_id, "data.randomString", new_str)
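
All of these examples lean on the same primitive: sly.rand_str(n), which returns n characters drawn from ASCII letters and digits, used throughout to de-duplicate names and temporary paths:

import supervisely_lib as sly

print(sly.rand_str(10))  # e.g. 'aB3dE5fG7h'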