Example #1
 def set_meta(self, new_meta):
     '''
     Save given meta to project dir in json format.
     :param new_meta: ProjectMeta class object
     '''
     self._meta = new_meta
     dump_json_file(self.meta.to_json(), self._get_project_meta_path(), indent=4)
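Every example on this page funnels through dump_json_file (and, later, load_json_file) from Supervisely's sly.io.json helpers. To run the snippets in isolation, a minimal stand-in inferred from the call sites (object, destination path, optional indent) could look like the sketch below; the real helper may differ in details such as encoding handling.

import json

def dump_json_file(data, path, indent=None):
    # Minimal stand-in: serialize `data` as JSON to `path`.
    with open(path, 'w') as fp:
        json.dump(data, fp, indent=indent)

def load_json_file(path):
    # Matching reader used by later examples.
    with open(path) as fp:
        return json.load(fp)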
Example #2
    def get_image_and_ann():
        mkdir(image_dir_path)
        mkdir(ann_dir)
        image_path = os.path.join(image_dir_path, image_name)
        api.image.download_path(image_id, image_path)
        image_ext_to_png(image_path)

        mask_color, mask_label, poly_json = from_ann_to_cityscapes_mask(
            ann, name2id, app_logger, train_val_flag)

        dump_json_file(
            poly_json,
            os.path.join(
                ann_dir,
                get_file_name(base_image_name).replace('_leftImg8bit', '') +
                cityscapes_polygons_suffix))
        write(
            os.path.join(
                ann_dir,
                get_file_name(base_image_name).replace('_leftImg8bit', '') +
                cityscapes_color_suffix), mask_color)
        write(
            os.path.join(
                ann_dir,
                get_file_name(base_image_name).replace('_leftImg8bit', '') +
                cityscapes_labels_suffix), mask_label)
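The .replace('_leftImg8bit', '') calls strip the marker from the base name before a Cityscapes suffix is appended, so the marker is not duplicated for images that already follow the Cityscapes naming scheme. A quick sketch of the resulting names, with hypothetical suffix values (the actual suffix constants are defined elsewhere in this project):

import os

base_image_name = 'aachen_000000_000019_leftImg8bit.png'  # hypothetical input
cityscapes_polygons_suffix = '_gtFine_polygons.json'      # assumed suffix value

name = os.path.splitext(base_image_name)[0]  # what get_file_name presumably returns
print(name.replace('_leftImg8bit', '') + cityscapes_polygons_suffix)
# aachen_000000_000019_gtFine_polygons.json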
Example #3
def main():
    task_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)

    convert_options = task_config['options']
    normalize_url = True
    if convert_options is not None:
        normalize_url = convert_options.get('normalize_image_name', True)

    server_address = task_config['server_address']
    token = task_config['api_token']
    append_to_existing_project = task_config['append_to_existing_project']

    api = sly.Api(server_address, token)
    task_info = api.task.get_info_by_id(task_config['task_id'])
    # TODO migrate to passing workspace id via the task config.
    project_info = create_project(api, task_info["workspaceId"],
                                  task_config['res_names']['project'],
                                  append_to_existing_project)

    total_counter = 0
    for file_path in sly.fs.list_files_recursively(
            TaskPaths.DATA_DIR,
            filter_fn=lambda path: sly.fs.get_file_ext(path).lower() == '.txt'):
        total_counter += process_dataset_links(api,
                                               project_info,
                                               file_path,
                                               normalize_url=normalize_url)

    if total_counter == 0:
        raise RuntimeError(
            'Result project is empty! No valid links found in files.')

    dump_json_file({'project_id': project_info.id},
                   os.path.join(TaskPaths.RESULTS_DIR, 'project_info.json'))
Example #4
    def run(self):
        try:
            self.init_api()
            self.future_log = self.executor_log.submit(
                self.submit_log)  # run log submitting
        except Exception as e:
            # logging is not available at this point, so there is little else we can do
            print(e)
            dump_json_file(
                str(e),
                os.path.join(constants.AGENT_ROOT_DIR(), 'logger_fail.json'))
            os._exit(1)  # ok, documented

        try:
            self.report_start()
            self.init_additional()
            self.run_and_wait(self.task_main_func)
        except StopTaskException:
            exit_status = self.end_log_stop()
        except Exception as e:
            exit_status = self.end_log_crash(e)
        else:
            exit_status = self.end_log_finish()

        self.logger.info("WAIT_FOR_TASK_LOG")
        self.stop_log_thread()

        sys.exit(exit_codes[exit_status])
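The else clause on the second try block runs only when run_and_wait returns without raising, so exactly one of the three end_log_* handlers assigns exit_status. The same control flow in miniature:

status = None
try:
    result = 1 + 1        # stand-in for self.run_and_wait(...)
except Exception:
    status = 'crashed'
else:
    status = 'finished'   # reached only if the try body raised nothing
print(status)             # finished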
Example #5
    def download_step(self):
        if self.info.get('nn_model', None) is None:
            self.logger.critical('TASK_NN_EMPTY')
            raise ValueError('TASK_NN_EMPTY')

        self.data_mgr.download_nn(self.info['nn_model']['title'],
                                  self.dir_model)

        #@TODO: only for compatibility with old models
        shutil.move(self.dir_model, self.dir_model + '_delme')
        shutil.move(
            os.path.join(self.dir_model + '_delme',
                         self.info['nn_model']['title']), self.dir_model)
        sly.fs.remove_dir(self.dir_model + '_delme')

        out_cfg = {
            **self.info['task_settings'],  # settings from server
            'connection': {
                'server_address': constants.SERVER_ADDRESS(),
                'token': constants.TOKEN(),
                'task_id': str(self.info['task_id']),
            },
            'model_settings': self.info['task_settings']
        }

        dump_json_file(out_cfg,
                       self.config_path1)  # Deprecated 'task_settings.json'
        dump_json_file(out_cfg,
                       self.config_path2)  # New style task_config.json
        self.report_step_done(TaskStep.DOWNLOAD)
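Note the dict-unpacking merge used to build out_cfg: entries written after **self.info['task_settings'] ('connection', 'model_settings') win on key collision, because later items override unpacked ones. A two-line illustration:

base = {'lr': 0.01, 'connection': 'old'}
merged = {**base, 'connection': 'new'}  # later keys override unpacked ones
print(merged)                           # {'lr': 0.01, 'connection': 'new'}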
Example #6
 def dump_json(self, path):
     '''
     Save current class object data in json format by given path
     :param path: str
     '''
     simple_dict = self.to_dict()
     dump_json_file(simple_dict, path, indent=4)
Example #7
 def download_step(self):
     self.logger.info("DOWNLOAD_DATA")
     human_config = self.make_human_config()
     dump_json_file(human_config, self.config_path1)
     dump_json_file(human_config, self.config_path2)
     hardlink_or_copy_tree(constants.AGENT_IMPORT_DIR(), self.dir_data)
     self.report_step_done(TaskStep.DOWNLOAD)
Example #8
 def set_ann(self, item_name: str, ann: Annotation):
     if type(ann) is not Annotation:
         raise TypeError(
             "Type of 'ann' have to be Annotation, not a {}".format(
                 type(ann)))
     dst_ann_path = self.get_ann_path(item_name)
     dump_json_file(ann.to_json(), dst_ann_path)
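The exact-type check type(ann) is not Annotation deliberately rejects subclasses, which an isinstance test would accept:

class Base:
    pass

class Child(Base):
    pass

obj = Child()
print(type(obj) is Base)      # False: exact-type check rejects subclasses
print(isinstance(obj, Base))  # True: isinstance accepts them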
Example #9
    def _convert_sample(self, sample_info, project_meta: ProjectMeta):
        image_name = sample_info.image_name
        ext = sample_info.ia_data['image_ext']
        src_image_path = join(self.dataset_dir, 'images', image_name + ext)

        sly.fs.copy_file(src_image_path, sample_info.img_path)
        ann = self._get_ann(self.dataset_dir, image_name + ext, project_meta)
        packed_ann = ann.pack()
        dump_json_file(packed_ann, sample_info.ann_path)
Example #10
 def set_ann_dict(self, item_name: str, ann: dict):
     '''
     Save given annotation with given name to dataset annotations dir in json format.
     :param item_name: str
     :param ann: dict (json format)
     '''
     if type(ann) is not dict:
         raise TypeError("Ann should be a dict, not a {}".format(type(ann)))
     dst_ann_path = self.get_ann_path(item_name)
     dump_json_file(ann, dst_ann_path, indent=4)
Example #11
 def set_ann(self, item_name: str, ann):
     '''
     Save given annotation with given name to dataset annotations dir in json format.
     :param item_name: str
     :param ann: Annotation class object (raises TypeError otherwise)
     '''
     if type(ann) is not self.annotation_class:
         raise TypeError("Type of 'ann' have to be Annotation, not a {}".format(type(ann)))
     dst_ann_path = self.get_ann_path(item_name)
     dump_json_file(ann.to_json(), dst_ann_path, indent=4)
Example #12
 def set_ann(self, item_name: str, ann):
     '''
     Save given video annotation for the given video to the appropriate folder.
     :param item_name: str
     :param ann: VideoAnnotation class object (raises TypeError otherwise)
     '''
     if type(ann) is not self.annotation_class:
         raise TypeError("Type of 'ann' have to be Annotation, not a {}".format(type(ann)))
     dst_ann_path = self.get_ann_path(item_name)
     dump_json_file(ann.to_json(), dst_ann_path)
Example #13
    def _convert_sample(self, sample_info, masks_map,
                        project_meta: ProjectMeta):
        image_name = sample_info.image_name
        src_image_path = sample_info.ia_data['image_orig_path']

        src_image = np.array(Image.open(src_image_path))

        cv2.imwrite(sample_info.img_path, src_image[:, :, ::-1])  # RGB (PIL) -> BGR (OpenCV)
        ann = self._get_ann(image_name, masks_map, src_image.shape[:2],
                            project_meta)
        packed_ann = ann.pack()
        dump_json_file(packed_ann, sample_info.ann_path)
Example #14
    def download_step(self):
        self.logger.info("DOWNLOAD_DATA")
        dump_json_file(self.info['config'], self.config_path1)
        dump_json_file(self.info['config'], self.config_path2)

        model = agent_utils.get_single_item_or_die(self.info, 'models',
                                                   'config')
        self.data_mgr.download_nn(model['title'], self.dir_model)

        project_name = agent_utils.get_single_item_or_die(
            self.info, 'projects', 'config')['title']
        self.data_mgr.download_project(self.dir_data, project_name)

        #@TODO: only for compatibility with old models
        shutil.move(self.dir_model, self.dir_model + '_delme')
        shutil.move(os.path.join(self.dir_model + '_delme', model['title']),
                    self.dir_model)
        sly.fs.remove_dir(self.dir_model + '_delme')

        self.report_step_done(TaskStep.DOWNLOAD)
Example #15
def main():
    args = parse_args()
    with open(args.in_file) as f:
        lines = f.readlines()
    names_list = [ln for ln in (line.strip() for line in lines) if ln]

    out_classes = sly.ObjClassCollection(items=[
        sly.ObjClass(name=name, geometry_type=sly.Rectangle)
        for name in names_list
    ])

    cls_mapping = {x: idx for idx, x in enumerate(names_list)}
    res_cfg = {
        SETTINGS: {},
        'out_classes': out_classes.to_json(),
        'class_title_to_idx': cls_mapping,
    }

    config_filename = os.path.join(args.out_dir,
                                   sly.TaskPaths.MODEL_CONFIG_NAME)
    dump_json_file(res_cfg, config_filename, indent=4)
    print('Done: {} -> {}'.format(args.in_file, config_filename))
Example #16
def modify_label(project_path, project_meta, item_paths):
    img = sly.image.read(item_paths.img_path)
    h, w, c = img.shape

    ann, _ = sly.Annotation.load_json_file(item_paths.ann_path, project_meta)

    ann_json = ann.to_json()
    ann_json['size']['height'], ann_json['size']['width'] = h, w

    project_dirname = os.path.basename(project_path.split('/')[-2])
    save_dirname = project_dirname + '_modify'
    par_dirpath = os.path.join(project_path, os.pardir)
    save_path = os.path.join(par_dirpath, save_dirname)
    rst_fpath = item_paths.ann_path.replace(project_path, save_path)
    rst_dirpath = os.path.dirname(rst_fpath)

    cg.folder_exists(rst_dirpath, create_=True)

    dump_json_file(ann_json, rst_fpath)
    print('Modify & Save results : {}'.format(rst_fpath))

    return True
Example #17
 def dump_json(self, path):
     simple_dict = self.to_dict()
     dump_json_file(simple_dict, path, indent=4)
Example #18
def export_only_labeled_items(api: sly.Api, task_id, context, state,
                              app_logger):

    project = api.project.get_info_by_id(PROJECT_ID)
    project_name = project.name
    meta_json = api.project.get_meta(PROJECT_ID)
    meta = sly.ProjectMeta.from_json(meta_json)

    if len(meta.obj_classes) == 0 and len(meta.tag_metas) == 0:
        logger.warn('Project {} has no labeled items'.format(project_name))
        my_app.stop()

    RESULT_DIR = os.path.join(my_app.data_dir, RESULT_DIR_NAME, project_name)
    RESULT_ARCHIVE_PATH = os.path.join(my_app.data_dir, RESULT_DIR_NAME)
    ARCHIVE_NAME = '{}_{}_{}.tar.gz'.format(TASK_ID, PROJECT_ID, project_name)
    RESULT_ARCHIVE = os.path.join(my_app.data_dir, ARCHIVE_NAME)
    remote_archive_path = "/{}/{}".format(RESULT_DIR_NAME, ARCHIVE_NAME)
    if api.file.exists(TEAM_ID, remote_archive_path):
        logger.warn('Archive with name {} already exists in {} folder'.format(
            ARCHIVE_NAME, RESULT_DIR_NAME))
        my_app.stop()

    sly.fs.mkdir(RESULT_DIR)
    app_logger.info("Export folder has been created")

    if project.type == str(sly.ProjectType.IMAGES):
        project_fs = Project(RESULT_DIR, OpenMode.CREATE)
        project_fs.set_meta(meta)
        for dataset_info in api.dataset.get_list(PROJECT_ID):
            dataset_id = dataset_info.id
            dataset_fs = project_fs.create_dataset(dataset_info.name)
            images = api.image.get_list(dataset_id)

            ds_progress = Progress('Downloading dataset: {}'.format(
                dataset_info.name),
                                   total_cnt=len(images))
            labeled_items_cnt = 0
            not_labeled_items_cnt = 0
            for batch in sly.batched(images, batch_size=10):
                image_ids = [image_info.id for image_info in batch]
                image_names = [image_info.name for image_info in batch]

                ann_infos = api.annotation.download_batch(
                    dataset_id, image_ids)
                ann_jsons = [ann_info.annotation for ann_info in ann_infos]

                if DOWNLOAD_ITEMS:
                    batch_imgs_bytes = api.image.download_bytes(
                        dataset_id, image_ids)
                    for name, img_bytes, ann_json in zip(
                            image_names, batch_imgs_bytes, ann_jsons):
                        ann = sly.Annotation.from_json(ann_json, meta)
                        if ann.is_empty():
                            not_labeled_items_cnt += 1
                            continue
                        dataset_fs.add_item_raw_bytes(name, img_bytes,
                                                      ann_json)
                        labeled_items_cnt += 1
                else:
                    ann_dir = os.path.join(RESULT_DIR, dataset_info.name,
                                           'ann')
                    sly.fs.mkdir(ann_dir)
                    for image_name, ann_json in zip(image_names, ann_jsons):
                        ann = sly.Annotation.from_json(ann_json, meta)
                        if ann.is_empty():
                            not_labeled_items_cnt += 1
                            continue
                        sly.io.json.dump_json_file(
                            ann_json,
                            os.path.join(ann_dir, image_name + '.json'))
                        labeled_items_cnt += 1

                ds_progress.iters_done_report(len(batch))
            logger.info(
                'In dataset {} {} items labeled, {} items not labeled'.format(
                    dataset_info.name, labeled_items_cnt,
                    not_labeled_items_cnt))
            if len(images) == not_labeled_items_cnt:
                logger.warn('There are no labeled items in dataset {}'.format(
                    dataset_info.name))

    elif project.type == str(sly.ProjectType.VIDEOS):
        key_id_map = KeyIdMap()
        project_fs = VideoProject(RESULT_DIR, OpenMode.CREATE)
        project_fs.set_meta(meta)
        for dataset_info in api.dataset.get_list(PROJECT_ID):
            dataset_fs = project_fs.create_dataset(dataset_info.name)
            videos = api.video.get_list(dataset_info.id)
            labeled_items_cnt = 0
            not_labeled_items_cnt = 0
            ds_progress = Progress('Downloading dataset: {}'.format(
                dataset_info.name),
                                   total_cnt=len(videos))
            for batch in sly.batched(videos, batch_size=10):
                video_ids = [video_info.id for video_info in batch]
                video_names = [video_info.name for video_info in batch]
                ann_jsons = api.video.annotation.download_bulk(
                    dataset_info.id, video_ids)
                for video_id, video_name, ann_json in zip(
                        video_ids, video_names, ann_jsons):
                    video_ann = sly.VideoAnnotation.from_json(
                        ann_json, meta, key_id_map)
                    if video_ann.is_empty():
                        not_labeled_items_cnt += 1
                        continue
                    video_file_path = dataset_fs.generate_item_path(video_name)
                    labeled_items_cnt += 1
                    if DOWNLOAD_ITEMS:
                        api.video.download_path(video_id, video_file_path)
                    dataset_fs.add_item_file(video_name,
                                             video_file_path,
                                             ann=video_ann,
                                             _validate_item=False)

                ds_progress.iters_done_report(len(batch))
            logger.info(
                'In dataset {} {} items labeled, {} items not labeled'.format(
                    dataset_info.name, labeled_items_cnt,
                    not_labeled_items_cnt))
            if len(videos) == not_labeled_items_cnt:
                logger.warn('There are no labeled items in dataset {}'.format(
                    dataset_info.name))

        project_fs.set_key_id_map(key_id_map)

    elif project.type == str(sly.ProjectType.POINT_CLOUDS):
        key_id_map = KeyIdMap()
        project_fs = PointcloudProject(RESULT_DIR, OpenMode.CREATE)
        project_fs.set_meta(meta)
        for dataset_info in api.dataset.get_list(PROJECT_ID):
            dataset_fs = project_fs.create_dataset(dataset_info.name)
            pointclouds = api.pointcloud.get_list(dataset_info.id)
            labeled_items_cnt = 0
            not_labeled_items_cnt = 0
            ds_progress = Progress('Downloading dataset: {!r}'.format(
                dataset_info.name),
                                   total_cnt=len(pointclouds))
            for batch in sly.batched(pointclouds, batch_size=10):
                pointcloud_ids = [
                    pointcloud_info.id for pointcloud_info in batch
                ]
                pointcloud_names = [
                    pointcloud_info.name for pointcloud_info in batch
                ]

                ann_jsons = api.pointcloud.annotation.download_bulk(
                    dataset_info.id, pointcloud_ids)

                for pointcloud_id, pointcloud_name, ann_json in zip(
                        pointcloud_ids, pointcloud_names, ann_jsons):
                    pc_ann = sly.PointcloudAnnotation.from_json(
                        ann_json, meta, key_id_map)
                    if pc_ann.is_empty():
                        not_labeled_items_cnt += 1
                        continue
                    pointcloud_file_path = dataset_fs.generate_item_path(
                        pointcloud_name)
                    labeled_items_cnt += 1
                    if DOWNLOAD_ITEMS:
                        api.pointcloud.download_path(pointcloud_id,
                                                     pointcloud_file_path)
                        related_images_path = dataset_fs.get_related_images_path(
                            pointcloud_name)
                        related_images = api.pointcloud.get_list_related_images(
                            pointcloud_id)
                        for rimage_info in related_images:
                            name = rimage_info[ApiField.NAME]
                            rimage_id = rimage_info[ApiField.ID]
                            path_img = os.path.join(related_images_path, name)
                            path_json = os.path.join(related_images_path,
                                                     name + ".json")
                            api.pointcloud.download_related_image(
                                rimage_id, path_img)
                            dump_json_file(rimage_info, path_json)

                    dataset_fs.add_item_file(pointcloud_name,
                                             pointcloud_file_path,
                                             ann=pc_ann,
                                             _validate_item=False)

                ds_progress.iters_done_report(len(batch))
            logger.info(
                'In dataset {} {} items labeled, {} items not labeled'.format(
                    dataset_info.name, labeled_items_cnt,
                    not_labeled_items_cnt))
            if len(pointclouds) == not_labeled_items_cnt:
                logger.warn('There are no labeled items in dataset {}'.format(
                    dataset_info.name))

        project_fs.set_key_id_map(key_id_map)

    sly.fs.archive_directory(RESULT_ARCHIVE_PATH, RESULT_ARCHIVE)
    app_logger.info("Result directory is archived")

    upload_progress = []

    def _print_progress(monitor, upload_progress):
        if len(upload_progress) == 0:
            upload_progress.append(
                sly.Progress(message="Upload {!r}".format(ARCHIVE_NAME),
                             total_cnt=monitor.len,
                             ext_logger=app_logger,
                             is_size=True))
        upload_progress[0].set_current_value(monitor.bytes_read)

    file_info = api.file.upload(TEAM_ID, RESULT_ARCHIVE, remote_archive_path,
                                lambda m: _print_progress(m, upload_progress))
    app_logger.info("Uploaded to Team-Files: {!r}".format(
        file_info.full_storage_url))
    api.task.set_output_archive(task_id,
                                file_info.id,
                                ARCHIVE_NAME,
                                file_url=file_info.full_storage_url)

    my_app.stop()
Example #19
 def on_verify(self, jlog):
     is_archive = jlog['output']['is_archive']
     dump_json_file({'is_archive': is_archive}, self.verif_status_path)
     return {}
Example #20
 def init_additional(self):
     super().init_additional()
     sly.fs.mkdir(self.dir_data)
     sly.fs.mkdir(self.dir_results)
     dump_json_file(self.info['graph'], self.graph_path)
Example #21
def export_as_masks(api: sly.Api, task_id, context, state, app_logger):
    project_info = api.project.get_info_by_id(PROJECT_ID)
    dataset_ids = [ds.id for ds in api.dataset.get_list(PROJECT_ID)]
    sly.logger.info('DOWNLOAD_PROJECT', extra={'title': project_info.name})
    dest_dir = os.path.join(my_app.data_dir,
                            f'{project_info.id}_{project_info.name}')
    sly.download_project(api,
                         project_info.id,
                         dest_dir,
                         dataset_ids=dataset_ids,
                         log_progress=True)
    sly.logger.info(
        'Project {!r} has been successfully downloaded. Starting to render masks.'
        .format(project_info.name))

    if MACHINE_MASKS is True or HUMAN_MASKS is True:
        project = sly.Project(directory=dest_dir, mode=sly.OpenMode.READ)
        if MACHINE_MASKS:
            machine_colors = {
                obj_class.name: [idx, idx, idx]
                for idx, obj_class in enumerate(project.meta.obj_classes,
                                                start=1)
            }
            dump_json_file(machine_colors,
                           os.path.join(dest_dir,
                                        'obj_class_to_machine_color.json'),
                           indent=2)

        for dataset in project:
            ds_progress = sly.Progress('Processing dataset: {!r}/{!r}'.format(
                project.name, dataset.name),
                                       total_cnt=len(dataset))

            if HUMAN_MASKS is True:
                human_masks_dir = os.path.join(dataset.directory,
                                               'masks_human')
                sly.fs.mkdir(human_masks_dir)
            if MACHINE_MASKS is True:
                machine_masks_dir = os.path.join(dataset.directory,
                                                 'masks_machine')
                sly.fs.mkdir(machine_masks_dir)

            for item_name in dataset:
                item_paths = dataset.get_item_paths(item_name)
                ann = sly.Annotation.load_json_file(item_paths.ann_path,
                                                    project.meta)
                mask_img_name = os.path.splitext(item_name)[0] + '.png'

                raw_img = sly.image.read(item_paths.img_path)
                raw_img_rendered = raw_img.copy()
                if HUMAN_MASKS is True:
                    for label in ann.labels:
                        label.geometry.draw(
                            raw_img_rendered,
                            color=label.obj_class.color,
                            config=label.obj_class.geometry_config,
                            thickness=THICKNESS)
                    raw_img_rendered = ((raw_img_rendered.astype(np.uint16) +
                                         raw_img.astype(np.uint16)) /
                                        2).astype(np.uint8)
                    sly.image.write(
                        os.path.join(human_masks_dir, mask_img_name),
                        np.concatenate([raw_img, raw_img_rendered], axis=1))

                if MACHINE_MASKS is True:
                    machine_mask = np.zeros(shape=ann.img_size + (3, ),
                                            dtype=np.uint8)
                    for label in ann.labels:
                        label.geometry.draw(
                            machine_mask,
                            color=machine_colors[label.obj_class.name],
                            thickness=THICKNESS)
                    sly.image.write(
                        os.path.join(machine_masks_dir, mask_img_name),
                        machine_mask)

                ds_progress.iter_done_report()
        sly.logger.info('Finished masks rendering for project {!r}.'.format(project_info.name))

    full_archive_name = str(project_info.id) + '_' + project_info.name + '.tar'
    result_archive = os.path.join(my_app.data_dir, full_archive_name)
    sly.fs.archive_directory(dest_dir, result_archive)
    app_logger.info("Result directory is archived")

    upload_progress = []
    remote_archive_path = "/Export-as-masks/{}_{}".format(
        task_id, full_archive_name)

    def _print_progress(monitor, upload_progress):
        if len(upload_progress) == 0:
            upload_progress.append(
                sly.Progress(message="Upload {!r}".format(full_archive_name),
                             total_cnt=monitor.len,
                             ext_logger=app_logger,
                             is_size=True))
        upload_progress[0].set_current_value(monitor.bytes_read)

    file_info = api.file.upload(TEAM_ID, result_archive, remote_archive_path,
                                lambda m: _print_progress(m, upload_progress))
    app_logger.info("Uploaded to Team-Files: {!r}".format(
        file_info.full_storage_url))
    api.task.set_output_archive(task_id,
                                file_info.id,
                                full_archive_name,
                                file_url=file_info.full_storage_url)

    my_app.stop()
Example #22
src_dataset_ids = %%DATASET_IDS:None%%
thickness = int('%%thickness:2%%')

api = sly.Api(server_address=os.environ['SERVER_ADDRESS'], token=os.environ['API_TOKEN'])

#### End settings. ####

sly.logger.info('DOWNLOAD_PROJECT', extra={'title': src_project_name})
project_info = api.project.get_info_by_name(WORKSPACE_ID, src_project_name)
dest_dir = os.path.join(sly.TaskPaths.OUT_ARTIFACTS_DIR, src_project_name)
sly.download_project(api, project_info.id, dest_dir, dataset_ids=src_dataset_ids, log_progress=True)
sly.logger.info('Project {!r} has been successfully downloaded. Starting to render masks.'.format(src_project_name))

project = sly.Project(directory=dest_dir, mode=sly.OpenMode.READ)
machine_colors = {obj_class.name: [idx, idx, idx] for idx, obj_class in enumerate(project.meta.obj_classes, start=1)}
dump_json_file(machine_colors, os.path.join(dest_dir, 'obj_class_to_machine_color.json'), indent=2)
for dataset in project:
    ds_progress = sly.Progress(
        'Processing dataset: {!r}/{!r}'.format(project.name, dataset.name), total_cnt=len(dataset))

    human_masks_dir = os.path.join(dataset.directory, 'masks_human')
    machine_masks_dir = os.path.join(dataset.directory, 'masks_machine')
    sly.fs.mkdir(human_masks_dir)
    sly.fs.mkdir(machine_masks_dir)
    for item_name in dataset:
        item_paths = dataset.get_item_paths(item_name)
        ann = sly.Annotation.load_json_file(item_paths.ann_path, project.meta)
        mask_img_name = os.path.splitext(item_name)[0] + '.png'

        # Render and save human interpretable masks.
        raw_img = sly.image.read(item_paths.img_path)
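The (raw + rendered) / 2 blend in Example #21 casts to uint16 before averaging; summing two uint8 images directly would wrap around at 256 and corrupt the overlay. A minimal demonstration:

import numpy as np

a = np.array([200], dtype=np.uint8)
b = np.array([180], dtype=np.uint8)
print((a + b) // 2)  # [62]: 200 + 180 wrapped to 124 before dividing
print(((a.astype(np.uint16) + b.astype(np.uint16)) // 2).astype(np.uint8))  # [190]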
Example #23
 def _save_model_snapshot(self, is_best, opt_data):
     out_dir = self.checkpoints_saver.get_dir_to_write()
     dump_json_file(self.out_config, os.path.join(out_dir, TaskPaths.MODEL_CONFIG_NAME))
     self._dump_model_weights(out_dir)
     size_bytes = sly_fs.get_directory_size(out_dir)
     self.checkpoints_saver.saved(is_best, size_bytes, opt_data)
Example #24
item = image_labels[free_pairs[0]]
image_id = item[0]
ann_path = item[1]
label_index = item[2]

meta_json = sly_json.load_json_file(os.path.join(project_dir, "meta.json"))
meta = sly.ProjectMeta.from_json(meta_json)

ann_json = sly_json.load_json_file(ann_path)
ann = sly.Annotation.from_json(ann_json, meta)

product_id_tm = meta.get_tag_meta("product_id")

labels = ann.labels
new_label = labels[label_index].add_tags([
    sly.Tag(product_id_tm, "unknown"),
])

labels[label_index] = new_label
ann = ann.clone(labels=labels)

api.annotation.upload_ann(image_id, ann)
sly_json.dump_json_file(ann.to_json(), ann_path)

free_pairs.pop(0)
sly_json.dump_json_file(free_pairs, os.path.join(project_dir,
                                                 "free_pairs.json"))

utils.get_next_object(api, task_id, project_id)

api.task.set_data(task_id, False, "state.tagging")
Example #25
def main():
    task_id, api, project_id = utils.get_task_api()
    products, keywords, product_search, product_images = read_items_csv(
        ITEMS_PATH)

    project_dir = init_project(api, project_id)

    with open(os.path.join(SCRIPT_DIR, 'gui.html'), 'r') as file:
        gui_template = file.read()

    img_grid = build_image_grid_database(products, product_images)

    sly_json.dump_json_file(products, os.path.join(project_dir,
                                                   "products.json"))
    sly_json.dump_json_file(img_grid, os.path.join(project_dir,
                                                   "img_grid.json"))
    sly_json.dump_json_file(product_search,
                            os.path.join(project_dir, "product_search.json"))

    # data
    data = {
        "table": products,
        "objectToTag": [],
        "itemExamples": [],
        "imagesGrid": img_grid,
        "gridIndices": list(range(min(30, len(img_grid)))),
        "keywords": keywords,
        "gridData": [
            {"date": '2016-05-02', "name": 'Jack', "address": 'New York City'},
            {"date": '2016-05-04', "name": 'Jack', "address": 'New York City'},
            {"date": '2016-05-01', "name": 'Jack', "address": 'New York City'},
            {"date": '2016-05-03', "name": 'Jack', "address": 'New York City'},
        ],
        "currentLabelIndex": -1,
    }

    # state
    state = {
        "projectId": project_id,
        "perPage": 20,
        "pageSizes": [10, 15, 20, 50, 100],
        "table": {},
        "selectedImageIndex": 0,
        "selectedKeywords": [],
        "searching": False,
        "tagging": False,
    }

    payload = {
        sly.app.TEMPLATE: gui_template,
        sly.app.STATE: state,
        sly.app.DATA: data,
    }

    #http://192.168.1.42/apps/2/sessions/75
    #http://192.168.1.42/app/images/1/9/28/35?page=1&sessionId=75#image-31872

    jresp = api.task.set_data(task_id, payload)
    utils.get_next_object(api, task_id, project_id)
Example #26
def from_sl_to_cityscapes(api: sly.Api, task_id, context, state, app_logger):
    def get_image_and_ann():
        mkdir(image_dir_path)
        mkdir(ann_dir)
        image_path = os.path.join(image_dir_path, image_name)
        api.image.download_path(image_id, image_path)
        image_ext_to_png(image_path)

        mask_color, mask_label, poly_json = from_ann_to_cityscapes_mask(
            ann, name2id, app_logger, train_val_flag)

        dump_json_file(
            poly_json,
            os.path.join(
                ann_dir,
                get_file_name(base_image_name).replace('_leftImg8bit', '') +
                cityscapes_polygons_suffix))
        write(
            os.path.join(
                ann_dir,
                get_file_name(base_image_name).replace('_leftImg8bit', '') +
                cityscapes_color_suffix), mask_color)
        write(
            os.path.join(
                ann_dir,
                get_file_name(base_image_name).replace('_leftImg8bit', '') +
                cityscapes_labels_suffix), mask_label)

    project_name = api.project.get_info_by_id(PROJECT_ID).name
    ARCHIVE_NAME = '{}_{}_Cityscapes.tar.gz'.format(PROJECT_ID, project_name)
    meta_json = api.project.get_meta(PROJECT_ID)
    meta = sly.ProjectMeta.from_json(meta_json)
    has_bitmap_poly_shapes = False
    for obj_class in meta.obj_classes:
        if obj_class.geometry_type not in possible_geometries:
            app_logger.warn(
                f'Cityscapes format supports only bitmap and polygon classes, {obj_class.geometry_type} will be skipped'
            )
        else:
            has_bitmap_poly_shapes = True

    if has_bitmap_poly_shapes is False:
        raise Exception(
            'Input project does not contain bitmap or polygon classes')

    RESULT_ARCHIVE = os.path.join(my_app.data_dir, ARCHIVE_NAME)
    RESULT_DIR = os.path.join(my_app.data_dir, RESULT_DIR_NAME)
    result_images_train = os.path.join(RESULT_DIR, images_dir_name,
                                       default_dir_train)
    result_images_val = os.path.join(RESULT_DIR, images_dir_name,
                                     default_dir_val)
    result_images_test = os.path.join(RESULT_DIR, images_dir_name,
                                      default_dir_test)
    result_anns_train = os.path.join(RESULT_DIR, annotations_dir_name,
                                     default_dir_train)
    result_anns_val = os.path.join(RESULT_DIR, annotations_dir_name,
                                   default_dir_val)
    result_anns_test = os.path.join(RESULT_DIR, annotations_dir_name,
                                    default_dir_test)
    sly.fs.mkdir(RESULT_DIR)
    app_logger.info("Cityscapes Dataset folder has been created")

    class_to_id = []
    name2id = {}
    for idx, obj_class in enumerate(meta.obj_classes):
        if obj_class.geometry_type not in possible_geometries:
            continue
        curr_class = {}
        curr_class['name'] = obj_class.name
        curr_class['id'] = idx + 1
        curr_class['color'] = obj_class.color
        class_to_id.append(curr_class)
        name2id[obj_class.name] = (idx + 1, idx + 1, idx + 1)

    dump_json_file(class_to_id, os.path.join(RESULT_DIR, 'class_to_id.json'))
    app_logger.info("Writing classes with colors to class_to_id.json file")

    datasets = api.dataset.get_list(PROJECT_ID)
    for dataset in datasets:
        images_dir_path_train = os.path.join(result_images_train, dataset.name)
        images_dir_path_val = os.path.join(result_images_val, dataset.name)
        images_dir_path_test = os.path.join(result_images_test, dataset.name)
        anns_dir_path_train = os.path.join(result_anns_train, dataset.name)
        anns_dir_path_val = os.path.join(result_anns_val, dataset.name)
        anns_dir_path_test = os.path.join(result_anns_test, dataset.name)

        images = api.image.get_list(dataset.id)
        progress = sly.Progress(
            'Convert images and anns from dataset {}'.format(dataset.name),
            len(images), app_logger)
        if len(images) < 3:
            app_logger.warn(
                'Number of images in {} dataset is less than 3, val and train directories for this dataset will not be created'
                .format(dataset.name))

        image_ids = [image_info.id for image_info in images]
        base_image_names = [image_info.name for image_info in images]

        image_names = [
            get_file_name(image_info.name.replace('_leftImg8bit', '')) + \
            cityscapes_images_suffix + get_file_ext(image_info.name) for image_info in images
        ]

        ann_infos = api.annotation.download_batch(dataset.id, image_ids)
        anns = [
            sly.Annotation.from_json(ann_info.annotation, meta)
            for ann_info in ann_infos
        ]

        splitter = get_tags_splitter(anns)
        curr_splitter = {'train': 0, 'val': 0, 'test': 0}

        for ann, image_id, image_name, base_image_name in zip(
                anns, image_ids, image_names, base_image_names):
            train_val_flag = True
            try:
                split_name = ann.img_tags.get('split').value
                if split_name == 'train':
                    image_dir_path = images_dir_path_train
                    ann_dir = anns_dir_path_train
                elif split_name == 'val':
                    image_dir_path = images_dir_path_val
                    ann_dir = anns_dir_path_val
                else:
                    image_dir_path = images_dir_path_test
                    ann_dir = anns_dir_path_test
                    train_val_flag = False
            except AttributeError:  # no usable 'split' tag on this image
                ann_tags = [tag.name for tag in ann.img_tags]
                separator_tags = list(set(ann_tags) & set(possible_tags))
                if len(separator_tags) > 1:
                    app_logger.warn(
                        '''There is more than one separator tag for {} image. {}
                    tag will be used for split'''.format(
                            image_name, separator_tags[0]))

                if len(separator_tags) >= 1:
                    if separator_tags[0] == 'train':
                        image_dir_path = images_dir_path_train
                        ann_dir = anns_dir_path_train
                    elif separator_tags[0] == 'val':
                        image_dir_path = images_dir_path_val
                        ann_dir = anns_dir_path_val
                    else:
                        image_dir_path = images_dir_path_test
                        ann_dir = anns_dir_path_test
                        train_val_flag = False

                if len(separator_tags) == 0:
                    if curr_splitter['test'] == splitter['test']:
                        curr_splitter = {'train': 0, 'val': 0, 'test': 0}
                    if curr_splitter['train'] < splitter['train']:
                        curr_splitter['train'] += 1
                        image_dir_path = images_dir_path_train
                        ann_dir = anns_dir_path_train
                    elif curr_splitter['val'] < splitter['val']:
                        curr_splitter['val'] += 1
                        image_dir_path = images_dir_path_val
                        ann_dir = anns_dir_path_val
                    elif curr_splitter['test'] < splitter['test']:
                        curr_splitter['test'] += 1
                        image_dir_path = images_dir_path_test
                        ann_dir = anns_dir_path_test
                        train_val_flag = False

            get_image_and_ann()

            progress.iter_done_report()

    sly.fs.archive_directory(RESULT_DIR, RESULT_ARCHIVE)
    app_logger.info("Result directory is archived")

    upload_progress = []
    remote_archive_path = "/cityscapes_format/{}/{}".format(
        task_id, ARCHIVE_NAME)

    def _print_progress(monitor, upload_progress):
        if len(upload_progress) == 0:
            upload_progress.append(
                sly.Progress(message="Upload {!r}".format(ARCHIVE_NAME),
                             total_cnt=monitor.len,
                             ext_logger=app_logger,
                             is_size=True))
        upload_progress[0].set_current_value(monitor.bytes_read)

    file_info = api.file.upload(
        team_id=TEAM_ID,
        src=RESULT_ARCHIVE,
        dst=remote_archive_path,
        progress_cb=lambda m: _print_progress(m, upload_progress))

    app_logger.info("Uploaded to Team-Files: {!r}".format(
        file_info.full_storage_url))
    api.task.set_output_archive(task_id,
                                file_info.id,
                                ARCHIVE_NAME,
                                file_url=file_info.full_storage_url)

    my_app.stop()
Example #27
def init_project(api: sly.Api, project_id):
    project_dir = os.path.join(sly.app.SHARED_DATA, "app_tagging",
                               str(project_id))

    #@TODO: comment out the next line to reuse a previously prepared project dir
    sly.fs.remove_dir(project_dir)

    if sly.fs.dir_exists(project_dir):
        return project_dir
    else:
        sly.fs.mkdir(project_dir)

    meta_json = api.project.get_meta(project_id)
    meta = sly.ProjectMeta.from_json(meta_json)

    product_id = meta.get_tag_meta("product_id")
    if product_id is None:
        meta = meta.add_tag_meta(
            sly.TagMeta("product_id", sly.TagValueType.ANY_STRING))

    category = meta.get_tag_meta("category")
    if category is None:
        meta = meta.add_tag_meta(
            sly.TagMeta("category", sly.TagValueType.ANY_STRING))

    brand = meta.get_tag_meta("brand")
    if brand is None:
        meta = meta.add_tag_meta(
            sly.TagMeta("brand", sly.TagValueType.ANY_STRING))

    item_name = meta.get_tag_meta("item_name")
    if item_name is None:
        meta = meta.add_tag_meta(
            sly.TagMeta("item_name", sly.TagValueType.ANY_STRING))

    if product_id is None or category is None or brand is None or item_name is None:
        api.project.update_meta(project_id, meta.to_json())

    sly_json.dump_json_file(meta.to_json(),
                            os.path.join(project_dir, "meta.json"))

    image_label_pairs = []
    for dataset in api.dataset.get_list(project_id):
        images = api.image.get_list(dataset.id)
        image_ids = [image.id for image in images]
        for batch in sly.batched(image_ids):
            annotations = api.annotation.download_batch(dataset.id, batch)
            for ann_info in annotations:
                ann_path = os.path.join(project_dir, str(dataset.id),
                                        str(ann_info.image_id) + sly.ANN_EXT)
                sly.fs.ensure_base_path(ann_path)
                sly_json.dump_json_file(ann_info.annotation, ann_path)
                ann = sly.Annotation.from_json(ann_info.annotation, meta)
                label_indices = list(range(0, len(ann.labels)))
                image_label_pairs.extend(
                    list(
                        zip([ann_info.image_id] * len(label_indices),
                            [ann_path] * len(label_indices), label_indices)))

    sly_json.dump_json_file(
        image_label_pairs, os.path.join(project_dir,
                                        "image_labels_pairs.json"))
    sly_json.dump_json_file(list(range(len(image_label_pairs))),
                            os.path.join(project_dir, "free_pairs.json"))
    return project_dir
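init_project flattens every (image, label) pair into a triple of (image_id, ann_path, label_index); Example #24 above consumes these via free_pairs.json, which holds the indices of pairs not yet tagged. The shape of the two files, with hypothetical ids and paths:

image_label_pairs = [
    [101, '/shared/app_tagging/7/12/101.json', 0],  # first label on image 101
    [101, '/shared/app_tagging/7/12/101.json', 1],  # second label, same image
    [102, '/shared/app_tagging/7/12/102.json', 0],
]
free_pairs = [0, 1, 2]  # indices into image_label_pairs, popped as tagging proceeds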
Example #28
 def set_ann_dict(self, item_name: str, ann: dict):
     if type(ann) is not dict:
         raise TypeError("Ann should be a dict, not a {}".format(type(ann)))
     dst_ann_path = self.get_ann_path(item_name)
     dump_json_file(ann, dst_ann_path)
Example #29
File: main.py  Project: cxnt/export-coco
def export_coco(api: sly.Api, task_id, context, state, app_logger):
    datasets = api.dataset.get_list(g.project_id)
    for dataset in datasets:
        coco_dataset_dir = os.path.join(g.coco_base_dir, dataset.name)
        mkdir(coco_dataset_dir)
        ann_dir = os.path.join(g.coco_base_dir, 'annotations')
        mkdir(ann_dir)

        images = api.image.get_list(dataset.id)

        # Create the COCO skeleton once per dataset so that every batch
        # accumulates into the same structure before it is dumped below.
        data = dict(
            info=dict(
                description=None,
                url=None,
                version=1.0,
                year=dataset.created_at[:4],
                contributor=g.user.name,
                date_created=dataset.created_at,
            ),
            licenses=[dict(
                url=None,
                id=0,
                name=None,
            )],
            images=[
                # license, url, file_name, height, width, date_captured, id
            ],
            type="instances",
            annotations=[
                # segmentation, area, iscrowd, image_id, bbox, category_id, id
            ],
            categories=[
                # supercategory, id, name
            ],
        )

        for batch in sly.batched(images):
            image_ids = [image_info.id for image_info in batch]
            image_paths = [
                os.path.join(coco_dataset_dir, image_info.name)
                for image_info in batch
            ]
            api.image.download_paths(dataset.id, image_ids, image_paths)

            ann_infos = api.annotation.download_batch(dataset.id, image_ids)
            anns = [
                sly.Annotation.from_json(x.annotation, g.meta)
                for x in ann_infos
            ]

            meta = convert_geometry.prepare_meta(g.meta)
            new_anns = [
                convert_geometry.convert_annotation(ann, meta) for ann in anns
            ]

            for image_info, ann in zip(batch, new_anns):
                data["images"].append(
                    dict(
                        license=None,
                        url=image_info.full_storage_url,  # coco_url, flickr_url
                        file_name=image_info.name,
                        height=image_info.height,
                        width=image_info.width,
                        date_captured=image_info.created_at,
                        id=image_info.id,
                    ))

                for label in ann.labels:
                    segmentation = label.geometry.to_json()["points"]["exterior"]
                    segmentation = [
                        coord for sublist in segmentation for coord in sublist
                    ]

                    bbox = label.geometry.to_bbox().to_json()["points"]["exterior"]
                    bbox = [coord for sublist in bbox for coord in sublist]
                    x, y, max_x, max_y = bbox
                    width = max_x - x
                    height = max_y - y
                    bbox = (x, y, width, height)

                    data["annotations"].append(
                        dict(
                            segmentation=[segmentation],
                            area=label.geometry.area,  # wrong?
                            iscrowd=0,
                            image_id=image_info.id,
                            bbox=bbox,
                            category_id=None,
                            id=None,  # label.id?
                        ))

                    data["categories"].append(
                        dict(supercategory=None,
                             id=None,
                             name=label.obj_class.name))

        dump_json_file(data,
                       os.path.join(ann_dir, f"instances_{dataset.name}.json"))
    g.my_app.stop()
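COCO stores a box as [x, y, width, height], while to_bbox().to_json() yields the two corner points [[left, top], [right, bottom]], hence the conversion above. A quick check with a made-up box:

x, y, max_x, max_y = 10, 20, 50, 80  # flattened corners from to_bbox()
bbox = (x, y, max_x - x, max_y - y)  # COCO-style xywh
print(bbox)                          # (10, 20, 40, 60)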
Example #30
 def set_meta(self, new_meta):
     self._meta = new_meta
     dump_json_file(self.meta.to_json(),
                    self._get_project_meta_path(),
                    indent=4)