Code example #1
    def _copy_dir_recursively(cls, src_path, dst_path):
        files = sly.list_dir(src_path)
        for file_subpath in files:
            src_fpath = osp.join(src_path, file_subpath)
            storage_fpath = osp.join(dst_path, file_subpath)
            sly.ensure_base_path(storage_fpath)
            cls._copy_file_concurr(src_fpath, storage_fpath)
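For comparison, here is a standalone sketch of the same traversal using only the Python standard library; shutil.copy2 stands in for the concurrency-aware _copy_file_concurr helper, whose implementation is not shown in this example.

import os
import os.path as osp
import shutil

def copy_dir_recursively(src_path, dst_path):
    # Walk the source tree and copy every file, creating parent
    # directories on the destination side as needed.
    for root, _dirs, files in os.walk(src_path):
        for fname in files:
            src_fpath = osp.join(root, fname)
            dst_fpath = osp.join(dst_path, osp.relpath(src_fpath, src_path))
            os.makedirs(osp.dirname(dst_fpath), exist_ok=True)
            shutil.copy2(src_fpath, dst_fpath)  # stands in for _copy_file_concurr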
Code example #2
    def run_inference(self):
        out_project_fs = copy(self.in_project_fs)
        out_project_fs.root_path = self.helper.paths.results_dir
        out_project_fs.make_dirs()

        inf_feeder = sly.InferenceFeederFactory.create(self.config, self.helper.in_project_meta, self.train_classes)
        out_pr_meta = inf_feeder.out_meta
        out_pr_meta.to_dir(out_project_fs.project_path)

        ia_cnt = out_project_fs.pr_structure.image_cnt
        progress = sly.progress_counter_inference(cnt_imgs=ia_cnt)

        for sample in self.in_project_fs:
            logger.info('Will process image',
                        extra={'dataset_name': sample.ds_name, 'image_name': sample.image_name})
            ann_packed = sly.json_load(sample.ann_path)
            ann = sly.Annotation.from_packed(ann_packed, self.helper.in_project_meta)

            img = cv2.imread(sample.img_path)[:, :, ::-1]
            res_ann = inf_feeder.feed(img, ann, self._infer_on_img)

            out_ann_fpath = out_project_fs.ann_path(sample.ds_name, sample.image_name)
            res_ann_packed = res_ann.pack()
            sly.json_dump(res_ann_packed, out_ann_fpath)

            if self.debug_copy_images:
                out_img_fpath = out_project_fs.img_path(sample.ds_name, sample.image_name)
                sly.ensure_base_path(out_img_fpath)
                shutil.copy(sample.img_path, out_img_fpath)

            progress.iter_done_report()

        sly.report_inference_finished()
Code example #3
    def _download_images_from_remote(self, pr_writer, image_id_to_ds, img_infos):
        if len(img_infos) == 0:
            return

        infos_with_paths = [(info, pr_writer.get_img_path(image_id_to_ds[info.id], info.title, info.ext))
                            for info in img_infos]
        hash2path = {x[0].hash: x[1] for x in infos_with_paths}  # one destination path per unique hash
        unique_hashes = list(hash2path.keys())

        ready_paths = []
        ready_hashes = []
        progress = sly.ProgressCounter('Download remote images', len(unique_hashes), ext_logger=self.logger)

        def close_fh(fh):
            fpath = fh.file_path
            if fh.close_and_check():
                ready_paths.append(fpath)
                ready_hashes.append(img_hash)
                progress.iter_done_report()
            else:
                self.logger.warning('file was skipped while downloading',
                                    extra={'img_path': fpath, 'img_hash': img_hash})

        # download by unique hashes
        for batch_img_hashes in sly.batched(unique_hashes, constants.BATCH_SIZE_DOWNLOAD_IMAGES):
            file_handler = None
            img_hash = None
            for chunk in self.api.get_stream_with_data('DownloadImages',
                                                       api_proto.ChunkImage,
                                                       api_proto.ImagesHashes(images_hashes=batch_img_hashes)):
                if chunk.image.hash:  # non-empty hash means beginning of new image
                    if file_handler is not None:
                        close_fh(file_handler)
                    img_hash = chunk.image.hash
                    self.logger.trace('download_images', extra={'img_hash': img_hash})
                    dst_fpath = hash2path[img_hash]
                    file_handler = ChunkedFileWriter(file_path=dst_fpath)

                file_handler.write(chunk.chunk)

            close_fh(file_handler)  # file_handler must not be None here

        # process non-unique hashes
        for info, dst_path in infos_with_paths:
            origin_path = hash2path[info.hash]
            if (origin_path != dst_path) and osp.isfile(origin_path):
                sly.ensure_base_path(dst_path)
                sly.copy_file(origin_path, dst_path)

        self._write_images_to_agent_storage(ready_paths, ready_hashes)
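The download loop above only needs a small interface from ChunkedFileWriter: a file_path attribute, write(chunk), and close_and_check(). A minimal stand-in sketch of that interface follows; the chunk payload attribute (buffer) and the success criterion in close_and_check() are assumptions for illustration, not the agent's actual implementation.

import os

class StubChunkedFileWriter:
    """Stand-in exposing the same interface the download loop relies on."""

    def __init__(self, file_path):
        self.file_path = file_path
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        self._fh = open(file_path, 'wb')
        self._written = 0

    def write(self, chunk):
        data = chunk.buffer  # assumption: each proto chunk carries its bytes in 'buffer'
        self._fh.write(data)
        self._written += len(data)

    def close_and_check(self):
        self._fh.close()
        return self._written > 0  # assumption: a non-empty file counts as success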
Code example #4
    def process(self, data_el):
        img_desc, ann = data_el
        free_name = self.net.get_free_name(img_desc.get_img_name())
        new_dataset_name = img_desc.get_res_ds_name()

        for out_dir, flag_name, mapping_name in self.odir_flag_mapping:
            if not self.settings[flag_name]:
                continue
            cls_mapping = self.settings[mapping_name]

            # hack to draw 'black' regions: remap pure black (0, 0, 0) to near-black (1, 1, 1)
            if flag_name == 'masks_human':
                cls_mapping = {
                    k: (1, 1, 1) if max(v) == 0 else v
                    for k, v in cls_mapping.items()
                }

            img = self.draw_colored_mask(ann, cls_mapping)

            if flag_name == 'masks_human':
                orig_img = img_desc.read_image()
                comb_img = sly.overlay_images(orig_img, img, 0.5)

                sep = np.array([[[0, 255, 0]]] * orig_img.shape[0],
                               dtype=np.uint8)
                img = np.hstack((orig_img, sep, comb_img))

            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            output_img_path = osp.join(self.output_folder, new_dataset_name,
                                       out_dir, free_name + '.png')
            sly.ensure_base_path(output_img_path)
            cv2.imwrite(output_img_path, img)

        # net _always_ downloads images
        if self.settings['images'] is True:
            if img_desc.need_write() is True:
                self.pr_writer.write_image(img_desc, free_name)
            else:
                self.pr_writer.copy_image(img_desc, free_name)

        if self.settings['annotations'] is True:
            ann_to_save = deepcopy(ann)
            ann_to_save.normalize_figures()
            packed_ann = ann_to_save.pack()
            self.pr_writer.write_ann(img_desc, packed_ann, free_name)

        yield ([img_desc, ann], )
Code example #5
    def process(self, data_el):
        img_desc, ann = data_el
        free_name = self.net.get_free_name(img_desc.get_img_name())
        new_dataset_name = img_desc.get_res_ds_name()

        if self.settings.get('visualize'):
            out_meta = self.net.get_result_project_meta()
            cls_mapping = {}
            for cls_descr in out_meta.classes:
                color_s = cls_descr.get('color')
                if color_s is not None:
                    color = sly.hex2rgb(color_s)
                else:
                    color = sly.get_random_color()
                cls_mapping[cls_descr['title']] = color

            # hack to draw 'black' regions: remap pure black (0, 0, 0) to near-black (1, 1, 1)
            cls_mapping = {k: (1, 1, 1) if max(v) == 0 else v for k, v in cls_mapping.items()}

            vis_img = self.draw_colored_mask(ann, cls_mapping)
            orig_img = img_desc.read_image()
            comb_img = sly.overlay_images(orig_img, vis_img, 0.5)

            sep = np.array([[[0, 255, 0]]] * orig_img.shape[0], dtype=np.uint8)
            img = np.hstack((orig_img, sep, comb_img))

            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            output_img_path = osp.join(self.output_folder, new_dataset_name, 'visualize', free_name + '.png')
            sly.ensure_base_path(output_img_path)
            cv2.imwrite(output_img_path, img)

        # net _always_ downloads images
        if self.settings['images'] is True:
            if img_desc.need_write() is True:
                self.pr_writer.write_image(img_desc, free_name)
            else:
                self.pr_writer.copy_image(img_desc, free_name)

        if self.settings['annotations'] is True:
            ann_to_save = deepcopy(ann)
            ann_to_save.normalize_figures()
            packed_ann = ann_to_save.pack()
            self.pr_writer.write_ann(img_desc, packed_ann, free_name)

        yield ([img_desc, ann],)
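Both process variants blend the original image with the colored mask via sly.overlay_images. A standalone alpha-blend sketch with the same visual effect is shown below; the exact weighting used by the library call is an assumption here.

import numpy as np

def overlay_images_sketch(img_a, img_b, alpha=0.5):
    # Convex combination of two uint8 images of identical shape.
    blended = alpha * img_a.astype(np.float32) + (1.0 - alpha) * img_b.astype(np.float32)
    return blended.astype(np.uint8)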
Code example #6
def main():
    # Please note that auxiliary methods from sly (supervisely_lib) use supervisely_lib.logger to format output,
    # so don't replace the logger's formatters or handlers.
    # You may use other loggers or plain prints for other output, but supervisely_lib.logger is recommended.
    logger.info('Hello ML world')
    print('Glad to see u')

    # TaskHelperInference contains almost everything needed to run inference as a Supervisely task,
    # including task settings and paths to data and model.
    task_helper = sly.TaskHelperInference()

    # All settings and parameters are passed to the task in a JSON file.
    # The content of the file depends entirely on the model implementation.
    inference_settings = task_helper.task_settings
    logger.info('Task settings are read',
                extra={'task_settings': inference_settings})

    # Let's imitate loading trained model weights. The task receives a directory with the weights,
    # and the content of that directory depends entirely on the model implementation.
    model_dir = task_helper.paths.model_dir
    model = load_fake_model(model_dir)
    logger.info('Model weights are loaded', extra={'model_dir': model_dir})

    # We are going to read input project with data for inference.
    project_meta = task_helper.in_project_meta  # Project meta contains list of project classes.
    project_dir = task_helper.paths.project_dir
    project_fs = sly.ProjectFS.from_disk_dir_project(project_dir)
    # ProjectFS enumerates all samples (image/annotation pairs) in the input project.

    # We will write inference results as an sly project with the same structure into the provided results dir.
    # There is no need to save images; only annotations and project meta are required.
    results_dir = task_helper.paths.results_dir
    out_project_fs = sly.ProjectFS(results_dir, project_fs.pr_structure)

    # It's necessary to write project meta (with list of classes) for output project.
    out_meta = sly.ProjectMeta([{
        'title': 'hat',
        'shape': 'point',
        'color': '#FF0000'
    }])  # create meta
    out_meta.to_dir(out_project_fs.project_path)  # and save

    # Report task progress via sly.ProgressCounter if you want to observe the progress in the web panel.
    # In fact, one task may report progress for several sequential (not nested) subtasks,
    # but here we will report inference progress only.
    ia_cnt = out_project_fs.pr_structure.image_cnt
    progress = sly.progress_counter_inference(cnt_imgs=ia_cnt)

    # Iterating over samples (image/annotation pairs) in input project.
    for item_descr in project_fs:
        logger.info('Processing input sample',
                    extra={
                        'dataset': item_descr.ds_name,
                        'image_name': item_descr.image_name
                    })

        # Open some image...
        img = cv2.imread(item_descr.img_path)
        logger.info('Read image from input project',
                    extra={
                        'width': img.shape[1],
                        'height': img.shape[0]
                    })

        # And read corresponding annotation...
        ann_packed = sly.json_load(item_descr.ann_path)
        ann = sly.Annotation.from_packed(ann_packed, project_meta)
        logger.info('Read annotation from input project',
                    extra={'tags': ann['tags']})

        logger.info('Some forward pass...')
        time.sleep(1)

        # Let's imitate inference output.
        # We are to save results as sly Figures in annotation.
        ann['objects'] = [sly.FigurePoint.from_pt('hat', (800, 800))]  # imagine that our model found the point
        out_ann_path = out_project_fs.ann_path(item_descr.ds_name,
                                               item_descr.image_name)
        sly.ensure_base_path(out_ann_path)  # create intermediate directories
        sly.json_dump(ann.pack(), out_ann_path)  # and save annotation
        # Note that there is no need to save image.

        progress.iter_done_report()  # call it after every iteration to report progress

    # It's necessary to report that the inference task is finished.
    sly.report_inference_finished()

    # Thank you for your patience.
    logger.info('Applying finished.')
Code example #7
def main():
    logger.info('Hello world.')

    # It isn't necessary, but let's suppose that our data is stored as for a Supervisely task:
    # input in '/sly_task_data/data' and results in '/sly_task_data/results'.
    # So TaskPaths provides the paths.
    task_paths = sly.TaskPaths()

    project_dir = task_paths.project_dir  # the path includes the project name

    project_meta = sly.ProjectMeta.from_dir(project_dir)
    # Now we've read meta of input project.
    logger.info('Input project meta: {} class(es).'.format(len(project_meta.classes)))

    project_fs = sly.ProjectFS.from_disk(*sly.ProjectFS.split_dir_project(project_dir))
    # Now we've read project structure.
    logger.info('Input project: "{}" contains {} dataset(s) and {} image(s).'.format(
        project_fs.pr_structure.name,
        len(project_fs.pr_structure.datasets),
        project_fs.image_cnt
    ))

    # prepare color mapping
    color_mapping = {}
    for cls_descr in project_meta.classes:
        color_s = cls_descr.get('color')
        if color_s is not None:
            color = sly.hex2rgb(color_s)  # use color from project meta if exists
        else:
            color = sly.get_random_color()  # or use random color otherwise
        color_mapping[cls_descr['title']] = color

    # enumerate all input samples (image/annotation pairs)
    for item_descr in project_fs:
        logger.info('Processing input sample',
                    extra={'dataset': item_descr.ds_name, 'image_name': item_descr.image_name})

        # Open image
        img = cv2.imread(item_descr.img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # to work with r-g-b colors

        # And read corresponding annotation.
        ann_packed = sly.json_load(item_descr.ann_path)
        ann = sly.Annotation.from_packed(ann_packed, project_meta)

        # Draw annotations on image
        for fig in ann['objects']:
            color = color_mapping.get(fig.class_title)
            fig.draw(img, color)
            # Note that this method draws lines with width 1, and points as single pixels.

        # Save image. Please note that we just save images, not new Supervisely project.
        src_image_ext = item_descr.ia_data['image_ext']  # let's preserve source image format (by extension)
        out_fpath = osp.join(
            task_paths.results_dir, item_descr.project_name, item_descr.ds_name, item_descr.image_name + src_image_ext
        )
        sly.ensure_base_path(out_fpath)  # create intermediate dirs if required

        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # convert back to BGR before saving with OpenCV
        cv2.imwrite(out_fpath, img)  # write image

    logger.info('Done.')
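The color mapping above falls back to sly.hex2rgb when a class defines a color. A standalone equivalent, assuming '#RRGGBB' strings (hypothetical helper, shown only to illustrate the expected return value):

def hex_to_rgb(color_s):
    # '#FF0000' -> (255, 0, 0)
    color_s = color_s.lstrip('#')
    return tuple(int(color_s[i:i + 2], 16) for i in (0, 2, 4))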
Code example #8
    def _read_obj_impl(self, st_path, dst_path):
        sly.ensure_base_path(dst_path)
        sly.copy_file(st_path, dst_path)
Code example #9
    def _write_obj_impl(self, src_path, st_path):
        sly.ensure_base_path(st_path)
        self._copy_file_concurr(src_path, st_path)