    def _save_report(
        self,
        models_specs: List[ClassificationModelSpec],
        tags: List[str],
        output_directory: Union[str, Path],
        n_true_bboxes_data: List[List[BboxData]],
        markdowns: List[str],
        codes: List[str],
        classifications_reports_datas: List[ClassificationReportData],
    ):
        output_directory = Path(output_directory)
        output_directory.mkdir(exist_ok=True, parents=True)
        for model_spec, tag in zip(models_specs, tags):
            if model_spec is None:
                continue
            classification_model_spec_filepath = output_directory / f"{CLASSIFICATION_MODEL_SPEC_PREFIX}_{tag}.pkl"
            with open(classification_model_spec_filepath, 'wb') as out:
                pickle.dump(model_spec, out)
        bboxes_data_filepath = output_directory / BBOXES_DATA_FILENAME
        with open(bboxes_data_filepath, 'wb') as out:
            pickle.dump(n_true_bboxes_data, out)

        classification_reports_data_filepath = output_directory / CLASSIFICATIONS_REPORTS_DATAS_FILENAME
        with open(classification_reports_data_filepath, "wb") as out:
            pickle.dump(classifications_reports_datas, out)

        nb = nbf.v4.new_notebook()
        nb['cells'] = [
            nbf.v4.new_markdown_cell(markdown) for markdown in markdowns
        ]
        nb['cells'].extend([nbf.v4.new_code_cell(code) for code in codes])
        nbf.write(nb, str(output_directory / 'report.ipynb'))
        logger.info(f"Classification report saved to '{output_directory}'.")
Example #2
    def _save_report(
        self,
        models_specs: List[DetectionModelSpec],
        tags: List[str],
        output_directory: Union[str, Path],
        true_images_data: List[ImageData],
        markdowns: List[str],
        codes: List[str],
    ):
        output_directory = Path(output_directory)
        output_directory.mkdir(exist_ok=True, parents=True)
        for model_spec, tag in zip(models_specs, tags):
            if model_spec is None:
                continue
            detection_model_spec_filepath = output_directory / f"{DETECTION_MODEL_SPEC_PREFIX}_{tag}.pkl"
            with open(detection_model_spec_filepath, 'wb') as out:
                pickle.dump(model_spec, out)
        images_data_filepath = output_directory / IMAGES_DATA_FILENAME
        with open(images_data_filepath, 'wb') as out:
            pickle.dump(true_images_data, out)

        nb = nbf.v4.new_notebook()
        nb['cells'] = [
            nbf.v4.new_markdown_cell(markdown) for markdown in markdowns
        ]
        nb['cells'].extend([nbf.v4.new_code_cell(code) for code in codes])
        nbf.write(nb, str(output_directory / 'report.ipynb'))
        logger.info(f"Detection report saved to '{output_directory}'.")
Example #3
    def report(self,
               models_specs: List[PipelineModelSpec],
               tags: List[str],
               detection_scores_thresholds: List[float],
               extra_bbox_labels: List[str],
               compare_tag: str,
               output_directory: Union[str, Path],
               true_images_data: List[ImageData],
               minimum_iou: float,
               pseudo_class_names: List[str],
               batch_size: int = 16):
        assert len(models_specs) == len(tags)
        assert len(tags) == len(detection_scores_thresholds)
        assert len(detection_scores_thresholds) == len(extra_bbox_labels)
        assert compare_tag in tags

        pipelines_reports_datas = []
        for model_spec, tag, detection_score_threshold, extra_bbox_label in zip(
                models_specs, tags, detection_scores_thresholds,
                extra_bbox_labels):
            logger.info(
                f"Making inference and counting metrics for '{tag}'...")
            tag_df_detection_metrics, tag_df_pipeline_metrics = self._inference_pipeline_and_get_metrics(
                model_spec=model_spec,
                true_images_data=true_images_data,
                detection_score_threshold=detection_score_threshold,
                minimum_iou=minimum_iou,
                extra_bbox_label=extra_bbox_label,
                pseudo_class_names=pseudo_class_names,
                batch_size=batch_size,
            )

            pipelines_reports_datas.append(
                PipelineReportData(
                    df_detection_metrics=tag_df_detection_metrics,
                    df_pipeline_metrics=tag_df_pipeline_metrics,
                    tag=tag))

        pipeline_report_data = concat_pipelines_reports_datas(
            pipelines_reports_datas=pipelines_reports_datas,
            compare_tag=compare_tag)
        markdowns = self._get_markdowns(
            pipeline_report_data=pipeline_report_data, tags=tags)
        codes = self._get_codes(
            tags=tags,
            detection_scores_thresholds=detection_scores_thresholds,
            extra_bbox_labels=extra_bbox_labels,
            minimum_iou=minimum_iou,
        )
        self._save_report(models_specs=models_specs,
                          tags=tags,
                          output_directory=output_directory,
                          true_images_data=true_images_data,
                          markdowns=markdowns,
                          codes=codes)
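A schematic call of report; every name below is hypothetical, and the asserts above require the per-model lists (models_specs, tags, detection_scores_thresholds, extra_bbox_labels) to have equal lengths, with compare_tag taken from tags:

reporter.report(
    models_specs=[baseline_spec, candidate_spec],  # hypothetical PipelineModelSpec objects
    tags=['baseline', 'candidate'],
    detection_scores_thresholds=[0.5, 0.5],
    extra_bbox_labels=['trash', 'trash'],
    compare_tag='baseline',
    output_directory='reports/pipeline',
    true_images_data=true_images_data,  # List[ImageData] prepared elsewhere
    minimum_iou=0.5,
    pseudo_class_names=['other'],
)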
Example #4
def get_label_to_base_label_image(
    base_labels_images: Optional[Union[str, Path]],
    label_to_description: Optional[Union[str, Path, Dict[str, str]]] = None,
    add_label_to_image: bool = False,
    # classes listed here get an entry even when they have no base image
    make_labels_for_these_class_names_too: List[str] = [],
) -> Optional[Callable[[str], np.ndarray]]:
    if base_labels_images is None:
        return None

    base_labels_images_files = fsspec.open_files(str(base_labels_images))
    ann_class_names_files = [
        Pathy(base_label_image_file.path).stem
        for base_label_image_file in base_labels_images_files
    ]
    unique_ann_class_names = set(ann_class_names_files)
    if 'unknown' not in unique_ann_class_names:
        raise ValueError(
            f'"{base_labels_images}" must have image with name "unknown.*"')
    unknown_image_path = base_labels_images_files[ann_class_names_files.index(
        'unknown')]
    # any label without its own image file falls back to the "unknown" image
    label_to_base_label_image = defaultdict(lambda: unknown_image_path)
    label_to_base_label_image['unknown'] = unknown_image_path
    logger.info(f"Loading base labels images from {base_labels_images}...")
    for label in tqdm(
            list(unique_ann_class_names) +
            list(set(make_labels_for_these_class_names_too))):
        if label in unique_ann_class_names:
            base_label_image = base_labels_images_files[
                ann_class_names_files.index(label)]
        else:
            base_label_image = label_to_base_label_image['unknown']
        label_to_base_label_image[label] = base_label_image

    def label_to_base_label_image_func(
            label: str,
            # defaults capture the enclosing call's arguments
            label_to_description: Optional[Union[str, Path, Dict[str, str]]] = label_to_description,
            add_label_to_image: bool = add_label_to_image) -> np.ndarray:
        base_label_image = open_image(label_to_base_label_image[label])
        if label_to_description is not None:
            if isinstance(label_to_description, (str, Path)):
                label_to_description = get_label_to_description(
                    label_to_description_dict=label_to_description)
            base_label_image = get_base_label_image_with_description(
                base_label_image=base_label_image,
                label=label,
                description=label_to_description[label])
        elif add_label_to_image:
            base_label_image = get_base_label_image_with_description(
                base_label_image=base_label_image, label=label, description='')
        return base_label_image

    return label_to_base_label_image_func
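A usage sketch for the function above: the return value is a lookup function, not a dict, and any label without its own file resolves to the "unknown" image. The path pattern below is hypothetical:

label_to_image = get_label_to_base_label_image(
    'gs://my-bucket/base_images/*.png',  # hypothetical glob over the base images
    add_label_to_image=True,
)
cat_image = label_to_image('cat')       # np.ndarray with the label drawn on it
missing = label_to_image('never-seen')  # falls back to the unknown image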
Example #5
    def report(self,
               models_specs: List[ClassificationModelSpec],
               tags: List[str],
               compare_tag: str,
               output_directory: Union[str, Path],
               n_true_bboxes_data: List[List[BboxData]],
               pseudo_class_names: List[str],
               tops_n: List[int] = [1],
               batch_size: int = 16,
               step_penalty: int = 20) -> List[ClassificationReportData]:
        for model_spec in models_specs:
            if hasattr(model_spec, 'preprocess_input'):
                assert isinstance(model_spec.preprocess_input, (str, Path))
        assert len(models_specs) == len(tags)
        assert compare_tag in tags

        classifications_reports_datas = []
        df_classification_metrics_columns = None
        for model_spec, tag in zip(models_specs, tags):
            logger.info(
                f"Making inference and counting metrics for '{tag}'...")
            tag_df_classification_metrics = self._inference_classification_and_get_metrics(
                model_spec=model_spec,
                n_true_bboxes_data=n_true_bboxes_data,
                pseudo_class_names=pseudo_class_names,
                tops_n=tops_n,
                batch_size=batch_size,
                step_penalty=step_penalty)
            if df_classification_metrics_columns is None:
                df_classification_metrics_columns = tag_df_classification_metrics.columns
            classifications_reports_datas.append(
                ClassificationReportData(
                    df_classification_metrics=tag_df_classification_metrics,
                    tops_n=tops_n,
                    tag=tag))

        classification_report_data = concat_classifications_reports_datas(
            classifications_reports_datas=classifications_reports_datas,
            df_classification_metrics_columns=df_classification_metrics_columns,
            tops_n=tops_n,
            compare_tag=compare_tag)
        markdowns = self._get_markdowns(
            classification_report_data=classification_report_data)
        codes = self._get_codes(tags=tags)
        self._save_report(
            models_specs=models_specs,
            tags=tags,
            output_directory=output_directory,
            n_true_bboxes_data=n_true_bboxes_data,
            markdowns=markdowns,
            codes=codes,
            classifications_reports_datas=classifications_reports_datas)
        return classifications_reports_datas
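Since the method returns the per-tag report objects, they can be inspected directly; a sketch, assuming ClassificationReportData exposes its constructor arguments as attributes:

for report_data in classifications_reports_datas:
    print(report_data.tag)
    print(report_data.df_classification_metrics)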
Example #6
    def report(self,
               models_specs: List[DetectionModelSpec],
               tags: List[str],
               scores_thresholds: List[float],
               compare_tag: str,
               output_directory: Union[str, Path],
               true_images_data: List[ImageData],
               minimum_iou: float,
               batch_size: int = 16):
        assert len(models_specs) == len(tags)
        assert len(tags) == len(scores_thresholds)
        assert compare_tag in tags

        detections_reports_datas = []
        for model_spec, tag, score_threshold in zip(models_specs, tags,
                                                    scores_thresholds):
            logger.info(
                f"Making inference and counting metrics for '{tag}'...")
            tag_df_detection_metrics, tag_df_detection_recall_per_class = self._inference_detection_and_get_metrics(
                model_spec=model_spec,
                true_images_data=true_images_data,
                score_threshold=score_threshold,
                minimum_iou=minimum_iou,
                batch_size=batch_size)

            detections_reports_datas.append(
                DetectionReportData(
                    df_detection_metrics=tag_df_detection_metrics,
                    df_detection_recall_per_class=tag_df_detection_recall_per_class,
                    tag=tag))

        detection_report_data = concat_detections_reports_datas(
            detections_reports_datas=detections_reports_datas,
            compare_tag=compare_tag)
        markdowns = self._get_markdowns(
            detection_report_data=detection_report_data)
        codes = self._get_codes(
            tags=tags,
            scores_thresholds=scores_thresholds,
            minimum_iou=minimum_iou,
        )
        self._save_report(models_specs=models_specs,
                          tags=tags,
                          output_directory=output_directory,
                          true_images_data=true_images_data,
                          markdowns=markdowns,
                          codes=codes)
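A schematic call, analogous to the pipeline variant above (all object names hypothetical):

reporter.report(
    models_specs=[baseline_spec, candidate_spec],  # hypothetical DetectionModelSpec objects
    tags=['baseline', 'candidate'],
    scores_thresholds=[0.4, 0.4],
    compare_tag='baseline',
    output_directory='reports/detection',
    true_images_data=true_images_data,
    minimum_iou=0.5,
)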
Example #7
    def report_on_predictions(
            self,
            n_true_bboxes_data: List[List[BboxData]],
            n_pred_bboxes_data: List[List[BboxData]],
            tag: str,
            known_class_names: List[str],
            compare_tag: str,
            output_directory: Union[str, Path],
            pseudo_class_names: List[str],
            tops_n: List[int] = [1],
            step_penalty: int = 20) -> List[ClassificationReportData]:

        logger.info(f"Cunting metrics for '{tag}'...")
        tag_df_classification_metrics = get_df_classification_metrics(
            n_true_bboxes_data=n_true_bboxes_data,
            n_pred_bboxes_data=n_pred_bboxes_data,
            pseudo_class_names=pseudo_class_names,
            known_class_names=known_class_names,
            step_penalty=step_penalty)
        df_classification_metrics_columns = tag_df_classification_metrics.columns

        classifications_reports_datas = [
            ClassificationReportData(
                df_classification_metrics=tag_df_classification_metrics,
                tops_n=tops_n,
                tag=tag)
        ]

        classification_report_data = concat_classifications_reports_datas(
            classifications_reports_datas=classifications_reports_datas,
            df_classification_metrics_columns=df_classification_metrics_columns,
            tops_n=tops_n,
            compare_tag=tag)
        markdowns = self._get_markdowns(
            classification_report_data=classification_report_data)
        self._save_report(
            models_specs=[None],
            tags=[tag],
            output_directory=output_directory,
            n_true_bboxes_data=n_true_bboxes_data,
            markdowns=markdowns,
            codes=[],
            classifications_reports_datas=classifications_reports_datas)

        return classifications_reports_datas
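Unlike report above, this variant consumes precomputed predictions, so no model spec is pickled (models_specs=[None]); note also that the compare_tag parameter is effectively ignored, since concat_classifications_reports_datas is called with the single tag. A schematic call with hypothetical inputs:

reports = reporter.report_on_predictions(
    n_true_bboxes_data=n_true_bboxes_data,  # ground-truth crops, List[List[BboxData]]
    n_pred_bboxes_data=n_pred_bboxes_data,  # predictions from any source
    tag='precomputed',
    known_class_names=known_class_names,
    compare_tag='precomputed',
    output_directory='reports/classification_preds',
    pseudo_class_names=['other'],
)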
Example #8
def copy_files_from_directory_to_temp_directory(
        directory: str) -> tempfile.TemporaryDirectory:
    directory_openfile = fsspec.open(directory)
    directory = Pathy(directory)
    temp_dir = tempfile.TemporaryDirectory()
    temp_dir_path = Path(temp_dir.name)

    for some_file in fsspec.open_files(str(directory / '**'), 'rb'):
        some_file_path = Pathy(some_file.path)
        relative_filepath = some_file_path.relative_to(directory_openfile.path)
        filepath = temp_dir_path / relative_filepath
        (filepath.parent).mkdir(exist_ok=True, parents=True)
        logger.info(f'copy {some_file_path} => {filepath}')
        with open(filepath, 'wb') as out:
            with some_file as src:
                out.write(src.read())

    return temp_dir
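The returned TemporaryDirectory object owns the copied files, so the caller must keep it alive for as long as the local copies are needed; a usage sketch (URL and consumer are hypothetical):

from pathlib import Path

tmp = copy_files_from_directory_to_temp_directory('gs://my-bucket/model_dir')
try:
    load_model(Path(tmp.name))  # hypothetical consumer of the local copy
finally:
    tmp.cleanup()  # or let garbage collection remove the directory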
Example #9
    def report_on_predictions(
        self,
        true_images_data: List[ImageData],
        pred_images_data_detection: List[ImageData],
        raw_pred_images_data_detection: List[ImageData],
        pred_images_data_pipeline: List[ImageData],
        tag: str,
        known_class_names: List[str],
        extra_bbox_label: str,
        output_directory: Union[str, Path],
        minimum_iou: float,
        pseudo_class_names: List[str],
    ):
        logger.info(f"Counting metrics for '{tag}'...")
        tag_df_detection_metrics = get_df_detection_metrics(
            true_images_data=true_images_data,
            pred_images_data=pred_images_data_detection,
            minimum_iou=minimum_iou,
            raw_pred_images_data=raw_pred_images_data_detection)
        tag_df_pipeline_metrics = get_df_pipeline_metrics(
            true_images_data=true_images_data,
            pred_images_data=pred_images_data_pipeline,
            minimum_iou=minimum_iou,
            extra_bbox_label=extra_bbox_label,
            pseudo_class_names=pseudo_class_names,
            known_class_names=known_class_names)

        pipelines_reports_datas = [
            PipelineReportData(df_detection_metrics=tag_df_detection_metrics,
                               df_pipeline_metrics=tag_df_pipeline_metrics,
                               tag=tag)
        ]

        pipeline_report_data = concat_pipelines_reports_datas(
            pipelines_reports_datas=pipelines_reports_datas, compare_tag=tag)
        markdowns = self._get_markdowns(
            pipeline_report_data=pipeline_report_data, tags=[tag])
        self._save_report(models_specs=[None],
                          tags=[tag],
                          output_directory=output_directory,
                          true_images_data=true_images_data,
                          markdowns=markdowns,
                          codes=[])
Example #10
    def report_on_predictions(
        self,
        true_images_data: List[ImageData],
        pred_images_data: List[ImageData],
        raw_pred_images_data: List[ImageData],
        tag: str,
        output_directory: Union[str, Path],
        minimum_iou: float,
    ):

        logger.info(f"Counting metrics for '{tag}'...")
        tag_df_detection_metrics = get_df_detection_metrics(
            true_images_data=true_images_data,
            pred_images_data=pred_images_data,
            minimum_iou=minimum_iou,
            raw_pred_images_data=raw_pred_images_data)
        tag_df_detection_recall_per_class = get_df_detection_recall_per_class(
            true_images_data=true_images_data,
            pred_images_data=pred_images_data,
            minimum_iou=minimum_iou,
        )
        detections_reports_datas = [
            DetectionReportData(
                df_detection_metrics=tag_df_detection_metrics,
                df_detection_recall_per_class=tag_df_detection_recall_per_class,
                tag=tag)
        ]

        detection_report_data = concat_detections_reports_datas(
            detections_reports_datas=detections_reports_datas, compare_tag=tag)
        markdowns = self._get_markdowns(
            detection_report_data=detection_report_data)
        self._save_report(models_specs=[None],
                          tags=[tag],
                          output_directory=output_directory,
                          true_images_data=true_images_data,
                          markdowns=markdowns,
                          codes=[])
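A schematic call; pred_images_data holds the score-thresholded detections, while raw_pred_images_data holds the unfiltered ones (presumably used for threshold-independent metrics). All input names are hypothetical:

reporter.report_on_predictions(
    true_images_data=true_images_data,
    pred_images_data=pred_images_data,          # filtered by score threshold
    raw_pred_images_data=raw_pred_images_data,  # unfiltered detector output
    tag='precomputed',
    output_directory='reports/detection_preds',
    minimum_iou=0.5,
)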