Example No. 1
    def get_performance(self) -> Performance:
        confusion_matrix_dashboard_metrics: List[MetricsGroup] = []

        # Use normalized matrix for UI
        normalized_matrices: List[MatrixMetric] = copy.deepcopy(
            self._unnormalized_matrices)
        for matrix in normalized_matrices:
            matrix.normalize()

        confusion_matrix_info = MatrixChartInfo(
            name="Confusion matrix",
            header="confusion",
            row_header="Predicted label",
            column_header="True label",
        )
        confusion_matrix_dashboard_metrics.append(
            MatrixMetricsGroup(metrics=normalized_matrices,
                               visualization_info=confusion_matrix_info))
        # Compute precision and recall MetricsGroups and append them to the dashboard metrics
        for _confusion_matrix in self._unnormalized_matrices:
            confusion_matrix_dashboard_metrics.append(
                precision_metrics_group(_confusion_matrix))
            confusion_matrix_dashboard_metrics.append(
                recall_metrics_group(_confusion_matrix))

        return Performance(
            score=self.accuracy,
            dashboard_metrics=confusion_matrix_dashboard_metrics)
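
For illustration, here is a minimal sketch of how a caller might inspect the Performance returned above. It uses only the score and dashboard_metrics attributes exercised by the tests in this section and ScoreMetric's name/value pair; the import path and the helper name are assumptions, not part of the original code.

# Minimal sketch, assuming the ote_sdk-style import path below; log_performance
# is an illustrative helper, not part of the original code.
from ote_sdk.entities.metrics import Performance


def log_performance(performance: Performance) -> None:
    # The overall score is a ScoreMetric, i.e. a name/value pair.
    print(f"{performance.score.name}: {performance.score.value}")
    # dashboard_metrics is a flat list of MetricsGroup objects; count how many
    # groups of each concrete type (matrix, bar, line, text) were attached.
    counts: dict = {}
    for group in performance.dashboard_metrics:
        counts[type(group).__name__] = counts.get(type(group).__name__, 0) + 1
    for group_type, count in counts.items():
        print(f"  {group_type}: {count} group(s)")
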
    def test_null_performance(self):
        """
        <b>Description:</b>
        Check NullPerformance class

        <b>Input data:</b>
        NullPerformance object

        <b>Expected results:</b>
        Test passes if NullPerformance object score and dashboard_metrics attributes and __repr__ and __eq__ methods
        return expected values

        <b>Steps</b>
        1. Check NullPerformance object score and dashboard_metrics attributes
        2. Check NullPerformance object __repr__ method
        3. Check NullPerformance object __eq__ method
        """
        # Checking NullPerformance score and dashboard_metrics attributes
        null_performance = NullPerformance()
        assert null_performance.score == ScoreMetric(name="Null score", value=0.0)
        assert null_performance.dashboard_metrics == []
        # Checking NullPerformance __repr__ method
        assert repr(null_performance) == "NullPerformance()"
        # Checking __eq__ method for equal NullPerformance objects
        equal_null_performance = NullPerformance()
        assert null_performance == equal_null_performance
        # Checking NullPerformance __eq__ method by comparing with Performance object
        score_metric = TestScoreMetric().score_metric()
        performance = Performance(score_metric)
        assert null_performance != performance
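
NullPerformance behaves as a null object in the test above: it compares equal to other NullPerformance instances and unequal to a real Performance. Below is a small hedged sketch of the guard this enables; the import path is assumed and the helper name is illustrative.

# Hedged sketch relying only on the __eq__ behaviour checked in
# test_null_performance; the import path is an assumption.
from ote_sdk.entities.metrics import NullPerformance


def has_real_performance(candidate) -> bool:
    # NullPerformance() instances compare equal to one another (checked above),
    # so this distinguishes "never evaluated" from an actual Performance result.
    return candidate is not None and candidate != NullPerformance()
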
Example No. 3
    def get_performance(self) -> Performance:
        score = self.overall_dice
        dashboard_metrics: Optional[List[MetricsGroup]]
        if len(self.dice_per_label) == 0:
            dashboard_metrics = None
        else:
            dashboard_metrics = [
                BarMetricsGroup(
                    metrics=list(self.dice_per_label.values()),
                    visualization_info=BarChartInfo(
                        name="Dice Average Per Label",
                        palette=ColorPalette.LABEL,
                    ),
                )
            ]
        return Performance(score=score, dashboard_metrics=dashboard_metrics)
    def get_performance(self) -> Performance:
        score = self.f_measure
        dashboard_metrics: List[MetricsGroup]
        dashboard_metrics = [
            BarMetricsGroup(
                metrics=list(self.f_measure_per_label.values()),
                visualization_info=BarChartInfo(
                    name="F-measure per label",
                    palette=ColorPalette.LABEL,
                    visualization_type=VisualizationType.RADIAL_BAR,
                ),
            )
        ]
        if self.f_measure_per_confidence is not None:
            dashboard_metrics.append(
                LineMetricsGroup(
                    metrics=[self.f_measure_per_confidence],
                    visualization_info=LineChartInfo(
                        name="F-measure per confidence", ),
                ))

        if self.best_confidence_threshold is not None:
            dashboard_metrics.append(
                TextMetricsGroup(
                    metrics=[self.best_confidence_threshold],
                    visualization_info=TextChartInfo(
                        name="Optimal confidence threshold", ),
                ))

        if self.f_measure_per_nms is not None:
            dashboard_metrics.append(
                LineMetricsGroup(
                    metrics=[self.f_measure_per_nms],
                    visualization_info=LineChartInfo(
                        name="F-measure per nms", ),
                ))

        if self.best_nms_threshold is not None:
            dashboard_metrics.append(
                TextMetricsGroup(
                    metrics=[self.best_nms_threshold],
                    visualization_info=TextChartInfo(
                        name="Optimal nms threshold", ),
                ))
        return Performance(score=score, dashboard_metrics=dashboard_metrics)
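
The method above appends the optional line and text groups only when the corresponding value exists, so the dashboard's composition varies per run. Here is a hedged sketch of filtering such a dashboard on the consumer side, using only the group classes that appear in this snippet; the import path and helper name are assumptions.

# Hedged sketch; the import path is assumed (ote_sdk-style), and the helper
# name is illustrative.
from ote_sdk.entities.metrics import LineMetricsGroup, Performance


def line_chart_groups(performance: Performance) -> list:
    # Keep only the line-chart groups (e.g. "F-measure per confidence",
    # "F-measure per nms") out of the mixed dashboard built above.
    return [
        group for group in performance.dashboard_metrics
        if isinstance(group, LineMetricsGroup)
    ]
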
Example No. 5
    def save_model(self, output_model: ModelEntity):
        """
        Save the model after training is completed.
        """
        config = self.get_config()
        model_info = {
            "model": self.model.state_dict(),
            "config": config,
            "VERSION": 1,
        }
        buffer = io.BytesIO()
        torch.save(model_info, buffer)
        output_model.set_data("weights.pth", buffer.getvalue())
        output_model.set_data(
            "label_schema.json",
            label_schema_to_bytes(self.task_environment.label_schema))
        # store computed threshold
        output_model.set_data(
            "threshold", bytes(struct.pack("f", self.model.threshold.item())))

        f1_score = self.model.image_metrics.OptimalF1.compute().item()
        output_model.performance = Performance(
            score=ScoreMetric(name="F1 Score", value=f1_score))
        output_model.precision = [ModelPrecision.FP32]
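
save_model writes three blobs: the pickled weights via torch.save, the label schema as JSON bytes, and the threshold as a raw 4-byte float from struct.pack("f", ...). Below is a hedged sketch of reading them back, assuming ModelEntity exposes a get_data counterpart to the set_data calls above; the helper name is illustrative.

import io
import struct

import torch


# Hedged sketch of the inverse of save_model(); get_data() is assumed to be the
# counterpart of the set_data() calls above.
def load_saved_model_blobs(output_model):
    # "threshold" was written with struct.pack("f", ...): 4 bytes in native
    # byte order, so unpack returns a 1-tuple.
    threshold = struct.unpack("f", output_model.get_data("threshold"))[0]
    # "weights.pth" holds a torch.save() buffer; torch.load accepts a file-like
    # object, so wrap the raw bytes in BytesIO.
    model_info = torch.load(io.BytesIO(output_model.get_data("weights.pth")),
                            map_location="cpu")
    return threshold, model_info["model"], model_info["config"]
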
    def test_performance(self):
        """
        <b>Description:</b>
        Check Performance class

        <b>Input data:</b>
        Performance object with specified score and dashboard_metrics parameters

        <b>Expected results:</b>
        Test passes if Performance object score and dashboard_metrics attributes and __eq__ and __repr__ methods
        return expected values

        <b>Steps</b>
        1. Check score and dashboard_metrics attributes for Performance object with not specified dashboard_metrics
        parameter
        2. Check score and dashboard_metrics attributes for Performance object with specified dashboard_metrics
        parameter
        3. Check __eq__ method for equal Performance objects, Performance objects with different dashboard_metrics
        attributes, Performance objects with different score attributes
        4. Check __repr__ method
        5. Check ValueError exception raised when score attributes type not equal to ScoreMetric
        """
        # Positive scenario for Performance object with default parameters
        score_metric = TestScoreMetric().score_metric()
        default_parameters_performance = Performance(score_metric)
        assert default_parameters_performance.score == score_metric
        assert default_parameters_performance.dashboard_metrics == []
        # Positive scenario for Performance object with specified dashboard_metrics parameter
        # Preparing dashboard metrics list
        with warnings.catch_warnings():
            # there is a matrix with zero sum in row, so we expect 0/0 division.
            warnings.filterwarnings(
                "ignore", "invalid value encountered in true_divide"
            )
            matrix_metrics = [
                TestMetrics().normalized_matrix_metric(),
                TestMetrics().normalized_matrix_zero_sum(),
            ]
        matrix_chart_info = TestMatrixChartInfo.default_values_matrix_chart_info()
        matrix_metrics_group = MatrixMetricsGroup(
            metrics=matrix_metrics, visualization_info=matrix_chart_info
        )
        curve_metrics = (
            TestCurveMetric().curve_metric(),
            TestCurveMetric().x_not_specified_curve_metric(),
        )
        line_chart_info = TestLineChartInfo().default_parameters_line_chart_info()
        line_metrics_group = LineMetricsGroup(
            metrics=curve_metrics, visualization_info=line_chart_info
        )
        bar_metrics = [
            TestScoreMetric().score_metric(),
            TestCountMetric().count_metric(),
        ]
        bar_chart_info = TestBarChartInfo().default_parameters_bar_chart_info()
        bar_metrics_group = BarMetricsGroup(
            metrics=bar_metrics, visualization_info=bar_chart_info
        )
        text_score_metric = TestScoreMetric().score_metric()
        text_chart_info = TestTextChartInfo().text_chart_info()
        text_metric_group = TextMetricsGroup(
            metrics=[text_score_metric], visualization_info=text_chart_info
        )
        dashboard_metrics = [
            matrix_metrics_group,
            line_metrics_group,
            bar_metrics_group,
            text_metric_group,
        ]
        # Checking Performance attributes
        specified_parameters_performance = Performance(
            score=score_metric, dashboard_metrics=dashboard_metrics
        )
        assert specified_parameters_performance.score == score_metric
        assert specified_parameters_performance.dashboard_metrics == dashboard_metrics
        # Checking __eq__ method
        equal_default_parameters_performance = Performance(score_metric)
        assert default_parameters_performance == equal_default_parameters_performance
        different_metrics_performance = Performance(
            score_metric, [matrix_metrics_group]
        )
        assert default_parameters_performance == different_metrics_performance
        unequal_score_metric = ScoreMetric(name="Unequal ScoreMetric", value=1.0)
        assert default_parameters_performance != Performance(unequal_score_metric)
        assert default_parameters_performance != str
        # Checking __repr__ method
        assert (
            repr(default_parameters_performance)
            == "Performance(score: 2.0, dashboard: (0 metric groups))"
        )
        assert (
            repr(specified_parameters_performance)
            == "Performance(score: 2.0, dashboard: (4 metric groups))"
        )
        # Checking ValueError exception raised when score parameter not ScoreMetric class
        count_metric = TestCountMetric().count_metric()
        with pytest.raises(ValueError):
            Performance(count_metric)
Example No. 7
    def train(self,
              dataset: DatasetEntity,
              output_model: ModelEntity,
              train_parameters: Optional[TrainParameters] = None):
        """ Trains a model on a dataset """

        train_model = deepcopy(self._model)

        if train_parameters is not None:
            update_progress_callback = train_parameters.update_progress
        else:
            update_progress_callback = default_progress_callback
        time_monitor = TrainingProgressCallback(
            update_progress_callback,
            num_epoch=self._cfg.train.max_epoch,
            num_train_steps=math.ceil(
                len(dataset.get_subset(Subset.TRAINING)) /
                self._cfg.train.batch_size),
            num_val_steps=0,
            num_test_steps=0)

        self.metrics_monitor = DefaultMetricsMonitor()
        self.stop_callback.reset()

        set_random_seed(self._cfg.train.seed)
        train_subset = dataset.get_subset(Subset.TRAINING)
        val_subset = dataset.get_subset(Subset.VALIDATION)
        self._cfg.custom_datasets.roots = [
            OTEClassificationDataset(train_subset,
                                     self._labels,
                                     self._multilabel,
                                     keep_empty_label=self._empty_label
                                     in self._labels),
            OTEClassificationDataset(val_subset,
                                     self._labels,
                                     self._multilabel,
                                     keep_empty_label=self._empty_label
                                     in self._labels)
        ]
        datamanager = torchreid.data.ImageDataManager(
            **imagedata_kwargs(self._cfg))

        num_aux_models = len(self._cfg.mutual_learning.aux_configs)

        if self._cfg.use_gpu:
            main_device_ids = list(range(self.num_devices))
            extra_device_ids = [main_device_ids for _ in range(num_aux_models)]
            train_model = DataParallel(train_model,
                                       device_ids=main_device_ids,
                                       output_device=0).cuda(
                                           main_device_ids[0])
        else:
            extra_device_ids = [None for _ in range(num_aux_models)]

        optimizer = torchreid.optim.build_optimizer(
            train_model, **optimizer_kwargs(self._cfg))

        if self._cfg.lr_finder.enable:
            scheduler = None
        else:
            scheduler = torchreid.optim.build_lr_scheduler(
                optimizer,
                num_iter=datamanager.num_iter,
                **lr_scheduler_kwargs(self._cfg))

        if self._cfg.lr_finder.enable:
            _, train_model, optimizer, scheduler = run_lr_finder(
                self._cfg, datamanager, train_model, optimizer, scheduler, None,
                rebuild_model=False, gpu_num=self.num_devices, split_models=False)

        _, final_acc = run_training(self._cfg,
                                    datamanager,
                                    train_model,
                                    optimizer,
                                    scheduler,
                                    extra_device_ids,
                                    self._cfg.train.lr,
                                    tb_writer=self.metrics_monitor,
                                    perf_monitor=time_monitor,
                                    stop_callback=self.stop_callback)

        training_metrics = self._generate_training_metrics_group()

        self.metrics_monitor.close()
        if self.stop_callback.check_stop():
            logger.info('Training cancelled.')
            return

        logger.info("Training finished.")

        best_snap_path = os.path.join(self._scratch_space, 'best.pth')
        if os.path.isfile(best_snap_path):
            load_pretrained_weights(self._model, best_snap_path)

        for filename in os.listdir(self._scratch_space):
            match = re.match(r'best_(aux_model_[0-9]+\.pth)', filename)
            if match:
                aux_model_name = match.group(1)
                best_aux_snap_path = os.path.join(self._scratch_space,
                                                  filename)
                self._aux_model_snap_paths[aux_model_name] = best_aux_snap_path

        self.save_model(output_model)
        performance = Performance(score=ScoreMetric(value=final_acc,
                                                    name="accuracy"),
                                  dashboard_metrics=training_metrics)
        logger.info(f'FINAL MODEL PERFORMANCE {performance}')
        output_model.performance = performance
    def test_model_entity_sets_values(self):
        """
        <b>Description:</b>
        Check that ModelEntity correctly returns the set values

        <b>Expected results:</b>
        Test passes if ModelEntity correctly returns the set values

        <b>Steps</b>
        1. Check set values in the ModelEntity
        """
        def __get_path_to_file(filename: str):
            """
            Return the path to the file named 'filename', which lives in the tests/entities directory
            """
            return str(Path(__file__).parent / Path(filename))

        car = LabelEntity(name="car", domain=Domain.DETECTION)
        labels_list = [car]
        dummy_template = __get_path_to_file("./dummy_template.yaml")
        model_template = parse_model_template(dummy_template)
        hyper_parameters = model_template.hyper_parameters.data
        params = ote_config_helper.create(hyper_parameters)
        labels_schema = LabelSchemaEntity.from_labels(labels_list)
        environment = TaskEnvironment(
            model=None,
            hyper_parameters=params,
            label_schema=labels_schema,
            model_template=model_template,
        )

        item = self.generate_random_image()
        dataset = DatasetEntity(items=[item])
        score_metric = ScoreMetric(name="Model accuracy", value=0.5)

        model_entity = ModelEntity(train_dataset=self.dataset(),
                                   configuration=self.configuration())

        set_params = {
            "configuration": environment.get_model_configuration(),
            "train_dataset": dataset,
            "id": ID(1234567890),
            "creation_date": self.creation_date,
            "previous_trained_revision": 5,
            "previous_revision": 2,
            "version": 2,
            "tags": ["tree", "person"],
            "model_status": ModelStatus.TRAINED_NO_STATS,
            "model_format": ModelFormat.BASE_FRAMEWORK,
            "performance": Performance(score_metric),
            "training_duration": 5.8,
            "precision": [ModelPrecision.INT8],
            "latency": 328,
            "fps_throughput": 20,
            "target_device": TargetDevice.GPU,
            "target_device_type": "notebook",
            "optimization_methods": [OptimizationMethod.QUANTIZATION],
            "optimization_type": ModelOptimizationType.MO,
            "optimization_objectives": {
                "param": "Test param"
            },
            "performance_improvement": {"speed", 0.5},
            "model_size_reduction": 1.0,
        }

        for key, value in set_params.items():
            setattr(model_entity, key, value)
            assert getattr(model_entity, key) == value

        assert model_entity.is_optimized() is True