def evaluate(self,
             output_resultset: ResultSetEntity,
             evaluation_metric: Optional[str] = None):
    performance = MetricsHelper.compute_accuracy(
        output_resultset).get_performance()
    logger.info(f"Computes performance of {performance}")
    output_resultset.performance = performance
Example #2
def evaluate(self,
             output_result_set: ResultSetEntity,
             evaluation_metric: Optional[str] = None):
    if evaluation_metric is not None:
        logger.warning(f'Requested to use {evaluation_metric} metric, '
                       'but parameter is ignored. Use accuracy instead.')
    output_result_set.performance = MetricsHelper.compute_accuracy(
        output_result_set).get_performance()
Example #3
def evaluate(self,
             output_resultset: ResultSetEntity,
             evaluation_metric: Optional[str] = None):
    """
    Evaluate the performance on a result set.
    """
    f_measure_metrics = MetricsHelper.compute_f_measure(output_resultset)
    output_resultset.performance = f_measure_metrics.get_performance()
    logger.info("F-measure after evaluation: %f",
                f_measure_metrics.f_measure.value)
Example #4
    def evaluate(self,
                 output_resultset: ResultSetEntity,
                 evaluation_metric: Optional[str] = None):
        """Evaluate the performance of the model.

        Args:
            output_resultset (ResultSetEntity): Result set storing ground truth and predicted dataset.
            evaluation_metric (Optional[str], optional): Evaluation metric. Defaults to None.
        """
        output_resultset.performance = MetricsHelper.compute_f_measure(
            output_resultset).get_performance()
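All of the evaluate() variants above follow the same pattern: build a ResultSetEntity from a model together with its ground-truth and prediction datasets, pass it to evaluate(), and read the computed metric back from the result set's performance field. Below is a minimal usage sketch under that assumption; task, model, gt_dataset and pred_dataset are illustrative placeholders, not objects defined in the examples.

    # Illustrative sketch only: `task` is assumed to expose one of the
    # evaluate() methods shown above; `model`, `gt_dataset` and
    # `pred_dataset` are hypothetical objects created elsewhere.
    result_set = ResultSetEntity(
        model=model,
        ground_truth_dataset=gt_dataset,
        prediction_dataset=pred_dataset,
    )

    # evaluate() mutates the result set in place, storing the computed
    # metric in result_set.performance.
    task.evaluate(result_set)
    print(result_set.performance)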
Example #5
    def test_resultset_entity(self):
        """
        <b>Description:</b>
        Check that ResultSetEntity correctly returns its attribute values

        <b>Input data:</b>
        Mock data

        <b>Expected results:</b>
        Test passes if incoming data is processed correctly

        <b>Steps</b>
        1. Create dummy data
        2. Check the processing of default values
        3. Check the processing of changed values
        """

        test_data = {
            "model": None,
            "ground_truth_dataset": None,
            "prediction_dataset": None,
            "purpose": None,
            "performance": None,
            "creation_date": None,
            "id": None,
        }

        result_set = ResultSetEntity(**test_data)

        for name, value in test_data.items():
            set_attr_name = f"test_{name}"
            if name in [
                "model",
                "ground_truth_dataset",
                "prediction_dataset",
                "purpose",
            ]:
                assert getattr(result_set, name) == value
                setattr(result_set, name, set_attr_name)
                assert getattr(result_set, name) == set_attr_name

        assert result_set.performance == NullPerformance()
        assert type(result_set.creation_date) == datetime.datetime
        assert result_set.id == ID()

        assert result_set.has_score_metric() is False
        result_set.performance = "test_performance"
        assert result_set.performance != NullPerformance()
        assert result_set.has_score_metric() is True

        creation_date = self.creation_date
        result_set.creation_date = creation_date
        assert result_set.creation_date == creation_date

        set_attr_id = ID(123456789)
        result_set.id = set_attr_id
        assert result_set.id == set_attr_id

        test_result_set_repr = [
            f"model={result_set.model}",
            f"ground_truth_dataset={result_set.ground_truth_dataset}",
            f"prediction_dataset={result_set.prediction_dataset}",
            f"purpose={result_set.purpose}",
            f"performance={result_set.performance}",
            f"creation_date={result_set.creation_date}",
            f"id={result_set.id}",
        ]

        for i in test_result_set_repr:
            assert i in repr(result_set)