Example #1
    def test_confidence_interval(self):
        # Build a synthetic regression target with a small, bounded error term.
        rs = numpy.random.RandomState(0)
        yt = rs.rand(100)
        d = rs.rand(100)

        yp = yt + 0.1 * d
        mae = Metrics.call('mae', yt, yp)
        ci_low, ci_high = \
            Metrics.confidence_interval('mae', y_true=yt, y_pred=yp)

        # The point estimate must fall inside its own confidence interval.
        self.assertTrue(ci_low < mae < ci_high)
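For context, a confidence interval like the one returned by Metrics.confidence_interval is typically obtained by resampling the test predictions. Below is a minimal bootstrap sketch of that idea using plain numpy and sklearn's mean_absolute_error; the helper name bootstrap_ci and its defaults are illustrative assumptions, not the library's actual implementation.

import numpy
from sklearn.metrics import mean_absolute_error


def bootstrap_ci(metric_fn, y_true, y_pred, n_boot=1000, alpha=0.05, seed=0):
    # Hypothetical helper: resample (y_true, y_pred) pairs with replacement and
    # take the empirical alpha/2 and 1 - alpha/2 quantiles of the metric values.
    rs = numpy.random.RandomState(seed)
    n = len(y_true)
    scores = [metric_fn(y_true[idx], y_pred[idx])
              for idx in (rs.randint(0, n, n) for _ in range(n_boot))]
    return numpy.percentile(scores, [100 * alpha / 2, 100 * (1 - alpha / 2)])


rs = numpy.random.RandomState(0)
y_true = rs.rand(100)
y_pred = y_true + 0.1 * rs.rand(100)
low, high = bootstrap_ci(mean_absolute_error, y_true, y_pred)
assert low < mean_absolute_error(y_true, y_pred) < high

The test above asserts exactly this property: the full-sample metric value sits inside the interval built from its resampled distribution.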
Example #2
    def score_value_report(
            self,
            model,
            dmd_test: DMD,
            labels=None,
            y_proba: numpy.ndarray = None,
            y_pred: numpy.ndarray = None) -> [ScoringMetricReport]:
        '''
        :param model: model of interest
        :param dmd_test: test set
        :param labels: class labels; when omitted they are inferred from y_true and y_pred
        :param y_proba: pre-calculated predicted probabilities for the test set, if available
        :param y_pred: pre-calculated model predictions for the test set, if available
        :return: tuple of (score_report, confusion_matrix, scatter, classification_report)
        '''
        score_report = []

        model_support_dmd = GeneralUtils.dmd_supported(model, dmd_test)
        x_test = dmd_test if model_support_dmd else dmd_test.values
        y_true = dmd_test.target

        is_classification = GeneralUtils.is_classification(model)

        confusion_matrix, scatter, classification_report = None, None, None
        if is_classification:

            y_proba = y_proba if y_proba is not None else model.predict_proba(
                x_test)
            y_pred = y_pred if y_pred is not None else numpy.argmax(y_proba,
                                                                    axis=1)

            confusion_matrix = ConfusionMatrixReport(
                y_true=y_true,
                y_pred=y_pred,
                labels=labels if labels is not None else unique_labels(
                    y_true, y_pred))

            classification_report = SklearnClassificationReport(
                y_true=y_true, y_pred=y_pred, y_proba=y_proba, labels=labels)

            for metric in self.metrics:
                if metric.ptype != CLASSIFICATION:
                    continue
                if metric.is_proba:
                    yp = y_proba
                else:
                    yp = y_pred

                score = metric.function(y_true, yp)
                ci_low, ci_high = Metrics.confidence_interval(metric,
                                                              y_true=y_true,
                                                              y_pred=y_pred,
                                                              y_proba=y_proba)
                score_report.append(
                    ScoringMetricReport(metric_name=metric.name,
                                        value=score,
                                        ci_low=ci_low,
                                        ci_high=ci_high))

        else:
            y_pred = y_pred if y_pred is not None else model.predict(x_test)

            error_bars = self._calc_error_bars(dmd_test, model)

            scatter = ScatterReport(y_true=y_true,
                                    y_pred=y_pred,
                                    error_bars=error_bars)
            for metric in self.metrics:
                if metric.ptype != REGRESSION:
                    continue

                score = metric.function(y_true, y_pred)
                ci_low, ci_high = Metrics.confidence_interval(metric,
                                                              y_true=y_true,
                                                              y_pred=y_pred)

                ci_low = GeneralUtils.f5(ci_low)
                ci_high = GeneralUtils.f5(ci_high)
                score = GeneralUtils.f5(score)
                score_report.append(
                    ScoringMetricReport(metric_name=metric.name,
                                        value=score,
                                        ci_low=ci_low,
                                        ci_high=ci_high))

        return score_report, confusion_matrix, scatter, classification_report
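A hedged usage sketch of the method above, assuming a fitted sklearn-style classifier and an already-constructed DMD test set; the names scorer, clf and dmd_test are placeholders, and attribute access on ScoringMetricReport is assumed to mirror its constructor arguments.

# Illustrative only: `scorer` is the object exposing score_value_report,
# `clf` is a fitted classifier, `dmd_test` an existing DMD test set.
score_report, confusion_matrix, scatter, classification_report = \
    scorer.score_value_report(model=clf, dmd_test=dmd_test)

for report in score_report:
    # Each ScoringMetricReport pairs a metric value with its CI bounds
    # (attribute names assumed to match the constructor keywords).
    print(report.metric_name, report.value, report.ci_low, report.ci_high)

For classification models the scatter entry is None and a confusion matrix plus a classification report are returned; for regression models the opposite holds, with the scatter report carrying the error bars.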