    # Assumes (from auto-sklearn's codebase): Configuration from ConfigSpace,
    # calculate_score and CLASSIFICATION_METRICS from autosklearn.metrics.
    def _loss(self, y_true, y_hat, all_scoring_functions=None):
        all_scoring_functions = (self.all_scoring_functions
                                 if all_scoring_functions is None else
                                 all_scoring_functions)
        # Runs without a real Configuration (e.g. dummy runs) get a fixed loss of 1.0.
        if not isinstance(self.configuration, Configuration):
            if all_scoring_functions:
                return {self.metric: 1.0}
            else:
                return 1.0

        score = calculate_score(y_true,
                                y_hat,
                                self.task_type,
                                self.metric,
                                all_scoring_functions=all_scoring_functions)

        if hasattr(score, '__len__'):
            # TODO: instead of using self.metric, all metrics given by key should
            # be used, but currently that raises an error.
            # NOTE: this variant only maps classification metrics; the task-aware
            # version in Example #2 also handles regression.

            err = {
                key: metric._optimum - score[key]
                for key, metric in CLASSIFICATION_METRICS.items()
                if key in score
            }
        else:
            err = self.metric._optimum - score

        return err
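
A minimal, self-contained sketch of the optimum-minus-score conversion used above. The
metric objects here are hypothetical stand-ins for auto-sklearn's (only the _optimum
attribute referenced in the snippet is assumed):

from types import SimpleNamespace

# Hypothetical registry mirroring the shape of CLASSIFICATION_METRICS.
toy_metrics = {
    'accuracy': SimpleNamespace(_optimum=1.0),
    'balanced_accuracy': SimpleNamespace(_optimum=1.0),
}

# Made-up scores, as calculate_score might return them for several scorers.
score = {'accuracy': 0.92, 'balanced_accuracy': 0.88}

# The same dict-comprehension pattern as in _loss above.
err = {key: metric._optimum - score[key]
       for key, metric in toy_metrics.items()
       if key in score}
print(err)  # ~{'accuracy': 0.08, 'balanced_accuracy': 0.12} (float rounding aside)
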
Example #2

    def predict_and_loss(self, train=False):
        """Predict on the train or the test split and convert the score(s)
        into the loss(es) that auto-sklearn minimizes."""

        if train:
            Y_pred = self.predict_function(self.X_train, self.model,
                                           self.task_type, self.Y_train)
            score = calculate_score(
                solution=self.Y_train,
                prediction=Y_pred,
                task_type=self.task_type,
                metric=self.metric,
                scoring_functions=self.scoring_functions)
        else:
            Y_pred = self.predict_function(self.X_test, self.model,
                                           self.task_type, self.Y_train)
            score = calculate_score(
                solution=self.Y_test,
                prediction=Y_pred,
                task_type=self.task_type,
                metric=self.metric,
                scoring_functions=self.scoring_functions)

        if hasattr(score, '__len__'):
            if self.task_type in CLASSIFICATION_TASKS:
                err = {key: metric._optimum - score[key] for key, metric in
                       CLASSIFICATION_METRICS.items() if key in score}
            else:
                err = {key: metric._optimum - score[key] for key, metric in
                       REGRESSION_METRICS.items() if key in score}
        else:
            err = self.metric._optimum - score

        # The two trailing None values keep the four-element return shape.
        return err, Y_pred, None, None
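
The hasattr(score, '__len__') check above distinguishes the two shapes calculate_score
can return: a bare float for a single metric, or a dict when extra scoring functions
are requested. A simplified sketch with a single made-up optimum of 1.0:

def score_to_err(score, optimum=1.0):
    # Dicts (multi-metric results) define __len__; plain floats do not.
    if hasattr(score, '__len__'):
        return {key: optimum - value for key, value in score.items()}
    return optimum - score

print(score_to_err(0.9))                # ~0.1, the single-metric case
print(score_to_err({'accuracy': 0.9}))  # ~{'accuracy': 0.1}, the multi-metric case
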
    def _loss(self, y_true, y_hat, all_scoring_functions=None):
        """Auto-sklearn follows a minimization goal, so the make_scorer
        sign is used as a guide to obtain the value to minimize.

        To optimize a metric:
            1. The score is calculated with calculate_score, with the caveat
               that a negative score is returned if greater is not better
               for the metric.
            2. The err (the optimization goal) is then:
                   optimum - (metric.sign * actual_score)
               For accuracy, for example: optimum (1) - (+1 * actual score)
               For log loss, for example: optimum (0) - (-1 * actual score)

        A toy numeric walk-through follows this method.
        """
        all_scoring_functions = (self.all_scoring_functions
                                 if all_scoring_functions is None else
                                 all_scoring_functions)
        # Runs without a real Configuration (e.g. dummy runs) get a fixed loss of 1.0.
        if not isinstance(self.configuration, Configuration):
            if all_scoring_functions:
                return {self.metric: 1.0}
            else:
                return 1.0

        score = calculate_score(y_true,
                                y_hat,
                                self.task_type,
                                self.metric,
                                all_scoring_functions=all_scoring_functions)

        if hasattr(score, '__len__'):
            # TODO: instead of using self.metric, all metrics given by key should
            # be used, but currently that raises an error.
            if self.task_type in CLASSIFICATION_TASKS:
                err = {
                    key: metric._optimum - score[key]
                    for key, metric in CLASSIFICATION_METRICS.items()
                    if key in score
                }
            else:
                err = {
                    key: metric._optimum - score[key]
                    for key, metric in REGRESSION_METRICS.items()
                    if key in score
                }
        else:
            err = self.metric._optimum - score

        return err
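
To make the docstring's sign convention concrete, a toy numeric walk-through with
made-up numbers (the metric objects are hypothetical stand-ins, not auto-sklearn's):

from types import SimpleNamespace

accuracy = SimpleNamespace(_optimum=1.0, _sign=1.0)   # greater is better
log_loss = SimpleNamespace(_optimum=0.0, _sign=-1.0)  # smaller is better

# calculate_score returns sign * raw score, so greater is always better.
score_accuracy = accuracy._sign * 0.9  # +0.9 for a raw accuracy of 0.9
score_log_loss = log_loss._sign * 0.3  # -0.3 for a raw log loss of 0.3

# err = optimum - score: both losses are non-negative and minimized at 0.
print(accuracy._optimum - score_accuracy)  # ~0.1
print(log_loss._optimum - score_log_loss)  # 0.3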