Example #1
def callback(env):
    """Early-stopping callback that only accepts non-overfit improvements."""
    # Assumes evals was passed as [(dtrain, 'train'), (dvalid, ...)]:
    # entry 0 is the train metric, entry 1 the validation metric.
    score_train = env.evaluation_result_list[0][1]
    score = env.evaluation_result_list[1][1]
    if not state:
        init(env)
    best_score = state['best_score']
    best_iteration = state['best_iteration']
    maximize_score = state['maximize_score']
    # An iteration counts as an improvement only if the eval score beats the
    # best so far AND the train/eval gap stays within max_overfit.
    if maximize_score:
        improved = score > best_score
        gap = score_train - score
    else:
        improved = score < best_score
        gap = score - score_train
    within_limit = abs(gap) <= max_overfit if symetric_overfit else gap <= max_overfit
    if improved and within_limit:
        msg = '[%d]\t%s' % (env.iteration, '\t'.join(
            [_fmt_metric(x) for x in env.evaluation_result_list]))
        state['best_msg'] = msg
        state['best_score'] = score
        state['best_score_train'] = score_train
        state['best_iteration'] = env.iteration
        # Save the best results as model attributes so they survive
        # checkpointing.
        if env.model is not None:
            env.model.set_attr(best_score=str(state['best_score']),
                               best_score_train=str(state['best_score_train']),
                               best_iteration=str(state['best_iteration']),
                               best_msg=state['best_msg'])
    elif env.iteration - best_iteration >= stopping_rounds:
        best_msg = state['best_msg']
        if verbose and env.rank == 0:
            msg = "Stopping. Best iteration:\n{}\n\n"
            rabit.tracker_print(msg.format(best_msg))
        raise EarlyStopException(best_iteration)
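The free variables above (state, stopping_rounds, max_overfit, symetric_overfit, maximize, verbose, and the init helper) are evidently closed over by an enclosing factory, in the style of the legacy xgboost.callback.early_stop API (pre-1.3). A minimal sketch of what that wrapper might look like; the name early_stop_overfit and the defaults are assumptions:

from xgboost import rabit                      # legacy (pre-1.3) module
from xgboost.core import EarlyStopException
from xgboost.callback import _fmt_metric       # private legacy helper

def early_stop_overfit(stopping_rounds, max_overfit=0.0,
                       symetric_overfit=False, maximize=False, verbose=True):
    """Stop when no round improves the eval score while keeping the
    train/eval gap within max_overfit (hypothetical factory)."""
    state = {}  # shared mutable state, filled in by init() on first call

    def init(env):
        ...  # populate state, as in Example #4 below

    def callback(env):
        ...  # the function shown above

    return callback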
Example #2
def callback(env):
    """Early-stopping callback with a warm-up period."""
    # start_round and eval_idx come from the enclosing factory: no early
    # stopping is considered before start_round, and eval_idx selects which
    # entry of evaluation_result_list drives the decision.
    if env.iteration < start_round:
        return

    score = env.evaluation_result_list[eval_idx][1]
    if not state:
        init(env)
    best_score = state['best_score']
    best_iteration = state['best_iteration']
    maximize_score = state['maximize_score']
    if (maximize_score and score > best_score) or \
            (not maximize_score and score < best_score):
        msg = '[%d]\t%s' % (env.iteration, '\t'.join(
            [_fmt_metric(x) for x in env.evaluation_result_list]))
        state['best_msg'] = msg
        state['best_score'] = score
        state['best_iteration'] = env.iteration
        # Save the best results as model attributes so they survive
        # checkpointing.
        if env.model is not None:
            env.model.set_attr(best_score=str(state['best_score']),
                               best_iteration=str(state['best_iteration']),
                               best_msg=state['best_msg'])
    elif env.iteration - best_iteration >= stopping_rounds:
        best_msg = state['best_msg']
        if verbose and env.rank == 0:
            msg = "Stopping. Best iteration:\n{}\n\n"
            rabit.tracker_print(msg.format(best_msg))
        raise xgb.core.EarlyStopException(best_iteration)
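As with Example #1, this callback would be produced by a factory (here with extra start_round and eval_idx parameters) and passed to the trainer through the legacy callbacks argument. A minimal usage sketch; the factory name early_stop_after is hypothetical and X_train/y_train etc. are placeholder data:

import xgboost as xgb

dtrain = xgb.DMatrix(X_train, label=y_train)   # placeholder data
dvalid = xgb.DMatrix(X_valid, label=y_valid)

bst = xgb.train(
    params, dtrain, num_boost_round=1000,
    evals=[(dtrain, 'train'), (dvalid, 'valid')],
    # hypothetical factory wrapping the callback above: ignore the first
    # 100 rounds, then early-stop on the 'valid' metric (eval_idx=1)
    callbacks=[early_stop_after(stopping_rounds=50, start_round=100,
                                eval_idx=1)])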
Example #3
def callback(env):
    """Print the evaluation results every `period` iterations."""
    # period, show_stdv and start_iteration come from the enclosing factory,
    # in the style of the legacy xgboost.callback.print_evaluation API.
    if env.rank != 0 or not env.evaluation_result_list or not period:
        return
    i = env.iteration
    # Always print the first and last iterations as well as every period-th.
    if i % period == 0 or i + 1 == env.begin_iteration or i + 1 == env.end_iteration:
        msg = '\t'.join([format_metric(x, show_stdv) for x in env.evaluation_result_list])
        rabit.tracker_print('[%d]\t%s\n' % (i + start_iteration, msg))
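This is the logging counterpart to the early-stopping callbacks: it mirrors the legacy xgboost.callback.print_evaluation API, with start_iteration as an assumed extra offset for resumed training and format_metric as the snippet's own formatting helper. A minimal sketch of the enclosing factory, under those assumptions:

def print_evaluation(period=1, show_stdv=True, start_iteration=0):
    """Return a callback that logs evaluation results every `period` rounds."""
    def callback(env):
        ...  # the function shown above
    return callback

# e.g. log every 10 rounds:
#   xgb.train(params, dtrain, evals=evals,
#             callbacks=[print_evaluation(period=10)])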
Example #4
def init(env):
    """Lazily initialize the shared early-stopping state on the first call."""
    bst = env.model

    if not env.evaluation_result_list:
        raise ValueError(
            'For early stopping you need at least one set in evals.')
    if len(env.evaluation_result_list) > 1 and verbose:
        msg = ("Multiple eval metrics have been passed: "
               "'{0}' will be used for early stopping.\n\n")
        rabit.tracker_print(msg.format(env.evaluation_result_list[-1][0]))
    # Infer whether the metric should be maximized when the caller did not
    # say so explicitly: ranking/AUC-style metrics are higher-is-better.
    maximize_metrics = ('auc', 'aucpr', 'map', 'ndcg')
    maximize_at_n_metrics = ('auc@', 'aucpr@', 'map@', 'ndcg@')
    maximize_score = maximize
    metric_label = env.evaluation_result_list[-1][0]
    metric = metric_label.split('-', 1)[-1]  # strip the 'evalset-' prefix

    if any(metric.startswith(x) for x in maximize_at_n_metrics):
        maximize_score = True

    if any(metric.split(":")[0] == x for x in maximize_metrics):
        maximize_score = True

    if verbose and env.rank == 0:
        msg = "Will train until {} hasn't improved in {} rounds.\n"
        rabit.tracker_print(msg.format(metric_label, stopping_rounds))

    state['maximize_score'] = maximize_score
    state['best_iteration'] = 0
    state['best_msg'] = ''
    if maximize_score:
        state['best_score'] = float('-inf')
        state['best_score_train'] = float('-inf')
    else:
        state['best_score'] = float('inf')
        state['best_score_train'] = float('inf')

    if bst is not None:
        if bst.attr('best_score') is not None:
            # Resume from a checkpointed booster: restore its best results.
            state['best_score'] = float(bst.attr('best_score'))
            state['best_iteration'] = int(bst.attr('best_iteration'))
            state['best_msg'] = bst.attr('best_msg')
        else:
            bst.set_attr(best_iteration=str(state['best_iteration']))
            bst.set_attr(best_score=str(state['best_score']))
            bst.set_attr(best_score_train=str(state['best_score_train']))
    else:
        # During cross-validation there is no single booster to annotate.
        assert env.cvfolds is not None
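The resume branch works because the best results are stored as plain string attributes on the booster. After training, or after reloading a checkpoint, the same values can be read back; a minimal sketch, assuming bst is a trained Booster and dtest a placeholder DMatrix:

# Attributes are stored as strings, so convert on the way out.
best_iteration = int(bst.attr('best_iteration'))
best_score = float(bst.attr('best_score'))
print(bst.attr('best_msg'))

# Predict with only the trees up to the best iteration (legacy ntree_limit
# API; assumes one tree per boosting round).
preds = bst.predict(dtest, ntree_limit=best_iteration + 1)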
Example #5
def init(env):
    """Lazily initialize the shared early-stopping state on the first call."""
    bst = env.model

    if not env.evaluation_result_list:
        raise ValueError(
            "For early stopping you need at least one set in evals.")
    if len(env.evaluation_result_list) > 1 and verbose:
        msg = ("Multiple eval metrics have been passed: "
               "'{0}' will be used for early stopping.\n\n")
        rabit.tracker_print(
            msg.format(env.evaluation_result_list[eval_idx][0]))
    # Infer whether the metric should be maximized when the caller did not
    # say so explicitly: ranking/AUC-style metrics are higher-is-better.
    maximize_metrics = ("auc", "map", "ndcg")
    maximize_at_n_metrics = ("auc@", "map@", "ndcg@")
    maximize_score = maximize
    metric_label = env.evaluation_result_list[eval_idx][0]
    metric = metric_label.split("-", 1)[-1]  # strip the 'evalset-' prefix

    if any(metric.startswith(x) for x in maximize_at_n_metrics):
        maximize_score = True

    if any(metric.split(":")[0] == x for x in maximize_metrics):
        maximize_score = True

    if verbose and env.rank == 0:
        msg = "Will train until {} hasn't improved in {} rounds.\n"
        rabit.tracker_print(msg.format(metric_label, stopping_rounds))

    state["maximize_score"] = maximize_score
    state["best_iteration"] = 0
    if maximize_score:
        state["best_score"] = float("-inf")
    else:
        state["best_score"] = float("inf")

    if bst is not None:
        if bst.attr("best_score") is not None:
            # Resume from a checkpointed booster: restore its best results.
            state["best_score"] = float(bst.attr("best_score"))
            state["best_iteration"] = int(bst.attr("best_iteration"))
            state["best_msg"] = bst.attr("best_msg")
        else:
            bst.set_attr(best_iteration=str(state["best_iteration"]))
            bst.set_attr(best_score=str(state["best_score"]))
    else:
        # During cross-validation there is no single booster to annotate.
        assert env.cvfolds is not None
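Examples #2 and #5 read as the two halves of a single factory: both close over the same state dict, and the callback calls init(env) lazily on its first pass, when state is still empty. A minimal sketch of the assembled factory; the name early_stop_at and the parameter defaults are assumptions:

import xgboost as xgb
from xgboost import rabit  # legacy (pre-1.3) module

def early_stop_at(stopping_rounds, eval_idx=-1, start_round=0,
                  maximize=False, verbose=True):
    state = {}  # shared between init() and callback()

    def init(env):
        ...  # Example #5 above

    def callback(env):
        ...  # Example #2 above

    return callback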