Example #1
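The function is shown without its imports. A plausible preamble, assuming the older auto-sklearn / libscores layout (the exact module paths are an assumption, not shown in the source):

import numpy as np

# Assumed import paths: in older auto-sklearn releases the task constants live in
# autosklearn.constants, and the metric helpers (a_metric, r2_metric, bac_metric, ...,
# normalize_array, sanitize_array) in its metrics package.
from autosklearn.constants import (BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION,
                                   REGRESSION, REGRESSION_TASKS, TASK_TYPES)
from autosklearn.metrics import classification_metrics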
def calculate_score(solution, prediction, task_type, metric, num_classes,
                    all_scoring_functions=False):
    if task_type == MULTICLASS_CLASSIFICATION:
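        # Turn integer class labels into a one-hot indicator matrix so the
        # multiclass solution has the same shape as the probability predictions.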
        solution_binary = np.zeros((prediction.shape[0], num_classes))
        for i in range(solution_binary.shape[0]):
            label = solution[i]
            solution_binary[i, label] = 1
        solution = solution_binary

    elif task_type in [BINARY_CLASSIFICATION, REGRESSION]:
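        # Reshape 1-D targets to a column vector so they match the prediction shape.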
        if len(solution.shape) == 1:
            solution = solution.reshape((-1, 1))

    if task_type not in TASK_TYPES:
        raise NotImplementedError(task_type)


    # TODO: libscores needs fixing
    scoring_func = getattr(classification_metrics, metric)
    if solution.shape != prediction.shape:
        raise ValueError('Solution shape %s != prediction shape %s' %
                         (solution.shape, prediction.shape))

    if all_scoring_functions:
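        # Compute every metric available for this task type and return them as a dict.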
        score = dict()
        if task_type in REGRESSION_TASKS:
            cprediction = sanitize_array(prediction)
            score['a_metric'] = a_metric(solution, cprediction,
                                         task=task_type)
            score['r2_metric'] = r2_metric(solution, cprediction,
                                           task=task_type)
        else:
            csolution, cprediction = normalize_array(solution, prediction)
            score['bac_metric'] = bac_metric(csolution, cprediction,
                                             task=task_type)
            score['auc_metric'] = auc_metric(csolution, cprediction,
                                             task=task_type)
            score['f1_metric'] = f1_metric(csolution, cprediction,
                                           task=task_type)
            score['pac_metric'] = pac_metric(csolution, cprediction,
                                             task=task_type)
            score['acc_metric'] = acc_metric(csolution, cprediction,
                                             task=task_type)

    else:
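        # Compute only the single metric requested by name.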
        if task_type in REGRESSION_TASKS:
            cprediction = sanitize_array(prediction)
            score = scoring_func(solution, cprediction, task=task_type)
        else:
            csolution, cprediction = normalize_array(solution, prediction)
            score = scoring_func(csolution, cprediction, task=task_type)
    return score
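A minimal usage sketch for the function above. The constant and metric names are taken from the snippet itself; the data and the call below are invented for illustration, not code from the source.

# Hypothetical call on synthetic multiclass data (values, shapes and the metric
# name are illustrative only).
y_true = np.array([0, 2, 1, 2])            # integer class labels
y_proba = np.array([[0.7, 0.2, 0.1],       # per-class probability predictions
                    [0.1, 0.3, 0.6],
                    [0.2, 0.6, 0.2],
                    [0.1, 0.1, 0.8]])
score = calculate_score(y_true, y_proba, MULTICLASS_CLASSIFICATION,
                        metric='acc_metric', num_classes=3)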
Example #2
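            # Derive the base name of the prediction file (strip directory and extension).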
            predict_name = predict_file[-predict_file[::-1].index(filesep):
                                        -predict_file[::-1].index('.') - 1]
            # Read the solution and prediction values into numpy arrays
            solution = read_array(solution_file)
            prediction = read_array(predict_file)
            if solution.shape != prediction.shape:
                raise ValueError(
                    'Bad prediction shape {}'.format(prediction.shape))

            try:
                # Compute the score prescribed by the info file (for regression
                # scores, no normalization)
                if info['metric'] in ('r2_metric', 'a_metric'):
                    # Remove NaN and Inf for regression
                    solution = sanitize_array(solution)
                    prediction = sanitize_array(prediction)
                    score = eval(info['metric'] + '(solution, prediction, "' +
                                 info['task'] + '")')
                else:
                    # Compute version that is normalized (for classification
                    # scores). This does nothing if all values are already in
                    # [0, 1]
                    csolution, cprediction = normalize_array(solution,
                                                             prediction)
                    score = eval(info['metric'] + '(csolution, cprediction, "'
                                 + info['task'] + '")')
                print('======= Set %d (%s): score(%s)=%0.12f =======' %
                      (set_num, predict_name.capitalize(), score_name, score))
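The score dispatch above builds a call string and runs it through eval. An equivalent lookup by name, sketched below outside the surrounding loop and try block, avoids eval; treating libscores as the module that holds the metric functions and helpers is an assumption (the source only hints at it in a comment).

# Hedged sketch: name-based metric lookup instead of eval (module name assumed).
import libscores  # assumed to define a_metric, r2_metric, bac_metric, ...
from libscores import normalize_array, sanitize_array  # assumed helpers

metric_func = getattr(libscores, info['metric'])   # e.g. 'bac_metric' -> bac_metric
if info['metric'] in ('r2_metric', 'a_metric'):
    # Regression metrics: remove NaN/Inf, no normalization.
    score = metric_func(sanitize_array(solution), sanitize_array(prediction),
                        info['task'])
else:
    # Classification metrics: normalize both arrays into [0, 1] first.
    csolution, cprediction = normalize_array(solution, prediction)
    score = metric_func(csolution, cprediction, info['task'])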