Code Example #1
    def test_accuracy_metric_4_binary_classification(self):
        # 100% correct
        expected = np.array([0, 1, 1, 1, 0, 0, 1, 1, 1, 0]).reshape((-1, 1))
        prediction = expected.copy()
        score = acc_metric(expected, prediction)
        self.assertEqual(1, score)

        # 100% incorrect
        prediction = (expected.copy() - 1) * -1
        score = acc_metric(expected, prediction)
        self.assertAlmostEqual(-1, score)

        # Random
        prediction = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]).reshape((-1, 1))
        score = acc_metric(expected, prediction)
        self.assertAlmostEqual(0, score)
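The three assertions pin down how acc_metric is normalized: 1 for perfect agreement, -1 for a fully inverted prediction, and roughly 0 at chance level. A minimal sketch of that rescaling, assuming acc_metric reduces to element-wise accuracy mapped through (acc - 0.5) / (1 - 0.5) for binary labels; acc_metric_sketch below is a hypothetical stand-in, not the project's implementation:

import numpy as np

def acc_metric_sketch(solution, prediction):
    # Element-wise accuracy, rescaled so chance level (0.5) maps to 0
    # and perfect agreement maps to 1: (acc - 0.5) / (1 - 0.5).
    accuracy = np.mean(solution == prediction)
    return 2 * accuracy - 1

expected = np.array([0, 1, 1, 1, 0, 0, 1, 1, 1, 0]).reshape((-1, 1))
print(acc_metric_sketch(expected, expected))             # 1.0
print(acc_metric_sketch(expected, (expected - 1) * -1))  # -1.0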
Code Example #2
def calculate_score(solution, prediction, task_type, metric, num_classes,
                    all_scoring_functions=False):
    if task_type == MULTICLASS_CLASSIFICATION:
        # Expand integer class labels into a one-hot (n_samples, num_classes)
        # matrix so the solution's shape matches the prediction's.
        solution_binary = np.zeros((prediction.shape[0], num_classes))
        for i in range(solution_binary.shape[0]):
            label = solution[i]
            solution_binary[i, label] = 1
        solution = solution_binary

    elif task_type in [BINARY_CLASSIFICATION, REGRESSION]:
        if len(solution.shape) == 1:
            solution = solution.reshape((-1, 1))

    if task_type not in TASK_TYPES:
        raise NotImplementedError(task_type)

    # TODO: libscores needs fixing
    scoring_func = getattr(classification_metrics, metric)
    if solution.shape != prediction.shape:
        raise ValueError('Solution shape %s != prediction shape %s' %
                         (solution.shape, prediction.shape))

    if all_scoring_functions:
        score = dict()
        if task_type in REGRESSION_TASKS:
            cprediction = sanitize_array(prediction)
            score['a_metric'] = a_metric(solution, cprediction,
                                         task=task_type)
            score['r2_metric'] = r2_metric(solution, cprediction,
                                           task=task_type)
        else:
            csolution, cprediction = normalize_array(solution, prediction)
            score['bac_metric'] = bac_metric(csolution, cprediction,
                                             task=task_type)
            score['auc_metric'] = auc_metric(csolution, cprediction,
                                             task=task_type)
            score['f1_metric'] = f1_metric(csolution, cprediction,
                                           task=task_type)
            score['pac_metric'] = pac_metric(csolution, cprediction,
                                             task=task_type)
            score['acc_metric'] = acc_metric(csolution, cprediction,
                                             task=task_type)

    else:
        if task_type in REGRESSION_TASKS:
            cprediction = sanitize_array(prediction)
            score = scoring_func(solution, cprediction, task=task_type)
        else:
            csolution, cprediction = normalize_array(solution, prediction)
            score = scoring_func(csolution, cprediction, task=task_type)
    return score
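A hypothetical call, for illustration only: the task constants (BINARY_CLASSIFICATION, TASK_TYPES, REGRESSION_TASKS) and the metric functions are assumed to be importable from the surrounding project. Note that calculate_score reshapes a 1-D solution itself, but the prediction must already have the matching (-1, 1) shape or the shape check raises ValueError:

import numpy as np

# Hypothetical data; 'acc_metric' is resolved on classification_metrics
# by the getattr lookup inside calculate_score.
solution = np.array([0, 1, 1, 0, 1])
prediction = np.array([0, 1, 0, 0, 1]).reshape((-1, 1))

score = calculate_score(solution, prediction,
                        task_type=BINARY_CLASSIFICATION,
                        metric='acc_metric',
                        num_classes=2)
print(score)  # ~0.6: 4 of 5 match, under the 2 * acc - 1 normalization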
Code Example #3
    def test_accuracy_metric_4_multilabel_classification(self):
        # 100% correct
        expected = np.array([[0, 0, 1, 1, 0, 1, 0, 1, 0, 1],
                             [1, 1, 0, 0, 1, 0, 1, 0, 1, 0],
                             [1, 1, 0, 0, 1, 0, 1, 0, 1, 0]])
        prediction = expected.copy()
        score = acc_metric(expected, prediction)
        self.assertEqual(1, score)

        # 100% incorrect
        prediction = (expected.copy() - 1) * -1
        score = acc_metric(expected, prediction)
        self.assertAlmostEqual(-1, score)

        # Pseudorandom
        prediction = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                               [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                               [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]])
        score = acc_metric(expected, prediction)
        self.assertAlmostEqual(-0.0666666666, score)
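The asserted value can be checked by hand: row by row, 6, 4, and 4 of the 10 label entries agree with the prediction, so 14 of the 30 entries are correct in total. Rescaling that element-wise accuracy around the 0.5 chance level, as in the binary test above, gives 2 * (14/30) - 1 = -1/15 ≈ -0.0667, which is exactly the expected score.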