Example #1
0
def test_symmetry():
    """Test the symmetry of score and loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)

    # Every metric must belong to one of the three groups; otherwise
    # ALL_METRICS is out of sync with the per-group dictionaries.
    covered = (set(SYMETRIC_METRICS)
               | set(NOT_SYMETRIC_METRICS)
               | set(THRESHOLDED_METRICS))
    assert_equal(covered, set(ALL_METRICS))

    # The symmetric / not-symmetric groups must be disjoint.
    assert_equal(set(SYMETRIC_METRICS) & set(NOT_SYMETRIC_METRICS), set())

    # Swapping the arguments must leave symmetric metrics unchanged...
    for name, metric in SYMETRIC_METRICS.items():
        assert_equal(metric(y_true, y_pred), metric(y_pred, y_true),
                     msg="%s is not symetric" % name)

    # ...and must change the value of not-symmetric metrics.
    for name, metric in NOT_SYMETRIC_METRICS.items():
        assert_true(metric(y_true, y_pred) != metric(y_pred, y_true),
                    msg="%s seems to be symetric" % name)

    # Deprecated metrics are still expected to be symmetric; recording the
    # warnings keeps the deprecation messages quiet.
    with warnings.catch_warnings(record=True):
        assert_equal(zero_one(y_true, y_pred), zero_one(y_pred, y_true))
        assert_equal(zero_one(y_true, y_pred, normalize=False),
                     zero_one(y_pred, y_true, normalize=False))
        assert_equal(zero_one_score(y_true, y_pred),
                     zero_one_score(y_pred, y_true))
def test_symmetry():
    """Test the symmetry of score and loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)

    # Sanity check: the two groups together cover every metric...
    assert_equal(set(SYMETRIC_METRICS) | set(NOT_SYMETRIC_METRICS),
                 set(ALL_METRICS))
    # ...and do not overlap.
    assert_equal(set(SYMETRIC_METRICS) & set(NOT_SYMETRIC_METRICS), set())

    # A symmetric metric gives the same value when its arguments are swapped.
    for metric_name, metric_fn in SYMETRIC_METRICS.items():
        assert_equal(metric_fn(y_true, y_pred),
                     metric_fn(y_pred, y_true),
                     msg="%s is not symetric" % metric_name)

    # A not-symmetric metric must give a different value after the swap.
    for metric_name, metric_fn in NOT_SYMETRIC_METRICS.items():
        assert_true(metric_fn(y_true, y_pred) != metric_fn(y_pred, y_true),
                    msg="%s seems to be symetric" % metric_name)

    # Deprecated metrics, with their deprecation warnings suppressed.
    with warnings.catch_warnings(record=True):
        assert_equal(zero_one(y_true, y_pred), zero_one(y_pred, y_true))
        assert_equal(zero_one(y_true, y_pred, normalize=False),
                     zero_one(y_pred, y_true, normalize=False))
        assert_equal(zero_one_score(y_true, y_pred),
                     zero_one_score(y_pred, y_true))
def test_losses():
    """Test loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)
    n_samples = y_true.shape[0]
    n_classes = np.size(unique_labels(y_true))

    # Classification
    # --------------
    # BUG FIX: `record` is a keyword-only parameter of catch_warnings in
    # Python 3, so the positional call catch_warnings(True) raises TypeError.
    with warnings.catch_warnings(record=True):
        # Throw deprecated warning
        assert_equal(zero_one(y_true, y_pred), 13)
        assert_almost_equal(zero_one(y_true, y_pred, normalize=True),
                            13 / float(n_samples), 2)

    assert_almost_equal(zero_one_loss(y_true, y_pred),
                        13 / float(n_samples), 2)
    assert_equal(zero_one_loss(y_true, y_pred, normalize=False), 13)
    assert_almost_equal(zero_one_loss(y_true, y_true), 0.0, 2)
    assert_almost_equal(zero_one_loss(y_true, y_true, normalize=False), 0, 2)

    assert_almost_equal(hamming_loss(y_true, y_pred),
                        2 * 13. / (n_samples * n_classes), 2)

    # Accuracy is the complement of the (normalized) zero-one loss.
    assert_equal(accuracy_score(y_true, y_pred),
                 1 - zero_one_loss(y_true, y_pred))

    assert_equal(accuracy_score(y_true, y_pred, normalize=False),
                 n_samples - zero_one_loss(y_true, y_pred, normalize=False))

    # BUG FIX: same keyword-only `record` issue as above.
    with warnings.catch_warnings(record=True):
        # Throw deprecated warning
        assert_equal(zero_one_score(y_true, y_pred),
                     1 - zero_one_loss(y_true, y_pred))

    # Regression
    # ----------
    assert_almost_equal(mean_squared_error(y_true, y_pred),
                        12.999 / n_samples, 2)
    assert_almost_equal(mean_squared_error(y_true, y_true),
                        0.00, 2)

    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    assert_almost_equal(mean_absolute_error(y_true, y_pred),
                        12.999 / n_samples, 2)
    assert_almost_equal(mean_absolute_error(y_true, y_true), 0.00, 2)

    assert_almost_equal(explained_variance_score(y_true, y_pred), -0.04, 2)
    assert_almost_equal(explained_variance_score(y_true, y_true), 1.00, 2)
    assert_equal(explained_variance_score([0, 0, 0], [0, 1, 1]), 0.0)

    assert_almost_equal(r2_score(y_true, y_pred), -0.04, 2)
    assert_almost_equal(r2_score(y_true, y_true), 1.00, 2)
    assert_equal(r2_score([0, 0, 0], [0, 0, 0]), 1.0)
    assert_equal(r2_score([0, 0, 0], [0, 1, 1]), 0.0)
Example #4
0
def test_losses():
    """Test loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)
    num_samples = y_true.shape[0]
    num_classes = np.size(unique_labels(y_true))

    # Classification
    # --------------
    with warnings.catch_warnings(record=True):
        # Deprecated metric; recording suppresses the deprecation warning.
        assert_equal(zero_one(y_true, y_pred), 11)
        assert_almost_equal(zero_one(y_true, y_pred, normalize=True),
                            11 / float(num_samples), 2)

    assert_almost_equal(zero_one_loss(y_true, y_pred),
                        11 / float(num_samples), 2)
    assert_equal(zero_one_loss(y_true, y_pred, normalize=False), 11)
    assert_almost_equal(zero_one_loss(y_true, y_true), 0.0, 2)
    assert_almost_equal(zero_one_loss(y_true, y_true, normalize=False), 0, 2)

    assert_almost_equal(hamming_loss(y_true, y_pred),
                        2 * 11. / (num_samples * num_classes), 2)

    # Accuracy is the complement of the (normalized) zero-one loss.
    assert_equal(accuracy_score(y_true, y_pred),
                 1 - zero_one_loss(y_true, y_pred))
    assert_equal(accuracy_score(y_true, y_pred, normalize=False),
                 num_samples - zero_one_loss(y_true, y_pred, normalize=False))

    with warnings.catch_warnings(record=True):
        # zero_one_score is the deprecated spelling of accuracy.
        assert_equal(zero_one_score(y_true, y_pred),
                     1 - zero_one_loss(y_true, y_pred))

    # Regression
    # ----------
    assert_almost_equal(mean_squared_error(y_true, y_pred),
                        10.999 / num_samples, 2)
    assert_almost_equal(mean_squared_error(y_true, y_true), 0.00, 2)

    # mean_absolute_error equals mean_squared_error on a binary problem.
    assert_almost_equal(mean_absolute_error(y_true, y_pred),
                        10.999 / num_samples, 2)
    assert_almost_equal(mean_absolute_error(y_true, y_true), 0.00, 2)

    assert_almost_equal(explained_variance_score(y_true, y_pred), 0.16, 2)
    assert_almost_equal(explained_variance_score(y_true, y_true), 1.00, 2)
    assert_equal(explained_variance_score([0, 0, 0], [0, 1, 1]), 0.0)

    assert_almost_equal(r2_score(y_true, y_pred), 0.12, 2)
    assert_almost_equal(r2_score(y_true, y_true), 1.00, 2)
    assert_equal(r2_score([0, 0, 0], [0, 0, 0]), 1.0)
    assert_equal(r2_score([0, 0, 0], [0, 1, 1]), 0.0)
Example #5
0
def test_symmetry():
    """Test the symmetry of score and loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)

    # Symmetric metrics: the argument order must not matter.
    forward = zero_one(y_true, y_pred)
    backward = zero_one(y_pred, y_true)
    assert_equal(forward, backward)
    assert_almost_equal(mean_squared_error(y_true, y_pred),
                        mean_squared_error(y_pred, y_true))

    # Not symmetric: swapping the arguments must change the value.
    assert_true(explained_variance_score(y_true, y_pred)
                != explained_variance_score(y_pred, y_true))
    assert_true(r2_score(y_true, y_pred) != r2_score(y_pred, y_true))
def test_symmetry():
    """Test the symmetry of score and loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)

    # Symmetric metrics are unchanged when the arguments are swapped.
    assert_equal(zero_one(y_true, y_pred), zero_one(y_pred, y_true))
    assert_almost_equal(mean_squared_error(y_true, y_pred),
                        mean_squared_error(y_pred, y_true))

    # Non-symmetric metrics must give different values after the swap.
    evs_ab = explained_variance_score(y_true, y_pred)
    evs_ba = explained_variance_score(y_pred, y_true)
    assert_true(evs_ab != evs_ba)
    assert_true(r2_score(y_true, y_pred) != r2_score(y_pred, y_true))
def benchmark(clf):
    """Fit *clf* on the training split, predict on the test split and
    return (error rate, training time, prediction time).

    Relies on the module-level X_train/y_train/X_test/y_test splits.
    """
    start = time()
    clf.fit(X_train, y_train)
    train_time = time() - start

    start = time()
    pred = clf.predict(X_test)
    test_time = time() - start

    # Fraction of misclassified test samples.
    err = metrics.zero_one(y_test, pred) / float(pred.shape[0])
    return err, train_time, test_time
def benchmark(clf):
    """Train and evaluate *clf*; return (error rate, train time, test time)."""
    tick = time()
    clf.fit(X_train, y_train)
    elapsed_train = time() - tick

    tick = time()
    pred = clf.predict(X_test)
    elapsed_test = time() - tick

    # Misclassification count normalized by the number of test samples.
    error_rate = metrics.zero_one(y_test, pred) / float(pred.shape[0])
    return error_rate, elapsed_train, elapsed_test
Example #9
0
def num_diff_w_perms(l1, l2):
    """Compute min_{p in perm} |p(l1)-l2|_1

    This loops over all permutations so could be slow for many groups"""
    label = list(set(l1))

    # BUG FIX: np.Inf was removed in NumPy 2.0; np.inf is the supported name.
    min_diff = np.inf
    for p in permutations(label):
        # Relabel l1 through the permutation p before comparing with l2.
        l1p = [p[label.index(l)] for l in l1]
        min_diff = min(min_diff, metrics.zero_one(l1p, l2))
    return min_diff
Example #10
0
def num_diff_w_perms(l1, l2):
    """Compute min_{p in perm} |p(l1)-l2|_1

    This loops over all permutations so could be slow for many groups"""
    label = list(set(l1))

    # BUG FIX: np.Inf was removed in NumPy 2.0; np.inf is the supported name.
    min_diff = np.inf
    for p in permutations(label):
        # Relabel l1 through the permutation p before comparing with l2.
        l1p = [p[label.index(l)] for l in l1]
        min_diff = min(min_diff, metrics.zero_one(l1p, l2))
    return min_diff
def test_symmetry():
    """Test the symmetry of score and loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)

    # Symmetric metric: swapping the arguments must not change the value.
    for metric in [accuracy_score,
                   lambda y1, y2: accuracy_score(y1, y2, normalize=False),
                   zero_one_loss,
                   lambda y1, y2: zero_one_loss(y1, y2, normalize=False),
                   hamming_loss,
                   f1_score,
                   matthews_corrcoef,
                   mean_squared_error,
                   mean_absolute_error]:

        assert_equal(metric(y_true, y_pred),
                     metric(y_pred, y_true),
                     msg="%s is not symetric" % metric)

    # Not symmetric metrics: swapping the arguments must change the value.
    for metric in [precision_score,
                   recall_score,
                   lambda y1, y2: fbeta_score(y1, y2, beta=0.5),
                   lambda y1, y2: fbeta_score(y1, y2, beta=2),
                   explained_variance_score,
                   r2_score]:

        assert_true(metric(y_true, y_pred) != metric(y_pred, y_true),
                    msg="%s seems to be symetric" % metric)

    # Deprecated metrics
    # BUG FIX: `record` is a keyword-only parameter of catch_warnings in
    # Python 3, so the positional call catch_warnings(True) raises TypeError.
    with warnings.catch_warnings(record=True):
        # Throw deprecated warning
        assert_equal(zero_one(y_true, y_pred),
                     zero_one(y_pred, y_true))

        assert_equal(zero_one(y_true, y_pred, normalize=False),
                     zero_one(y_pred, y_true, normalize=False))

        assert_equal(zero_one_score(y_true, y_pred),
                     zero_one_score(y_pred, y_true))
Example #12
0
def test_symmetry():
    """Test the symmetry of score and loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)

    symmetric = [
        accuracy_score,
        lambda y1, y2: accuracy_score(y1, y2, normalize=False),
        zero_one_loss,
        lambda y1, y2: zero_one_loss(y1, y2, normalize=False),
        hamming_loss,
        f1_score,
        matthews_corrcoef,
        mean_squared_error,
        mean_absolute_error,
    ]
    # Swapping the arguments must not change a symmetric metric...
    for metric in symmetric:
        assert_equal(metric(y_true, y_pred), metric(y_pred, y_true),
                     msg="%s is not symetric" % metric)

    not_symmetric = [
        precision_score,
        recall_score,
        lambda y1, y2: fbeta_score(y1, y2, beta=0.5),
        lambda y1, y2: fbeta_score(y1, y2, beta=2),
        explained_variance_score,
        r2_score,
    ]
    # ...but must change a not-symmetric one.
    for metric in not_symmetric:
        assert_true(metric(y_true, y_pred) != metric(y_pred, y_true),
                    msg="%s seems to be symetric" % metric)

    # Deprecated metrics, with their deprecation warnings suppressed.
    with warnings.catch_warnings(record=True):
        assert_equal(zero_one(y_true, y_pred), zero_one(y_pred, y_true))

        assert_equal(zero_one(y_true, y_pred, normalize=False),
                     zero_one(y_pred, y_true, normalize=False))

        assert_equal(zero_one_score(y_true, y_pred),
                     zero_one_score(y_pred, y_true))
Example #13
0
def test_symmetry():
    """Test the symmetry of score and loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)

    # symmetric
    assert_equal(accuracy_score(y_true, y_pred),
                 accuracy_score(y_pred, y_true))

    # BUG FIX: `record` is a keyword-only parameter of catch_warnings in
    # Python 3, so the positional call catch_warnings(True) raises TypeError.
    with warnings.catch_warnings(record=True):
        # Throw deprecated warning
        assert_equal(zero_one(y_true, y_pred),
                     zero_one(y_pred, y_true))

        assert_almost_equal(zero_one(y_true, y_pred, normalize=False),
                            zero_one(y_pred, y_true, normalize=False), 2)

    assert_equal(zero_one_loss(y_true, y_pred),
                 zero_one_loss(y_pred, y_true))

    assert_equal(zero_one_loss(y_true, y_pred, normalize=False),
                 zero_one_loss(y_pred, y_true, normalize=False))

    # BUG FIX: same keyword-only `record` issue as above.
    with warnings.catch_warnings(record=True):
        # Throw deprecated warning
        assert_equal(zero_one_score(y_true, y_pred),
                     zero_one_score(y_pred, y_true))

    assert_almost_equal(mean_squared_error(y_true, y_pred),
                        mean_squared_error(y_pred, y_true))

    assert_almost_equal(mean_absolute_error(y_true, y_pred),
                        mean_absolute_error(y_pred, y_true))

    # not symmetric
    assert_true(explained_variance_score(y_true, y_pred) !=
                explained_variance_score(y_pred, y_true))
    assert_true(r2_score(y_true, y_pred) !=
                r2_score(y_pred, y_true))
Example #14
0
def test_symmetry():
    """Test the symmetry of score and loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)

    # symmetric
    assert_equal(accuracy_score(y_true, y_pred),
                 accuracy_score(y_pred, y_true))

    # BUG FIX: `record` is a keyword-only parameter of catch_warnings in
    # Python 3, so the positional call catch_warnings(True) raises TypeError.
    with warnings.catch_warnings(record=True):
        # Throw deprecated warning
        assert_equal(zero_one(y_true, y_pred),
                     zero_one(y_pred, y_true))

        assert_almost_equal(zero_one(y_true, y_pred, normalize=False),
                            zero_one(y_pred, y_true, normalize=False), 2)

    assert_equal(zero_one_loss(y_true, y_pred),
                 zero_one_loss(y_pred, y_true))

    assert_equal(zero_one_loss(y_true, y_pred, normalize=False),
                 zero_one_loss(y_pred, y_true, normalize=False))

    # BUG FIX: same keyword-only `record` issue as above.
    with warnings.catch_warnings(record=True):
        # Throw deprecated warning
        assert_equal(zero_one_score(y_true, y_pred),
                     zero_one_score(y_pred, y_true))

    assert_almost_equal(mean_squared_error(y_true, y_pred),
                        mean_squared_error(y_pred, y_true))

    assert_almost_equal(mean_absolute_error(y_true, y_pred),
                        mean_absolute_error(y_pred, y_true))

    # not symmetric
    assert_true(explained_variance_score(y_true, y_pred) !=
                explained_variance_score(y_pred, y_true))
    assert_true(r2_score(y_true, y_pred) !=
                r2_score(y_pred, y_true))
Example #15
0
def test_losses():
    """Test loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)
    n_samples = y_true.shape[0]

    # Classification losses
    assert_equal(zero_one(y_true, y_pred), 13)
    assert_almost_equal(zero_one(y_true, y_pred, normalize=True),
                        13 / float(n_samples), 2)

    # Regression losses; mean_absolute_error and mean_squared_error agree
    # here because it is a binary problem.
    assert_almost_equal(mean_squared_error(y_true, y_pred),
                        12.999 / n_samples, 2)
    assert_almost_equal(mean_squared_error(y_true, y_true), 0.00, 2)
    assert_almost_equal(mean_absolute_error(y_true, y_pred),
                        12.999 / n_samples, 2)
    assert_almost_equal(mean_absolute_error(y_true, y_true), 0.00, 2)

    # Scores
    assert_almost_equal(explained_variance_score(y_true, y_pred), -0.04, 2)
    assert_almost_equal(explained_variance_score(y_true, y_true), 1.00, 2)
    assert_equal(explained_variance_score([0, 0, 0], [0, 1, 1]), 0.0)

    assert_almost_equal(r2_score(y_true, y_pred), -0.04, 2)
    assert_almost_equal(r2_score(y_true, y_true), 1.00, 2)
    assert_equal(r2_score([0, 0, 0], [0, 0, 0]), 1.0)
    assert_equal(r2_score([0, 0, 0], [0, 1, 1]), 0.0)
def test_losses():
    """Test loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)
    num = y_true.shape[0]

    # Classification loss
    assert_equal(zero_one(y_true, y_pred), 13)

    # Regression losses
    assert_almost_equal(mean_squared_error(y_true, y_pred), 12.999 / num, 2)
    assert_almost_equal(mean_squared_error(y_true, y_true), 0.00, 2)

    # Scores: a perfect prediction gives 1.00.
    assert_almost_equal(explained_variance_score(y_true, y_pred), -0.04, 2)
    assert_almost_equal(explained_variance_score(y_true, y_true), 1.00, 2)
    assert_equal(explained_variance_score([0, 0, 0], [0, 1, 1]), 0.0)

    assert_almost_equal(r2_score(y_true, y_pred), -0.04, 2)
    assert_almost_equal(r2_score(y_true, y_true), 1.00, 2)
    assert_equal(r2_score([0, 0, 0], [0, 0, 0]), 1.0)
    assert_equal(r2_score([0, 0, 0], [0, 1, 1]), 0.0)
Example #17
0
def test_losses():
    """Test loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)
    sample_count = y_true.shape[0]

    # The deprecated zero_one counts misclassified samples.
    assert_equal(zero_one(y_true, y_pred), 13)

    # Mean squared error, imperfect and perfect prediction.
    assert_almost_equal(mean_squared_error(y_true, y_pred),
                        12.999 / sample_count, 2)
    assert_almost_equal(mean_squared_error(y_true, y_true), 0.00, 2)

    # Explained variance and R^2 scores.
    assert_almost_equal(explained_variance_score(y_true, y_pred), -0.04, 2)
    assert_almost_equal(explained_variance_score(y_true, y_true), 1.00, 2)
    assert_equal(explained_variance_score([0, 0, 0], [0, 1, 1]), 0.0)

    assert_almost_equal(r2_score(y_true, y_pred), -0.04, 2)
    assert_almost_equal(r2_score(y_true, y_true), 1.00, 2)
    assert_equal(r2_score([0, 0, 0], [0, 0, 0]), 1.0)
    assert_equal(r2_score([0, 0, 0], [0, 1, 1]), 0.0)
Example #18
0
def num_diff_w_perms(l1, l2):
    """Compute min_{p in perm} |p(l1)-l2|_1

    This loops over all permutations so could be slow for many groups.
    (A commented-out Hungarian-algorithm variant was removed as dead code,
    along with the unused locals it referenced.)
    """
    label = list(set(l1))

    # BUG FIX: np.Inf was removed in NumPy 2.0; np.inf is the supported name.
    min_diff = np.inf
    for p in permutations(label):
        # Relabel l1 through the permutation p before comparing with l2.
        l1p = [p[label.index(l)] for l in l1]
        min_diff = min(min_diff, metrics.zero_one(l1p, l2))
    return min_diff