Ejemplo n.º 1
0
def test_scores_w_folds():
    """[SuperLearner] test scoring with folds."""

    # One score list per (preprocessing case, estimator) pair.
    scores = {'no__null': [], 'no__offs': [], 'sc__offs': []}

    # Walk each test fold; columns of F1 line up with the
    # (case, estimator) iteration order below.
    for _, test_idx in FoldIndex(FOLDS, X1).generate(as_array=True):
        column = 0
        for case in sorted(PREPROCESSING):
            for est_name, _ in ESTIMATORS[case]:
                key = '%s__%s' % (case, est_name)
                fold_score = rmse(y1[test_idx], F1[test_idx][:, column])
                scores[key].append(fold_score)
                column += 1
Ejemplo n.º 2
0
def test_scores_w_folds():
    """[SuperLearner] test scoring with folds."""

    # Score lists keyed by the naming scheme: 'no' case -> '<est>-1',
    # any other case -> '<case>.<est>-2'.
    scores = {'null-1': [], 'offs-1': [], 'sc.offs-2': [], 'sc.null-2': []}

    # Columns of F1 follow the sorted (case, estimator) iteration order.
    for _, test_idx in FoldIndex(FOLDS, X1).generate(as_array=True):
        column = 0
        for case in sorted(PREPROCESSING):
            for est_name, _ in sorted(ESTIMATORS[case]):
                fold_score = rmse(y1[test_idx], F1[test_idx][:, column])
                if case == 'no':
                    key = '%s-1' % est_name
                else:
                    key = '%s.%s-2' % (case, est_name)
                scores[key].append(fold_score)
                column += 1
Ejemplo n.º 3
0
def test_scores_wo_folds():
    """[SuperLearner] test scoring without folds.

    Computes per-estimator RMSE over each test fold, averages the
    per-fold scores, and checks they match the mean scores the fitted
    ensemble recorded in ``ens2.data['score-m']``.
    """
    scores = dict()
    for _, tei in FoldIndex(FOLDS, X2).generate(as_array=True):
        # Columns of F2 follow the sorted estimator iteration order.
        col = 0
        for est_name, _ in sorted(ECM):
            s = rmse(y2[tei], F2[tei][:, col])

            # Fix: replaced the non-idiomatic `if not est_name in scores`
            # membership test with setdefault, which also removes the
            # two-step check-then-create.
            scores.setdefault(est_name, []).append(s)

            col += 1

    # Collapse each estimator's per-fold scores to their mean.
    for k in scores:
        scores[k] = np.mean(scores[k])

    # The ensemble stores mean scores under 'layer-1/<estimator>'.
    for k in scores:
        assert scores[k] == ens2.data['score-m']['layer-1/%s' % k]
Ejemplo n.º 4
0
    safe_print()

    for size in sizes:
        n = int(np.floor(size / 2))

        X, y = make_friedman1(n_samples=size, random_state=SEED)

        safe_print('%6i' % n, end=' | ')
        for name in sorted(names):
            e = clone(ESTIMATORS[names[name]])
            t0 = time()
            e.fit(X[:n], y[:n])
            t1 = time() - t0
            times[names[name]].append(t1)

            s = rmse(y[n:], e.predict(X[n:]))
            scores[names[name]].append(s)

            safe_print('%8.2f' % (s), end=' | ', flush=True)

        safe_print()

    safe_print('\nFIT TIMES')
    safe_print('%6s' % 'size', end=' | ')

    for name in sorted(names):
        safe_print('%s' % names[name], end=' | ')
    safe_print()

    for i, size in enumerate(sizes):
        n = int(np.floor(size / 2))
Ejemplo n.º 5
0
def test_rmse():
    """[Metrics] rmse."""
    # Pin the rmse of the fixture predictions to a known reference value.
    expected = np.array(4.5276925690687087)
    observed = np.array(metrics.rmse(y, p))
    np.testing.assert_equal(observed, expected)
Ejemplo n.º 6
0
        q = int(np.floor(s / 2))

        print('%11i' % s, end=" ", flush=True)

        X, y = make_friedman1(n_samples=s, n_features=COLS, random_state=SEED)

        # Iterate over ensembles with given number of cores
        for e in ens:
            name = e.__class__.__name__
            e = clone(e)

            t0 = perf_counter()
            e.fit(X[:q], y[:q])
            t1 = perf_counter() - t0

            sc = rmse(y[q:], e.predict(X[q:]))

            times[name].append(t1)
            scores[name].append(sc)

            print('%s : %.3f (%6.2fs) |' % (name, sc, t1), end=" ", flush=True)
        print()

    print_time(ts, "Benchmark done")

    if PLOT:
        try:
            import matplotlib.pyplot as plt

            plt.ion()
            print("Plotting results...", flush=True)