Example #1
def test_prediction_getitem():
    "prediction.__getitem__"
    p = testing.micro_prediction()
    names = ['model2', 'model0']
    p2 = p[names]
    ok_(isinstance(p2, nx.Prediction), 'expecting a prediction')
    ok_(p2.names == names, 'names corrupted')
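These snippets appear to come from numerox's test suite and omit their imports. A plausible preamble, reconstructed from the names the snippets use (an assumption; the original imports are not shown):

# Assumed imports, inferred from the snippets below; module paths are guesses.
import tempfile

import numpy as np
import pandas as pd
from nose.tools import ok_, assert_raises

import numerox as nx
from numerox import testing
from numerox.testing import micro_data, micro_prediction  # some snippets use the bare names
from numerox.testing import assert_data_equal as ade
from numerox.metrics import metrics_per_era, metrics_per_name  # module path assumed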
Example #2
def test_prediction_copies():
    "prediction properties should be copies"
    p = testing.micro_prediction()
    ok_(testing.shares_memory(p, p), "looks like shares_memory failed")
    ok_(testing.shares_memory(p, p.ids), "p.ids should be a view")
    ok_(testing.shares_memory(p, p.y), "p.y should be a view")
    ok_(not testing.shares_memory(p, p.copy()), "should be a copy")
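The view-versus-copy distinction asserted here is the standard NumPy one; testing.shares_memory presumably wraps np.shares_memory (an assumption). A minimal illustration:

# Minimal sketch of the view/copy semantics being tested.
import numpy as np

a = np.arange(4)
print(np.shares_memory(a, a[:2]))     # True: a slice is a view of the same buffer
print(np.shares_memory(a, a.copy()))  # False: a copy owns its own buffer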
Example #3
def test_metrics_per_era():
    "make sure calc_metrics runs"
    d = micro_data()
    p = micro_prediction()
    metrics_per_era(d, p)
    metrics_per_era(d, p, 'yhat')
    metrics_per_era(d, p, 'inner')
    assert_raises(ValueError, metrics_per_era, d, p, 'outer')
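In this snippet the third positional argument is the join method: 'yhat' and 'inner' are accepted, while 'outer' raises ValueError. Example #9 shows what looks like a later variant of the same test, where that positional slot takes a tournament number and the join method is passed by keyword.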
Example #4
def test_metrics_per_name():
    "make sure metrics_per_name runs"
    d = testing.micro_data()
    p = testing.micro_prediction()
    metrics_per_name(d, p, 1)
    metrics_per_name(d, p, 2, join='yhat')
    metrics_per_name(d, p, 3, columns=['sharpe'])
    assert_raises(ValueError, metrics_per_name, d, p, 4, 'data', ['wtf'])
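Here the third positional argument is the tournament number, followed by join and columns; an unknown column name such as 'wtf' raises ValueError.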
Example #5
def test_prediction_iter():
    "test prediction.iter"
    p = testing.micro_prediction()
    names = []
    for pi in p.iter():
        n = pi.names
        ok_(len(n) == 1, 'should only yield a single name')
        names.append(n[0])
    ok_(p.names == names, 'prediction.iter failed')
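Since iter() yields one single-name Prediction per model, in the original name order, the pieces should reassemble with nx.merge_predictions (see Example #14). A plausible round trip, not taken from the original tests:

# Hypothetical: split a prediction by model, then merge it back together.
parts = list(p.iter())                 # one single-name Prediction per model
rebuilt = nx.merge_predictions(parts)  # disjoint names, same rows -> should equal p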
Example #6
def test_prediction_to_csv():
    "make sure prediction.to_csv runs"
    p = testing.micro_prediction()
    with tempfile.NamedTemporaryFile() as temp:
        p['model1'].to_csv(temp.name)
        with testing.HiddenPrints():
            p['model1'].to_csv(temp.name, verbose=True)
        p2 = nx.load_prediction_csv(temp.name, 'model1')
        ade(p2, p['model1'], "prediction corrupted during roundtrip")
    assert_raises(ValueError, p.to_csv, 'unused')
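Note that to_csv is only used on single-name slices such as p['model1']; the last line confirms that calling it on a multi-name prediction raises ValueError.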
Example #7
def test_prediction_save():
    "test prediction.save with mode='a'"
    p = testing.micro_prediction()
    p1 = p['model0']
    p2 = p[['model1', 'model2']]
    with tempfile.NamedTemporaryFile() as temp:
        p1.save(temp.name)
        p2.save(temp.name, mode='a')
        p12 = nx.load_prediction(temp.name)
        ade(p, p12, "prediction corrupted during roundtrip")
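With mode='a' the second save appends its models to the existing file instead of overwriting it, so loading the file recovers the full three-model prediction.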
Example #8
def test_prediction_ynew():
    "test prediction.ynew"
    p = testing.micro_prediction()
    y = p.y.copy()
    y2 = np.random.rand(*y.shape)
    p2 = p.ynew(y2)
    np.testing.assert_array_equal(p2.y, y2, 'prediction.ynew failed')
    assert_raises(ValueError, p.ynew, y2[:3])
    assert_raises(ValueError, p.ynew, y2[:, :2])
    assert_raises(ValueError, p.ynew, y2.reshape(-1))
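ynew replaces the y array wholesale, so the replacement must match the original shape exactly: too few rows, too few columns, or a flattened array all raise ValueError.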
Example #9
def test_metrics_per_era():
    "make sure metrics_per_era runs"
    d = testing.micro_data()
    p = testing.micro_prediction()
    metrics_per_era(d, p, 1)
    metrics_per_era(d, p, 2, join='yhat')
    metrics_per_era(d, p, 3, join='inner')
    assert_raises(ValueError, metrics_per_era, d, p, 4, 'outer')
    with testing.HiddenPrints():
        metrics_per_era(d, p, tournament=5, era_as_str=True)
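The tournament argument can also be passed by keyword; era_as_str=True evidently produces printed output, hence the HiddenPrints context manager.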
Example #10
def test_prediction_performance():
    "make sure prediction.performance runs"
    d = testing.micro_data()
    p = testing.micro_prediction()
    df = p.performance(d)
    ok_(isinstance(df, pd.DataFrame), 'expecting a dataframe')
    p.performance(d, sort_by='auc')
    p.performance(d, sort_by='acc')
    p.performance(d, sort_by='ystd')
    p.performance(d, sort_by='sharpe')
    p.performance(d, sort_by='consis')
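The sort_by keys exercised here are 'auc', 'acc', 'ystd', 'sharpe', and 'consis'.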
Example #11
def test_prediction_roundtrip():
    "save/load roundtrip shouldn't change prediction"
    p = testing.micro_prediction()
    with tempfile.NamedTemporaryFile() as temp:

        p.save(temp.name)
        p2 = nx.load_prediction(temp.name)
        ade(p, p2, "prediction corrupted during roundtrip")

        p.save(temp.name, compress=False)
        p2 = nx.load_prediction(temp.name)
        ade(p, p2, "prediction corrupted during roundtrip")
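The roundtrip is checked twice, once with the default (compressed) save and once with compress=False; both variants must load back unchanged.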
Example #12
def test_prediction_rename():
    "prediction.rename"

    p = testing.micro_prediction()
    rename_dict = {}
    names = []
    original_names = p.names
    for i in range(p.shape[1]):
        key = original_names[i]
        value = 'm_%d' % i
        names.append(value)
        rename_dict[key] = value
    p2 = p.rename(rename_dict)
    ok_(p2.names == names, 'prediction.rename failed')

    p = testing.micro_prediction()
    assert_raises(ValueError, p.rename, 'modelX')

    p = p['model1']
    p2 = p.rename('modelX')
    ok_(p2.names[0] == 'modelX', 'prediction.rename failed')
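So rename takes a mapping when the prediction holds several names, while a bare string is only accepted for a single-name prediction; passing a string to a multi-name prediction raises ValueError.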
Example #13
def test_prediction_correlation():
    "make sure prediction.correlation runs"
    p = testing.micro_prediction()
    with testing.HiddenPrints():
        p.correlation()
Example #14
def test_merge_predictions():
    "test merge_predictions"

    p = testing.micro_prediction()
    assert_raises(ValueError, nx.merge_predictions, [p, p])

    p2 = nx.merge_predictions([p, nx.Prediction()])
    ade(p2, p, 'corruption of merge predictions')

    p1 = testing.micro_prediction([0, 1, 2, 3, 4])
    p2 = testing.micro_prediction([5, 6, 7, 8, 9])
    p12 = nx.merge_predictions([p1, p2])
    ade(p12, p, 'corruption of merge predictions')

    p1 = testing.micro_prediction([0, 1, 2, 3])
    p2 = testing.micro_prediction([4, 5, 6])
    p3 = testing.micro_prediction([7, 8, 9])
    p123 = nx.merge_predictions([p1, p2, p3])
    ade(p123, p, 'corruption of merge predictions')

    p1 = testing.micro_prediction([9, 4, 3, 2])
    p2 = testing.micro_prediction([1, 8, 7])
    p3 = testing.micro_prediction([6, 5, 0])
    p123 = nx.merge_predictions([p1, p2, p3])
    ade(p123, p, 'corruption of merge predictions')

    p1 = testing.micro_prediction([0, 1, 2, 3, 4])
    p11 = p1[['model0', 'model1']]
    p12 = p1['model2']
    p2 = testing.micro_prediction([5, 6, 7, 8, 9])
    p21 = p2['model0']
    p22 = p2[['model1', 'model2']]
    pm = nx.merge_predictions([p11, p21, p22, p12])
    ade(pm, p, 'corruption of merge predictions')
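Taken together these cases pin down the merge rules: merging a prediction with itself (fully overlapping names and rows) raises ValueError, merging with an empty Prediction is a no-op, and pieces with disjoint rows or disjoint names reassemble the original regardless of row order or of how the models are split across pieces.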
Example #15
def test_prediction_originality():
    "make sure prediction.originality runs"
    p = testing.micro_prediction()
    df = p.originality(['model1'])
    ok_(isinstance(df, pd.DataFrame), 'expecting a dataframe')
Example #16
def test_prediction_summary():
    "make sure prediction.summary runs"
    d = testing.micro_data()
    p = testing.micro_prediction()
    df = p['model1'].summary(d)
    ok_(isinstance(df, pd.DataFrame), 'expecting a dataframe')
Example #17
def test_prediction_drop():
    "prediction.drop"
    p = testing.micro_prediction()
    p = p.drop(['model1'])
    ok_(p.names == ['model0', 'model2'], 'prediction.drop failed')
Example #18
def test_prediction_repr():
    "make sure prediction.__repr__() runs"
    p = micro_prediction()
    p.__repr__()
Example #19
def test_prediction_compare():
    "make sure prediction.compare runs"
    d = testing.micro_data()
    p = testing.micro_prediction()
    df = p.compare(d, p)
    ok_(isinstance(df, pd.DataFrame), 'expecting a dataframe')