Example #1
def test_hrp_portfolio():
    df = get_data()
    returns = df.pct_change().dropna(how="all")
    w = hrp_portfolio(returns)
    assert isinstance(w, dict)
    assert set(w.keys()) == set(df.columns)
    np.testing.assert_almost_equal(sum(w.values()), 1)
def test_ema_historical_return():
    df = get_data()
    mean = expected_returns.ema_historical_return(df)
    assert isinstance(mean, pd.Series)
    assert list(mean.index) == list(df.columns)
    assert mean.notnull().all()
    assert mean.dtype == "float64"
def test_mean_historical_returns():
    df = get_data()
    mean = expected_returns.mean_historical_return(df)
    assert isinstance(mean, pd.Series)
    assert list(mean.index) == list(df.columns)
    assert mean.notnull().all()
    assert mean.dtype == "float64"
    correct_mean = np.array(
        [
            0.26770284,
            0.3637864,
            0.31709032,
            0.22616723,
            0.49982007,
            0.16888704,
            0.22754479,
            0.14783539,
            0.19001915,
            0.08150653,
            0.12826351,
            0.25797816,
            0.07580128,
            0.16087243,
            0.20510267,
            0.3511536,
            0.38808003,
            0.24635612,
            0.21798433,
            0.28474973,
        ]
    )
    np.testing.assert_array_almost_equal(mean.values, correct_mean)
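
The fixture values above are per-ticker annualised mean returns. A minimal sketch of one annualisation convention (simple daily mean times 252 trading days; a compounded, geometric annualisation is an alternative the library may use), on made-up prices:

# Illustrative only: simple mean-times-frequency annualisation on hypothetical data.
import numpy as np
import pandas as pd

prices = pd.DataFrame(
    {"A": [100.0, 101.0, 102.5, 101.5], "B": [50.0, 49.5, 50.5, 51.0]}
)
daily_returns = prices.pct_change().dropna(how="all")
annualised_mean = daily_returns.mean() * 252  # one convention; compounding is another
print(annualised_mean)
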
def test_sample_cov_real_data():
    df = get_data()
    S = risk_models.sample_cov(df)
    assert S.shape == (20, 20)
    assert S.index.equals(df.columns)
    assert S.index.equals(S.columns)
    assert S.notnull().all().all()
def test_returns_dataframe():
    df = get_data()
    returns_df = df.pct_change().dropna(how="all")
    assert isinstance(returns_df, pd.DataFrame)
    assert returns_df.shape[1] == 20
    assert len(returns_df) == 7125
    assert returns_df.index.is_all_dates
    assert not ((returns_df > 1) & returns_df.notnull()).any().any()
def test_daily_price_returns_dataframe():
    df = get_data()
    returns_df = expected_returns.daily_price_returns(df)
    assert isinstance(returns_df, pd.DataFrame)
    assert returns_df.shape[1] == 20
    assert len(returns_df) == 7125
    assert returns_df.index.is_all_dates
    assert not ((returns_df > 1) & returns_df.notnull()).any().any()
def test_shrunk_covariance_frequency():
    df = get_data()
    cs = risk_models.CovarianceShrinkage(df, frequency=52)
    # if delta = 0, no shrinkage occurs
    shrunk_cov = cs.shrunk_covariance(0)

    S = risk_models.sample_cov(df, frequency=52)
    np.testing.assert_array_almost_equal(shrunk_cov.values, S)
def test_efficient_frontier_init_errors():
    df = get_data()
    mean_returns = df.pct_change().dropna(how="all").mean()
    with pytest.raises(TypeError):
        EfficientFrontier("test", "string")

    with pytest.raises(TypeError):
        EfficientFrontier(mean_returns, mean_returns)
def test_semicovariance():
    df = get_data()
    S = risk_models.semicovariance(df)
    assert S.shape == (20, 20)
    assert S.index.equals(df.columns)
    assert S.index.equals(S.columns)
    assert S.notnull().all().all()
    S2 = risk_models.semicovariance(df, frequency=2)
    pd.testing.assert_frame_equal(S / 126, S2)
def test_single_index():
    df = get_data()
    si = risk_models.SingleIndex(df)
    shrunk_cov = si.shrink()
    assert 0 < si.delta < 1
    assert shrunk_cov.shape == (20, 20)
    assert list(shrunk_cov.index) == list(df.columns)
    assert list(shrunk_cov.columns) == list(df.columns)
    assert not shrunk_cov.isnull().any().any()
def test_constant_correlation():
    df = get_data()
    cc = risk_models.ConstantCorrelation(df)
    shrunk_cov = cc.shrink()
    assert 0 < cc.delta < 1
    assert shrunk_cov.shape == (20, 20)
    assert list(shrunk_cov.index) == list(df.columns)
    assert list(shrunk_cov.columns) == list(df.columns)
    assert not shrunk_cov.isnull().any().any()
def test_oracle_approximating():
    df = get_data()
    cs = risk_models.CovarianceShrinkage(df)
    shrunk_cov = cs.oracle_approximating()
    assert 0 < cs.delta < 1
    assert shrunk_cov.shape == (20, 20)
    assert list(shrunk_cov.index) == list(df.columns)
    assert list(shrunk_cov.columns) == list(df.columns)
    assert not shrunk_cov.isnull().any().any()
def test_ledoit_wolf():
    df = get_data()
    cs = risk_models.CovarianceShrinkage(df)
    shrunk_cov = cs.ledoit_wolf()
    assert 0 < cs.delta < 1
    assert shrunk_cov.shape == (20, 20)
    assert list(shrunk_cov.index) == list(df.columns)
    assert list(shrunk_cov.columns) == list(df.columns)
    assert not shrunk_cov.isnull().any().any()
def test_shrunk_covariance():
    df = get_data()
    cs = risk_models.CovarianceShrinkage(df)
    shrunk_cov = cs.shrunk_covariance(0.2)
    assert cs.delta == 0.2
    assert shrunk_cov.shape == (20, 20)
    assert list(shrunk_cov.index) == list(df.columns)
    assert list(shrunk_cov.columns) == list(df.columns)
    assert not shrunk_cov.isnull().any().any()
def test_min_cov_det():
    df = get_data()
    S = risk_models.min_cov_determinant(df, random_state=8)
    assert S.shape == (20, 20)
    assert S.index.equals(df.columns)
    assert S.index.equals(S.columns)
    assert S.notnull().all().all()
    S2 = risk_models.min_cov_determinant(df, frequency=2, random_state=8)
    pd.testing.assert_frame_equal(S / 126, S2)
def test_exp_cov_matrix():
    df = get_data()
    S = risk_models.exp_cov(df)
    assert S.shape == (20, 20)
    assert S.index.equals(df.columns)
    assert S.index.equals(S.columns)
    assert S.notnull().all().all()
    S2 = risk_models.exp_cov(df, frequency=2)
    pd.testing.assert_frame_equal(S / 126, S2)
def test_exp_cov_limits():
    df = get_data()
    sample_cov = risk_models.sample_cov(df)
    S = risk_models.exp_cov(df)
    assert not np.allclose(sample_cov, S)

    # As span gets larger, it should tend towards sample covariance
    S2 = risk_models.exp_cov(df, span=1e20)
    assert np.abs(S2 - sample_cov).max().max() < 1e-3
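
The span limit checked above follows from the shape of the exponential weights. A small illustration, assuming the usual decay alpha = 2 / (span + 1), showing that the weights flatten towards equal weighting as span grows, which is why the exponential covariance tends to the sample covariance:

# Illustrative only: exponential weights w_t proportional to (1 - alpha)^t.
import numpy as np

def exp_weights(n_obs, span):
    alpha = 2.0 / (span + 1.0)
    w = (1.0 - alpha) ** np.arange(n_obs)[::-1]  # newest observation weighted most
    return w / w.sum()

print(exp_weights(5, span=10))    # clearly decaying
print(exp_weights(5, span=1e6))   # nearly uniform
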
def test_semicovariance_benchmark():
    df = get_data()
    # When the benchmark is very negative, the cov matrix should be zeroes
    S_negative_benchmark = risk_models.semicovariance(df, benchmark=-0.5)
    np.testing.assert_allclose(S_negative_benchmark, 0, atol=1e-4)

    # Increasing the benchmark should increase covariances on average
    S = risk_models.semicovariance(df, benchmark=0)
    S2 = risk_models.semicovariance(df, benchmark=1)
    assert S2.sum().sum() > S.sum().sum()
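
Semicovariance only counts returns that fall below the benchmark, which is why a very negative benchmark yields a (near-)zero matrix and raising the benchmark increases the entries. A rough sketch of that idea (one common convention, not necessarily the library's exact estimator):

# Hypothetical helper: keep only the shortfall below the benchmark, then form an
# annualised average of outer products.
import numpy as np
import pandas as pd

def naive_semicovariance(returns: pd.DataFrame, benchmark: float = 0.0, frequency: int = 252):
    drops = np.minimum(returns - benchmark, 0)      # zero out anything above the benchmark
    return drops.T.dot(drops) / len(returns) * frequency

rets = pd.DataFrame(np.random.default_rng(0).normal(0, 0.01, size=(500, 3)),
                    columns=["A", "B", "C"])
print(naive_semicovariance(rets, benchmark=-0.5).round(6))  # all zeros here
print(naive_semicovariance(rets, benchmark=0.0).round(6))
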
def test_negative_mean_return_real():
    df = get_data()
    e_rets = mean_historical_return(df)
    w = np.array([1 / len(e_rets)] * len(e_rets))
    negative_mu = objective_functions.negative_mean_return(w, e_rets)
    assert isinstance(negative_mu, float)
    assert negative_mu < 0
    assert negative_mu == -w.dot(e_rets)
    assert negative_mu == -(w * e_rets).sum()
    np.testing.assert_almost_equal(-e_rets.sum() / len(e_rets), negative_mu)
def test_mean_historical_returns_type_warning():
    df = get_data()
    mean = expected_returns.mean_historical_return(df)

    with warnings.catch_warnings(record=True) as w:
        mean_from_array = expected_returns.mean_historical_return(np.array(df))
        assert len(w) == 1
        assert issubclass(w[0].category, RuntimeWarning)
        assert str(w[0].message) == "prices are not in a dataframe"

    np.testing.assert_array_almost_equal(mean.values, mean_from_array.values, decimal=6)
def test_shrunk_covariance_extreme_delta():
    df = get_data()
    cs = risk_models.CovarianceShrinkage(df)
    # if delta = 0, no shrinkage occurs
    shrunk_cov = cs.shrunk_covariance(0)
    np.testing.assert_array_almost_equal(
        shrunk_cov.values, risk_models.sample_cov(df))
    # if delta = 1, sample cov does not contribute to shrunk cov
    shrunk_cov = cs.shrunk_covariance(1)
    N = df.shape[1]
    F = np.identity(N) * np.trace(cs.S) / N
    np.testing.assert_array_almost_equal(shrunk_cov.values, F * 252)
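
Both endpoints above are instances of linear shrinkage: a convex combination delta * F + (1 - delta) * S of the sample covariance S and a structured target F. A minimal sketch with the same scaled-identity target the test builds:

# Sketch of linear shrinkage with a scaled-identity target (illustrative data).
import numpy as np

def linear_shrinkage(S: np.ndarray, delta: float) -> np.ndarray:
    N = S.shape[0]
    F = np.identity(N) * np.trace(S) / N   # shrinkage target
    return delta * F + (1 - delta) * S

S = np.array([[0.04, 0.01], [0.01, 0.09]])
print(linear_shrinkage(S, 0.0))  # delta = 0 -> the sample covariance itself
print(linear_shrinkage(S, 1.0))  # delta = 1 -> the target only
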
Example #22
def test_cla_custom_bounds():
    bounds = [(0.01, 0.13), (0.02, 0.11)] * 10
    cla = CLA(*setup_cla(data_only=True), weight_bounds=bounds)
    df = get_data()
    cla.cov_matrix = risk_models.exp_cov(df).values
    w = cla.min_volatility()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(cla.tickers)
    np.testing.assert_almost_equal(cla.weights.sum(), 1)
    assert (0.01 <= cla.weights[::2]).all() and (cla.weights[::2] <= 0.13).all()
    assert (0.02 <= cla.weights[1::2]).all() and (cla.weights[1::2] <= 0.11).all()
def test_max_sharpe_semicovariance():
    df = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(df, benchmark=0)
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    assert set(w.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.2972237362989219, 0.064432672830601, 4.297294313174586))
def test_efficient_return_semicovariance():
    df = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(df, benchmark=0)
    w = ef.efficient_return(0.12)
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    assert set(w.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    assert all([i >= 0 for i in w.values()])
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.11999999997948813, 0.06948386215256849, 1.4391830977949114))
Example #25
def test_max_sharpe_semicovariance():
    df = get_data()
    cla = setup_cla()
    cla.covar = risk_models.semicovariance(df, benchmark=0)
    w = cla.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(cla.tickers)
    np.testing.assert_almost_equal(cla.weights.sum(), 1)
    np.testing.assert_allclose(
        cla.portfolio_performance(),
        (0.3253436657555845, 0.2133353004830236, 1.4281588303044812),
    )
Example #26
def test_max_sharpe_short_semicovariance():
    df = get_data()
    ef = EfficientFrontier(*setup_efficient_frontier(data_only=True),
                           weight_bounds=(-1, 1))
    ef.cov_matrix = risk_models.semicovariance(df, benchmark=0)
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.3564305116656491, 0.07201282488003401, 4.671813836300796),
    )
Example #27
def test_min_cov_det():
    df = get_data()
    S = risk_models.min_cov_determinant(df, random_state=8)
    assert S.shape == (20, 20)
    assert S.index.equals(df.columns)
    assert S.index.equals(S.columns)
    assert S.notnull().all().all()
    # assert risk_models._is_positive_semidefinite(S)
    # Cover that it works on np.ndarray, with a warning
    with pytest.warns(RuntimeWarning):
        S2 = risk_models.min_cov_determinant(df.to_numpy(), random_state=8)
        assert isinstance(S2, pd.DataFrame)
        np.testing.assert_equal(S.to_numpy(), S2.to_numpy())
Example #28
def test_mean_historical_returns_type_warning():
    df = get_data()
    mean = expected_returns.mean_historical_return(df)

    with warnings.catch_warnings(record=True) as w:
        mean_from_array = expected_returns.mean_historical_return(np.array(df))
        assert len(w) == 1
        assert issubclass(w[0].category, RuntimeWarning)
        assert str(w[0].message) == "prices are not in a dataframe"

    np.testing.assert_array_almost_equal(mean.values,
                                         mean_from_array.values,
                                         decimal=6)
Example #29
def test_bl_equal_prior():
    df = get_data()
    S = risk_models.sample_cov(df)

    viewdict = {"AAPL": 0.20, "BBY": -0.30, "BAC": 0, "SBUX": -0.2, "T": 0.131321}
    bl = BlackLittermanModel(S, absolute_views=viewdict, pi="equal")
    np.testing.assert_array_almost_equal(bl.pi, np.ones((20, 1)) * 0.05)

    bl.bl_weights()
    np.testing.assert_allclose(
        bl.portfolio_performance(),
        (0.1877432247395778, 0.3246889329226965, 0.5166274785827545),
    )
Example #30
def test_max_sharpe_short_semicovariance():
    df = get_data()
    ef = EfficientFrontier(*setup_efficient_frontier(data_only=True),
                           weight_bounds=(-1, 1))
    ef.cov_matrix = risk_models.semicovariance(df, benchmark=0)
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.42444834528495234, 0.0898263632679403, 4.50255727350929),
    )
Example #31
def test_max_sharpe_short_semicovariance():
    df = get_data()
    ef = EfficientFrontier(*setup_efficient_frontier(data_only=True),
                           weight_bounds=(-1, 1))
    ef.cov_matrix = risk_models.semicovariance(df, benchmark=0)
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.3907992623559733, 0.0809285460933456, 4.581810501430255),
    )
Example #32
def test_default_omega():
    df = get_data()
    S = risk_models.sample_cov(df)
    views = pd.Series(0.1, index=S.columns)
    bl = BlackLittermanModel(S, Q=views)

    # Check square and diagonal
    assert bl.omega.shape == (len(S), len(S))
    np.testing.assert_array_equal(bl.omega, np.diag(np.diagonal(bl.omega)))

    # In this case, we should have omega = tau * diag(S)
    np.testing.assert_array_almost_equal(np.diagonal(bl.omega),
                                         bl.tau * np.diagonal(S))
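
The default omega checked above matches one common Black-Litterman convention, omega = tau * diag(P Σ Pᵀ), which collapses to tau * diag(S) when P is the identity (one absolute view per asset). A small sketch, assuming that convention:

# Illustrative data; tau and the diag(P S P^T) convention are assumptions here.
import numpy as np

tau = 0.05
S = np.array([[0.04, 0.01], [0.01, 0.09]])
P = np.eye(2)                                   # one absolute view per asset
omega = np.diag(np.diag(tau * P @ S @ P.T))     # keep only the diagonal uncertainty
print(np.allclose(np.diag(omega), tau * np.diag(S)))  # True when P is the identity
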
Example #33
def test_max_sharpe_semicovariance():
    df = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(df, benchmark=0)
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    assert all([i >= 0 for i in w.values()])
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.2732301946250426, 0.06603231922971581, 3.834943215368455),
    )
Example #34
def test_cov_to_corr():
    df = get_data()
    rets = risk_models.returns_from_prices(df).dropna()
    test_corr = risk_models.cov_to_corr(rets.cov())
    pd.testing.assert_frame_equal(test_corr, rets.corr())

    with warnings.catch_warnings(record=True) as w:
        test_corr_numpy = risk_models.cov_to_corr(rets.cov().values)
        assert len(w) == 1
        assert issubclass(w[0].category, RuntimeWarning)
        assert str(w[0].message) == "cov_matrix is not a dataframe"
        np.testing.assert_array_almost_equal(test_corr_numpy,
                                             rets.corr().values)
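
The conversion being tested is the standard rescaling corr_ij = cov_ij / (std_i * std_j), i.e. D⁻¹ S D⁻¹ with D = diag(std). A tiny self-contained sketch of that identity:

# Illustrative data only.
import numpy as np

def cov_to_corr_sketch(cov: np.ndarray) -> np.ndarray:
    std = np.sqrt(np.diag(cov))
    return cov / np.outer(std, std)

cov = np.array([[0.04, 0.012], [0.012, 0.09]])
print(cov_to_corr_sketch(cov))  # unit diagonal, off-diagonals are correlations
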
Example #35
def test_max_sharpe_semicovariance():
    df = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(df, benchmark=0)
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    assert all([i >= 0 for i in w.values()])
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.2762965426962885, 0.07372667096108301, 3.476307004714425),
    )
Example #36
def test_dendrogram_plot():
    df = get_data()
    returns = df.pct_change().dropna(how="all")
    hrp = HRPOpt(returns)
    hrp.optimize()

    ax = plotting.plot_dendrogram(hrp, showfig=False)
    assert len(ax.findobj()) == 185
    assert type(ax.findobj()[0]) == matplotlib.collections.LineCollection

    ax = plotting.plot_dendrogram(hrp, show_tickers=False, showfig=False)
    assert len(ax.findobj()) == 65
    assert type(ax.findobj()[0]) == matplotlib.collections.LineCollection
Example #37
def test_max_sharpe_exp_cov():
    df = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.exp_cov(df)
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    assert all([i >= 0 for i in w.values()])
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.33700887443850647, 0.1807332515488447, 1.7540152225548384),
    )
def test_es_example_weekly():
    df = get_data()
    df = df.resample("W").first()
    mu = expected_returns.mean_historical_return(df, frequency=52)
    historical_rets = expected_returns.returns_from_prices(df).dropna()
    es = EfficientSemivariance(mu, historical_rets, frequency=52)
    es.efficient_return(0.2)
    np.testing.assert_allclose(
        es.portfolio_performance(),
        (0.2000000562544616, 0.07667633475531543, 2.3475307841574087),
        rtol=1e-4,
        atol=1e-4,
    )
Example #39
def test_bl_returns_no_prior():
    df = get_data()
    S = risk_models.sample_cov(df)

    viewdict = {"AAPL": 0.20, "BBY": -0.30, "BAC": 0, "SBUX": -0.2, "T": 0.131321}
    bl = BlackLittermanModel(S, absolute_views=viewdict)
    rets = bl.bl_returns()

    # Make sure it gives the same answer as explicit inverse
    test_rets = np.linalg.inv(
        np.linalg.inv(bl.tau * bl.cov_matrix) + bl.P.T @ np.linalg.inv(bl.omega) @ bl.P
    ) @ (bl.P.T @ np.linalg.inv(bl.omega) @ bl.Q)
    np.testing.assert_array_almost_equal(rets.values.reshape(-1, 1), test_rets)
def test_es_example_short():
    df = get_data()
    mu = expected_returns.mean_historical_return(df)
    historical_rets = expected_returns.returns_from_prices(df).dropna()
    es = EfficientSemivariance(mu, historical_rets, weight_bounds=(-1, 1))
    w = es.efficient_return(0.2, market_neutral=True)
    goog_weight = w["GOOG"]

    historical_rets["GOOG"] -= historical_rets["GOOG"].quantile(0.75)
    es = EfficientSemivariance(mu, historical_rets, weight_bounds=(-1, 1))
    w = es.efficient_return(0.2, market_neutral=True)
    goog_weight2 = w["GOOG"]
    assert abs(goog_weight2) >= abs(goog_weight)
Example #41
def test_max_sharpe_semicovariance():
    df = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(df, benchmark=0)
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    assert all([i >= 0 for i in w.values()])
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.2972184894480104, 0.06443145011260347, 4.302533762060766),
    )
Example #42
def test_hrp_errors():
    with pytest.raises(ValueError):
        hrp = HRPOpt()

    df = get_data()
    returns = df.pct_change().dropna(how="all")
    returns_np = returns.to_numpy()
    with pytest.raises(TypeError):
        hrp = HRPOpt(returns_np)

    hrp = HRPOpt(returns)
    with pytest.raises(ValueError):
        hrp.optimize(linkage_method="blah")
Example #43
def test_max_sharpe_exp_cov():
    df = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.exp_cov(df)
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    assert all([i >= 0 for i in w.values()])
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.3678817256187322, 0.1753405505478982, 1.9840346373481956),
    )
def test_lp_allocation_rmse_error():
    df = get_data()
    mu = mean_historical_return(df)
    S = sample_cov(df)
    ef = EfficientFrontier(mu, S)
    w = ef.max_sharpe()

    latest_prices = get_latest_prices(df)
    da = DiscreteAllocation(w, latest_prices, short_ratio=0.3)
    da.lp_portfolio()
    np.testing.assert_almost_equal(da._allocation_rmse_error(verbose=False),
                                   0.017082871441954087,
                                   decimal=5)
def test_prices_from_log_returns():
    df = get_data()
    returns_df = df.pct_change()  # keep NaN row
    log_returns_df = np.log1p(returns_df)

    # convert pseudo-price to price
    pseudo_prices = expected_returns.prices_from_returns(log_returns_df,
                                                         log_returns=True)
    initial_prices = df.bfill().iloc[0]
    test_prices = pseudo_prices * initial_prices

    # check equality, robust to floating point issues
    assert ((test_prices[1:] - df[1:]).abs().fillna(0) < 1e-10).all().all()
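
The reconstruction above rests on the identity P_t = P_0 * exp(r_1 + ... + r_t) for log returns r_t = log(P_t / P_{t-1}). A small stand-alone check of that identity on illustrative data:

import numpy as np
import pandas as pd

prices = pd.Series([100.0, 102.0, 101.0, 105.0])
log_rets = np.log(prices / prices.shift(1)).dropna()
rebuilt = prices.iloc[0] * np.exp(log_rets.cumsum())   # cumulative log returns -> prices
print(np.allclose(rebuilt.values, prices.iloc[1:].values))  # True
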
def test_init_cvar_errors():
    df = get_data()
    returns = df.pct_change().dropna(how="all")
    with pytest.raises(ValueError):
        vr = CVAROpt(returns, weight_bounds=(0.5, 1))
    with pytest.raises(AttributeError):
        vr = CVAROpt(returns)
        vr.clean_weights()
    with pytest.raises(TypeError):
        vr = CVAROpt(returns.values)
    returns_list = df.values.tolist()
    with pytest.raises(TypeError):
        vr = CVAROpt(returns_list)
def test_max_sharpe_short_semicovariance():
    df = get_data()
    ef = EfficientFrontier(*setup_efficient_frontier(data_only=True),
                           weight_bounds=(-1, 1))
    ef.cov_matrix = risk_models.semicovariance(df, benchmark=0)
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    assert set(w.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.3564654865246848, 0.07202031837368413, 4.671813373260894))
def test_max_sharpe_exp_cov():
    df = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.exp_cov(df)
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    assert set(w.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    assert all([i >= 0 for i in w.values()])
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.3678835305574766, 0.17534146043561463, 1.9840346355802103))
def test_efficient_return_semicovariance():
    df = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(df, benchmark=0)
    w = ef.efficient_return(0.12)
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    assert set(w.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.12000000000871075, 0.06948386214063361, 1.4319423610177537)
    )
Example #50
def test_sample_cov_type_warning():
    df = get_data()
    cov_from_df = risk_models.sample_cov(df)

    returns_as_array = np.array(df)
    with pytest.warns(RuntimeWarning) as w:
        cov_from_array = risk_models.sample_cov(returns_as_array)
        assert len(w) == 1
        assert str(w[0].message) == "data is not in a dataframe"

    np.testing.assert_array_almost_equal(cov_from_df.values,
                                         cov_from_array.values,
                                         decimal=6)
Example #51
def test_corr_to_cov():
    df = get_data()
    rets = risk_models.returns_from_prices(df).dropna()
    test_corr = risk_models.cov_to_corr(rets.cov())
    new_cov = risk_models.corr_to_cov(test_corr, rets.std())
    pd.testing.assert_frame_equal(new_cov, rets.cov())

    with pytest.warns(RuntimeWarning) as w:
        cov_numpy = risk_models.corr_to_cov(test_corr.to_numpy(), rets.std())
        assert len(w) == 1
        assert str(w[0].message) == "corr_matrix is not a dataframe"
        assert isinstance(cov_numpy, pd.DataFrame)
        np.testing.assert_equal(cov_numpy.to_numpy(), new_cov.to_numpy())
Example #52
def test_cov_to_corr():
    df = get_data()
    rets = risk_models.returns_from_prices(df).dropna()
    test_corr = risk_models.cov_to_corr(rets.cov())
    pd.testing.assert_frame_equal(test_corr, rets.corr())

    with pytest.warns(RuntimeWarning) as w:
        test_corr_numpy = risk_models.cov_to_corr(rets.cov().values)
        assert len(w) == 1
        assert str(w[0].message) == "cov_matrix is not a dataframe"
        assert isinstance(test_corr_numpy, pd.DataFrame)
        np.testing.assert_array_almost_equal(test_corr_numpy,
                                             rets.corr().values)
def test_max_sharpe_semicovariance():
    df = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(df, benchmark=0)
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    assert set(w.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.2972237362989219, 0.064432672830601, 4.297294313174586)
    )
def test_sample_cov_type_warning():
    df = get_data()
    cov_from_df = risk_models.sample_cov(df)

    returns_as_array = np.array(df)
    with warnings.catch_warnings(record=True) as w:
        cov_from_array = risk_models.sample_cov(returns_as_array)

        assert len(w) == 1
        assert issubclass(w[0].category, RuntimeWarning)
        assert str(w[0].message) == "prices are not in a dataframe"

    np.testing.assert_array_almost_equal(
        cov_from_df.values, cov_from_array.values, decimal=6
    )
def test_cvar():
    df = get_data()
    returns = df.pct_change().dropna(how="all")
    w = np.array([1 / df.shape[1]] * df.shape[1])
    cvar0 = objective_functions.negative_cvar(
        w, returns, s=5000, random_state=0)
    assert cvar0 > 0
    cvar1 = objective_functions.negative_cvar(
        w, returns, s=5000, beta=0.98, random_state=0)
    assert cvar1 > 0

    # Nondeterministic
    cvar2 = objective_functions.negative_cvar(
        w, returns, s=5000, random_state=1)
    assert not cvar0 == cvar2
def test_min_volatility_semicovariance_L2_reg():
    df = get_data()

    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(df, benchmark=0)
    w = ef.min_volatility()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    assert set(w.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.20661406122127524, 0.055515981410304394, 3.3567606718215663)
    )
def test_negative_sharpe():
    df = get_data()
    e_rets = mean_historical_return(df)
    S = sample_cov(df)
    w = np.array([1 / len(e_rets)] * len(e_rets))

    sharpe = objective_functions.negative_sharpe(w, e_rets, S)
    assert isinstance(sharpe, float)
    assert sharpe < 0

    sigma = np.sqrt(np.dot(w, np.dot(S, w.T)))
    negative_mu = objective_functions.negative_mean_return(w, e_rets)
    np.testing.assert_almost_equal(sharpe * sigma - 0.02, negative_mu)

    # Risk free rate increasing should lead to negative Sharpe increasing.
    assert sharpe < objective_functions.negative_sharpe(
        w, e_rets, S, risk_free_rate=0.1
    )
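
The almost-equal assertion above is the identity negative_sharpe * sigma - rf = -w·mu, which follows if negative_sharpe = -(w·mu - rf) / sigma and negative_mean_return = -w·mu. A quick numeric check under those assumed definitions:

# Illustrative data; the definitions of the two objective functions are assumptions here.
import numpy as np

mu = np.array([0.10, 0.15, 0.08])
S = np.array([[0.04, 0.01, 0.00],
              [0.01, 0.09, 0.02],
              [0.00, 0.02, 0.16]])
w = np.ones(3) / 3
rf = 0.02

sigma = np.sqrt(w @ S @ w)
neg_sharpe = -(w @ mu - rf) / sigma
neg_mu = -(w @ mu)
print(np.isclose(neg_sharpe * sigma - rf, neg_mu))  # True
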
def test_portfolio_allocation_errors():
    df = get_data()
    e_ret = mean_historical_return(df)
    cov = sample_cov(df)
    ef = EfficientFrontier(e_ret, cov)
    w = ef.max_sharpe()
    latest_prices = discrete_allocation.get_latest_prices(df)

    with pytest.raises(TypeError):
        discrete_allocation.portfolio(ef.weights, latest_prices)

    with pytest.raises(TypeError):
        discrete_allocation.portfolio(w, latest_prices.values.tolist())

    with pytest.raises(ValueError):
        discrete_allocation.portfolio(w, latest_prices, min_allocation=0.5)

    with pytest.raises(ValueError):
        discrete_allocation.portfolio(w, latest_prices, total_portfolio_value=0)
def test_portfolio_allocation():
    df = get_data()
    e_ret = mean_historical_return(df)
    cov = sample_cov(df)
    ef = EfficientFrontier(e_ret, cov)
    w = ef.max_sharpe()

    latest_prices = discrete_allocation.get_latest_prices(df)
    allocation, leftover = discrete_allocation.portfolio(w, latest_prices)
    assert allocation == {
        "MA": 14,
        "FB": 12,
        "PFE": 51,
        "BABA": 5,
        "AAPL": 5,
        "AMZN": 0,
        "BBY": 9,
        "SBUX": 6,
        "GOOG": 1,
    }
    total = 0
    for ticker, num in allocation.items():
        total += num * latest_prices[ticker]
    np.testing.assert_almost_equal(total + leftover, 10000)
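
Discrete allocation turns continuous weights into whole-share counts given the latest prices and a budget, with leftover cash making up the difference, as the final assertion checks. A naive greedy sketch of the idea (not DiscreteAllocation's actual algorithm; tickers and prices are made up):

def naive_discrete_allocation(weights, latest_prices, total_portfolio_value=10000):
    # Target a dollar amount per ticker, buy whole shares, track leftover cash.
    allocation, spent = {}, 0.0
    for ticker, weight in weights.items():
        n_shares = int(weight * total_portfolio_value // latest_prices[ticker])
        if n_shares > 0:
            allocation[ticker] = n_shares
            spent += n_shares * latest_prices[ticker]
    return allocation, total_portfolio_value - spent

weights = {"AAPL": 0.4, "MSFT": 0.35, "GOOG": 0.25}
prices = {"AAPL": 180.0, "MSFT": 410.0, "GOOG": 140.0}
print(naive_discrete_allocation(weights, prices))
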