def test_bl_tau():
    """tau should interpolate the posterior between the prior (tau -> 0) and the views.

    Uses a fixed diagonal omega so that only tau varies between the three models.
    """
    df = get_data()
    S = risk_models.sample_cov(df)

    # `squeeze=True` was removed from read_csv in pandas 2.0; squeeze the
    # single-column frame explicitly instead (behavior-identical).
    prices = pd.read_csv(
        resource("spy_prices.csv"), parse_dates=True, index_col=0
    ).squeeze("columns")

    delta = black_litterman.market_implied_risk_aversion(prices)

    mcaps = get_market_caps()
    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)

    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}

    # Need to change omega for this test to work
    omega = np.diag([0.01, 0.01, 0.01, 0.01])

    bl0 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=1e-10, omega=omega
    )
    bl1 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=0.01, omega=omega
    )
    bl2 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=0.1, omega=omega
    )

    # For tiny tau, posterior should roughly equal prior
    np.testing.assert_allclose(bl0.bl_returns(), bl0.pi.flatten(), rtol=1e-5)

    # For bigger tau, GOOG should be given more weight
    assert bl1.bl_returns()["GOOG"] > bl0.bl_returns()["GOOG"]
    assert bl2.bl_returns()["GOOG"] > bl1.bl_returns()["GOOG"]
def test_cov_ndarray():
    """BL results must be identical whether inputs are DataFrames or ndarrays."""
    df = get_data()
    mean_rets = df.pct_change().mean()
    cov = risk_models.sample_cov(df)
    view_series = pd.Series(0.1, index=cov.columns)

    bl_frame = BlackLittermanModel(cov, pi=mean_rets, Q=view_series)
    bl_array = BlackLittermanModel(
        cov.to_numpy(), pi=mean_rets.to_numpy(), Q=view_series
    )

    # Compare without missing ticker index values.
    np.testing.assert_equal(
        bl_array.bl_returns().to_numpy(), bl_frame.bl_returns().to_numpy()
    )
    np.testing.assert_equal(bl_array.bl_cov().to_numpy(), bl_frame.bl_cov().to_numpy())
    assert list(bl_array.bl_weights().values()) == list(bl_frame.bl_weights().values())
def test_bl_market_automatic():
    """pi="market" must match an explicitly computed market-implied prior."""
    df = get_data()
    cov = risk_models.sample_cov(df)
    caps = get_market_caps()

    views = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
    bl_auto = BlackLittermanModel(
        cov, pi="market", absolute_views=views, market_caps=caps
    )
    auto_rets = bl_auto.bl_returns()

    # Compare with explicit prior (delta=1, rf=0 to match the automatic defaults)
    explicit_prior = black_litterman.market_implied_prior_returns(caps, 1, cov, 0)
    bl_explicit = BlackLittermanModel(cov, pi=explicit_prior, absolute_views=views)
    explicit_rets = bl_explicit.bl_returns()

    pd.testing.assert_series_equal(auto_rets, explicit_rets)
def test_idzorek_basic():
    """Idzorek omega: posterior moves linearly from prior to view with confidence."""
    n = 5
    # Identity covariance over integer-labelled assets
    cov = pd.DataFrame(np.eye(n), index=range(n), columns=range(n))
    # Constant view of 0.3 return on every asset
    views = dict.fromkeys(range(n), 0.3)
    # Flat prior of 0.1
    prior = pd.Series(0.1, index=range(n))

    # Perfect confidence (zero view uncertainty) - posterior should equal views
    bl = BlackLittermanModel(
        cov, pi=prior, absolute_views=views, omega=np.diag(np.zeros(n))
    )
    pd.testing.assert_series_equal(bl.bl_returns(), pd.Series([0.3] * n))

    # No confidence (huge view uncertainty) - posterior should equal prior
    bl = BlackLittermanModel(cov, pi=prior, absolute_views=views, omega=cov * 1e6)
    pd.testing.assert_series_equal(bl.bl_returns(), prior)

    # Idzorek 100% confidence should reduce to an all-zero omega
    bl = BlackLittermanModel(
        cov, pi=prior, absolute_views=views, omega="idzorek", view_confidences=[1] * n
    )
    np.testing.assert_array_almost_equal(bl.omega, np.zeros((n, n)))
    pd.testing.assert_series_equal(bl.bl_returns(), pd.Series(0.3, index=range(n)))

    # Idzorek 0% confidence should reduce to a huge diagonal omega
    bl = BlackLittermanModel(
        cov, pi=prior, absolute_views=views, omega="idzorek", view_confidences=[0] * n
    )
    np.testing.assert_array_almost_equal(bl.omega, np.diag([1e6] * n))
    pd.testing.assert_series_equal(bl.bl_returns(), prior)

    # Sweep the confidence range: linear spacing between prior and view
    for step, confidence in enumerate(np.arange(0, 1.2, 0.2)):
        bl = BlackLittermanModel(
            cov,
            pi=prior,
            absolute_views=views,
            omega="idzorek",
            view_confidences=[confidence] * n,
        )
        np.testing.assert_almost_equal(bl.bl_returns()[0], 0.1 + step * 0.2 / n)
def test_idzorek_with_priors():
    """Idzorek omega combined with a market-implied prior."""
    df = get_data()
    cov = risk_models.sample_cov(df)
    caps = get_market_caps()

    views = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
    bl = BlackLittermanModel(
        cov,
        pi="market",
        market_caps=caps,
        absolute_views=views,
        omega="idzorek",
        view_confidences=[1, 1, 0.25, 0.25],
    )
    rets = bl.bl_returns()

    # 100% confidence in the first view => zero uncertainty for it
    assert bl.omega[0, 0] == 0
    np.testing.assert_almost_equal(rets["AAPL"], -0.3)

    # Performance cannot be reported before weights exist
    with pytest.raises(ValueError):
        bl.portfolio_performance()

    bl.bl_weights()
    np.testing.assert_allclose(
        bl.portfolio_performance(),
        (0.943431295405105, 0.5361412623208567, 1.722365653051476),
    )
    # Check that bl.cov() has been called and used
    assert bl.posterior_cov is not None
def test_bl_market_prior():
    """Posterior returns should lie between the market-implied prior and the views."""
    df = get_data()
    S = risk_models.sample_cov(df)

    # `squeeze=True` was removed from read_csv in pandas 2.0; squeeze the
    # single-column frame explicitly instead (behavior-identical).
    prices = pd.read_csv(
        resource("spy_prices.csv"), parse_dates=True, index_col=0
    ).squeeze("columns")

    delta = black_litterman.market_implied_risk_aversion(prices)

    mcaps = get_market_caps()
    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)

    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
    bl = BlackLittermanModel(S, pi=prior, absolute_views=viewdict)
    rets = bl.bl_returns()

    # compare posterior with prior: each posterior lies between prior and view
    for v in viewdict:
        assert (prior[v] <= rets[v] <= viewdict[v]) or (
            viewdict[v] <= rets[v] <= prior[v]
        )

    # Performance cannot be reported before weights exist
    with pytest.raises(ValueError):
        bl.portfolio_performance()

    bl.bl_weights(delta)
    np.testing.assert_allclose(
        bl.portfolio_performance(),
        (0.2580693114409672, 0.265445955488424, 0.8968654692926723),
    )
    # Check that bl.cov() has been called and used
    assert bl.posterior_cov is not None
def test_idzorek_input_formats():
    """view_confidences may be a list, tuple, 1-D array or column vector."""
    n = 5
    # Identity covariance over integer-labelled assets
    cov = pd.DataFrame(np.eye(n), index=range(n), columns=range(n))
    # Constant view of 0.3 return, flat prior of 0.1
    views = {k: 0.3 for k in range(n)}
    prior = pd.Series(0.1, index=range(n))
    # With 50% confidence the posterior should sit halfway at 0.2
    expected = pd.Series(0.2, index=range(n))

    confidence_formats = (
        [0.5] * n,
        (0.5, 0.5, 0.5, 0.5, 0.5),
        np.array([0.5] * n),
        np.array([0.5] * n).reshape(-1, 1),
    )
    for confidences in confidence_formats:
        bl = BlackLittermanModel(
            cov,
            pi=prior,
            absolute_views=views,
            omega="idzorek",
            view_confidences=confidences,
        )
        pd.testing.assert_series_equal(bl.bl_returns(), expected)
def test_bl_no_uncertainty():
    """Zero view uncertainty (omega diagonal of 0) should pin posterior to the view."""
    df = get_data()
    S = risk_models.sample_cov(df)
    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}

    # For 100% confidence in every view, posterior return should equal view return.
    omega = np.diag([0, 0, 0, 0])
    bl = BlackLittermanModel(S, absolute_views=viewdict, omega=omega)
    rets = bl.bl_returns()
    for k, v in viewdict.items():
        assert np.abs(rets[k] - v) < 1e-5

    # If only one view has 100% confidence, only that asset has posterior = view.
    # (Fixed comment: original said "post = prior" and had a typo "confidencee".)
    omega = np.diag([0, 0.2, 0.2, 0.2])
    bl = BlackLittermanModel(S, absolute_views=viewdict, omega=omega)
    rets = bl.bl_returns()
    # Reuse `rets` instead of recomputing bl.bl_returns() a second time.
    assert np.abs(rets["GOOG"] - viewdict["GOOG"]) < 1e-5
    assert np.abs(rets["AAPL"] - viewdict["AAPL"]) > 0.01
def test_black_litterman_market_prior():
    """Market-implied prior (explicit mcaps dict): posterior lies between prior and views."""
    df = get_data()
    S = risk_models.sample_cov(df)

    # `squeeze=True` was removed from read_csv in pandas 2.0; squeeze the
    # single-column frame explicitly instead (behavior-identical).
    prices = pd.read_csv(
        "tests/spy_prices.csv", parse_dates=True, index_col=0
    ).squeeze("columns")

    delta = black_litterman.market_implied_risk_aversion(prices)

    mcaps = {
        "GOOG": 927e9,
        "AAPL": 1.19e12,
        "FB": 574e9,
        "BABA": 533e9,
        "AMZN": 867e9,
        "GE": 96e9,
        "AMD": 43e9,
        "WMT": 339e9,
        "BAC": 301e9,
        "GM": 51e9,
        "T": 61e9,
        "UAA": 78e9,
        "SHLD": 0,
        "XOM": 295e9,
        "RRC": 1e9,
        "BBY": 22e9,
        "MA": 288e9,
        "PFE": 212e9,
        "JPM": 422e9,
        "SBUX": 102e9,
    }

    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)

    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
    bl = BlackLittermanModel(S, pi=prior, absolute_views=viewdict)
    rets = bl.bl_returns()

    # compare posterior with prior: each posterior lies between prior and view
    for v in viewdict:
        assert (prior[v] <= rets[v] <= viewdict[v]) or (
            viewdict[v] <= rets[v] <= prior[v]
        )

    # Performance cannot be reported before weights exist
    with pytest.raises(ValueError):
        bl.portfolio_performance()

    bl.bl_weights(delta)
    np.testing.assert_allclose(
        bl.portfolio_performance(),
        (0.2580693114409672, 0.265445955488424, 0.8968654692926723),
    )
    # Check that bl.cov() has been called and used
    assert bl.posterior_cov is not None
def test_bl_returns_no_prior():
    """Without a prior, bl_returns must match the closed-form BL posterior."""
    df = get_data()
    cov = risk_models.sample_cov(df)

    views = {"AAPL": 0.20, "BBY": -0.30, "BAC": 0, "SBUX": -0.2, "T": 0.131321}
    bl = BlackLittermanModel(cov, absolute_views=views)
    posterior = bl.bl_returns()

    # Same answer as the explicit inverse:
    # E(R) = [(tau S)^-1 + P' Omega^-1 P]^-1 [P' Omega^-1 Q]
    inv = np.linalg.inv
    expected = inv(
        inv(bl.tau * bl.cov_matrix) + bl.P.T @ inv(bl.omega) @ bl.P
    ) @ (bl.P.T @ inv(bl.omega) @ bl.Q)
    np.testing.assert_array_almost_equal(posterior.values.reshape(-1, 1), expected)
def test_bl_relative_views():
    """Relative views via a picking matrix should order the posterior returns."""
    df = get_data()
    cov = risk_models.CovarianceShrinkage(df).ledoit_wolf()

    # 1. SBUX will drop by 20%
    # 2. GOOG outperforms FB by 10%
    # 3. BAC and JPM will outperform T and GE by 15%
    view_values = np.array([[-0.20], [0.10], [0.15]])
    picking = np.array(
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
            [1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, -0.5, 0, 0, 0.5, 0, -0.5, 0, 0, 0, 0, 0, 0, 0, 0.5, 0],
        ]
    )

    bl = BlackLittermanModel(cov, Q=view_values, P=picking)
    rets = bl.bl_returns()

    assert rets["SBUX"] < 0
    assert rets["GOOG"] > rets["FB"]
    assert rets["BAC"] > rets["T"]
    assert rets["JPM"] > rets["GE"]
def test_bl_returns_all_views():
    """Posterior returns with a view on every asset: type, index, and exact values."""
    df = get_data()
    prior = expected_returns.ema_historical_return(df)
    cov = risk_models.CovarianceShrinkage(df).ledoit_wolf()
    views = pd.Series(0.1, index=cov.columns)

    bl = BlackLittermanModel(cov, pi=prior, Q=views)
    posterior = bl.bl_returns()

    assert isinstance(posterior, pd.Series)
    assert list(posterior.index) == list(df.columns)
    assert posterior.notnull().all()
    assert posterior.dtype == "float64"

    expected = np.array(
        [
            0.11168648,
            0.16782938,
            0.12516799,
            0.24067997,
            0.32848296,
            -0.22789895,
            0.16311297,
            0.11928542,
            0.25414308,
            0.11007738,
            0.06282615,
            -0.03140218,
            -0.16977172,
            0.05254821,
            -0.10463884,
            0.32173375,
            0.26399864,
            0.1118594,
            0.22999558,
            0.08977448,
        ]
    )
    np.testing.assert_array_almost_equal(posterior, expected)
def test_bl_returns_all_views():
    """Posterior returns with a view on every asset: type, index, and exact values."""
    df = get_data()
    prior = expected_returns.ema_historical_return(df)
    cov = risk_models.CovarianceShrinkage(df).ledoit_wolf()
    views = pd.Series(0.1, index=cov.columns)

    bl = BlackLittermanModel(cov, pi=prior, Q=views)
    posterior = bl.bl_returns()

    assert isinstance(posterior, pd.Series)
    assert list(posterior.index) == list(df.columns)
    assert posterior.notnull().all()
    assert posterior.dtype == "float64"

    expected = np.array(
        [
            0.11774473,
            0.1709139,
            0.12180833,
            0.21202423,
            0.28120945,
            -0.2787358,
            0.17274774,
            0.12714698,
            0.25492005,
            0.11229777,
            0.07182723,
            -0.01521839,
            -0.21235465,
            0.06399515,
            -0.11738365,
            0.28865661,
            0.23828607,
            0.12038049,
            0.2331218,
            0.10485376,
        ]
    )
    np.testing.assert_array_almost_equal(posterior, expected)
"JPM": 422e9, "SBUX": 102e9, } prior = black_litterman.market_implied_prior_returns(mcaps, delta, S) # 1. SBUX will drop by 20% # 2. GOOG outperforms FB by 10% # 3. BAC and JPM will outperform T and GE by 15% views = np.array([-0.20, 0.10, 0.15]).reshape(-1, 1) picking = np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -0.5, 0, 0, 0.5, 0, -0.5, 0, 0, 0, 0, 0, 0, 0, 0.5, 0], ]) bl = BlackLittermanModel(S, Q=views, P=picking, pi=prior, tau=0.01) rets = bl.bl_returns() ef = EfficientFrontier(rets, S) ef.max_sharpe() print(ef.clean_weights()) ef.portfolio_performance(verbose=True) """ {'GOOG': 0.2015, 'AAPL': 0.2368, 'FB': 0.0, 'BABA': 0.06098, 'AMZN': 0.17148, 'GE': 0.0, 'AMD': 0.0, 'WMT': 0.0, 'BAC': 0.18545, 'GM': 0.0,
def test_bl_tau():
    """tau should interpolate the posterior between the prior (tau -> 0) and the views.

    Uses a fixed diagonal omega so that only tau varies between the three models.
    """
    df = get_data()
    S = risk_models.sample_cov(df)

    # `squeeze=True` was removed from read_csv in pandas 2.0; squeeze the
    # single-column frame explicitly instead (behavior-identical).
    prices = pd.read_csv(
        "tests/spy_prices.csv", parse_dates=True, index_col=0
    ).squeeze("columns")

    delta = black_litterman.market_implied_risk_aversion(prices)

    mcaps = {
        "GOOG": 927e9,
        "AAPL": 1.19e12,
        "FB": 574e9,
        "BABA": 533e9,
        "AMZN": 867e9,
        "GE": 96e9,
        "AMD": 43e9,
        "WMT": 339e9,
        "BAC": 301e9,
        "GM": 51e9,
        "T": 61e9,
        "UAA": 78e9,
        "SHLD": 0,
        "XOM": 295e9,
        "RRC": 1e9,
        "BBY": 22e9,
        "MA": 288e9,
        "PFE": 212e9,
        "JPM": 422e9,
        "SBUX": 102e9,
    }

    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)

    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}

    # Need to change omega for this test to work
    omega = np.diag([0.01, 0.01, 0.01, 0.01])

    bl0 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=1e-10, omega=omega
    )
    bl1 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=0.01, omega=omega
    )
    bl2 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=0.1, omega=omega
    )

    # For tiny tau, posterior should roughly equal prior
    np.testing.assert_allclose(bl0.bl_returns(), bl0.pi.flatten(), rtol=1e-5)

    # For bigger tau, GOOG should be given more weight
    assert bl1.bl_returns()["GOOG"] > bl0.bl_returns()["GOOG"]
    assert bl2.bl_returns()["GOOG"] > bl1.bl_returns()["GOOG"]