def test_market_implied_prior():
    """Market-implied prior returns: regression values, dict vs. Series input,
    and the ``pi="market"`` constructor shortcut all agree."""
    df = get_data()
    S = risk_models.sample_cov(df)
    # `squeeze=True` was deprecated in pandas 1.4 and removed in 2.0;
    # DataFrame.squeeze("columns") is the forward-compatible equivalent.
    prices = pd.read_csv(
        resource("spy_prices.csv"), parse_dates=True, index_col=0
    ).squeeze("columns")
    delta = black_litterman.market_implied_risk_aversion(prices)
    mcaps = get_market_caps()
    pi = black_litterman.market_implied_prior_returns(mcaps, delta, S)
    assert isinstance(pi, pd.Series)
    assert list(pi.index) == list(df.columns)
    assert pi.notnull().all()
    assert pi.dtype == "float64"
    # Regression check against known-good values.
    np.testing.assert_array_almost_equal(
        pi.values,
        np.array(
            [
                0.14933293, 0.2168623, 0.11219185, 0.10362374, 0.28416295,
                0.12196098, 0.19036819, 0.08860159, 0.17724273, 0.08779627,
                0.0791797, 0.16460474, 0.12854665, 0.08657863, 0.11230036,
                0.13875465, 0.15017163, 0.09066484, 0.1696369, 0.13270213,
            ]
        ),
    )
    # Passing market caps as a Series must give the same prior as a dict.
    mcaps = pd.Series(mcaps)
    pi2 = black_litterman.market_implied_prior_returns(mcaps, delta, S)
    pd.testing.assert_series_equal(pi, pi2, check_exact=False)
    # Test alternate syntax
    bl = BlackLittermanModel(
        S,
        pi="market",
        market_caps=mcaps,
        absolute_views={"AAPL": 0.1},
        risk_aversion=delta,
    )
    # The shortcut is compared against an explicit call with risk_free_rate=0.
    pi = black_litterman.market_implied_prior_returns(
        mcaps, delta, S, risk_free_rate=0
    )
    np.testing.assert_array_almost_equal(bl.pi, pi.values.reshape(-1, 1))
def test_bl_market_prior():
    """BL posterior returns lie between the market-implied prior and the views,
    and portfolio performance matches the known regression values."""
    df = get_data()
    S = risk_models.sample_cov(df)
    # `squeeze=True` was deprecated in pandas 1.4 and removed in 2.0;
    # DataFrame.squeeze("columns") is the forward-compatible equivalent.
    prices = pd.read_csv(
        resource("spy_prices.csv"), parse_dates=True, index_col=0
    ).squeeze("columns")
    delta = black_litterman.market_implied_risk_aversion(prices)
    mcaps = get_market_caps()
    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)
    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
    bl = BlackLittermanModel(S, pi=prior, absolute_views=viewdict)
    rets = bl.bl_returns()
    # compare posterior with prior: each posterior return must fall between
    # the prior and the corresponding view (in either order).
    for v in viewdict:
        assert (prior[v] <= rets[v] <= viewdict[v]) or (
            viewdict[v] <= rets[v] <= prior[v]
        )
    # performance is undefined until weights have been computed
    with pytest.raises(ValueError):
        bl.portfolio_performance()
    bl.bl_weights(delta)
    np.testing.assert_allclose(
        bl.portfolio_performance(),
        (0.2580693114409672, 0.265445955488424, 0.8968654692926723),
    )
    # Check that bl.cov() has been called and used
    assert bl.posterior_cov is not None
def test_bl_tau():
    """Monotonicity in tau: as tau grows, the posterior moves further from the
    prior and gives more weight to the views (with omega held fixed)."""
    df = get_data()
    S = risk_models.sample_cov(df)
    # `squeeze=True` was deprecated in pandas 1.4 and removed in 2.0;
    # DataFrame.squeeze("columns") is the forward-compatible equivalent.
    prices = pd.read_csv(
        resource("spy_prices.csv"), parse_dates=True, index_col=0
    ).squeeze("columns")
    delta = black_litterman.market_implied_risk_aversion(prices)
    mcaps = get_market_caps()
    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)
    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
    # Need to change omega for this test to work
    omega = np.diag([0.01, 0.01, 0.01, 0.01])
    bl0 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=1e-10, omega=omega
    )
    bl1 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=0.01, omega=omega
    )
    bl2 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=0.1, omega=omega
    )
    # For tiny tau, posterior should roughly equal prior
    np.testing.assert_allclose(bl0.bl_returns(), bl0.pi.flatten(), rtol=1e-5)
    # For bigger tau, GOOG should be given more weight
    assert bl1.bl_returns()["GOOG"] > bl0.bl_returns()["GOOG"]
    assert bl2.bl_returns()["GOOG"] > bl1.bl_returns()["GOOG"]
def allocate(df):
    """Build a Black-Litterman max-Sharpe allocation for the tickers in `df`.

    The first row of `df` supplies the absolute views; prices, the market
    index (^BVSP) and market caps are fetched from Yahoo Finance.
    Returns the cleaned weight dict from the optimizer.
    """
    tickers = list(df.columns)

    # Asset and market-index price history over the same window.
    prices = yf.download(tickers, start="2010-01-01", end="2020-01-01")["Adj Close"]
    market_prices = yf.download("^BVSP", start="2010-01-01", end="2020-01-01")["Adj Close"]

    # Market capitalisation per ticker, straight from Yahoo Finance.
    mcaps = {t: yf.Ticker(t).info["marketCap"] for t in tickers}

    # Shrunk covariance and market-implied risk aversion / prior.
    S = risk_models.CovarianceShrinkage(prices).ledoit_wolf()
    delta = black_litterman.market_implied_risk_aversion(market_prices)
    market_prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)

    # First row of `df` is interpreted as the dict of absolute views.
    bl = BlackLittermanModel(
        S,
        pi="market",
        market_caps=mcaps,
        risk_aversion=delta,
        absolute_views=df.to_dict('records')[0],
    )
    ret_bl = bl.bl_returns()
    S_bl = bl.bl_cov()

    # Max-Sharpe optimization with L2 regularisation on the BL posterior.
    ef = EfficientFrontier(ret_bl, S_bl)
    ef.add_objective(objective_functions.L2_reg)
    ef.max_sharpe()
    return ef.clean_weights()
def test_black_litterman_market_prior():
    """Same as test_bl_market_prior but with an inline market-cap dict:
    posterior lies between prior and views, and performance matches
    regression values."""
    df = get_data()
    S = risk_models.sample_cov(df)
    # `squeeze=True` was deprecated in pandas 1.4 and removed in 2.0;
    # DataFrame.squeeze("columns") is the forward-compatible equivalent.
    prices = pd.read_csv(
        "tests/spy_prices.csv", parse_dates=True, index_col=0
    ).squeeze("columns")
    delta = black_litterman.market_implied_risk_aversion(prices)
    # Approximate market caps (USD) for the 20 test tickers.
    mcaps = {
        "GOOG": 927e9,
        "AAPL": 1.19e12,
        "FB": 574e9,
        "BABA": 533e9,
        "AMZN": 867e9,
        "GE": 96e9,
        "AMD": 43e9,
        "WMT": 339e9,
        "BAC": 301e9,
        "GM": 51e9,
        "T": 61e9,
        "UAA": 78e9,
        "SHLD": 0,
        "XOM": 295e9,
        "RRC": 1e9,
        "BBY": 22e9,
        "MA": 288e9,
        "PFE": 212e9,
        "JPM": 422e9,
        "SBUX": 102e9,
    }
    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)
    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
    bl = BlackLittermanModel(S, pi=prior, absolute_views=viewdict)
    rets = bl.bl_returns()
    # compare posterior with prior: each posterior return must fall between
    # the prior and the corresponding view (in either order).
    for v in viewdict:
        assert (prior[v] <= rets[v] <= viewdict[v]) or (
            viewdict[v] <= rets[v] <= prior[v]
        )
    # performance is undefined until weights have been computed
    with pytest.raises(ValueError):
        bl.portfolio_performance()
    bl.bl_weights(delta)
    np.testing.assert_allclose(
        bl.portfolio_performance(),
        (0.2580693114409672, 0.265445955488424, 0.8968654692926723),
    )
    # Check that bl.cov() has been called and used
    assert bl.posterior_cov is not None
def test_bl_market_automatic():
    """The pi="market" shortcut must reproduce an explicitly computed
    market-implied prior (risk aversion 1, risk-free rate 0)."""
    df = get_data()
    cov_matrix = risk_models.sample_cov(df)
    caps = get_market_caps()
    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}

    # Automatic: let the model derive the prior from the market caps.
    auto_model = BlackLittermanModel(
        cov_matrix, pi="market", absolute_views=viewdict, market_caps=caps
    )
    auto_rets = auto_model.bl_returns()

    # Explicit: compute the same prior by hand and pass it in directly.
    explicit_prior = black_litterman.market_implied_prior_returns(caps, 1, cov_matrix, 0)
    explicit_model = BlackLittermanModel(
        cov_matrix, pi=explicit_prior, absolute_views=viewdict
    )
    explicit_rets = explicit_model.bl_returns()

    pd.testing.assert_series_equal(auto_rets, explicit_rets)
def calc_black_litterman(market_prices, mkt_caps, covar, config, symbols):
    """Run the Black-Litterman model and plot the outcome.

    Risk aversion and the prior are implied from `market_prices` and
    `mkt_caps`; views and their uncertainty come from `config`.
    Returns (posterior_returns, posterior_covariance).
    """
    risk_aversion = black_litterman.market_implied_risk_aversion(market_prices)
    prior = black_litterman.market_implied_prior_returns(
        mkt_caps, risk_aversion, covar
    )

    # Investor views and their uncertainty matrix, both driven by `config`.
    views = load_mean_views(config['views'], symbols)
    uncertainty = calc_omega(config, symbols)

    model = BlackLittermanModel(
        covar,
        pi="market",
        market_caps=mkt_caps,
        risk_aversion=risk_aversion,
        absolute_views=views,
        omega=uncertainty,
    )
    posterior_rets = model.bl_returns()
    posterior_cov = model.bl_cov()

    plot_black_litterman_results(posterior_rets, posterior_cov, prior, views)
    return posterior_rets, posterior_cov
# NOTE(review): fragment — these entries continue a market-cap dict
# (presumably `mcaps`) whose opening brace and earlier entries, along with
# `S` (covariance) and `delta` (risk aversion), are defined above this excerpt.
"AMD": 43e9, "WMT": 339e9, "BAC": 301e9, "GM": 51e9, "T": 61e9,
    "UAA": 78e9, "SHLD": 0, "XOM": 295e9, "RRC": 1e9, "BBY": 22e9,
    "MA": 288e9, "PFE": 212e9, "JPM": 422e9, "SBUX": 102e9,
}
# Prior returns implied by market caps, risk aversion and the covariance matrix.
prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)
# 1. SBUX will drop by 20%
# 2. GOOG outperforms FB by 10%
# 3. BAC and JPM will outperform T and GE by 15%
views = np.array([-0.20, 0.10, 0.15]).reshape(-1, 1)  # Q: one return per view
# P: picking matrix, one row per view. Columns presumably follow the asset
# order of S — TODO confirm. Row 3 spreads the +/-0.15 view equally across
# two long (weights +0.5) and two short (weights -0.5) names.
picking = np.array([
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
    [1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, -0.5, 0, 0, 0.5, 0, -0.5, 0, 0, 0, 0, 0, 0, 0, 0.5, 0],
])
# Posterior returns from the relative/absolute views above.
bl = BlackLittermanModel(S, Q=views, P=picking, pi=prior, tau=0.01)
rets = bl.bl_returns()
# Max-Sharpe optimization on the BL posterior returns and the sample covariance.
ef = EfficientFrontier(rets, S)
ef.max_sharpe()
print(ef.clean_weights())
def BLmain():
    """Excel-driven (xlwings) Black-Litterman optimization.

    Reads all parameters from the 'Optim' sheet, builds a BL posterior from
    market-implied priors plus user views, and writes weights, returns and
    charts back to the workbook. All I/O is via xlwings cell ranges.
    """
    # Excel call: grab the calling workbook's sheets.
    sht = xw.Book.caller().sheets['Optim']
    shtdata = xw.Book.caller().sheets['Data']
    sht.range('J17').value = 'Optimizing...'

    # Clear previous output values.
    sht.range('L23').expand().clear_contents()
    shtdata.range('A1').expand().clear_contents()
    shtdata.range('J1').expand().clear_contents()

    # Set variables from Excel cells.
    rf = sht.range('J10').value          # risk-free rate
    MinWeight = sht.range('J11').value
    MaxWeight = sht.range('J12').value
    Delta = sht.range('J13').value       # optional user-supplied risk aversion
    Tau = sht.range('J14').value
    Output = sht.range('J15').value
    ModelOptim = sht.range('J8').value
    RiskModel = sht.range('J9').value
    listticker = xw.Range('B3').expand().value
    indexname = sht.range('J7').value
    startdate = sht.range('J3').value
    enddate = sht.range('J6').value
    EFBool = sht.range('J16').value
    # Dataset is divided in two subsets: train (optimization) and test (backtest).
    traintestdate = sht.range('J4').value

    # Initializing price data for the tickers and the index (for risk aversion).
    train, test = initialize(startdate, enddate, traintestdate, listticker)
    trainindex, testindex = initializeIndex(startdate, enddate, traintestdate,
                                            indexname)  # for risk aversion

    # Black-Litterman: covariance estimate per the selected risk model.
    # NOTE(review): `S` stays unbound if RiskModel matches neither branch —
    # presumably the sheet restricts the choices; confirm.
    if RiskModel == 'historicalcov':
        S = risk_models.sample_cov(train)
    elif RiskModel == 'exphistoricalcov':
        S = risk_models.exp_cov(train)

    # Use the user-supplied risk aversion if given, else imply it from the index.
    # NOTE(review): `!= None` should idiomatically be `is not None`.
    if Delta != None:
        delta = Delta
    else:
        delta = black_litterman.market_implied_risk_aversion(trainindex,
                                                             risk_free_rate=rf)

    s = data.get_quote_yahoo(listticker)['marketCap']
    # Dictionary of market cap for each stock.
    mcaps = {tick: mcap for tick, mcap in zip(listticker, s)}

    # Expected returns implied from the market, then the BL posterior.
    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S,
                                                         risk_free_rate=rf)
    views, picking = createviews(listticker)
    bl = BlackLittermanModel(S, Q=views, P=picking, pi=prior, tau=Tau)
    rets = bl.bl_returns()
    cov = bl.bl_cov()

    # Two ways of displaying outputs: either using the optimizer, or
    # returning the implied weights directly.
    if Output == 'Optimization':
        ef = EfficientFrontier(rets, S, weight_bounds=(MinWeight, MaxWeight))
        # Optimization target selected from the sheet.
        # NOTE(review): `raw_weights` stays unbound for any other value of
        # ModelOptim — confirm the sheet validates this input.
        if ModelOptim == 'min_volatility':
            raw_weights = ef.min_volatility()
        elif ModelOptim == 'max_sharpe':
            raw_weights = ef.max_sharpe()
        cleaned_weights = ef.clean_weights()
        finalw = [cleaned_weights.get(i, 1) for i in listticker]
        perf = ef.portfolio_performance(verbose=True, risk_free_rate=rf)
        sht.range('H21').value = perf
    elif Output == 'Return-Implied-Weight':
        bl.bl_weights(delta)
        weights = bl.clean_weights()
        finalw = [weights.get(i, 1) for i in listticker]
    # E(R) from BL, in ticker order.
    # NOTE(review): reconstructed placement — `finalw` is only bound inside
    # the two Output branches above; confirm Output is always one of them.
    finalr = [rets.get(i, 1) for i in listticker]

    # Display results back in the Optim sheet (transposed into columns).
    sht.range('L23').options(transpose=True).value = listticker
    sht.range('M23').options(transpose=True).value = finalw
    sht.range('N23').options(transpose=True).value = finalr

    # Copy data in the Data range.
    shtdata.range((1, 1)).value = train
    shtdata.range((1, len(listticker) + 3)).value = test
    #numshares, left = getoptimprices(test, cleanW, InitialAmountInPortfolio)

    # Visualisation: weights chart and correlation heatmaps (prior vs BL).
    sht.charts['BLweights'].set_source_data(
        sht.range((23, 12), (22 + len(listticker), 13)))
    CorrMap(sht, 'CorrMatPrior', S, 'coolwarm')
    CorrMap(sht, 'CorrMatBL', cov, 'YlGn')

    # Optional efficient-frontier plot.
    if EFBool == "YES":
        effrontier(rets, S, sht, 'EFBL')

    # Done
    sht.range('J17').value = 'Optimization Done'
def test_market_implied_prior():
    """Market-implied prior returns (inline market-cap dict): regression
    values, and dict vs. Series input agreement."""
    df = get_data()
    S = risk_models.sample_cov(df)
    # `squeeze=True` was deprecated in pandas 1.4 and removed in 2.0;
    # DataFrame.squeeze("columns") is the forward-compatible equivalent.
    prices = pd.read_csv(
        "tests/spy_prices.csv", parse_dates=True, index_col=0
    ).squeeze("columns")
    delta = black_litterman.market_implied_risk_aversion(prices)
    # Approximate market caps (USD) for the 20 test tickers.
    mcaps = {
        "GOOG": 927e9,
        "AAPL": 1.19e12,
        "FB": 574e9,
        "BABA": 533e9,
        "AMZN": 867e9,
        "GE": 96e9,
        "AMD": 43e9,
        "WMT": 339e9,
        "BAC": 301e9,
        "GM": 51e9,
        "T": 61e9,
        "UAA": 78e9,
        "SHLD": 0,
        "XOM": 295e9,
        "RRC": 1e9,
        "BBY": 22e9,
        "MA": 288e9,
        "PFE": 212e9,
        "JPM": 422e9,
        "SBUX": 102e9,
    }
    pi = black_litterman.market_implied_prior_returns(mcaps, delta, S)
    assert isinstance(pi, pd.Series)
    assert list(pi.index) == list(df.columns)
    assert pi.notnull().all()
    assert pi.dtype == "float64"
    # Regression check against known-good values.
    np.testing.assert_array_almost_equal(
        pi.values,
        np.array(
            [
                0.14933293, 0.2168623, 0.11219185, 0.10362374, 0.28416295,
                0.12196098, 0.19036819, 0.08860159, 0.17724273, 0.08779627,
                0.0791797, 0.16460474, 0.12854665, 0.08657863, 0.11230036,
                0.13875465, 0.15017163, 0.09066484, 0.1696369, 0.13270213,
            ]
        ),
    )
    # Passing market caps as a Series must give the same prior as a dict.
    mcaps = pd.Series(mcaps)
    pi2 = black_litterman.market_implied_prior_returns(mcaps, delta, S)
    pd.testing.assert_series_equal(pi, pi2, check_exact=False)
def test_bl_tau():
    """Monotonicity in tau (inline market-cap dict): larger tau pulls the
    posterior further from the prior toward the views, with omega fixed."""
    df = get_data()
    S = risk_models.sample_cov(df)
    # `squeeze=True` was deprecated in pandas 1.4 and removed in 2.0;
    # DataFrame.squeeze("columns") is the forward-compatible equivalent.
    prices = pd.read_csv(
        "tests/spy_prices.csv", parse_dates=True, index_col=0
    ).squeeze("columns")
    delta = black_litterman.market_implied_risk_aversion(prices)
    # Approximate market caps (USD) for the 20 test tickers.
    mcaps = {
        "GOOG": 927e9,
        "AAPL": 1.19e12,
        "FB": 574e9,
        "BABA": 533e9,
        "AMZN": 867e9,
        "GE": 96e9,
        "AMD": 43e9,
        "WMT": 339e9,
        "BAC": 301e9,
        "GM": 51e9,
        "T": 61e9,
        "UAA": 78e9,
        "SHLD": 0,
        "XOM": 295e9,
        "RRC": 1e9,
        "BBY": 22e9,
        "MA": 288e9,
        "PFE": 212e9,
        "JPM": 422e9,
        "SBUX": 102e9,
    }
    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)
    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
    # Need to change omega for this test to work
    omega = np.diag([0.01, 0.01, 0.01, 0.01])
    bl0 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=1e-10, omega=omega
    )
    bl1 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=0.01, omega=omega
    )
    bl2 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=0.1, omega=omega
    )
    # For tiny tau, posterior should roughly equal prior
    np.testing.assert_allclose(bl0.bl_returns(), bl0.pi.flatten(), rtol=1e-5)
    # For bigger tau, GOOG should be given more weight
    assert bl1.bl_returns()["GOOG"] > bl0.bl_returns()["GOOG"]
    assert bl2.bl_returns()["GOOG"] > bl1.bl_returns()["GOOG"]