Ejemplo n.º 1
0
def test_market_risk_aversion():
    """Check market-implied risk aversion for both Series and DataFrame input."""
    # read_csv(..., squeeze=True) was removed in pandas 2.0; squeeze the
    # single-column frame into a Series explicitly instead.
    prices = pd.read_csv(
        resource("spy_prices.csv"), parse_dates=True, index_col=0
    ).squeeze("columns")
    delta = black_litterman.market_implied_risk_aversion(prices)
    assert np.round(delta, 5) == 2.68549

    # check it works for df
    prices = pd.read_csv(resource("spy_prices.csv"), parse_dates=True, index_col=0)
    delta = black_litterman.market_implied_risk_aversion(prices)
    assert np.round(delta.iloc[0], 5) == 2.68549
Ejemplo n.º 2
0
def test_bl_market_prior():
    """Posterior returns from a market-implied prior must lie between the
    prior and the stated views; portfolio performance requires weights."""
    df = get_data()
    S = risk_models.sample_cov(df)

    # read_csv(..., squeeze=True) was removed in pandas 2.0; squeeze the
    # single-column frame into a Series explicitly instead.
    prices = pd.read_csv(
        resource("spy_prices.csv"), parse_dates=True, index_col=0
    ).squeeze("columns")

    delta = black_litterman.market_implied_risk_aversion(prices)

    mcaps = get_market_caps()
    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)

    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
    bl = BlackLittermanModel(S, pi=prior, absolute_views=viewdict)
    rets = bl.bl_returns()

    # posterior must fall between prior and view for every viewed asset
    for v in viewdict:
        assert (prior[v] <= rets[v] <= viewdict[v]) or (viewdict[v] <= rets[v]
                                                        <= prior[v])

    # no weights computed yet -> performance must raise
    with pytest.raises(ValueError):
        bl.portfolio_performance()

    bl.bl_weights(delta)
    np.testing.assert_allclose(
        bl.portfolio_performance(),
        (0.2580693114409672, 0.265445955488424, 0.8968654692926723),
    )
    # Check that bl.cov() has been called and used
    assert bl.posterior_cov is not None
Ejemplo n.º 3
0
def test_bl_tau():
    """Larger tau weights the views more heavily; tiny tau recovers the prior."""
    df = get_data()
    S = risk_models.sample_cov(df)

    # read_csv(..., squeeze=True) was removed in pandas 2.0; squeeze the
    # single-column frame into a Series explicitly instead.
    prices = pd.read_csv(
        resource("spy_prices.csv"), parse_dates=True, index_col=0
    ).squeeze("columns")

    delta = black_litterman.market_implied_risk_aversion(prices)

    mcaps = get_market_caps()
    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)

    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}

    # Need to change omega for this test to work
    omega = np.diag([0.01, 0.01, 0.01, 0.01])

    bl0 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=1e-10, omega=omega
    )
    bl1 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=0.01, omega=omega
    )
    bl2 = BlackLittermanModel(
        S, pi=prior, absolute_views=viewdict, tau=0.1, omega=omega
    )

    # For tiny tau, posterior should roughly equal prior
    np.testing.assert_allclose(bl0.bl_returns(), bl0.pi.flatten(), rtol=1e-5)

    # For bigger tau, GOOG should be given more weight
    assert bl1.bl_returns()["GOOG"] > bl0.bl_returns()["GOOG"]
    assert bl2.bl_returns()["GOOG"] > bl1.bl_returns()["GOOG"]
Ejemplo n.º 4
0
def allocate(df):
    """Compute max-Sharpe portfolio weights via a Black-Litterman model.

    The columns of *df* are tickers; its first row is used as the absolute
    views dictionary. Prices come from Yahoo Finance (2010-2020) and the
    Bovespa index (^BVSP) provides the market benchmark.

    Returns the cleaned weight dict from EfficientFrontier.
    """
    tickers = list(df.columns)  # idiomatic: no need to index by position

    ohlc = yf.download(tickers, start="2010-01-01", end="2020-01-01")
    prices = ohlc["Adj Close"]

    market_prices = yf.download("^BVSP", start="2010-01-01",
                                end="2020-01-01")["Adj Close"]

    # market caps for the market-implied prior
    mcaps = {}
    for t in tickers:
        stock = yf.Ticker(t)
        mcaps[t] = stock.info["marketCap"]

    S = risk_models.CovarianceShrinkage(prices).ledoit_wolf()
    delta = black_litterman.market_implied_risk_aversion(market_prices)

    # NOTE: the explicit market prior was computed here but never used;
    # BlackLittermanModel derives it internally via pi="market".
    bl = BlackLittermanModel(S,
                             pi="market",
                             market_caps=mcaps,
                             risk_aversion=delta,
                             absolute_views=df.to_dict('records')[0])

    ret_bl = bl.bl_returns()
    S_bl = bl.bl_cov()

    ef = EfficientFrontier(ret_bl, S_bl)
    ef.add_objective(objective_functions.L2_reg)
    ef.max_sharpe()
    weights = ef.clean_weights()

    return weights
Ejemplo n.º 5
0
def test_market_implied_prior():
    """Market-implied prior returns: type, index, values, and alternate syntax."""
    df = get_data()
    S = risk_models.sample_cov(df)

    # read_csv(..., squeeze=True) was removed in pandas 2.0; squeeze the
    # single-column frame into a Series explicitly instead.
    prices = pd.read_csv(
        resource("spy_prices.csv"), parse_dates=True, index_col=0
    ).squeeze("columns")
    delta = black_litterman.market_implied_risk_aversion(prices)

    mcaps = get_market_caps()
    pi = black_litterman.market_implied_prior_returns(mcaps, delta, S)
    assert isinstance(pi, pd.Series)
    assert list(pi.index) == list(df.columns)
    assert pi.notnull().all()
    assert pi.dtype == "float64"
    np.testing.assert_array_almost_equal(
        pi.values,
        np.array([
            0.14933293,
            0.2168623,
            0.11219185,
            0.10362374,
            0.28416295,
            0.12196098,
            0.19036819,
            0.08860159,
            0.17724273,
            0.08779627,
            0.0791797,
            0.16460474,
            0.12854665,
            0.08657863,
            0.11230036,
            0.13875465,
            0.15017163,
            0.09066484,
            0.1696369,
            0.13270213,
        ]),
    )

    # dict and Series market caps must give the same prior
    mcaps = pd.Series(mcaps)
    pi2 = black_litterman.market_implied_prior_returns(mcaps, delta, S)
    pd.testing.assert_series_equal(pi, pi2, check_exact=False)

    # Test alternate syntax
    bl = BlackLittermanModel(
        S,
        pi="market",
        market_caps=mcaps,
        absolute_views={"AAPL": 0.1},
        risk_aversion=delta,
    )
    pi = black_litterman.market_implied_prior_returns(mcaps,
                                                      delta,
                                                      S,
                                                      risk_free_rate=0)
    np.testing.assert_array_almost_equal(bl.pi, pi.values.reshape(-1, 1))
def test_market_risk_aversion():
    """Risk aversion for Series and DataFrame input; TypeError for lists."""
    # read_csv(..., squeeze=True) was removed in pandas 2.0; squeeze the
    # single-column frame into a Series explicitly instead.
    prices = pd.read_csv(
        resource("spy_prices.csv"), parse_dates=True, index_col=0
    ).squeeze("columns")
    delta = black_litterman.market_implied_risk_aversion(prices)
    assert np.round(delta, 5) == 2.68549

    # check it works for df
    prices = pd.read_csv(resource("spy_prices.csv"),
                         parse_dates=True,
                         index_col=0)
    delta = black_litterman.market_implied_risk_aversion(prices)
    assert np.round(delta.iloc[0], 5) == 2.68549

    # Check it raises for other types.
    list_invalid = [100.0, 110.0, 120.0, 130.0]
    with pytest.raises(TypeError):
        delta = black_litterman.market_implied_risk_aversion(list_invalid)
Ejemplo n.º 7
0
    def download_stocks(self, source, country):
        """Download price history for each symbol in self.holding.Symbol.

        source -- "yahoo" (via yfinance) or "investing" (via investpy);
        country -- passed to investpy for the "investing" source.

        Cleans zero-price days, records market cap / implied risk aversion
        per symbol into self.details, and accumulates close prices into
        self.stock_prices; optionally writes both to CSV.
        """
        for i, symbol in enumerate(self.holding.Symbol):
            download_success = True
            # Guard: `data` stays None for an unrecognised `source`, so the
            # processing step below is skipped instead of raising NameError.
            data = None
            if source == "yahoo":
                data = yf.download(symbol, start=self.start, end=self.today)

            elif source == "investing":
                try:
                    data = investpy.get_stock_historical_data(
                        stock=symbol.split('.')[0],
                        country=country,
                        from_date=str(self.start.day) + '/' +
                        str(self.start.month) + '/' + str(self.start.year),
                        to_date=str(self.today.day) + '/' +
                        str(self.today.month) + '/' + str(self.today.year))
                except Exception:  # narrowed from bare except; still best-effort
                    print(symbol.split('.')[0] + " not found!")
                    download_success = False
            if download_success and data is not None:  # days with bad data
                bad_days = data[data.Close == 0].index
                if data.shape[0] >= 2:
                    print(data.shape)
                    for bad_day in bad_days:
                        # replace a zero close with the mean close over the
                        # surrounding +/- 5 calendar days
                        avg_close_price = (
                            data.loc[bad_day - dt.timedelta(days=5):bad_day +
                                     dt.timedelta(days=5)].Close)
                        avg_close_price = np.mean(avg_close_price)
                        data.at[bad_day, 'Close'] = avg_close_price
                    # positional indexing with series[-2] is deprecated/removed
                    # in recent pandas; use .iloc explicitly
                    mcap = data["Close"].iloc[-2] * data["Volume"].iloc[-2]
                    delta = black_litterman.market_implied_risk_aversion(
                        data['Close'])
                    # drop symbols whose price range spans more than 20x
                    if (np.max(data.Close) / np.min(data.Close) < 20):
                        self.stock_prices = pd.concat([
                            self.stock_prices,
                            pd.DataFrame({symbol: data.Close})
                        ],
                                                      axis=1)
                        self.details.append([
                            symbol, mcap, delta,
                            np.array(self.holding["Holding"])[3]
                        ])
                        print(symbol + " passed")
                    else:
                        print(symbol + " failed")

        if self.save_output:
            self.stock_prices.to_csv(
                os.path.join(self.output_path,
                             self.name + '_stock_prices.csv'))
            with open(os.path.join(self.output_path,
                                   self.name + '_details.csv'),
                      'w',
                      newline='') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerows(self.details)
Ejemplo n.º 8
0
def test_bl_weights():
    """bl_weights: sums to one, follows view signs, matches known values."""
    df = get_data()
    S = risk_models.sample_cov(df)

    viewdict = {"AAPL": 0.20, "BBY": -0.30, "BAC": 0, "SBUX": -0.2, "T": 0.131321}
    bl = BlackLittermanModel(S, absolute_views=viewdict)

    # read_csv(..., squeeze=True) was removed in pandas 2.0; squeeze the
    # single-column frame into a Series explicitly instead.
    prices = pd.read_csv(
        resource("spy_prices.csv"), parse_dates=True, index_col=0
    ).squeeze("columns")

    delta = black_litterman.market_implied_risk_aversion(prices)
    bl.bl_weights(delta)
    w = bl.clean_weights()
    assert abs(sum(w.values()) - 1) < 1e-5

    # check weights are allocated in same direction as views
    # (in absence of priors)
    assert all(viewdict[t] * w[t] >= 0 for t in viewdict)

    # numerical check
    test_weights = {
        "GOOG": 0.0,
        "AAPL": 1.40675,
        "FB": 0.0,
        "BABA": 0.0,
        "AMZN": 0.0,
        "GE": 0.0,
        "AMD": 0.0,
        "WMT": 0.0,
        "BAC": 0.02651,
        "GM": 0.0,
        "T": 2.81117,
        "UAA": 0.0,
        "SHLD": 0.0,
        "XOM": 0.0,
        "RRC": 0.0,
        "BBY": -1.44667,
        "MA": 0.0,
        "PFE": 0.0,
        "JPM": 0.0,
        "SBUX": -1.79776,
    }
    assert w == test_weights

    # optimize() is an alias that should give identical weights
    bl = BlackLittermanModel(S, absolute_views=viewdict)
    bl.optimize(delta)
    w2 = bl.clean_weights()
    assert w2 == w

    # views may also be supplied as a pd.Series
    bl = BlackLittermanModel(S, absolute_views=pd.Series(viewdict))
    bl.optimize(delta)
    w2 = bl.clean_weights()
    assert w2 == w
Ejemplo n.º 9
0
def test_black_litterman_market_prior():
    """Posterior lies between market-implied prior and views; performance matches."""
    df = get_data()
    S = risk_models.sample_cov(df)

    # read_csv(..., squeeze=True) was removed in pandas 2.0; squeeze the
    # single-column frame into a Series explicitly instead.
    prices = pd.read_csv(
        "tests/spy_prices.csv", parse_dates=True, index_col=0
    ).squeeze("columns")
    delta = black_litterman.market_implied_risk_aversion(prices)

    # approximate market caps in USD
    mcaps = {
        "GOOG": 927e9,
        "AAPL": 1.19e12,
        "FB": 574e9,
        "BABA": 533e9,
        "AMZN": 867e9,
        "GE": 96e9,
        "AMD": 43e9,
        "WMT": 339e9,
        "BAC": 301e9,
        "GM": 51e9,
        "T": 61e9,
        "UAA": 78e9,
        "SHLD": 0,
        "XOM": 295e9,
        "RRC": 1e9,
        "BBY": 22e9,
        "MA": 288e9,
        "PFE": 212e9,
        "JPM": 422e9,
        "SBUX": 102e9,
    }
    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)

    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
    bl = BlackLittermanModel(S, pi=prior, absolute_views=viewdict)
    rets = bl.bl_returns()

    # compare posterior with prior
    for v in viewdict:
        assert (prior[v] <= rets[v] <= viewdict[v]) or (
            viewdict[v] <= rets[v] <= prior[v]
        )

    # no weights computed yet -> performance must raise
    with pytest.raises(ValueError):
        bl.portfolio_performance()

    bl.bl_weights(delta)
    np.testing.assert_allclose(
        bl.portfolio_performance(),
        (0.2580693114409672, 0.265445955488424, 0.8968654692926723),
    )
    # Check that bl.cov() has been called and used
    assert bl.posterior_cov is not None
Ejemplo n.º 10
0
def assign_strategy(ld: LazyDictionary, algo: str) -> tuple:
    """Populate *ld* with optimisation inputs and return the pair
    ``(strategy_callable, kwargs)`` for the chosen *algo*.

    ``algo`` is one of "hrp", "ef-sharpe", "ef-risk", "ef-minvol";
    anything else trips the final assertion.
    """
    assert ld is not None
    # use of black-litterman is based on https://github.com/robertmartin8/PyPortfolioOpt/blob/master/cookbook/4-Black-Litterman-Allocation.ipynb
    # print(market_prices)
    ld["s"] = CovarianceShrinkage(ld["filtered_stocks"]).ledoit_wolf()
    ld["delta"] = market_implied_risk_aversion(ld["market_prices"])

    # use BlackLitterman model to compute returns - hopefully better estimate of returns than extrapolation of historical prices
    # market_prior = market_implied_prior_returns(ld["market_caps"], delta, ld["s"])
    # presumably LazyDictionary evaluates these lambdas on first access —
    # TODO confirm against LazyDictionary's implementation
    ld["bl"] = lambda ld: BlackLittermanModel(
        ld["s"],
        pi="market",
        market_caps=ld["market_caps"],
        risk_aversion=abs(ld["delta"]),
        absolute_views={},
    )
    ld["posterior_total_returns"] = lambda ld: ld["bl"].bl_returns()
    ld["posterior_s"] = lambda ld: ld["bl"].bl_cov()
    ld["mu"] = lambda ld: mean_historical_return(ld["filtered_stocks"])
    ld["returns_from_prices"] = lambda ld: returns_from_prices(ld[
        "filtered_stocks"])

    use_bl = ld["returns_by"] != "by_prices"
    # NOTE(review): these branches look inverted — when use_bl is True the
    # historical mean ("mu") is selected for returns while the BL posterior
    # is selected for the covariance below, and vice versa. Confirm intent
    # before changing.
    kwargs = ({
        "returns": ld["mu"]
    } if use_bl else {
        "returns": ld["posterior_total_returns"]
    })
    if algo != "hrp":
        kwargs["cov_matrix"] = ld["s"] if not use_bl else ld["posterior_s"]
    else:
        # algo is HRP
        kwargs = {"returns": ld["returns_from_prices"]}

    if algo == "hrp":
        ld["title"] = "Hierarchical Risk Parity"
        return (hrp_strategy, kwargs)
    elif algo == "ef-sharpe":
        ld["title"] = "Efficient Frontier - max. sharpe"
        return (ef_sharpe_strategy, kwargs)
    elif algo == "ef-risk":
        ld["title"] = "Efficient Frontier - efficient risk"
        kwargs["target_volatility"] = 5.0
        return (ef_risk_strategy, kwargs)
    elif algo == "ef-minvol":
        ld["title"] = "Efficient Frontier - minimum volatility"
        return (ef_minvol_strategy, kwargs)
    else:
        assert False
0
def calc_black_litterman(market_prices, mkt_caps, covar, config, symbols):
    """Build a Black-Litterman model and return (posterior returns, posterior cov).

    Risk aversion is implied from *market_prices*; views and omega come from
    *config*. Also plots the results against the market-implied prior.
    """
    risk_aversion = black_litterman.market_implied_risk_aversion(market_prices)
    prior = black_litterman.market_implied_prior_returns(
        mkt_caps, risk_aversion, covar)
    views = load_mean_views(config['views'], symbols)
    omega = calc_omega(config, symbols)
    model = BlackLittermanModel(
        covar,
        pi="market",
        market_caps=mkt_caps,
        risk_aversion=risk_aversion,
        absolute_views=views,
        omega=omega,
    )
    posterior_rets = model.bl_returns()
    posterior_cov = model.bl_cov()
    plot_black_litterman_results(posterior_rets, posterior_cov, prior, views)
    return posterior_rets, posterior_cov
Ejemplo n.º 12
0
# Module-level example script: risk-parity optimisation, then Black-Litterman.
ef = EfficientFrontier(mu, S)
# non-convex objective: deviation risk parity, given the covariance matrix
weights = ef.nonconvex_objective(deviation_risk_parity, ef.cov_matrix)
ef.portfolio_performance(verbose=True)
"""
Expected annual return: 22.9%
Annual volatility: 19.2%
Sharpe Ratio: 1.09
"""

# Black-Litterman
# NOTE(review): read_csv(..., squeeze=True) was removed in pandas 2.0 —
# replace with .squeeze("columns") when upgrading.
spy_prices = pd.read_csv("tests/spy_prices.csv",
                         parse_dates=True,
                         index_col=0,
                         squeeze=True)
delta = black_litterman.market_implied_risk_aversion(spy_prices)

# approximate market caps in USD (dict appears truncated in this excerpt)
mcaps = {
    "GOOG": 927e9,
    "AAPL": 1.19e12,
    "FB": 574e9,
    "BABA": 533e9,
    "AMZN": 867e9,
    "GE": 96e9,
    "AMD": 43e9,
    "WMT": 339e9,
    "BAC": 301e9,
    "GM": 51e9,
    "T": 61e9,
    "UAA": 78e9,
    "SHLD": 0,
Ejemplo n.º 13
0
# Module-level script: download each holding's price history, clean bad days,
# and collect per-symbol stats (market cap, implied risk aversion, holding).
timeseries_data = pd.DataFrame()
mcap_data = pd.DataFrame()
portfolio.symbol_stats = []
for i, symbol in enumerate(portfolio.holding.Symbol):
    # crude progress indicator (percent of symbols processed)
    print(i * 100 / len(portfolio.holding))
    data = yf.download(symbol, start=start, end=today)
    # days with bad data
    bad_days = data[data.Close == 0].index

    for bad_day in bad_days:
        # replace a zero close with the mean close over +/- 5 calendar days
        avg_close_price = (data.loc[bad_day - dt.timedelta(days=5):bad_day +
                                    dt.timedelta(days=5)].Close)
        avg_close_price = np.mean(avg_close_price)
        data.at[bad_day, 'Close'] = avg_close_price
    # NOTE(review): positional series[-2] indexing is deprecated in recent
    # pandas — prefer .iloc[-2] when upgrading.
    mcap = data["Close"][-2] * data["Volume"][-2]
    delta = black_litterman.market_implied_risk_aversion(data['Close'])
    # drop symbols whose price range spans more than 20x over the period
    if (np.max(data.Close) / np.min(data.Close) < 20):
        timeseries_data = pd.concat(
            [timeseries_data,
             pd.DataFrame({symbol: data.Close})], axis=1)
        portfolio.symbol_stats.append(
            [symbol, mcap, delta,
             np.array(portfolio.holding["Holding"])[3]])
        print(symbol + " passed")
    else:
        print(symbol + " failed")

# drop symbols that produced no data at all
timeseries_data = timeseries_data.dropna(axis=1, how='all')

# SAVE FILES
# timeseries_data.to_csv(os.path.join(path,'stock_prices.csv'))
Ejemplo n.º 14
0
def BLmain():
    """Excel-driven Black-Litterman optimisation (xlwings entry point).

    Reads all parameters from the calling workbook's 'Optim' sheet, fits a
    Black-Litterman model on the training window, then writes the resulting
    weights and expected returns (plus optional efficient frontier and
    correlation heatmaps) back into the workbook.
    """

    # Excel handles for the calling workbook
    sht = xw.Book.caller().sheets['Optim']
    shtdata = xw.Book.caller().sheets['Data']
    sht.range('J17').value = 'Optimizing...'

    # Clear previous outputs
    sht.range('L23').expand().clear_contents()
    shtdata.range('A1').expand().clear_contents()
    shtdata.range('J1').expand().clear_contents()

    # Set variables from excel
    rf = sht.range('J10').value
    MinWeight = sht.range('J11').value
    MaxWeight = sht.range('J12').value
    Delta = sht.range('J13').value
    Tau = sht.range('J14').value
    Output = sht.range('J15').value
    ModelOptim = sht.range('J8').value
    RiskModel = sht.range('J9').value
    listticker = xw.Range('B3').expand().value
    indexname = sht.range('J7').value
    startdate = sht.range('J3').value
    enddate = sht.range('J6').value
    EFBool = sht.range('J16').value
    traintestdate = sht.range(
        'J4'
    ).value  #Dataset is divided in two sub: train (optimization) and test for backtest

    #Initializing
    train, test = initialize(startdate, enddate, traintestdate, listticker)
    trainindex, testindex = initializeIndex(startdate, enddate, traintestdate,
                                            indexname)  #for risk aversion

    #Black Litterman
    # NOTE(review): S is unbound if RiskModel is neither recognised value —
    # confirm the sheet constrains it to these two options.
    if RiskModel == 'historicalcov':
        S = risk_models.sample_cov(train)
    elif RiskModel == 'exphistoricalcov':
        S = risk_models.exp_cov(train)

    # identity check (PEP 8): `is not None`, not `!= None`
    if Delta is not None:
        delta = Delta
    else:
        delta = black_litterman.market_implied_risk_aversion(trainindex,
                                                             risk_free_rate=rf)

    s = data.get_quote_yahoo(listticker)['marketCap']
    mcaps = {tick: mcap
             for tick, mcap in zip(listticker, s)
             }  #Dictionary of market cap for each stock

    #Expected returns implied from the market
    prior = black_litterman.market_implied_prior_returns(mcaps,
                                                         delta,
                                                         S,
                                                         risk_free_rate=rf)
    views, picking = createviews(listticker)
    bl = BlackLittermanModel(S, Q=views, P=picking, pi=prior, tau=Tau)
    rets = bl.bl_returns()
    cov = bl.bl_cov()

    #Two ways of displaying outputs: either using Optimizer, either returning implied weights
    if Output == 'Optimization':
        ef = EfficientFrontier(rets, S, weight_bounds=(MinWeight, MaxWeight))
        #RiskModel
        if ModelOptim == 'min_volatility':
            raw_weights = ef.min_volatility()
        elif ModelOptim == 'max_sharpe':
            raw_weights = ef.max_sharpe()
        cleaned_weights = ef.clean_weights()
        finalw = [cleaned_weights.get(i, 1) for i in listticker]
        perf = ef.portfolio_performance(verbose=True, risk_free_rate=rf)
        sht.range('H21').value = perf

    elif Output == 'Return-Implied-Weight':
        bl.bl_weights(delta)
        weights = bl.clean_weights()
        finalw = [weights.get(i, 1) for i in listticker]
    finalr = [rets.get(i, 1) for i in listticker]  #E(R) from BL

    #Display results
    sht.range('L23').options(transpose=True).value = listticker
    sht.range('M23').options(transpose=True).value = finalw
    sht.range('N23').options(transpose=True).value = finalr

    #Copy Data in Data Range
    shtdata.range((1, 1)).value = train
    shtdata.range((1, len(listticker) + 3)).value = test
    #numshares, left = getoptimprices(test, cleanW, InitialAmountInPortfolio)

    #Visualisation
    sht.charts['BLweights'].set_source_data(
        sht.range((23, 12), (22 + len(listticker), 13)))
    CorrMap(sht, 'CorrMatPrior', S, 'coolwarm')
    CorrMap(sht, 'CorrMatBL', cov, 'YlGn')
    if EFBool == "YES":
        effrontier(rets, S, sht, 'EFBL')

    #Done
    sht.range('J17').value = 'Optimization Done'
Ejemplo n.º 15
0
def test_market_implied_prior():
    """Market-implied prior: type, index alignment, known values, Series caps."""
    df = get_data()
    S = risk_models.sample_cov(df)

    # read_csv(..., squeeze=True) was removed in pandas 2.0; squeeze the
    # single-column frame into a Series explicitly instead.
    prices = pd.read_csv(
        "tests/spy_prices.csv", parse_dates=True, index_col=0
    ).squeeze("columns")
    delta = black_litterman.market_implied_risk_aversion(prices)

    # approximate market caps in USD
    mcaps = {
        "GOOG": 927e9,
        "AAPL": 1.19e12,
        "FB": 574e9,
        "BABA": 533e9,
        "AMZN": 867e9,
        "GE": 96e9,
        "AMD": 43e9,
        "WMT": 339e9,
        "BAC": 301e9,
        "GM": 51e9,
        "T": 61e9,
        "UAA": 78e9,
        "SHLD": 0,
        "XOM": 295e9,
        "RRC": 1e9,
        "BBY": 22e9,
        "MA": 288e9,
        "PFE": 212e9,
        "JPM": 422e9,
        "SBUX": 102e9,
    }
    pi = black_litterman.market_implied_prior_returns(mcaps, delta, S)

    assert isinstance(pi, pd.Series)
    assert list(pi.index) == list(df.columns)
    assert pi.notnull().all()
    assert pi.dtype == "float64"
    np.testing.assert_array_almost_equal(
        pi.values,
        np.array(
            [
                0.14933293,
                0.2168623,
                0.11219185,
                0.10362374,
                0.28416295,
                0.12196098,
                0.19036819,
                0.08860159,
                0.17724273,
                0.08779627,
                0.0791797,
                0.16460474,
                0.12854665,
                0.08657863,
                0.11230036,
                0.13875465,
                0.15017163,
                0.09066484,
                0.1696369,
                0.13270213,
            ]
        ),
    )

    # dict and Series market caps must give the same prior
    mcaps = pd.Series(mcaps)
    pi2 = black_litterman.market_implied_prior_returns(mcaps, delta, S)
    pd.testing.assert_series_equal(pi, pi2, check_exact=False)
Ejemplo n.º 16
0
def test_bl_tau():
    """Larger tau weights the views more heavily; tiny tau recovers the prior."""
    df = get_data()
    S = risk_models.sample_cov(df)

    # read_csv(..., squeeze=True) was removed in pandas 2.0; squeeze the
    # single-column frame into a Series explicitly instead.
    prices = pd.read_csv(
        "tests/spy_prices.csv", parse_dates=True, index_col=0
    ).squeeze("columns")
    delta = black_litterman.market_implied_risk_aversion(prices)

    # approximate market caps in USD
    mcaps = {
        "GOOG": 927e9,
        "AAPL": 1.19e12,
        "FB": 574e9,
        "BABA": 533e9,
        "AMZN": 867e9,
        "GE": 96e9,
        "AMD": 43e9,
        "WMT": 339e9,
        "BAC": 301e9,
        "GM": 51e9,
        "T": 61e9,
        "UAA": 78e9,
        "SHLD": 0,
        "XOM": 295e9,
        "RRC": 1e9,
        "BBY": 22e9,
        "MA": 288e9,
        "PFE": 212e9,
        "JPM": 422e9,
        "SBUX": 102e9,
    }
    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)

    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}

    # Need to change omega for this test to work
    omega = np.diag([0.01, 0.01, 0.01, 0.01])

    bl0 = BlackLittermanModel(S,
                              pi=prior,
                              absolute_views=viewdict,
                              tau=1e-10,
                              omega=omega)
    bl1 = BlackLittermanModel(S,
                              pi=prior,
                              absolute_views=viewdict,
                              tau=0.01,
                              omega=omega)
    bl2 = BlackLittermanModel(S,
                              pi=prior,
                              absolute_views=viewdict,
                              tau=0.1,
                              omega=omega)

    # For tiny tau, posterior should roughly equal prior
    np.testing.assert_allclose(bl0.bl_returns(), bl0.pi.flatten(), rtol=1e-5)

    # For bigger tau, GOOG should be given more weight
    assert bl1.bl_returns()["GOOG"] > bl0.bl_returns()["GOOG"]
    assert bl2.bl_returns()["GOOG"] > bl1.bl_returns()["GOOG"]