def test_semicovariance():
    """Semicovariance matrix: shape, labels, completeness, frequency scaling."""
    prices = get_data()
    semicov = risk_models.semicovariance(prices)
    # Square matrix labelled by the 20 tickers, with no missing entries.
    assert semicov.shape == (20, 20)
    assert semicov.index.equals(prices.columns)
    assert semicov.index.equals(semicov.columns)
    assert semicov.notnull().all().all()
    # frequency=2 vs the default 252 rescales the matrix by 2/252 == 1/126.
    semicov_freq2 = risk_models.semicovariance(prices, frequency=2)
    pd.testing.assert_frame_equal(semicov / 126, semicov_freq2)
def test_semicovariance_benchmark():
    """Semicovariance vanishes below an unreachable benchmark and grows with it."""
    prices = get_data()
    # No daily return ever drops below -50%, so every downside term is zero.
    very_low = risk_models.semicovariance(prices, benchmark=-0.5)
    np.testing.assert_allclose(very_low, 0, atol=1e-4)
    # A higher benchmark classifies more observations as "down", so the total
    # semicovariance mass should increase on average.
    at_zero = risk_models.semicovariance(prices, benchmark=0)
    at_one = risk_models.semicovariance(prices, benchmark=1)
    assert at_one.sum().sum() > at_zero.sum().sum()
def test_semicovariance():
    """Semicovariance: labels, PSD-ness, frequency scaling, ndarray warning."""
    prices = get_data()
    semicov = risk_models.semicovariance(prices)
    assert semicov.shape == (20, 20)
    assert semicov.index.equals(prices.columns)
    assert semicov.index.equals(semicov.columns)
    assert semicov.notnull().all().all()
    assert risk_models._is_positive_semidefinite(semicov)
    # frequency=2 vs the default 252 rescales the matrix by 2/252 == 1/126.
    semicov_freq2 = risk_models.semicovariance(prices, frequency=2)
    pd.testing.assert_frame_equal(semicov / 126, semicov_freq2)
    # A raw ndarray input works too, but should raise a RuntimeWarning.
    with pytest.warns(RuntimeWarning):
        semicov_np = risk_models.semicovariance(prices.to_numpy(), frequency=2)
    np.testing.assert_equal(semicov_np, semicov_freq2.to_numpy())
def test_efficient_semivariance_vs_heuristic():
    """The true semivariance optimiser should dominate the pairwise heuristic."""
    benchmark = 0
    es = setup_efficient_semivariance()
    es.efficient_return(0.20)
    mu_es, semi_deviation, _ = es.portfolio_performance()
    np.testing.assert_almost_equal(mu_es, 0.2)

    # Heuristic: feed the pairwise semicovariance matrix to a plain
    # mean-variance frontier and target the same return.
    mean_return, historic_returns = setup_efficient_semivariance(data_only=True)
    pairwise_semivariance = risk_models.semicovariance(
        historic_returns, returns_data=True, benchmark=0, frequency=1
    )
    ef = EfficientFrontier(mean_return, pairwise_semivariance)
    ef.efficient_return(0.20)
    mu_ef, _, _ = ef.portfolio_performance()

    # Realised annualised semideviation of the heuristic portfolio.
    portfolio_returns = historic_returns @ ef.weights
    drops = np.fmin(portfolio_returns - benchmark, 0)
    T = historic_returns.shape[0]
    semivariance = np.sum(np.square(drops)) / T * 252
    semi_deviation_ef = np.sqrt(semivariance)

    assert semi_deviation < semi_deviation_ef
    assert mu_es / semi_deviation > mu_ef / semi_deviation_ef
def test_efficient_semivariance_vs_heuristic_weekly():
    """Weekly-frequency version of the semivariance-vs-heuristic comparison.

    Both optimisers are fed weekly (resampled) returns, so the realised
    semideviation of the heuristic portfolio must also be computed from the
    weekly series.
    """
    benchmark = 0
    _, historic_returns = setup_efficient_semivariance(data_only=True)
    weekly_returns = historic_returns.resample("W").sum()
    mean_weekly_returns = weekly_returns.mean(axis=0)

    es = EfficientSemivariance(mean_weekly_returns, weekly_returns, frequency=52)
    es.efficient_return(0.20 / 52)
    mu_es, semi_deviation, _ = es.portfolio_performance()

    pairwise_semivariance = risk_models.semicovariance(
        weekly_returns, returns_data=True, benchmark=0, frequency=1
    )
    ef = EfficientFrontier(mean_weekly_returns, pairwise_semivariance)
    ef.efficient_return(0.20 / 52)
    mu_ef, _, _ = ef.portfolio_performance()

    # FIX: previously this used the *daily* `historic_returns` while dividing
    # by the weekly observation count T and annualising by 52 — a frequency
    # mismatch that inflated the heuristic's semideviation. Use the same
    # weekly series the optimisers saw.
    portfolio_returns = weekly_returns @ ef.weights
    drops = np.fmin(portfolio_returns - benchmark, 0)
    T = weekly_returns.shape[0]
    semivariance = np.sum(np.square(drops)) / T * 52
    semi_deviation_ef = np.sqrt(semivariance)

    assert semi_deviation < semi_deviation_ef
    assert mu_es / semi_deviation > mu_ef / semi_deviation_ef
def test_efficient_return_semicovariance():
    """Target-return optimisation with a semicovariance risk matrix."""
    prices = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.efficient_return(0.12)
    assert isinstance(weights, dict)
    tickers = set(ef.tickers)
    assert set(weights.keys()) == tickers
    assert set(ef.expected_returns.index) == set(weights.keys())
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.11999999997948813, 0.06948386215256849, 1.4391830977949114)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_min_volatilty_semicovariance_L2_reg():
    """Min-volatility optimisation with a semicovariance risk matrix."""
    prices = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.min_volatility()
    assert isinstance(weights, dict)
    assert set(weights.keys()) == set(ef.tickers)
    assert set(weights.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.20661406151867523, 0.05551598140785206, 3.3614475829527706)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_max_sharpe_semicovariance():
    """CLA max-Sharpe with the covariance replaced by a semicovariance matrix."""
    prices = get_data()
    cla = setup_cla()
    cla.covar = risk_models.semicovariance(prices, benchmark=0)
    weights = cla.max_sharpe()
    assert isinstance(weights, dict)
    assert set(weights.keys()) == set(cla.tickers)
    np.testing.assert_almost_equal(cla.weights.sum(), 1)
    expected = (0.3253436663900292, 0.21333530089904357, 1.4312852355106793)
    np.testing.assert_allclose(cla.portfolio_performance(), expected)
def test_cla_max_sharpe_semicovariance():
    """CLA max-Sharpe using the semicovariance values as the risk matrix."""
    prices = get_data()
    cla = setup_cla()
    cla.cov_matrix = risk_models.semicovariance(prices, benchmark=0).values
    weights = cla.max_sharpe()
    assert isinstance(weights, dict)
    assert set(weights.keys()) == set(cla.tickers)
    np.testing.assert_almost_equal(cla.weights.sum(), 1)
    expected = (0.2936179968144084, 0.06362345488289835, 4.300583759841616)
    np.testing.assert_allclose(cla.portfolio_performance(), expected)
def test_cla_max_sharpe_semicovariance():
    """CLA max-Sharpe using the semicovariance values as the risk matrix."""
    prices = get_data()
    cla = setup_cla()
    cla.cov_matrix = risk_models.semicovariance(prices, benchmark=0).values
    weights = cla.max_sharpe()
    assert isinstance(weights, dict)
    assert set(cla.tickers) == set(weights.keys())
    np.testing.assert_almost_equal(cla.weights.sum(), 1)
    expected = (0.2686858719299194, 0.06489248187610204, 3.8322755539652578)
    np.testing.assert_allclose(cla.portfolio_performance(), expected)
def test_max_sharpe_semicovariance():
    """Max-Sharpe optimisation with a semicovariance risk matrix."""
    prices = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.max_sharpe()
    assert isinstance(weights, dict)
    assert set(weights.keys()) == set(ef.tickers)
    assert set(weights.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.2972237371625498, 0.06443267303123411, 4.302533545801584)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_efficient_return_semicovariance():
    """Target-return optimisation with a semicovariance risk matrix."""
    prices = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.efficient_return(0.12)
    assert isinstance(weights, dict)
    assert set(weights.keys()) == set(ef.tickers)
    assert set(weights.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.12000000000871075, 0.06948386214063361, 1.4319423610177537)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_max_sharpe_short_semicovariance():
    """Max-Sharpe with shorting allowed and a semicovariance risk matrix."""
    prices = get_data()
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(-1, 1)
    )
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.max_sharpe()
    assert isinstance(weights, dict)
    assert set(weights.keys()) == set(ef.tickers)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.3907992623559733, 0.0809285460933456, 4.581810501430255)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_max_sharpe_semicovariance():
    """CLA max-Sharpe with the covariance replaced by a semicovariance matrix."""
    prices = get_data()
    cla = setup_cla()
    cla.covar = risk_models.semicovariance(prices, benchmark=0)
    weights = cla.max_sharpe()
    assert isinstance(weights, dict)
    assert set(cla.tickers) == set(weights.keys())
    np.testing.assert_almost_equal(cla.weights.sum(), 1)
    expected = (0.3253436657555845, 0.2133353004830236, 1.4281588303044812)
    np.testing.assert_allclose(cla.portfolio_performance(), expected)
def test_max_sharpe_semicovariance():
    """Max-Sharpe with a semicovariance risk matrix; long-only weights."""
    prices = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.max_sharpe()
    assert isinstance(weights, dict)
    assert set(weights.keys()) == set(ef.tickers)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    # Default bounds are long-only, so no weight may be negative.
    assert min(weights.values()) >= 0
    expected = (0.2732301946250426, 0.06603231922971581, 3.834943215368455)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_max_sharpe_semicovariance():
    """Max-Sharpe optimisation with a semicovariance risk matrix."""
    prices = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.max_sharpe()
    assert isinstance(weights, dict)
    tickers = set(ef.tickers)
    assert set(weights.keys()) == tickers
    assert set(ef.expected_returns.index) == set(weights.keys())
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.2972237362989219, 0.064432672830601, 4.297294313174586)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_max_sharpe_short_semicovariance():
    """Max-Sharpe with shorting allowed and a semicovariance risk matrix."""
    prices = get_data()
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(-1, 1)
    )
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.max_sharpe()
    assert isinstance(weights, dict)
    assert set(weights.keys()) == set(ef.tickers)
    assert set(weights.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.3564654865246848, 0.07202031837368413, 4.671813373260894)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_max_sharpe_semicovariance():
    """Max-Sharpe with a semicovariance risk matrix; long-only weights."""
    prices = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.max_sharpe()
    assert isinstance(weights, dict)
    assert set(weights.keys()) == set(ef.tickers)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    # Default bounds are long-only, so every weight is non-negative.
    assert min(weights.values()) >= 0
    expected = (0.2972184894480104, 0.06443145011260347, 4.302533762060766)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_max_sharpe_short_semicovariance():
    """Max-Sharpe with shorting allowed and a semicovariance risk matrix."""
    prices = get_data()
    mu, hist = setup_efficient_frontier(data_only=True)
    ef = EfficientFrontier(mu, hist, weight_bounds=(-1, 1))
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.max_sharpe()
    assert isinstance(weights, dict)
    assert set(weights.keys()) == set(ef.tickers)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.3564305116656491, 0.07201282488003401, 4.671813836300796)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_max_sharpe_short_semicovariance():
    """Max-Sharpe with shorting allowed and a semicovariance risk matrix."""
    prices = get_data()
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(-1, 1)
    )
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.max_sharpe()
    assert isinstance(weights, dict)
    assert set(ef.tickers) == set(weights.keys())
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.42444834528495234, 0.0898263632679403, 4.50255727350929)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_efficient_return_semicovariance():
    """Target-return optimisation with a semicovariance risk matrix."""
    prices = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.efficient_return(0.12)
    assert isinstance(weights, dict)
    tickers = set(ef.tickers)
    assert set(weights.keys()) == tickers
    assert set(ef.expected_returns.index) == set(weights.keys())
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.12000000000871075, 0.06948386214063361, 1.4319423610177537)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_max_sharpe_semicovariance():
    """Max-Sharpe with a semicovariance risk matrix; long-only weights."""
    prices = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.max_sharpe()
    assert isinstance(weights, dict)
    assert set(weights.keys()) == set(ef.tickers)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    # Default bounds are long-only, so no weight may be negative.
    assert min(weights.values()) >= 0
    expected = (0.2762965426962885, 0.07372667096108301, 3.476307004714425)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_cla_max_sharpe_semicovariance():
    """CLA max-Sharpe using semicovariance values, with loose tolerances."""
    prices = get_data()
    cla = setup_cla()
    cla.cov_matrix = risk_models.semicovariance(prices, benchmark=0).values
    weights = cla.max_sharpe()
    assert isinstance(weights, dict)
    assert set(weights.keys()) == set(cla.tickers)
    np.testing.assert_almost_equal(cla.weights.sum(), 1)
    expected = (0.2721798377099145, 0.07258537193305141, 3.474251505420551)
    # Looser tolerances: the CLA turning-point solution is less reproducible.
    np.testing.assert_allclose(
        cla.portfolio_performance(), expected, atol=1e-4, rtol=1e-4
    )
def test_min_volatilty_semicovariance_L2_reg():
    """Min-volatility with L2 regularisation and a semicovariance matrix."""
    prices = get_data()
    ef = setup_efficient_frontier()
    # Enable L2 regularisation via the frontier's gamma attribute.
    ef.gamma = 1
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.min_volatility()
    assert isinstance(weights, dict)
    assert set(weights.keys()) == set(ef.tickers)
    assert set(weights.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    # Long-only bounds: every weight must be non-negative.
    assert min(weights.values()) >= 0
    expected = (0.23803779483710888, 0.0962263031034166, 2.265885603053655)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_max_sharpe_semicovariance():
    """Max-Sharpe optimisation with a semicovariance risk matrix."""
    prices = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.max_sharpe()
    assert isinstance(weights, dict)
    assert set(ef.tickers) == set(weights.keys())
    assert set(ef.expected_returns.index) == set(weights.keys())
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.2972237362989219, 0.064432672830601, 4.297294313174586)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_min_volatilty_semicovariance_L2_reg():
    """Min-volatility optimisation with a semicovariance risk matrix."""
    prices = get_data()
    ef = setup_efficient_frontier()
    ef.cov_matrix = risk_models.semicovariance(prices, benchmark=0)
    weights = ef.min_volatility()
    assert isinstance(weights, dict)
    tickers = set(ef.tickers)
    assert set(weights.keys()) == tickers
    assert set(ef.expected_returns.index) == set(weights.keys())
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.20661406122127524, 0.055515981410304394, 3.3567606718215663)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def handle_data(context, data):
    """Rebalance the portfolio on scheduled dates via efficient-frontier weights.

    On each date in ``context.balance_dates``: collect close-price history per
    stock, estimate expected returns, build the risk matrix selected by
    ``context.cov_method``, optimise per ``context.opt_criterion``, optionally
    clean the weights, and submit target-percent orders.

    Raises:
        ValueError: if ``context.cov_method`` or ``context.opt_criterion`` is
            not one of the supported options.
    """
    date = data.today()
    if date not in context.balance_dates:
        return  # not a rebalancing day

    # Gather close-price history per stock; skip stocks with no data.
    temp = {}
    for code in context.stocks:
        history_price = data.history_bars(
            code, context.expected_return_days, '1d', 'close'
        )
        if history_price is not None:
            temp.update({code: history_price})
    history_prices = pd.DataFrame(temp)

    mu = expected_returns.mean_historical_return(history_prices)
    if context.cov_method == 'sample':
        S = risk_models.sample_cov(history_prices)
    elif context.cov_method == 'semi':
        S = risk_models.semicovariance(history_prices)
    elif context.cov_method == 'exp_cov':
        S = risk_models.exp_cov(history_prices)
    else:
        # Previously an unknown method fell through and later raised a
        # confusing NameError on S; fail fast with a clear message instead.
        raise ValueError("Unknown cov_method: %r" % (context.cov_method,))

    ef = EfficientFrontier(mu, S)
    if context.opt_criterion == 'max_sharpe':
        weights = ef.max_sharpe()
    elif context.opt_criterion == 'efficient_return':
        weights = ef.efficient_return(context.target_return)
    elif context.opt_criterion == 'efficient_risk':
        # NOTE(review): `targe_risk` looks like a typo for `target_risk`, but
        # it must match the attribute actually set on the context — confirm
        # before renaming.
        weights = ef.efficient_risk(context.targe_risk, context.risk_free_rate)
    elif context.opt_criterion == 'min_volatility':
        weights = ef.min_volatility()
    else:
        raise ValueError("Unknown opt_criterion: %r" % (context.opt_criterion,))

    if context.cleaned_weights is True:
        weights = ef.clean_weights()

    # Submit one target-percent order per stock at the latest price.
    weight = []
    prices = []
    for code in context.stocks:
        weight.append(weights[code])
        prices.append(data.latest_price(code, "1d"))
    data.order_target_percent(context.stocks, weight, prices)
def handle_bar(context, api):
    """Optimise portfolio weights each bar and submit target-percent orders.

    Mirrors ``handle_data``: builds price history for every stock, estimates
    expected returns and the risk matrix chosen by ``context.cov_method``,
    optimises per ``context.opt_criterion``, and orders to the target weights.

    Raises:
        ValueError: if ``context.cov_method`` or ``context.opt_criterion`` is
            not one of the supported options.
    """
    date = api.now()
    # if date in context.balance_dates:   # rebalance-date gating disabled

    # Gather close-price history for every stock in the universe.
    history_prices = {}
    for stock in context.stocks:
        history_price = api.history_bars(
            stock, context.expected_return_days, '1d', 'close'
        )
        history_prices.update({stock: history_price})
    history_prices = pd.DataFrame(history_prices)

    mu = expected_returns.mean_historical_return(history_prices)
    if context.cov_method == 'sample':
        S = risk_models.sample_cov(history_prices)
    elif context.cov_method == 'semi':
        S = risk_models.semicovariance(history_prices)
    elif context.cov_method == 'exp_cov':
        S = risk_models.exp_cov(history_prices)
    else:
        # Fail fast instead of a later NameError on an undefined S.
        raise ValueError("Unknown cov_method: %r" % (context.cov_method,))

    ef = EfficientFrontier(mu, S)
    if context.opt_criterion == 'max_sharpe':
        weights = ef.max_sharpe()
    elif context.opt_criterion == 'efficient_return':
        weights = ef.efficient_return(context.target_return)
    elif context.opt_criterion == 'efficient_risk':
        # NOTE(review): `targe_risk` looks like a typo for `target_risk`, but
        # it must match the attribute actually set on the context — confirm.
        weights = ef.efficient_risk(context.targe_risk, context.risk_free_rate)
    elif context.opt_criterion == 'min_volatility':
        weights = ef.min_volatility()
    else:
        raise ValueError("Unknown opt_criterion: %r" % (context.opt_criterion,))

    if context.cleaned_weights is True:
        weights = ef.clean_weights()

    prices = []
    weight = []
    for stock in context.stocks:
        weight.append(weights[stock])
        prices.append(api.latest_price(stock, "1d"))
    # FIX: the original passed the undefined name `stocks`, which raised a
    # NameError at order time; the stock list lives on the context object.
    api.order_target_percent(context.stocks, weight, prices)
tickers = ['BLK', "BAC", "AAPL", "TM", "WMT", "JD"] # "INTU", "MA", "UL", "CVS", # "DIS", "AMD", "NVDA", "PBI", "TGT"] ohlc = yf.download(tickers, period="max") prices = ohlc["Adj Close"] prices.tail() #various types of expected returns; these will be used for expected future returns mu = expected_returns.james_stein_shrinkage(prices) mum = expected_returns.mean_historical_return(prices) mummy = expected_returns.capm_return(prices) #different risk models; guessing that you can also use the pandas.cov function S = risk_models.semicovariance(prices) T = risk_models.CovarianceShrinkage(prices).ledoit_wolf() plotting.plot_covariance(S) plotting.plot_covariance(T) #equal weights initial_weights = np.array([1 / len(tickers)] * len(tickers)) print(initial_weights) #transaction cost objective ef = EfficientFrontier(mum, T) # 1% broker commission ef.add_objective(objective_functions.transaction_cost, w_prev=initial_weights, k=0.01)