def test_risk_matrix_and_returns_data():
    """Exercise the ``risk_matrix`` switcher for every supported estimator.

    For each method, the covariance estimated from prices must equal the
    one estimated from the corresponding returns (``returns_data=True``).
    """
    prices = get_data()
    estimators = (
        "sample_cov",
        "semicovariance",
        "exp_cov",
        # FIXME: this fails
        "min_cov_determinant",
        "ledoit_wolf",
        "ledoit_wolf_constant_variance",
        "ledoit_wolf_single_factor",
        "ledoit_wolf_constant_correlation",
        "oracle_approximating",
    )
    for estimator in estimators:
        cov_from_prices = risk_models.risk_matrix(prices, method=estimator)
        assert cov_from_prices.shape == (20, 20)
        assert cov_from_prices.notnull().all().all()
        assert risk_models._is_positive_semidefinite(cov_from_prices)
        cov_from_returns = risk_models.risk_matrix(
            expected_returns.returns_from_prices(prices),
            returns_data=True,
            method=estimator,
        )
        pd.testing.assert_frame_equal(cov_from_prices, cov_from_returns)
def main():
    """Run every portfolio optimizer on Black-Litterman inputs and persist the weights.

    Writes ``portfolio_weight_results.csv`` and renders a heatmap of the
    per-method weightings.
    """
    prices, market_prices, mkt_caps, symbols, config = load_data()
    # Alternative covariance estimators kept for experimentation:
    #covar = risk_models.risk_matrix(prices, method='exp_cov', span=180)
    #covar = risk_models.risk_matrix(prices, method='semicovariance')
    #covar = risk_models.CovarianceShrinkage(prices).ledoit_wolf()
    covar = risk_models.risk_matrix(prices, method='oracle_approximating')
    rets_bl, covar_bl = calc_black_litterman(market_prices, mkt_caps, covar, config, symbols)

    # Each optimizer returns a one-column weights frame (plus its model object).
    kelly_w = kelly_optimize(rets_bl, covar_bl, config)
    max_quad_util_w, max_quad_util_ef = max_quad_utility_weights(rets_bl, covar_bl, config)
    min_vol_w, min_vol_ef = min_volatility_weights(rets_bl, covar_bl, config)
    max_sharpe_w, max_sharpe_ef = max_sharpe_weights(rets_bl, covar_bl, config)
    cla_max_sharpe_w, cla_max_sharpe_cla = cla_max_sharpe_weights(rets_bl, covar_bl, config)
    cla_min_vol_w, cla_min_vol_cla = cla_min_vol_weights(rets_bl, covar_bl, config)

    # Efficient-frontier plots (disabled):
    #ax = plotting.plot_efficient_frontier(cla_max_sharpe_cla, showfig=False)
    #plt.title('Efficient Frontier via CLA Max Sharpe Optimization')
    #plt.show()
    #ax = plotting.plot_efficient_frontier(cla_min_vol_cla, showfig=False)
    #plt.title('Efficient Frontier via CLA Min Volatility Optimization')
    #plt.show()

    # Join all weight frames on the security index, in the original order.
    weights_df = kelly_w
    for frame in (max_quad_util_w, max_sharpe_w, cla_max_sharpe_w, min_vol_w, cla_min_vol_w):
        weights_df = pd.merge(weights_df, frame, left_index=True, right_index=True)
    weights_df.to_csv('portfolio_weight_results.csv')
    plot_heatmap(weights_df, 'Portfolio Weighting (%)', 'Optimization Method', 'Security')
def test_risk_matrix_additional_kwargs():
    """Verify that extra keyword arguments are forwarded to the estimators."""
    prices = get_data()

    # frequency scales the sample covariance linearly: default 252 vs 2 -> /126.
    base_cov = risk_models.sample_cov(prices)
    scaled_cov = risk_models.risk_matrix(prices, frequency=2)
    pd.testing.assert_frame_equal(base_cov / 126, scaled_cov)

    # semicovariance accepts benchmark and frequency kwargs.
    semi_cov = risk_models.risk_matrix(
        prices, method="semicovariance", benchmark=0.0004, frequency=52
    )
    assert semi_cov.shape == (20, 20)
    assert semi_cov.notnull().all().all()
    assert risk_models._is_positive_semidefinite(semi_cov)

    # exp_cov accepts span and fix_method kwargs, and returns_data input.
    exp_cov = risk_models.risk_matrix(
        expected_returns.returns_from_prices(prices),
        returns_data=True,
        method="exp_cov",
        span=60,
        fix_method="diag",
    )
    assert exp_cov.shape == (20, 20)
    assert exp_cov.notnull().all().all()
    assert risk_models._is_positive_semidefinite(exp_cov)
def test_risk_matrix_not_implemented():
    """An unknown method name must raise ``NotImplementedError``."""
    prices = get_data()
    with pytest.raises(NotImplementedError):
        risk_models.risk_matrix(prices, method="fancy_new!")
# NOTE(review): this chunk starts mid-script — `returns3` is defined earlier,
# outside this view; presumably it is an index price series. TODO confirm.
returns3=pd.to_numeric(returns3)
# Benchmark returns; `raw_return` is a project helper — assumed to compute
# simple (lagged) returns from prices. TODO confirm against its definition.
d4 = raw_return(prices=returns3, lag=True)
# Annualized volatility of the benchmark; used below as the EF risk target.
target=qs.stats.volatility(d4)
# --- build the portfolio universe ---
d1=pd.read_csv("prices.csv",parse_dates=True, index_col="תאריך")
# Keep only columns with a full history (no NaNs across all rows).
stocks_t0=d1.dropna(thresh=len(d1) , axis=1)
m1 = expected_returns.mean_historical_return(stocks_t0)
# Drop the top 5% of names by mean historical return (outlier trim).
m2=m1[m1 < m1.quantile(.95)]
stocks_t0=stocks_t0[m2.index]
stocks_t0=stocks_t0.sort_index()
# Restrict to the one-year estimation window (hard-coded dates).
stocks_t0=stocks_t0.loc["2015-05-27":"2016-05-26"]
stocks_t0=stocks_t0.dropna(thresh=len(stocks_t0) , axis=1)
stocks_t0=stocks_t0.sort_index(ascending=False)
# Covariance and expected returns annualized by the window length.
S=risk_models.risk_matrix(stocks_t0,frequency=len(stocks_t0))
mu=expected_returns.mean_historical_return(stocks_t0,frequency=len(stocks_t0))
ef = EfficientFrontier(mu, S)
##target=target+0.02
# Optimize for maximum return at the benchmark's volatility level.
weights = ef.efficient_risk(target_volatility=target)
ef.portfolio_performance(verbose=True)
wei=pd.Series(weights)
##d7 = raw_return(prices=stocks_t0, lag=True)
# Daily simple returns of the selected securities, aligned to the weights.
d7=stocks_t0.pct_change().dropna()
d7=d7[wei.index]
# Weighted daily portfolio return series.
returns1=wei*d7
d8=returns1.sum(axis=1)
d8=d8.sort_index()
# year t1 (continues below)
def generate_outlook(t):
    """Build a benchmark-volatility-targeted portfolio for year ``t`` and
    report in-sample (t0) and out-of-sample (t1) performance.

    ``t`` is a year string (e.g. "2015"); windows run from ``t``-05-27 to the
    same date one and two years later. Writes quantstats HTML reports named
    by ``t``. Relies on local files ``index-125.csv`` and ``prices.csv`` and
    on project helpers ``get_prices`` / ``raw_return``.
    """
    # --- benchmark index for the estimation window t0 ---
    df = pd.read_csv("index-125.csv", index_col=None, header=0)  # read index
    d2=get_prices(df)  # project helper; presumably reshapes into a price frame — TODO confirm
    # Build the three window boundary dates: t0 start, t0 end / t1 start, t1 end.
    w="-05-27"
    a=t+w
    x=str(pd.to_numeric(a[:4])+1)+a[4:]
    y=str(pd.to_numeric(a[:4])+2)+a[4:]
    # Index returns over t0.
    index_t0=d2.loc[a:x]
    d3=index_t0.sort_index()
    returns3 =d3.iloc[:,0]
    returns3=pd.to_numeric(returns3)
    index_t0 = raw_return(prices=returns3, lag=True)
    # Benchmark volatility — used as the efficient-frontier risk target.
    target=qs.stats.volatility(index_t0)
    # --- portfolio construction on window t0 ---
    d1=pd.read_csv("prices.csv",parse_dates=True, index_col="תאריך")  # read prices
    # Keep only securities with a complete history.
    stocks_t0=d1.dropna(thresh=len(d1) , axis=1)
    m1 = expected_returns.mean_historical_return(stocks_t0)
    m2=m1[m1 < m1.quantile(.95)]  # drop top 5% by mean return (outlier trim)
    stocks_t0=stocks_t0[m2.index]
    stocks_t0=stocks_t0.sort_index()
    stocks_t0=stocks_t0.loc[a:x]
    stocks_t0=stocks_t0.dropna(thresh=len(stocks_t0) , axis=1)
    stocks_t0=stocks_t0.sort_index(ascending=False)
    # Annualize by the window length (number of rows in t0).
    S=risk_models.risk_matrix(stocks_t0,frequency=len(stocks_t0))
    mu=expected_returns.mean_historical_return(stocks_t0,frequency=len(stocks_t0))
    ef = EfficientFrontier(mu, S)
    ##target=target+0.017  # use this if target is not reachable as-is
    weights = ef.efficient_risk(target_volatility=target)  # max return at target vol
    ef.portfolio_performance(verbose=True)
    wei=pd.Series(weights)  # convert weight dict to a pandas Series
    # In-sample daily portfolio returns over t0.
    d7=stocks_t0.pct_change().dropna()
    d7=d7[wei.index]
    returns1=wei*d7
    d8=returns1.sum(axis=1)
    portfolio_t0=d8.sort_index()
    name_index_t0="index_t0"+str(t)+".html"
    name_portfolio_t0="portfolio_t0"+str(t)+".html"
    qs.reports.html(index_t0,output=name_index_t0)
    qs.reports.html(portfolio_t0,output=name_portfolio_t0)
    # --- out-of-sample year t1: hold the t0 weights over [x, y] ---
    df3=d1.dropna(thresh=len(d1) , axis=1)
    df3=df3.sort_index()
    stocks_t0=df3.loc[x:y]  # NOTE: reuses the t0 variable name for t1 prices
    stocks_returns = raw_return(prices=stocks_t0,lag=True)
    new_rets=stocks_returns[wei.index]
    protfiolio=wei*new_rets
    port_t1=protfiolio.sum(axis=1)
    # Benchmark index over t1 (re-read and re-sliced).
    df = pd.read_csv("index-125.csv", index_col=None, header=0)
    d2=get_prices(df)
    index_t1=d2.loc[x:y]
    index_t1=index_t1.sort_index()
    returns3 =index_t1.iloc[:,0]
    returns3=pd.to_numeric(returns3)
    index_t1 = raw_return(prices=returns3, lag=True)
    # Print full out-of-sample metrics, then write the three HTML reports.
    qs.reports.metrics(port_t1,mode='full')
    name_index_t1="index_t1"+str(t)+".html"
    name_portfolio_t1="portfolio_t1"+str(t)+".html"
    name_compare_graph="compare_graph_t1"+str(t)+".html"
    qs.reports.html(index_t1,output=name_index_t1)
    qs.reports.html(port_t1,output=name_portfolio_t1)
    qs.reports.html(port_t1,index_t1,output=name_compare_graph)