def main():
    """For every portfolio, compute yearly returns / max drawdowns (net of fees),
    compare against its benchmark, and write one Excel report per portfolio."""
    for bench, portfolio in zip(BENCH_R, PORTFOLIOS):
        print(portfolio)
        report = pd.DataFrame()

        # Yearly cumulative return and max drawdown of the portfolio itself.
        port_returns = get_portfolio_return(portfolio)
        yearly_ret = port_returns.resample('Y').apply(empyrical.cum_returns_final)
        yearly_mdd = port_returns.resample('Y').apply(empyrical.max_drawdown)

        # Net out subscription and redemption fees read from the fee sheet.
        fee_file = f'{DATA_DIR}/{portfolio}.xlsx'
        fees = pd.read_excel(fee_file, sheet_name='费率', index_col=0)
        report['组合收益率'] = yearly_ret - fees['申购费'] - fees['赎回费']
        report['组合最大回撤'] = yearly_mdd

        # Same yearly statistics for the benchmark (no fees applied).
        bench_returns = get_bench_return(bench[0], bench[1])
        bench_yearly_ret = bench_returns.resample('Y').apply(empyrical.cum_returns_final)
        bench_yearly_mdd = bench_returns.resample('Y').apply(empyrical.max_drawdown)
        report['基准收益率'] = bench_yearly_ret
        report['基准最大回撤'] = bench_yearly_mdd
        report['超额收益率'] = report['组合收益率'] - report['基准收益率']

        # Persist the per-year comparison table.
        out_file = f'{DATA_DIR}/{portfolio}分年统计.xlsx'
        report.to_excel(out_file)
def test_max_drawdown_transformation(self, returns, constant):
    """Scaling returns by a positive constant scales max drawdown monotonically."""
    base_dd = empyrical.max_drawdown(returns)
    scaled_dd = empyrical.max_drawdown(constant * returns)
    if constant >= 1:
        # Amplified returns cannot shrink the (negative) drawdown below the
        # proportionally scaled original.
        assert constant * base_dd <= scaled_dd
    else:
        assert scaled_dd <= constant * base_dd
def test_max_drawdown(self):
    """vectorbt's max_drawdown must agree with empyrical column-by-column."""
    expected = [empyrical.max_drawdown(ret[col]) for col in ('a', 'b', 'c')]
    assert isclose(ret['a'].vbt.returns.max_drawdown(), expected[0])
    pd.testing.assert_series_equal(
        ret.vbt.returns.max_drawdown(),
        pd.Series(expected, index=ret.columns))
def test_max_drawdown_translation(self, returns, constant):
    """Shifting returns up (down) can only improve (worsen) max drawdown."""
    dd_down = empyrical.max_drawdown(returns - constant)
    dd_base = empyrical.max_drawdown(returns)
    dd_up = empyrical.max_drawdown(returns + constant)
    assert dd_base <= dd_up
    assert dd_down <= dd_base
def ForwardPE_and_Return(self, sec, dailyreturn):
    """Compare a sector index against an equal-weight custom portfolio.

    Builds the median 1y-forward-PE valuation gap between the two and their
    cumulative return spread, prints drawdown/sharpe stats, and returns the
    merged return DataFrame.

    :param sec: sector identifier; also keys the CSV of portfolio tickers.
    :param dailyreturn: per-ticker daily return data passed to RC.DailyPNL.
    :return: DataFrame with daily/cumulative returns for index and portfolio
             plus their cumulative alpha.
    """
    # Index membership with weights, normalized to string dates and float weights.
    memb = self.P.Sec_weight(sec)
    memb['date'] = [str(x)[0:10] for x in memb['date']]
    memb = memb.rename(columns={'weight': 'PortNav%'})
    memb['PortNav%'] = memb['PortNav%'].astype(float)
    # Restrict the analysis window to 2019-12-30 onward.
    memb = memb.loc[memb['date'] >= '2019-12-30', :].copy()
    IndexFwdPEmedian = self.ForwardPE_median(memb)
    # Custom portfolio: tickers from disk, equal-weighted, anchored at the
    # window start date.  NOTE(review): hard-coded local path — confirm it is
    # valid in the deployment environment.
    portmemb = pd.read_csv("D:/S/SuperTrend/" + sec + "_list.csv")
    portmemb['date'] = '2019-12-30'
    portmemb['ticker'] = [str(x)[0:6] for x in portmemb['ticker']]
    portmemb['PortNav%'] = 1 / len(portmemb)
    PortFwdPEmedian = self.ForwardPE_median(portmemb)
    PortFwdPEmedian = PortFwdPEmedian.rename(
        columns={'medianfrdPE': 'Port1yfrdPE'})
    IndexFwdPEmedian = IndexFwdPEmedian.rename(
        columns={'medianfrdPE': 'Index1yfrdPE'})
    # Align the two valuation series on date.
    IndexFwdPEmedian = pd.merge(IndexFwdPEmedian, PortFwdPEmedian,
                                on='date', how='left')
    IndexReturn = RC.DailyPNL(dailyreturn, memb)
    PortReturn = RC.DailyPNL(dailyreturn, portmemb)
    # Cumulative return via summed log-returns: exp(sum(log(1+r))).
    PortReturn['PortReturn'] = np.exp(
        np.log1p(PortReturn['dailyreturn']).cumsum())
    IndexReturn['IndexReturn'] = np.exp(
        np.log1p(IndexReturn['dailyreturn']).cumsum())
    PortReturn = PortReturn.rename(
        columns={'dailyreturn': 'PortdailyReturn'})
    IndexReturn = IndexReturn.rename(
        columns={'dailyreturn': 'IndexdailyReturn'})
    IndexReturn = pd.merge(
        IndexReturn[['date', 'IndexdailyReturn', 'IndexReturn']],
        PortReturn[['date', 'PortdailyReturn', 'PortReturn']],
        on='date', how='left')
    # Cumulative alpha = portfolio cumulative return minus index cumulative return.
    IndexReturn['cumAlpha'] = IndexReturn['PortReturn'] - IndexReturn[
        'IndexReturn']
    # Valuation gap: ratio of portfolio to index median forward PE.
    IndexFwdPEmedian['ValuationGap'] = IndexFwdPEmedian[
        'Port1yfrdPE'] / IndexFwdPEmedian['Index1yfrdPE']
    #PEGap_Return=pd.merge(IndexFwdPEmedian[['date','ValuationGap']],IndexReturn[['date','PortdailyReturn','IndexdailyReturn','IndexReturn','PortReturn','cumAlpha']],on='date',how='left')
    #PEGap_Return.set_index(['date'],inplace=True)
    #PEGap_Return[['ValuationGap','cumAlpha']].plot()
    # Console summary of risk/return for both legs.
    print('max drawdown index' +
          str(empyrical.max_drawdown(IndexReturn['IndexdailyReturn'])))
    print('sharpe index' +
          str(empyrical.sharpe_ratio(IndexReturn['IndexdailyReturn'])))
    print('max drawdown port' +
          str(empyrical.max_drawdown(IndexReturn['PortdailyReturn'])))
    print('sharpe port' +
          str(empyrical.sharpe_ratio(IndexReturn['PortdailyReturn'])))
    return (IndexReturn)
def test_max_drawdown(self):
    """Full-series and full-window rolling max drawdown both match empyrical."""
    expected = [empyrical.max_drawdown(ret[col]) for col in ('a', 'b', 'c')]
    assert isclose(ret['a'].vbt.returns.max_drawdown(), expected[0])
    pd.testing.assert_series_equal(
        ret.vbt.returns.max_drawdown(),
        pd.Series(expected, index=ret.columns).rename('max_drawdown'))
    # A rolling window spanning the whole series must reproduce the same values.
    pd.testing.assert_series_equal(
        ret.vbt.returns.rolling_max_drawdown(ret.shape[0], minp=1).iloc[-1],
        pd.Series(expected, index=ret.columns).rename(ret.index[-1]))
def portfolioAnalysis(return_data):
    """Print annualized performance stats for a portfolio and return them as a Series.

    Returns a Series of annual return, annual volatility, max drawdown and
    Sharpe ratio (labels in Chinese to match downstream consumers).
    """
    daily_ret = getNonCumReturn(return_data)
    # Daily risk-free rate derived from 3% annual over 250 trading days.
    rf_daily = math.pow(1 + 0.03, 1 / 250) - 1

    annual_return = empyrical.annual_return(daily_ret, period='daily')
    annual_volatility = empyrical.annual_volatility(daily_ret, period='daily')
    max_drawdown = empyrical.max_drawdown(daily_ret)
    sharpe_ratio = empyrical.sharpe_ratio(daily_ret, risk_free=rf_daily,
                                          period='daily')
    aggr_returns = empyrical.aggregate_returns(daily_ret, convert_to="yearly")

    print("年化收益:%f" % (annual_return))
    print("年化波动率:%f" % (annual_volatility))
    print("最大回撤:%f" % (max_drawdown))
    print("夏普比率:%f" % (sharpe_ratio))
    print("分年统计收益率:")
    print(aggr_returns)

    return pd.Series(
        [annual_return, annual_volatility, max_drawdown, sharpe_ratio],
        index=["年化收益率", "年化波动率", "最大回撤", "夏普比率"])
def Analysis(results):
    """Technical-indicator analyzer for a backtest result.

    :param results: {
        'returns':  [0.1, 0.1, 0.1],
        'benchmark':[0.1, 0.1, 0.1],
        'trades':   [[2020.01.01 01:00:00, 'BUY', 6234.10, 1]]
    }
    :return: dict of P&L, win rate and empyrical risk/return metrics.
    """
    strat = pnl_res(results["returns"])
    bench = pnl_res(results["benchmark"])
    pls, wr = pls_ws(results["trades"])
    alpha, beta = empyrical.alpha_beta_aligned(strat, bench)
    return {
        'pls': pls,
        'wr': wr,
        'return_ratio': empyrical.cum_returns_final(strat),
        'annual_return_ratio': empyrical.annual_return(strat),
        'beta': beta,
        'alpha': alpha,
        # 3.5% annual risk-free, converted to a per-day rate over 252 days.
        'sharp_ratio': empyrical.sharpe_ratio(strat, 0.035 / 252),
        'return_volatility': empyrical.annual_volatility(strat),
        'max_drawdown': empyrical.max_drawdown(strat),
    }
def calculate_earning_portofolio_results(er_pf_return, target_rate, least_days, most_days):
    """Historical statistics for one take-profit ("earning") portfolio run.

    The run ends on the first day (within [least_days, most_days] of the
    start) whose annualized return exceeds target_rate, otherwise on the
    window's last day.

    :return: (end date, holding days, annualized return at end, max drawdown up to end)
    """
    start_date = er_pf_return.index[0]
    cum_ret = (1 + er_pf_return).cumprod()
    # Elapsed years per observation, assuming 243 trading days per year.
    years_elapsed = np.arange(1, cum_ret.size + 1) * 1.0 / 243
    ann_ret = cum_ret ** (1. / years_elapsed) - 1
    # Only consider the calendar window [least_days, most_days] after start.
    window_lo = ann_ret.index[0] + pd.Timedelta(days=least_days)
    window_hi = ann_ret.index[0] + pd.Timedelta(days=most_days)
    ann_ret = ann_ret[(ann_ret.index >= window_lo) & (ann_ret.index <= window_hi)]
    # First day the target annualized return is hit, else the window's end.
    hits = ann_ret[ann_ret > target_rate]
    end_date = hits.index[0] if hits.size > 0 else ann_ret.index[-1]
    mdd = empyrical.max_drawdown(er_pf_return[er_pf_return.index <= end_date])
    return end_date, (end_date - start_date).days, ann_ret.loc[end_date], mdd
def test_max_drawdown(self, returns, expected):
    """Parametrized check of empyrical.max_drawdown against known values."""
    actual = empyrical.max_drawdown(returns)
    assert_almost_equal(actual, expected, DECIMAL_PLACES)
def evaluation(self):
    """Compute strategy-vs-benchmark performance metrics and store them in
    self.cls.df_output (single-row DataFrame), printing the transposed table.

    Reads per-period strategy/benchmark returns from self.df (eac_stgy_rt /
    eac_bcmk_rt) and config (risk-free rate, annualization factor) from self.cls.
    """
    ap.sound(f'entry: create_df')
    # Risk/return metrics on the per-period strategy return series.
    mdd = empyrical.max_drawdown(self.df.eac_stgy_rt)
    stgy_ret_an = empyrical.annual_return(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    bcmk_ret_an = empyrical.annual_return(self.df.eac_bcmk_rt, annualization=self.cls.annualization)
    stgy_vlt_an = empyrical.annual_volatility(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    bcmk_vlt_an = empyrical.annual_volatility(self.df.eac_bcmk_rt, annualization=self.cls.annualization)
    calmar = empyrical.calmar_ratio(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    omega = empyrical.omega_ratio(self.df.eac_stgy_rt, risk_free=self.cls.rf, annualization=self.cls.annualization)
    # Sharpe/alpha come from the project's qp helpers, not empyrical.
    sharpe = qp.sharpe_ratio(stgy_ret_an, self.df.cum_stgy_rt, self.cls.rf)
    sortino = empyrical.sortino_ratio(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    dsrk = empyrical.downside_risk(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    information = empyrical.information_ratio(self.df.eac_stgy_rt, factor_returns=self.df.eac_bcmk_rt)
    beta = empyrical.beta(self.df.eac_stgy_rt, factor_returns=self.df.eac_bcmk_rt, risk_free=self.cls.rf)
    tail_rt = empyrical.tail_ratio(self.df.eac_stgy_rt)
    alpha = qp.alpha_ratio(stgy_ret_an, bcmk_ret_an, self.cls.rf, beta)
    # Total-period returns from raw account value (ttas) and benchmark closes.
    stgy_ttrt_rt = (self.cls.yd.ttas[-1] - self.cls.yd.ttas[0]) / self.cls.yd.ttas[0]
    bcmk_ttrt_rt = (self.cls.pc.close[-1] - self.cls.pc.close[0]) / self.cls.pc.close[0]
    # Excess (strategy minus benchmark) returns, total and annualized.
    car_rt = stgy_ttrt_rt - bcmk_ttrt_rt
    car_rt_an = stgy_ret_an - bcmk_ret_an
    # NOTE(review): the 'sgty_ttrt_rt' key looks like a typo for 'stgy_ttrt_rt';
    # kept as-is because downstream consumers may rely on it.
    self.cls.df_output = pd.DataFrame(
        {'sgty_ttrt_rt': [stgy_ttrt_rt], 'bcmk_ttrt_rt': [bcmk_ttrt_rt],
         'car_rt': [car_rt], 'stgy_ret_an': [stgy_ret_an],
         'bcmk_ret_an': [bcmk_ret_an], 'car_rt_an': [car_rt_an],
         'stgy_vlt_an': [stgy_vlt_an], 'bcmk_vlt_an': [bcmk_vlt_an],
         'mdd': [mdd], 'sharpe': [sharpe], 'alpha': [alpha], 'beta': [beta],
         'information': [information], 'tail_rt': [tail_rt],
         'calmar': [calmar], 'omega': [omega], 'sortino': [sortino],
         'dsrk': [dsrk]})
    print(f'feedback: \n{self.cls.df_output.T}')
def compute_stats(portfolio, benchmark):
    """Compute summary statistics for the current portfolio.

    :param portfolio: value series indexed by date.
    :param benchmark: benchmark value series (currently unused here).
    :return: dict of highs/lows, periodic returns and risk metrics.
    """
    by_year = portfolio.groupby(lambda ts: ts.year)
    port_ret = simple_returns(portfolio)

    stats = {}
    stats['1yr_highest'] = by_year.max().iloc[-1]
    stats['1yr_lowest'] = by_year.min().iloc[-1]
    # Last week/month/year-to-date aggregated simple returns.
    for key, freq in (('wtd_return', 'weekly'),
                      ('mtd_return', 'monthly'),
                      ('ytd_return', 'yearly')):
        stats[key] = aggregate_returns(port_ret, freq).iloc[-1]
    stats['max_drawdown'] = max_drawdown(port_ret)
    stats['annual_volatility'] = annual_volatility(port_ret, period='daily',
                                                   alpha=2.0)
    stats['sharpe_ratio_1yr'] = sharpe_ratio(port_ret, risk_free=0.0,
                                             period='daily')
    stats['tail_ratio'] = tail_ratio(port_ret)
    return stats
def calculate_metrics(self):
    """Compute period risk metrics from aligned algorithm/benchmark returns,
    storing every result on self.  Raises if the two return indexes differ."""
    # Cumulative return over the whole period (last point of the running series).
    self.benchmark_period_returns = \
        cum_returns(self.benchmark_returns).iloc[-1]

    self.algorithm_period_returns = \
        cum_returns(self.algorithm_returns).iloc[-1]

    # Both series must cover the exact same sessions; otherwise metrics below
    # would silently misalign.
    if not self.algorithm_returns.index.equals(
        self.benchmark_returns.index
    ):
        message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
        message = message.format(bm_count=len(self.benchmark_returns),
                                 algo_count=len(self.algorithm_returns),
                                 start=self._start_session,
                                 end=self._end_session)
        raise Exception(message)

    self.num_trading_days = len(self.benchmark_returns)

    # Running mean of algorithm returns up to each session.
    self.mean_algorithm_returns = (
        self.algorithm_returns.cumsum() /
        np.arange(1, self.num_trading_days + 1, dtype=np.float64))

    self.benchmark_volatility = annual_volatility(self.benchmark_returns)
    self.algorithm_volatility = annual_volatility(self.algorithm_returns)

    # Treasury return over the same window, used below for excess return.
    self.treasury_period_return = choose_treasury(
        self.treasury_curves,
        self._start_session,
        self._end_session,
        self.trading_calendar,
    )
    self.sharpe = sharpe_ratio(
        self.algorithm_returns,
    )
    # The consumer currently expects a 0.0 value for sharpe in period,
    # this differs from cumulative which was np.nan.
    # When factoring out the sharpe_ratio, the different return types
    # were collapsed into `np.nan`.
    # TODO: Either fix consumer to accept `np.nan` or make the
    # `sharpe_ratio` return type configurable.
    # In the meantime, convert nan values to 0.0
    if pd.isnull(self.sharpe):
        self.sharpe = 0.0
    self.downside_risk = downside_risk(self.algorithm_returns.values)
    self.sortino = sortino_ratio(
        self.algorithm_returns.values,
        _downside_risk=self.downside_risk,
    )
    self.information = information_ratio(
        self.algorithm_returns.values,
        self.benchmark_returns.values,
    )
    self.alpha, self.beta = alpha_beta_aligned(
        self.algorithm_returns.values,
        self.benchmark_returns.values,
    )
    # Excess return is measured against the treasury return, not the benchmark.
    self.excess_return = self.algorithm_period_returns - \
        self.treasury_period_return
    self.max_drawdown = max_drawdown(self.algorithm_returns.values)
    self.max_leverage = self.calculate_max_leverage()
def get_performance_summary(returns):
    """Summarize a return series with the standard empyrical metrics.

    :param returns: periodic (noncumulative) return series.
    :return: pd.Series keyed by metric name.
    """
    metric_funcs = (
        ('annualized_returns', ep.annual_return),
        ('cumulative_returns', ep.cum_returns_final),
        ('annual_volatility', ep.annual_volatility),
        ('sharpe_ratio', ep.sharpe_ratio),
        ('sortino_ratio', ep.sortino_ratio),
        ('max_drawdown', ep.max_drawdown),
    )
    return pd.Series({name: func(returns) for name, func in metric_funcs})
def prepare_performance(self):
    """Build a performance DataFrame comparing the strategy's equity curve
    against a buy-and-hold benchmark sized from the initial capital.

    Returns the enriched copy of self.data.
    """
    start = time.time()
    perf = self.data.copy()
    perf['price'] = perf['close']
    # Benchmark: buy as many units as initial capital affords at the first close.
    size = self.account.initial_capital / perf.iloc[0]['close']
    perf['base_equity'] = [price * size for price in perf['close']]
    perf['equity'] = [e for _, e in self.account.equity]

    # BENCHMARK
    # Period return and expanding max drawdown of the buy-and-hold benchmark.
    perf['benchmark_period_return'] = [
        helpers.percent_change(perf['base_equity'][0],
                               perf['base_equity'][i])
        for i in range(0, len(perf['base_equity']))]
    # NOTE(review): slice [:i] excludes row i itself, so each drawdown lags
    # one bar — confirm this is intentional.
    perf['benchmark_max_drawdown'] = [
        max_drawdown(perf['base_equity'][:i].pct_change())
        for i in range(0, len(perf['base_equity']))]

    # STRATEGY
    perf['algorithm_period_return'] = [
        helpers.percent_change(perf['equity'][0], perf['equity'][i])
        for i in range(0, len(perf['equity']))]
    perf['returns'] = perf['equity'].pct_change()
    perf['max_drawdown'] = [
        max_drawdown(perf['equity'][:i].pct_change())
        for i in range(0, len(perf['equity']))]

    logger.debug(
        'Performance prepared for {:.2} sec'.format(time.time() - start))

    perf['ending_value'] = 0  # value of opened positions
    # NOTE(review): annualization exponent looks inverted — compounding to an
    # annual rate is usually ** (365 / n_days), not ** (n_days / 365); and
    # cumprod over a *cumulative* period return series is unusual. Verify.
    perf['Annualized Return'] = (1 + perf['algorithm_period_return']).cumprod()[-1]**(len(perf['base_equity'])/365) - 1
    # alpha/beta are placeholders (string '0'), not computed here.
    perf['alpha'] = '0'
    perf['beta'] = '0'
    perf['std'] = np.std(perf['returns'])
    perf['sharpe'] = perf['Annualized Return']/perf['std']
    # NOTE(review): 'calmer' is presumably the Calmar ratio (typo kept for
    # downstream compatibility).
    perf['calmer'] = perf['Annualized Return']/perf['max_drawdown']
    return perf
def plot(self):
    """Plot the portfolio value over time, titled with summary statistics.

    Each metric is computed best-effort: on failure (e.g. empty or degenerate
    return series) it falls back to 0 instead of aborting the plot.
    """
    df_info = pd.DataFrame(self.infos)
    df_info.set_index('current step', inplace=True)
    rn = np.asarray(df_info['portfolio return'])

    def _safe(metric_fn):
        # Best-effort metric: never let a metric failure kill the plot.
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; narrowed to Exception.
        try:
            return metric_fn(rn)
        except Exception:
            return 0

    try:
        spf = df_info['portfolio value'].iloc[1]   # Start portfolio value
        epf = df_info['portfolio value'].iloc[-1]  # End portfolio value
        pr = (epf - spf) / spf
    except Exception:
        pr = 0

    sr = _safe(sharpe_ratio)
    sor = _safe(sortino_ratio)
    mdd = _safe(max_drawdown)
    cr = _safe(calmar_ratio)
    om = _safe(omega_ratio)
    dr = _safe(downside_risk)

    print("First portfolio value: ",
          np.round(df_info['portfolio value'].iloc[1]))
    print("Last portfolio value: ",
          np.round(df_info['portfolio value'].iloc[-1]))

    title = self.strategy_name + ': ' + 'profit={: 2.2%} sharpe={: 2.2f} sortino={: 2.2f} max drawdown={: 2.2%} calmar={: 2.2f} omega={: 2.2f} downside risk={: 2.2f}'.format(
        pr, sr, sor, mdd, cr, om, dr)
    df_info[['portfolio value']].plot(title=title, fig=plt.gcf(),
                                      figsize=(15, 10), rot=30)
def get_metrics_single_model(ret):
    """Summary statistics for one model's monthly return series."""
    yearly_mean = aggregate_returns(ret, convert_to="yearly").mean()
    return {
        "Mean": ret.mean(),
        "Mean (Yearly)": yearly_mean,
        "Standard Deviation": ret.std(),
        "Sharpe Ratio": sharpe_ratio(ret, period='monthly'),
        "Skewness": skew(ret),
        "Kurtosis": kurtosis(ret),
        "Max Drawdown": max_drawdown(ret),
    }
def _get_reward(self, current_prices, next_prices):
    """Compute the step reward according to self.compute_reward.

    For `profit`, updates portfolio values/weights in place and rewards the
    weighted log-returns; for the ratio modes, rewards the metric over the
    accumulated self.returns (falling back to 0 if the metric fails, e.g. on
    a too-short series).  Returns a scalar (the mean, for array rewards).
    """
    if self.compute_reward == compute_reward.profit:
        returns_rate = next_prices / current_prices
        log_returns = np.log(returns_rate)
        last_weight = self.current_weights
        # Mark securities to market; the last slot (cash) is left untouched.
        securities_value = self.current_portfolio_values[:-1] * returns_rate
        self.current_portfolio_values[:-1] = securities_value
        self.current_weights = self.current_portfolio_values / np.sum(
            self.current_portfolio_values)
        reward = last_weight[:-1] * log_returns
    # Fix throughout: bare `except:` clauses replaced with `except Exception:`
    # so KeyboardInterrupt/SystemExit are no longer swallowed.
    elif self.compute_reward == compute_reward.sharpe:
        try:
            reward = sharpe_ratio(np.asarray(self.returns))
        except Exception:
            reward = 0
    elif self.compute_reward == compute_reward.sortino:
        try:
            reward = sortino_ratio(np.asarray(self.returns))
        except Exception:
            reward = 0
    elif self.compute_reward == compute_reward.max_drawdown:
        try:
            reward = max_drawdown(np.asarray(self.returns))
        except Exception:
            reward = 0
    elif self.compute_reward == compute_reward.calmar:
        try:
            reward = calmar_ratio(np.asarray(self.returns))
        except Exception:
            reward = 0
    elif self.compute_reward == compute_reward.omega:
        try:
            reward = omega_ratio(np.asarray(self.returns))
        except Exception:
            reward = 0
    elif self.compute_reward == compute_reward.downside_risk:
        try:
            reward = downside_risk(np.asarray(self.returns))
        except Exception:
            reward = 0
    # Collapse array rewards to their mean; scalars pass through unchanged.
    try:
        reward = reward.mean()
    except Exception:
        reward = reward
    return reward
def single_sim(self):
    """Run one block-bootstrap simulation of the 3-asset portfolio.

    Resamples yearly return blocks (geometric block lengths — presumably a
    stationary-bootstrap scheme; TODO confirm) until self.holding_period
    years are filled, optionally injects stressed worst-year returns, and
    returns (path, CAGR, volatility, max drawdown, omega@2%/4%/8%).
    """
    bootret_by_year = []  # NOTE(review): collected nowhere — appears unused.
    cumulative_lengths = []
    data_panel = []
    for i in range(self.holding_period):
        # Random block start (positional index into the historical data).
        index_start = self.data.sample().index[0]
        index_start = self.data.index.get_loc(index_start)
        # Draw a geometric block length, rejecting draws longer than the horizon.
        L_i = self.holding_period + 1
        while L_i > self.holding_period:
            L_i = geom.rvs(p = 1/self.mean_block_length)
        cumulative_lengths.append(L_i)
        if sum(cumulative_lengths) > self.holding_period:
            # Final block: truncate so the total exactly fills the horizon.
            L_final = self.holding_period - sum(cumulative_lengths[:-1])
            if L_final > len(self.data) - index_start:
                # Not enough rows after index_start: shift the window back.
                diff = L_final - (len(self.data) - index_start)
                subsample_generated = self.data.iloc[index_start-diff: (index_start-diff + L_final), :]
            else:
                subsample_generated = self.data.iloc[index_start: index_start + L_final, :]
            data_panel.append(subsample_generated)
            break
        else:
            subsample_generated = self.data.iloc[index_start: index_start + L_i, :]
            # iloc silently truncates at the end of the data, so record the
            # actual block length used.
            if L_i > len(self.data) - index_start :
                L_i = len(self.data) - index_start
            data_panel.append(subsample_generated)
            cumulative_lengths[-1] = L_i
    bootstrapSample = pd.concat([subsample for subsample in data_panel], axis = 0, ignore_index = True)
    if self.stress_freq:
        # Identify the historically worst year for the current weights.
        historical_ret_by_year = self.data @ np.array([self.w1_stock, self.w2_bond, self.w3_gold]).T
        year_min_ret = historical_ret_by_year.idxmin()
        for i in range(self.holding_period):
            # 5% chance per simulated year of an extreme event.
            extreme_event_dummy = True if np.random.rand() < 0.05 else False
            if extreme_event_dummy:
                if self.stress_intensity == 1:
                    # Replace the year with the worst historical year.
                    bootstrapSample.iloc[i,:] = self.data.loc[year_min_ret,:]
                else:
                    # Higher intensity: worst year amplified by 1.5x.
                    bootstrapSample.iloc[i,:] = self.data.loc[year_min_ret,:]
                    bootstrapSample.iloc[i,:] *= 1.5
    # Portfolio yearly returns net of the total expense ratio.
    total_ret_by_year = bootstrapSample @ np.array([self.w1_stock, self.w2_bond, self.w3_gold]).T
    total_ret_by_year -= self.TER
    portfolio_path = self.capital * np.cumprod(total_ret_by_year + 1)
    cagr = (portfolio_path.values[-1] / self.capital) ** (1/self.holding_period) - 1
    annual_volatility = total_ret_by_year.std()
    maxDrawdown = max_drawdown(pd.Series(total_ret_by_year))
    # Omega ratios at 2% / 4% / 8% yearly thresholds (annualization=1: data
    # is already yearly).
    omega_ratio2 = omega_ratio(pd.Series(total_ret_by_year),
                               required_return = 0.02, annualization = 1)
    omega_ratio4 = omega_ratio(pd.Series(total_ret_by_year),
                               required_return = 0.04, annualization = 1)
    omega_ratio8 = omega_ratio(pd.Series(total_ret_by_year),
                               required_return = 0.08, annualization = 1)
    return (np.insert(portfolio_path.values, 0, self.capital), cagr, annual_volatility, maxDrawdown, omega_ratio2, omega_ratio4, omega_ratio8)
def _get_backtest_performance_metrics(ret, benchmark_ret):
    """Assemble the standard empyrical metric dict for a backtest.

    :param ret: strategy return series.
    :param benchmark_ret: benchmark return series aligned with ret.
    """
    return {
        'alpha': empyrical.alpha(ret, benchmark_ret),
        'beta': empyrical.beta(ret, benchmark_ret),
        'return': empyrical.cum_returns_final(ret),
        'cagr': empyrical.cagr(ret),
        'sharpe': empyrical.sharpe_ratio(ret),
        'max_drawdown': empyrical.max_drawdown(ret),
        'var': empyrical.value_at_risk(ret),
        'volatility': empyrical.annual_volatility(ret),
    }
def calculate_max_drawdown(self):
    """Compute max drawdown, alpha and beta for the account and plot
    cost vs. balance history.

    :return: (max drawdown, alpha, beta)
    """
    final_balance, cost, balance, initial_cost = self.visual_account()
    returns = balance / initial_cost
    alpha, beta = alpha_beta(returns, self.benchmark_returns)
    maxdrawdown = max_drawdown(returns)
    plt.scatter(initial_cost, balance)
    plt.xlabel('cost history')
    plt.ylabel('balance history')
    # Fix: was `plt.grid = (True)`, which rebound the pyplot `grid` function
    # to the value True (breaking later grid() calls) instead of enabling
    # the grid on the plot.
    plt.grid(True)
    plt.show()
    print("Balance: " + str(final_balance) + " Investment cost: " + str(cost))
    print('max drawdown = ' + str(maxdrawdown) + '; alpha = ' + str(alpha) +
          '; beta= ' + str(beta) + '.')
    return maxdrawdown, alpha, beta
def getStats(cls, returns, benchmark_returns):
    """Bundle standard risk/return metrics for a strategy vs. a benchmark.

    :return: dict of cumulative/annual returns, volatilities, drawdown,
             downside risk, sharpe, alpha and beta.
    """
    alpha, beta = alpha_beta_aligned(returns, benchmark_returns)
    return {
        'cum_return': cum_returns(returns),
        'annual_return': annual_return(returns),
        'annual_volatility': annual_volatility(returns),
        'benchmark_volatility': annual_volatility(benchmark_returns),
        'max_drawdown': max_drawdown(returns),
        'downside_risk': downside_risk(returns),
        # NB: key intentionally kept as 'sharpe ratio' (with a space) for
        # existing consumers.
        'sharpe ratio': sharpe_ratio(returns),
        'alpha': alpha,
        'beta': beta,
    }
def indexAnalysis(index_data):
    """Per-column annualized stats for a DataFrame of index return series.

    :return: DataFrame (4 x n_assets) with rows: annual return, annual
             volatility, max drawdown, sharpe ratio (Chinese labels).
    """
    assets = index_data.columns
    # Daily risk-free rate implied by 3% per year over 250 trading days.
    rf_daily = math.pow(1 + 0.03, 1 / 250) - 1
    stats = np.zeros((4, len(assets)))
    for i in range(len(assets)):
        series = index_data.iloc[:, i]
        stats[0, i] = empyrical.annual_return(series, period='daily')
        stats[1, i] = empyrical.annual_volatility(series, period='daily')
        stats[2, i] = empyrical.max_drawdown(series)
        stats[3, i] = empyrical.sharpe_ratio(series, risk_free=rf_daily,
                                             period='daily')
    return pd.DataFrame(stats,
                        index=["年化收益率", "年化波动率", "最大回撤", "夏普比率"],
                        columns=assets)
def update_performance(self):
    """Recompute portfolio/benchmark performance attributes from monthly
    returns, write holdings to CSV, and print a summary.

    Annualization uses factor 12 — assumes self.portfolio_ret and
    self.benchmark_ret are monthly series (TODO confirm).
    """
    num_month = len(self.all_trade_dates) - 1
    self.portfolio_net_value = (1 + self.portfolio_ret).cumprod()
    self.benchmark_net_value = (1 + self.benchmark_ret).cumprod()
    self.maxDD = max_drawdown(self.portfolio_ret)
    self.excess_ret = self.portfolio_ret - self.benchmark_ret
    # Annualized excess return from the ratio of final net values.
    self.annual_excess_ret = (self.portfolio_net_value[-1] /
                              self.benchmark_net_value[-1])**(
                                  12 / num_month) - 1
    self.cum_excess_ret = self.excess_ret.cumsum()
    # First observation skipped — presumably the seed period; verify.
    self.volatility = np.std(self.portfolio_ret[1:]) * np.sqrt(12)
    self.annual_ret = (self.portfolio_net_value[-1])**(12 / num_month) - 1
    # NOTE(review): IR here divides excess return by *portfolio* volatility,
    # not tracking-error volatility — confirm this is the intended definition.
    self.information_ratio = self.annual_excess_ret / self.volatility
    self.holding_result.to_csv("./backtest_result/holding.csv")
    print(
        f"Annual Return:{self.annual_ret}; \nInformation Ratio:{self.information_ratio};\nmaxDD:{self.maxDD};"
        f"\nvolatility:{self.volatility}"
        f"\nSharpe:{(self.annual_ret - 0.03)/self.volatility}")
def performance(ret, benchmark, rf=0.04):
    """Headline performance metrics for a daily return series.

    :param ret: daily strategy returns.
    :param benchmark: daily benchmark returns.
    :param rf: annual risk-free rate (converted to daily for the Sharpe ratio).
    :return: dict of total/annual return, max drawdown, sharpe, alpha, beta.
    """
    import empyrical
    daily_rf = (1 + rf) ** (1 / 252) - 1
    alpha, beta = empyrical.alpha_beta(ret, benchmark)
    return {
        'total_return': empyrical.cum_returns_final(ret),
        'annual_return': empyrical.annual_return(ret),
        'max_drawdown': empyrical.max_drawdown(ret),
        'sharpe_ratio': empyrical.sharpe_ratio(ret, risk_free=daily_rf),
        'alpha': alpha,
        'beta': beta
    }
def result_stats(perf, verbose=False):
    """Summarize a backtest performance frame (or pickled path to one).

    :param perf: DataFrame with 'returns' and 'weight' columns, or a path to
                 a pickled one.
    :param verbose: truthy prints the summary; == 2 also prints each weight.
    :return: (annual sharpe, annual return, annual volatility, max drawdown,
              number of sign-flip transactions)
    """
    if isinstance(perf, str):
        perf = pd.read_pickle(perf)
    prets = perf['returns']
    asr = sharpe_ratio(returns=prets)
    aret = annual_return(returns=prets, period='daily')
    avol = annual_volatility(returns=prets, period='daily')
    maxdd = max_drawdown(prets)

    txns = perf['weight']
    # Fix: DataFrame.append-in-a-loop was O(n^2) and is removed in
    # pandas >= 2.0 — collect plain records and build the frame once.
    # (Also fixes the 'icker' -> 'ticker' column-name typo; the column is
    # local to this function and never read back.)
    records = []
    for index, value in txns.items():
        if isinstance(value, dict):
            for k, v in value.items():
                if verbose == 2:
                    print(k, v)
                records.append({'ticker': k, 'dt': index, 'weight': v})
    tdf = pd.DataFrame(records)

    # Count transactions as sign changes of the weight sequence over time.
    num_of_txns = 0
    if not tdf.empty:
        tdf.sort_values(by=['dt'], inplace=True)
        tdf.reset_index(inplace=True)
        a = np.sign(tdf['weight'])
        num_of_txns = len(np.where(np.diff(np.sign(a)))[0])

    if verbose:
        print('asr', asr)
        print('aret', aret)
        print('avol', avol)
        print('maxdd', maxdd)
        print('num_of_txns', num_of_txns)
    return asr, aret, avol, maxdd, num_of_txns
def max_drawdown(returns):
    """Maximum drawdown of a strategy — thin wrapper around empyrical.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.

    Returns
    -------
    float
        Maximum drawdown.

    Note
    -----
    See https://en.wikipedia.org/wiki/Drawdown_(economics) for more details.
    """
    return ep.max_drawdown(returns)
def get_performance_summary(daily_returns_series: pd.Series, final_returns: float):
    """Build a labeled performance summary from daily returns.

    :param daily_returns_series: daily (noncumulative) return series indexed
        by date.
    :param final_returns: total cumulative return over the period.
    :return: pd.Series with start/end dates, cumulative return, CAGR,
        annualized std, max drawdown and Sharpe (Korean labels).
    """
    # Fix: `type(x) == pd.Series` replaced with isinstance (accepts subclasses).
    assert isinstance(daily_returns_series, pd.Series)

    date_list = daily_returns_series.dropna().index.tolist()
    start_date = date_list[0]
    end_date = date_list[-1]

    cagr = get_annualized_returns(start_date, end_date, final_returns)
    annual_std = get_annualized_std(daily_returns_series.std(), "daily")
    mdd = empyrical.max_drawdown(daily_returns_series)

    # Fix: built from a dict in one shot instead of growing an empty
    # pd.Series() via .loc (empty-Series construction without dtype is
    # deprecated, and incremental inserts are slower).
    return pd.Series({
        "시작일": start_date,
        "종료일": end_date,
        "누적수익률": final_returns,
        "CAGR": cagr,
        "Ann.Std": annual_std,
        "MDD": mdd,
        "샤프지수": cagr / annual_std,
    })
def max_drawdown(returns):
    """Maximum drawdown of a strategy — delegates to empyrical.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.

    Returns
    -------
    float
        Maximum drawdown.

    Note
    -----
    See https://en.wikipedia.org/wiki/Drawdown_(economics) for more details.
    """
    return empyrical.max_drawdown(returns)
def build_metrics(self):
    """Derive summary metrics from recorded capital and trade-return history.

    :return: pd.Series of win ratio, trade sharpe, average trade return,
             max drawdown and total return (insertion order preserved).
    """
    assert len(self.historical_caches) != 0
    assert len(self.historical_capitals) != 0
    assert len(self.historical_trade_returns) != 0

    # Equity-curve returns from the capital history.
    equity_returns = (
        pd.Series(self.historical_capitals)
        .pct_change(fill_method=None)
        .fillna(0)
    )
    trade_returns = make_flat(
        pd.Series(self.historical_trade_returns).rename("trade_return").dropna()
    )
    closed_trades = trade_returns[trade_returns != 0]

    metrics = OrderedDict()
    metrics["trade_winning_ratio"] = (closed_trades > 0).mean()
    metrics["trade_sharpe_ratio"] = emp.sharpe_ratio(trade_returns)
    metrics["trade_avg_return"] = trade_returns.mean()
    metrics["max_drawdown"] = emp.max_drawdown(equity_returns)
    metrics["total_return"] = equity_returns.add(1).cumprod().sub(1).iloc[-1]
    return pd.Series(metrics)
def stop(self):
    """ Liquidation: convert open positions to USDT, compute per-period
    return series for strategy and market, and log summary statistics. """
    position = self.account.position_map
    usdt = 0
    # Settlement: value every held asset in USDT.
    for k, v in position.items():
        # If k can be converted directly to USDT
        usdt += v * self.usdt_price(k)
    logger.info("last usdt price : {usdt}".format(usdt=usdt))
    # Per-period rate of return for strategy and market balances.
    self.stat["strategy_rate"] = (self.stat["strategy_balance"] - self.stat["strategy_balance"].shift(1)) / \
                                 self.stat["strategy_balance"].shift(1)
    self.stat["market_rate"] = (self.stat["market_balance"] - self.stat["market_balance"].shift(1)) / \
                               self.stat["market_balance"].shift(1)
    # sharpe = sharpe_ratio(self.stat["strategy_rate"], self.stat["market_rate"])
    # alpha, beta = alpha_beta(self.stat["strategy_rate"], self.stat["market_rate"])
    max_dowm = max_drawdown(self.stat["strategy_rate"])
    logger.info("-----------------------stat-------------------------")
    # logger.info("Sharpe (not converted to daily/annual): {sharpe}".format(sharpe=sharpe))
    # logger.info("alpha : {alpha} , beta : {beta}".format(alpha=alpha, beta=beta))
    logger.info("最大回撤 {down}".format(down=max_dowm))
    # Overall profit: relative change of strategy balance start -> end.
    logger.info(
        "盈利 {profit}".format(profit=(self.stat["strategy_balance"][-1] -
                                      self.stat["strategy_balance"][0]) /
                                     self.stat["strategy_balance"][0]))
    # Debug mode: plot strategy vs market balance curves.
    if logger.level == logging.DEBUG:
        import matplotlib.pylab as plt
        plt.plot(self.stat['strategy_balance'], color='r')
        plt.plot(self.stat['market_balance'], color='g')
        plt.show()
        print(self.stat.tail(5))
        print(self.stat.head(5))
def update(self, dt, algorithm_returns, benchmark_returns, leverage):
    """Incorporate one session's algorithm/benchmark returns and leverage
    into the running (cumulative) risk metrics at position dt."""
    # Keep track of latest dt for use in to_dict and other methods
    # that report current state.
    self.latest_dt = dt
    dt_loc = self.cont_index.get_loc(dt)
    self.latest_dt_loc = dt_loc

    # Record today's return and re-slice the views up to and including today.
    self.algorithm_returns_cont[dt_loc] = algorithm_returns
    self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]
    self.num_trading_days = len(self.algorithm_returns)

    if self.create_first_day_stats:
        if len(self.algorithm_returns) == 1:
            # Prepend a 0.0 so day-one statistics are well-defined.
            self.algorithm_returns = np.append(0.0, self.algorithm_returns)

    self.algorithm_cumulative_returns[dt_loc] = cum_returns(
        self.algorithm_returns
    )[-1]

    algo_cumulative_returns_to_date = \
        self.algorithm_cumulative_returns[:dt_loc + 1]

    self.mean_returns_cont[dt_loc] = \
        algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days

    self.mean_returns = self.mean_returns_cont[:dt_loc + 1]

    # 252 trading days per year.
    self.annualized_mean_returns_cont[dt_loc] = \
        self.mean_returns_cont[dt_loc] * 252

    self.annualized_mean_returns = \
        self.annualized_mean_returns_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.mean_returns) == 1:
            self.mean_returns = np.append(0.0, self.mean_returns)
            self.annualized_mean_returns = np.append(
                0.0, self.annualized_mean_returns)

    # Same bookkeeping for the benchmark leg.
    self.benchmark_returns_cont[dt_loc] = benchmark_returns
    self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.benchmark_returns) == 1:
            self.benchmark_returns = np.append(0.0, self.benchmark_returns)

    self.benchmark_cumulative_returns[dt_loc] = cum_returns(
        self.benchmark_returns
    )[-1]

    benchmark_cumulative_returns_to_date = \
        self.benchmark_cumulative_returns[:dt_loc + 1]

    self.mean_benchmark_returns_cont[dt_loc] = \
        benchmark_cumulative_returns_to_date[dt_loc] / \
        self.num_trading_days

    # NOTE(review): slice is [:dt_loc] here, unlike the [:dt_loc + 1] used
    # everywhere else — possibly an off-by-one; confirm before changing.
    self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt_loc]

    self.annualized_mean_benchmark_returns_cont[dt_loc] = \
        self.mean_benchmark_returns_cont[dt_loc] * 252

    self.annualized_mean_benchmark_returns = \
        self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]

    self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
    self.algorithm_cumulative_leverages = \
        self.algorithm_cumulative_leverages_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.algorithm_cumulative_leverages) == 1:
            self.algorithm_cumulative_leverages = np.append(
                0.0, self.algorithm_cumulative_leverages)

    # NOTE(review): this guard only fires when the algorithm series is empty
    # while the benchmark is non-empty — it does not detect general length
    # mismatches; verify the intended condition.
    if not len(self.algorithm_returns) and len(self.benchmark_returns):
        message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
        message = message.format(
            bm_count=len(self.benchmark_returns),
            algo_count=len(self.algorithm_returns),
            start=self.start_session,
            end=self.end_session,
            dt=dt
        )
        raise Exception(message)

    self.update_current_max()
    self.benchmark_volatility[dt_loc] = annual_volatility(
        self.benchmark_returns
    )
    self.algorithm_volatility[dt_loc] = annual_volatility(
        self.algorithm_returns
    )

    # caching the treasury rates for the minutely case is a
    # big speedup, because it avoids searching the treasury
    # curves on every minute.
    # In both minutely and daily, the daily curve is always used.
    treasury_end = dt.replace(hour=0, minute=0)
    if np.isnan(self.daily_treasury[treasury_end]):
        treasury_period_return = choose_treasury(
            self.treasury_curves,
            self.start_session,
            treasury_end,
            self.trading_calendar,
        )
        self.daily_treasury[treasury_end] = treasury_period_return
    self.treasury_period_return = self.daily_treasury[treasury_end]
    # Excess return relative to the treasury rate, not the benchmark.
    self.excess_returns[dt_loc] = (
        self.algorithm_cumulative_returns[dt_loc] -
        self.treasury_period_return)

    self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
        self.algorithm_returns,
        self.benchmark_returns,
    )
    self.sharpe[dt_loc] = sharpe_ratio(
        self.algorithm_returns,
    )
    self.downside_risk[dt_loc] = downside_risk(
        self.algorithm_returns
    )
    # Reuse today's downside risk to avoid recomputing it inside sortino.
    self.sortino[dt_loc] = sortino_ratio(
        self.algorithm_returns,
        _downside_risk=self.downside_risk[dt_loc]
    )
    self.information[dt_loc] = information_ratio(
        self.algorithm_returns,
        self.benchmark_returns,
    )
    self.max_drawdown = max_drawdown(
        self.algorithm_returns
    )
    self.max_drawdowns[dt_loc] = self.max_drawdown
    self.max_leverage = self.calculate_max_leverage()
    self.max_leverages[dt_loc] = self.max_leverage
def risk_metric_period(cls,
                       start_session,
                       end_session,
                       algorithm_returns,
                       benchmark_returns,
                       algorithm_leverages):
    """
    Build the risk-report dict for a single reporting period.

    Parameters
    ----------
    start_session : pd.Timestamp
        Start of period (inclusive) to produce metrics on.
    end_session : pd.Timestamp
        End of period (inclusive) to produce metrics on.
    algorithm_returns : pd.Series(pd.Timestamp -> float)
        Series of algorithm returns as of the end of each session.
    benchmark_returns : pd.Series(pd.Timestamp -> float)
        Series of benchmark returns as of the end of each session.
    algorithm_leverages : pd.Series(pd.Timestamp -> float)
        Series of algorithm leverages as of the end of each session.

    Returns
    -------
    risk_metric : dict[str, any]
        Dict with fields such as 'algorithm_period_return',
        'benchmark_period_return', 'alpha', 'beta', 'sharpe',
        'sortino', 'max_drawdown', 'max_leverage', 'period_label',
        etc.  Every non-finite value except 'period_label' (always a
        str) is replaced by None.
    """
    # Restrict the algorithm series to the requested session window.
    algo_mask = (
        (algorithm_returns.index >= start_session) &
        (algorithm_returns.index <= end_session)
    )
    algorithm_returns = algorithm_returns[algo_mask]

    # Benchmark needs to be masked to the same dates as the algo returns.
    last_algo_dt = algorithm_returns.index[-1]
    bench_mask = (
        (benchmark_returns.index >= start_session) &
        (benchmark_returns.index <= last_algo_dt)
    )
    benchmark_returns = benchmark_returns[bench_mask]

    bench_period_return = ep.cum_returns(benchmark_returns).iloc[-1]
    algo_period_return = ep.cum_returns(algorithm_returns).iloc[-1]

    alpha, beta = ep.alpha_beta_aligned(
        algorithm_returns.values,
        benchmark_returns.values,
    )

    # The consumer currently expects a 0.0 value for sharpe in period;
    # this differs from cumulative, which was np.nan.
    # TODO: Either fix consumer to accept `np.nan` or make the
    # `sharpe_ratio` return type configurable.
    # In the meantime, convert nan values to 0.0.
    sharpe = ep.sharpe_ratio(algorithm_returns)
    if pd.isnull(sharpe):
        sharpe = 0.0

    algo_values = algorithm_returns.values
    sortino = ep.sortino_ratio(
        algo_values,
        _downside_risk=ep.downside_risk(algo_values),
    )

    rval = {
        'algorithm_period_return': algo_period_return,
        'benchmark_period_return': bench_period_return,
        'treasury_period_return': 0,
        'excess_return': algo_period_return,
        'alpha': alpha,
        'beta': beta,
        'sharpe': sharpe,
        'sortino': sortino,
        'period_label': end_session.strftime("%Y-%m"),
        'trading_days': len(benchmark_returns),
        'algo_volatility': ep.annual_volatility(algorithm_returns),
        'benchmark_volatility': ep.annual_volatility(benchmark_returns),
        'max_drawdown': ep.max_drawdown(algorithm_returns.values),
        'max_leverage': algorithm_leverages.max(),
    }

    # Replace any nan/inf metric with None so downstream serialization
    # is safe; 'period_label' is always a str and passes through as-is.
    cleaned = {}
    for key, value in rval.items():
        if key != 'period_label' and not np.isfinite(value):
            cleaned[key] = None
        else:
            cleaned[key] = value
    return cleaned
def calculate_metrics(self):
    """
    Compute the full set of period risk metrics and store each one as
    an attribute on this object (period returns, volatilities, sharpe,
    sortino, information ratio, alpha/beta, excess return, max
    drawdown, and max leverage).

    Raises
    ------
    Exception
        If the algorithm and benchmark return series are not indexed
        by exactly the same sessions.
    """
    algo = self.algorithm_returns
    bench = self.benchmark_returns

    self.benchmark_period_returns = cum_returns(bench).iloc[-1]
    self.algorithm_period_returns = cum_returns(algo).iloc[-1]

    # Both series must cover the identical set of sessions.
    if not algo.index.equals(bench.index):
        raise Exception(
            "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}".format(
                bm_count=len(bench),
                algo_count=len(algo),
                start=self._start_session,
                end=self._end_session,
            )
        )

    self.num_trading_days = len(bench)

    # Running mean of daily returns: cumulative sum divided by the
    # 1-based day count.
    day_counts = np.arange(1, self.num_trading_days + 1, dtype=np.float64)
    self.mean_algorithm_returns = algo.cumsum() / day_counts

    self.benchmark_volatility = annual_volatility(bench)
    self.algorithm_volatility = annual_volatility(algo)

    self.treasury_period_return = choose_treasury(
        self.treasury_curves,
        self._start_session,
        self._end_session,
        self.trading_calendar,
    )

    # The consumer currently expects a 0.0 value for sharpe in period;
    # this differs from cumulative, which was np.nan.
    # TODO: Either fix consumer to accept `np.nan` or make the
    # `sharpe_ratio` return type configurable.
    # In the meantime, convert nan values to 0.0.
    self.sharpe = sharpe_ratio(algo)
    if pd.isnull(self.sharpe):
        self.sharpe = 0.0

    algo_values = algo.values
    bench_values = bench.values

    self.downside_risk = downside_risk(algo_values)
    self.sortino = sortino_ratio(
        algo_values,
        _downside_risk=self.downside_risk,
    )
    self.information = information_ratio(algo_values, bench_values)
    self.alpha, self.beta = alpha_beta_aligned(algo_values, bench_values)
    self.excess_return = (
        self.algorithm_period_returns - self.treasury_period_return
    )
    self.max_drawdown = max_drawdown(algo_values)
    self.max_leverage = self.calculate_max_leverage()