def test_sharpe_translation_1(self, returns, required_return, translation):
    sr = empyrical.sharpe_ratio(returns, required_return)
    sr_depressed = empyrical.sharpe_ratio(returns,
                                          required_return - translation)
    sr_raised = empyrical.sharpe_ratio(returns,
                                       required_return + translation)
    assert sr_depressed > sr
    assert sr > sr_raised
def test_sharpe_noise(self, small, large):
    index = pd.date_range('2000-1-30', periods=1000, freq='D')
    smaller_normal = pd.Series(
        [random.gauss(.01, small) for i in range(1000)], index=index)
    larger_normal = pd.Series(
        [random.gauss(.01, large) for i in range(1000)], index=index)
    assert empyrical.sharpe_ratio(smaller_normal, 0.001) > \
        empyrical.sharpe_ratio(larger_normal, 0.001)
def test_sharpe_translation_same(self, returns, required_return, translation):
    sr = empyrical.sharpe_ratio(returns, required_return)
    sr_depressed = empyrical.sharpe_ratio(
        returns - translation, required_return - translation)
    sr_raised = empyrical.sharpe_ratio(
        returns + translation, required_return + translation)
    assert_almost_equal(sr, sr_depressed, DECIMAL_PLACES)
    assert_almost_equal(sr, sr_raised, DECIMAL_PLACES)
def test_sharpe_ratio(self, test_risk_free):
    res_a = empyrical.sharpe_ratio(ret['a'], risk_free=test_risk_free)
    res_b = empyrical.sharpe_ratio(ret['b'], risk_free=test_risk_free)
    res_c = empyrical.sharpe_ratio(ret['c'], risk_free=test_risk_free)
    assert isclose(
        ret['a'].vbt.returns.sharpe_ratio(risk_free=test_risk_free), res_a)
    pd.testing.assert_series_equal(
        ret.vbt.returns.sharpe_ratio(risk_free=test_risk_free),
        pd.Series([res_a, res_b, res_c], index=ret.columns))
def test_sharpe(symbol, start_date, end_date):
    fname = "%s/%s.csv" % (const.DATA_DIR, symbol)
    dataframe = pd.read_csv(fname)
    dataframe['date'] = pd.to_datetime(dataframe['date'], format="%Y-%m-%d")
    dataframe = dataframe.set_index('date')
    dataframe['return'] = dataframe['close'].pct_change()
    dataframe = dataframe[(dataframe.index >= start_date) &
                          (dataframe.index <= end_date)]
    dataframe.dropna(inplace=True)
    sharpe = utils.get_sharpe_ratio(dataframe['return'])
    print(sharpe)
    print(empyrical.sharpe_ratio(dataframe['return']))
def test_sharpe_translation_diff(self, returns, required_return,
                                 translation_returns, translation_required):
    sr = empyrical.sharpe_ratio(returns, required_return)
    sr_depressed = empyrical.sharpe_ratio(
        returns - translation_returns,
        required_return - translation_required)
    sr_raised = empyrical.sharpe_ratio(
        returns + translation_returns,
        required_return + translation_required)
    assert sr != sr_depressed
    assert sr != sr_raised
def testSharpe():
    # Load the data
    stock_data = pd.read_csv("stock_data.csv", parse_dates=["Date"],
                             index_col=["Date"]).dropna()
    benchmark_data = pd.read_csv("benchmark_data.csv", parse_dates=["Date"],
                                 index_col=["Date"]).dropna()

    # Inspect the data
    print("Stocks\n")
    print(stock_data.info())
    print(stock_data.head())
    print("\nBenchmarks\n")
    print(benchmark_data.info())
    print(benchmark_data.head())

    # Summary statistics
    print(stock_data.describe())
    print(benchmark_data.describe())

    # Daily returns
    stock_returns = stock_data.pct_change()
    print(stock_returns.describe())
    sp_returns = benchmark_data.pct_change()
    print(sp_returns.describe())

    # Daily excess returns
    excess_returns = pd.DataFrame()
    risk_free = 0.04 / 252.0
    excess_returns["Amazon"] = stock_returns["Amazon"] - risk_free
    excess_returns["Facebook"] = stock_returns["Facebook"] - risk_free
    print(excess_returns.describe())

    # Mean of the excess returns
    avg_excess_return = excess_returns.mean()
    print(avg_excess_return)

    # Standard deviation of the excess returns
    std_excess_return = excess_returns.std()
    print(std_excess_return)

    # Daily Sharpe ratio
    daily_sharpe_ratio = avg_excess_return.div(std_excess_return)
    # Annualized Sharpe ratio
    annual_factor = np.sqrt(252)
    annual_sharpe_ratio = daily_sharpe_ratio.mul(annual_factor)
    print("Annualized Sharpe ratio\n", annual_sharpe_ratio)

    # The same computation via empyrical
    a = ey.sharpe_ratio(stock_returns["Amazon"], risk_free=risk_free)  # , annualization=252)
    b = ey.sharpe_ratio(stock_returns["Facebook"], risk_free=risk_free)
    print("empyrical results")
    print(a, b)
    print(a / annual_sharpe_ratio["Amazon"], b / annual_sharpe_ratio["Facebook"])
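# Hedged sketch (not from the sources above): a self-contained check that
# empyrical reproduces the manual computation in testSharpe(), i.e.
# mean(excess) / std(excess, ddof=1) * sqrt(252), so the ratios printed on
# its last line should come out close to 1. Synthetic data; names are
# illustrative.
import numpy as np
import pandas as pd
import empyrical

rng = np.random.default_rng(0)
rets = pd.Series(rng.normal(0.0005, 0.01, 252),
                 index=pd.date_range("2020-01-01", periods=252, freq="B"))
risk_free = 0.04 / 252.0
excess = rets - risk_free

manual = excess.mean() / excess.std() * np.sqrt(252)  # pandas std uses ddof=1
via_empyrical = empyrical.sharpe_ratio(rets, risk_free=risk_free)
assert np.isclose(manual, via_empyrical)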
def ForwardPE_and_Return(self, sec, dailyreturn):
    memb = self.P.Sec_weight(sec)
    memb['date'] = [str(x)[0:10] for x in memb['date']]
    memb = memb.rename(columns={'weight': 'PortNav%'})
    memb['PortNav%'] = memb['PortNav%'].astype(float)
    memb = memb.loc[memb['date'] >= '2019-12-30', :].copy()
    IndexFwdPEmedian = self.ForwardPE_median(memb)

    portmemb = pd.read_csv("D:/S/SuperTrend/" + sec + "_list.csv")
    portmemb['date'] = '2019-12-30'
    portmemb['ticker'] = [str(x)[0:6] for x in portmemb['ticker']]
    portmemb['PortNav%'] = 1 / len(portmemb)
    PortFwdPEmedian = self.ForwardPE_median(portmemb)

    PortFwdPEmedian = PortFwdPEmedian.rename(
        columns={'medianfrdPE': 'Port1yfrdPE'})
    IndexFwdPEmedian = IndexFwdPEmedian.rename(
        columns={'medianfrdPE': 'Index1yfrdPE'})
    IndexFwdPEmedian = pd.merge(IndexFwdPEmedian, PortFwdPEmedian,
                                on='date', how='left')

    IndexReturn = RC.DailyPNL(dailyreturn, memb)
    PortReturn = RC.DailyPNL(dailyreturn, portmemb)
    PortReturn['PortReturn'] = np.exp(
        np.log1p(PortReturn['dailyreturn']).cumsum())
    IndexReturn['IndexReturn'] = np.exp(
        np.log1p(IndexReturn['dailyreturn']).cumsum())
    PortReturn = PortReturn.rename(
        columns={'dailyreturn': 'PortdailyReturn'})
    IndexReturn = IndexReturn.rename(
        columns={'dailyreturn': 'IndexdailyReturn'})
    IndexReturn = pd.merge(
        IndexReturn[['date', 'IndexdailyReturn', 'IndexReturn']],
        PortReturn[['date', 'PortdailyReturn', 'PortReturn']],
        on='date', how='left')
    IndexReturn['cumAlpha'] = IndexReturn['PortReturn'] - \
        IndexReturn['IndexReturn']
    IndexFwdPEmedian['ValuationGap'] = IndexFwdPEmedian['Port1yfrdPE'] / \
        IndexFwdPEmedian['Index1yfrdPE']
    # PEGap_Return = pd.merge(IndexFwdPEmedian[['date', 'ValuationGap']],
    #                         IndexReturn[['date', 'PortdailyReturn', 'IndexdailyReturn',
    #                                      'IndexReturn', 'PortReturn', 'cumAlpha']],
    #                         on='date', how='left')
    # PEGap_Return.set_index(['date'], inplace=True)
    # PEGap_Return[['ValuationGap', 'cumAlpha']].plot()
    print('max drawdown index' +
          str(empyrical.max_drawdown(IndexReturn['IndexdailyReturn'])))
    print('sharpe index' +
          str(empyrical.sharpe_ratio(IndexReturn['IndexdailyReturn'])))
    print('max drawdown port' +
          str(empyrical.max_drawdown(IndexReturn['PortdailyReturn'])))
    print('sharpe port' +
          str(empyrical.sharpe_ratio(IndexReturn['PortdailyReturn'])))
    return IndexReturn
def test_sharpe_ratio(self, test_risk_free):
    res_a = empyrical.sharpe_ratio(ret['a'], risk_free=test_risk_free)
    res_b = empyrical.sharpe_ratio(ret['b'], risk_free=test_risk_free)
    res_c = empyrical.sharpe_ratio(ret['c'], risk_free=test_risk_free)
    assert isclose(
        ret['a'].vbt.returns.sharpe_ratio(risk_free=test_risk_free), res_a)
    pd.testing.assert_series_equal(
        ret.vbt.returns.sharpe_ratio(risk_free=test_risk_free),
        pd.Series([res_a, res_b, res_c],
                  index=ret.columns).rename('sharpe_ratio'))
    pd.testing.assert_series_equal(
        ret.vbt.returns.rolling_sharpe_ratio(
            ret.shape[0], minp=1, risk_free=test_risk_free).iloc[-1],
        pd.Series([res_a, res_b, res_c],
                  index=ret.columns).rename(ret.index[-1]))
def sharpe_ratio(returns, risk_free=0, period=DAILY):
    """
    Determines the Sharpe ratio of a strategy.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    risk_free : int, float
        Constant risk-free return throughout the period.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Can be 'monthly', 'weekly', or 'daily'.
        - Defaults to 'daily'.

    Returns
    -------
    float
        Sharpe ratio.
    np.nan
        If insufficient length of returns or if adjusted returns are 0.

    Note
    -----
    See https://en.wikipedia.org/wiki/Sharpe_ratio for more details.
    """
    return empyrical.sharpe_ratio(returns, risk_free=risk_free, period=period)
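# Hedged usage sketch for the wrapper above (synthetic data, illustrative
# names): since it forwards straight to empyrical, a Series of daily returns
# yields an annualized float, and an input shorter than two observations
# comes back as np.nan, as the docstring notes.
import pandas as pd
import empyrical

daily = pd.Series([0.01, -0.005, 0.002, 0.004],
                  index=pd.date_range("2021-01-04", periods=4, freq="B"))
print(empyrical.sharpe_ratio(daily, risk_free=0))  # annualized float
print(empyrical.sharpe_ratio(daily[:1]))           # np.nan: too few points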
def calculate_metrics(self):
    self.benchmark_period_returns = \
        cum_returns(self.benchmark_returns).iloc[-1]
    self.algorithm_period_returns = \
        cum_returns(self.algorithm_returns).iloc[-1]

    if not self.algorithm_returns.index.equals(
            self.benchmark_returns.index):
        message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
        message = message.format(bm_count=len(self.benchmark_returns),
                                 algo_count=len(self.algorithm_returns),
                                 start=self._start_session,
                                 end=self._end_session)
        raise Exception(message)

    self.num_trading_days = len(self.benchmark_returns)
    self.mean_algorithm_returns = (
        self.algorithm_returns.cumsum() /
        np.arange(1, self.num_trading_days + 1, dtype=np.float64))

    self.benchmark_volatility = annual_volatility(self.benchmark_returns)
    self.algorithm_volatility = annual_volatility(self.algorithm_returns)

    self.treasury_period_return = choose_treasury(
        self.treasury_curves,
        self._start_session,
        self._end_session,
        self.trading_calendar,
    )
    self.sharpe = sharpe_ratio(self.algorithm_returns)

    # The consumer currently expects a 0.0 value for sharpe in period,
    # this differs from cumulative which was np.nan.
    # When factoring out the sharpe_ratio, the different return types
    # were collapsed into `np.nan`.
    # TODO: Either fix consumer to accept `np.nan` or make the
    # `sharpe_ratio` return type configurable.
    # In the meantime, convert nan values to 0.0
    if pd.isnull(self.sharpe):
        self.sharpe = 0.0

    self.downside_risk = downside_risk(self.algorithm_returns.values)
    self.sortino = sortino_ratio(
        self.algorithm_returns.values,
        _downside_risk=self.downside_risk,
    )
    self.information = information_ratio(
        self.algorithm_returns.values,
        self.benchmark_returns.values,
    )
    self.alpha, self.beta = alpha_beta_aligned(
        self.algorithm_returns.values,
        self.benchmark_returns.values,
    )
    self.excess_return = self.algorithm_period_returns - \
        self.treasury_period_return
    self.max_drawdown = max_drawdown(self.algorithm_returns.values)
    self.max_leverage = self.calculate_max_leverage()
def portfolioAnalysis(return_data):
    non_cum_return = getNonCumReturn(return_data)
    # Annualized return
    annual_return = empyrical.annual_return(non_cum_return, period='daily')
    # Annualized volatility
    annual_volatility = empyrical.annual_volatility(non_cum_return,
                                                    period='daily')
    # Maximum drawdown
    max_drawdown = empyrical.max_drawdown(non_cum_return)
    # Sharpe ratio, with the 3% annual risk-free rate converted to daily
    sharpe_ratio = empyrical.sharpe_ratio(
        non_cum_return,
        risk_free=math.pow(1 + 0.03, 1 / 250) - 1,
        period='daily')
    # Per-year statistics
    aggr_returns = empyrical.aggregate_returns(non_cum_return,
                                               convert_to="yearly")
    print("Annualized return: %f" % annual_return)
    print("Annualized volatility: %f" % annual_volatility)
    print("Maximum drawdown: %f" % max_drawdown)
    print("Sharpe ratio: %f" % sharpe_ratio)
    print("Yearly returns:")
    print(aggr_returns)
    data = [annual_return, annual_volatility, max_drawdown, sharpe_ratio]
    return pd.Series(data, index=["annual_return", "annual_volatility",
                                  "max_drawdown", "sharpe_ratio"])
def compute_stats(portfolio, benchmark):
    '''Compute statistics for the current portfolio'''
    stats = {}
    grp_by_year = portfolio.groupby(lambda x: x.year)
    stats['1yr_highest'] = grp_by_year.max().iloc[-1]
    stats['1yr_lowest'] = grp_by_year.min().iloc[-1]

    portfolio_return = simple_returns(portfolio)
    # benchmark_return = simple_returns(benchmark)
    stats['wtd_return'] = aggregate_returns(portfolio_return,
                                            'weekly').iloc[-1]
    stats['mtd_return'] = aggregate_returns(portfolio_return,
                                            'monthly').iloc[-1]
    stats['ytd_return'] = aggregate_returns(portfolio_return,
                                            'yearly').iloc[-1]
    stats['max_drawdown'] = max_drawdown(portfolio_return)
    # stats['annual_return'] = annual_return(portfolio_return, period='daily')
    stats['annual_volatility'] = annual_volatility(portfolio_return,
                                                   period='daily', alpha=2.0)
    # stats['calmar_ratio'] = calmar_ratio(portfolio_return, period='daily')
    # stats['omega_ratio'] = omega_ratio(portfolio_return, risk_free=0.0)
    stats['sharpe_ratio_1yr'] = sharpe_ratio(portfolio_return,
                                             risk_free=0.0, period='daily')
    # stats['alpha'], stats['beta'] = alpha_beta(portfolio_return, benchmark_return,
    #                                            risk_free=0.0, period='daily')
    stats['tail_ratio'] = tail_ratio(portfolio_return)
    # stats['capture_ratio'] = capture(portfolio_return, benchmark_return, period='daily')
    return stats
def performance_measures(pct_chg, y):
    result = {}
    y_init = list(map(reverse_func, y))
    predict = pd.Series(index=pct_chg.index, data=y_init)
    predict.name = 'label'
    df = pd.concat([pct_chg, predict.shift(1)], axis=1)
    df['return'] = 0
    short_cond = (df['label'] - mid_type) < -epsilon
    long_cond = (df['label'] - mid_type) > epsilon
    df.loc[long_cond, 'return'] = pct_chg.loc[long_cond]
    df.loc[short_cond, 'return'] = -pct_chg[short_cond]
    returns = df['return']
    if 'Y0' in performance_types:
        Y0 = pd.Series(index=pct_chg.index, data=list(y))
        result['Y0'] = Y0
    if 'Y' in performance_types:
        result['Y'] = predict
    if 'returns' in performance_types:
        result['returns'] = returns
    if 'cum_returns' in performance_types:
        result['cum_returns'] = empyrical.cum_returns(returns)
    if 'annual_return' in performance_types:
        result['annual_return'] = empyrical.annual_return(returns)
    if 'sharpe_ratio' in performance_types:
        result['sharpe_ratio'] = empyrical.sharpe_ratio(returns)
    return result
def sharpe_ratio(returns, risk_free=0, period=DAILY):
    """
    Determines the Sharpe ratio of a strategy.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    risk_free : int, float
        Constant risk-free return throughout the period.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Can be 'monthly', 'weekly', or 'daily'.
        - Defaults to 'daily'.

    Returns
    -------
    float
        Sharpe ratio.
    np.nan
        If insufficient length of returns or if adjusted returns are 0.

    Note
    -----
    See https://en.wikipedia.org/wiki/Sharpe_ratio for more details.
    """
    return ep.sharpe_ratio(returns, risk_free=risk_free, period=period)
def create_perf_attrib_stats(perf_attrib, risk_exposures):
    """
    Takes perf attribution data over a period of time and computes annualized
    multifactor alpha, multifactor sharpe, risk exposures.
    """
    summary = OrderedDict()
    specific_returns = perf_attrib['specific_returns']
    common_returns = perf_attrib['common_returns']

    summary['Annual multi-factor alpha'] =\
        ep.annual_return(specific_returns)
    summary['Multi-factor sharpe'] =\
        ep.sharpe_ratio(specific_returns)

    # empty line between common/specific/total returns
    summary[' '] = ' '

    summary['Cumulative specific returns'] =\
        ep.cum_returns_final(specific_returns)
    summary['Cumulative common returns'] =\
        ep.cum_returns_final(common_returns)
    summary['Total returns'] =\
        ep.cum_returns_final(perf_attrib['total_returns'])

    summary = pd.Series(summary)
    risk_exposure_summary = risk_exposures.sum(axis='rows')
    return summary, risk_exposure_summary
def objective(params, ohlcv_list, operation, mode, log_dir):
    print(params)
    identity = str(uuid.uuid1())
    result_list = valid_wave_by_multi_processes(params, ohlcv_list,
                                                operation, mode)
    returns_list = []
    for result in result_list:
        stk_returns = result['pct_chg'] * result['direction']
        stk_returns = stk_returns.fillna(0)
        returns_list.append(stk_returns)
    returns = pd.concat(returns_list, axis=0)
    annual_return = empyrical.annual_return(returns)
    sharpe_ratio = empyrical.sharpe_ratio(returns)
    if np.isnan(sharpe_ratio):
        sharpe_ratio = 0
    data = {
        'id': identity,
        'params': params,
        'returns': returns,
        'annual_return': annual_return,
        'sharpe_ratio': sharpe_ratio,
        # 'result_list': result_list
    }
    with open(os.path.join(log_dir, identity + '.pkl'), 'wb') as f:
        pickle.dump(data, f)
    print('id: %s, annual_return: %s, sharpe_ratio: %s'
          % (identity, annual_return, sharpe_ratio))
    return {'loss': -sharpe_ratio, 'status': STATUS_OK}
def test_sharpe_ratio(self, returns, risk_free, expected):
    assert_almost_equal(
        empyrical.sharpe_ratio(np.asarray(returns), risk_free=risk_free),
        expected,
        DECIMAL_PLACES)
def Analysis(results):
    """
    Technical-indicator analyzer.

    :param results: {
        'returns': [0.1, 0.1, 0.1],
        'benchmark': [0.1, 0.1, 0.1],
        'trades': [[2020.01.01 01:00:00, 'BUY', 6234.10, 1]]
    }
    :return:
    """
    res = pnl_res(results["returns"])
    bres = pnl_res(results["benchmark"])
    return_ratio = empyrical.cum_returns_final(res)
    annual_return_ratio = empyrical.annual_return(res)
    sharp_ratio = empyrical.sharpe_ratio(res, 0.035 / 252)
    return_volatility = empyrical.annual_volatility(res)
    max_drawdown = empyrical.max_drawdown(res)
    alpha, beta = empyrical.alpha_beta_aligned(res, bres)
    pls, wr = pls_ws(results["trades"])
    return {
        'pls': pls,
        'wr': wr,
        'return_ratio': return_ratio,
        'annual_return_ratio': annual_return_ratio,
        'beta': beta,
        'alpha': alpha,
        'sharp_ratio': sharp_ratio,
        'return_volatility': return_volatility,
        'max_drawdown': max_drawdown,
    }
def get_performance_summary(returns):
    stats = {'annualized_returns': ep.annual_return(returns),
             'cumulative_returns': ep.cum_returns_final(returns),
             'annual_volatility': ep.annual_volatility(returns),
             'sharpe_ratio': ep.sharpe_ratio(returns),
             'sortino_ratio': ep.sortino_ratio(returns),
             'max_drawdown': ep.max_drawdown(returns)}
    return pd.Series(stats)
def plot(self):
    # show a plot of portfolio vs mean market performance
    df_info = pd.DataFrame(self.infos)
    df_info.set_index('current step', inplace=True)
    # df_info.set_index('date', inplace=True)
    rn = np.asarray(df_info['portfolio return'])
    try:
        spf = df_info['portfolio value'].iloc[1]   # Start portfolio value
        epf = df_info['portfolio value'].iloc[-1]  # End portfolio value
        pr = (epf - spf) / spf
    except:
        pr = 0
    try:
        sr = sharpe_ratio(rn)
    except:
        sr = 0
    try:
        sor = sortino_ratio(rn)
    except:
        sor = 0
    try:
        mdd = max_drawdown(rn)
    except:
        mdd = 0
    try:
        cr = calmar_ratio(rn)
    except:
        cr = 0
    try:
        om = omega_ratio(rn)
    except:
        om = 0
    try:
        dr = downside_risk(rn)
    except:
        dr = 0
    print("First portfolio value: ",
          np.round(df_info['portfolio value'].iloc[1]))
    print("Last portfolio value: ",
          np.round(df_info['portfolio value'].iloc[-1]))
    title = (self.strategy_name + ': ' +
             'profit={: 2.2%} sharpe={: 2.2f} sortino={: 2.2f} '
             'max drawdown={: 2.2%} calmar={: 2.2f} omega={: 2.2f} '
             'downside risk={: 2.2f}'.format(pr, sr, sor, mdd, cr, om, dr))
    # df_info[['market value', 'portfolio value']].plot(title=title, fig=plt.gcf(), figsize=(15, 10), rot=30)
    df_info[['portfolio value']].plot(title=title, fig=plt.gcf(),
                                      figsize=(15, 10), rot=30)
def get_metrics_single_model(ret):
    return {
        "Mean": ret.mean(),
        "Mean (Yearly)": aggregate_returns(ret, convert_to="yearly").mean(),
        "Standard Deviation": ret.std(),
        "Sharpe Ratio": sharpe_ratio(ret, period='monthly'),
        "Skewness": skew(ret),
        "Kurtosis": kurtosis(ret),
        "Max Drawdown": max_drawdown(ret),
    }
def _get_reward(self, current_prices, next_prices):
    if self.compute_reward == compute_reward.profit:
        returns_rate = next_prices / current_prices
        # pip_value = self._calculate_pip_value_in_account_currency(account_currency.USD, next_prices)
        # returns_rate = np.multiply(returns_rate, pip_value)
        log_returns = np.log(returns_rate)
        last_weight = self.current_weights
        securities_value = self.current_portfolio_values[:-1] * returns_rate
        self.current_portfolio_values[:-1] = securities_value
        self.current_weights = self.current_portfolio_values / np.sum(
            self.current_portfolio_values)
        reward = last_weight[:-1] * log_returns
    elif self.compute_reward == compute_reward.sharpe:
        try:
            sr = sharpe_ratio(np.asarray(self.returns))
        except:
            sr = 0
        reward = sr
    elif self.compute_reward == compute_reward.sortino:
        try:
            sr = sortino_ratio(np.asarray(self.returns))
        except:
            sr = 0
        reward = sr
    elif self.compute_reward == compute_reward.max_drawdown:
        try:
            mdd = max_drawdown(np.asarray(self.returns))
        except:
            mdd = 0
        reward = mdd
    elif self.compute_reward == compute_reward.calmar:
        try:
            cr = calmar_ratio(np.asarray(self.returns))
        except:
            cr = 0
        reward = cr
    elif self.compute_reward == compute_reward.omega:
        try:
            om = omega_ratio(np.asarray(self.returns))
        except:
            om = 0
        reward = om
    elif self.compute_reward == compute_reward.downside_risk:
        try:
            dr = downside_risk(np.asarray(self.returns))
        except:
            dr = 0
        reward = dr

    try:
        # collapse an array-valued reward (the profit case) to its mean
        reward = reward.mean()
    except:
        pass  # reward is already a scalar
    return reward
def RiskRewardStats(df):
    global RiskRewardList
    RiskRewardIndex = ['Sharpe Ratio', 'Sortino Ratio', 'Omega Ratio',
                       'Skewness', 'Kurtosis',
                       'Correlation vs MSCI World TR Index',
                       'Correlation vs Bloomberg Index']
    OmegaRatio = omega_ratio(df['Monthly Return'])
    Kurtosis = df['Monthly Return'].kurt()
    Skewness = df['Monthly Return'].skew()
    SharpeRatio = sharpe_ratio(df['Monthly Return'], period='monthly')
    SortinoRatio = sortino_ratio(df['Monthly Return'], period='monthly')
    RiskRewardList = [SharpeRatio, SortinoRatio, OmegaRatio, Skewness,
                      Kurtosis, MSCIIndex, BloombergIndex]
    RiskRewardDf = pd.DataFrame(RiskRewardList, columns=['Value'],
                                index=RiskRewardIndex)
    return RiskRewardDf
def _get_backtest_performance_metrics(ret, benchmark_ret):
    metrics = {
        'alpha': empyrical.alpha(ret, benchmark_ret),
        'beta': empyrical.beta(ret, benchmark_ret),
        'return': empyrical.cum_returns_final(ret),
        'cagr': empyrical.cagr(ret),
        'sharpe': empyrical.sharpe_ratio(ret),
        'max_drawdown': empyrical.max_drawdown(ret),
        'var': empyrical.value_at_risk(ret),
        'volatility': empyrical.annual_volatility(ret),
    }
    return metrics
def capacity_sweep(self, returns, transactions, market_data,
                   bt_starting_capital,
                   min_pv=100000,
                   max_pv=300000000,
                   step_size=1000000):
    """
    Sharpe ratio adjusted for the market impact of the capital deployed
    (per million), swept over a range of starting capital levels.

    Parameters
    ----------
    returns : pd.Series
        Timeseries of portfolio returns to be adjusted for various
        degrees of slippage.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in tears.create_full_tear_sheet.
    market_data : pd.Panel, optional
        Panel with items axis of 'price' and 'volume' DataFrames.
        The major and minor axes should match those of the
        passed positions DataFrame (same dates and symbols).
    min_pv : int
        Minimum starting capital.
    max_pv : int
        Maximum starting capital.
    step_size : int
        Step size of the sweep.

    Returns
    -------
    pd.Series
        Slippage-adjusted Sharpe ratio (adj_sharpe), indexed by
        starting capital in millions.
    """
    txn_daily_w_bar = capacity.daily_txns_with_bar_data(
        transactions, market_data)

    captial_base_sweep = pd.Series()
    for start_pv in range(min_pv, max_pv, step_size):
        adj_ret = capacity.apply_slippage_penalty(
            returns, txn_daily_w_bar, start_pv, bt_starting_capital)
        sharpe = empyrical.sharpe_ratio(adj_ret)
        if sharpe < -1:
            break
        captial_base_sweep.loc[start_pv] = sharpe
    captial_base_sweep.index = captial_base_sweep.index / MM_DISPLAY_UNIT
    return captial_base_sweep
def _func(_p):
    _ret = self._returns[_p._id]
    _sharpe = ep.sharpe_ratio(_ret)
    _p.__log__('Sharpe: %s' % _sharpe)
    self._sharpes[_p._id] = _sharpe
    if _p._id not in self._weights:
        weight = 0.0
    else:
        weight = self._weights[_p._id]
    v = _sharpe * weight
    return v
def create_perf_attrib_stats(perf_attrib, risk_exposures):
    """
    Takes perf attribution data over a period of time and computes annualized
    multifactor alpha, multifactor sharpe, risk exposures.
    """
    summary = OrderedDict()
    total_returns = perf_attrib['total_returns']
    specific_returns = perf_attrib['specific_returns']
    common_returns = perf_attrib['common_returns']

    summary['Annualized Specific Return'] =\
        ep.annual_return(specific_returns)
    summary['Annualized Common Return'] =\
        ep.annual_return(common_returns)
    summary['Annualized Total Return'] =\
        ep.annual_return(total_returns)

    summary['Specific Sharpe Ratio'] =\
        ep.sharpe_ratio(specific_returns)

    summary['Cumulative Specific Return'] =\
        ep.cum_returns_final(specific_returns)
    summary['Cumulative Common Return'] =\
        ep.cum_returns_final(common_returns)
    summary['Total Returns'] =\
        ep.cum_returns_final(total_returns)

    summary = pd.Series(summary, name='')

    annualized_returns_by_factor = [ep.annual_return(perf_attrib[c])
                                    for c in risk_exposures.columns]
    cumulative_returns_by_factor = [ep.cum_returns_final(perf_attrib[c])
                                    for c in risk_exposures.columns]

    risk_exposure_summary = pd.DataFrame(
        data=OrderedDict([
            (
                'Average Risk Factor Exposure',
                risk_exposures.mean(axis='rows')
            ),
            ('Annualized Return', annualized_returns_by_factor),
            ('Cumulative Return', cumulative_returns_by_factor),
        ]),
        index=risk_exposures.columns,
    )

    return summary, risk_exposure_summary
def create_perf_attrib_stats(perf_attrib, risk_exposures):
    """
    Takes perf attribution data over a period of time and computes annualized
    multifactor alpha, multifactor sharpe, risk exposures.
    """
    summary = OrderedDict()
    total_returns = perf_attrib['total_returns']
    specific_returns = perf_attrib['specific_returns']
    common_returns = perf_attrib['common_returns']

    summary['Annualized Specific Return'] =\
        ep.annual_return(specific_returns,
                         annualization=APPROX_BDAYS_PER_YEAR)
    summary['Annualized Common Return'] =\
        ep.annual_return(common_returns,
                         annualization=APPROX_BDAYS_PER_YEAR)
    summary['Annualized Total Return'] =\
        ep.annual_return(total_returns,
                         annualization=APPROX_BDAYS_PER_YEAR)

    summary['Specific Sharpe Ratio'] =\
        ep.sharpe_ratio(specific_returns,
                        annualization=APPROX_BDAYS_PER_YEAR)

    summary['Cumulative Specific Return'] =\
        ep.cum_returns_final(specific_returns)
    summary['Cumulative Common Return'] =\
        ep.cum_returns_final(common_returns)
    summary['Total Returns'] =\
        ep.cum_returns_final(total_returns)

    summary = pd.Series(summary, name='')

    annualized_returns_by_factor = [
        ep.annual_return(perf_attrib[c],
                         annualization=APPROX_BDAYS_PER_YEAR)
        for c in risk_exposures.columns
    ]
    cumulative_returns_by_factor = [
        ep.cum_returns_final(perf_attrib[c])
        for c in risk_exposures.columns
    ]

    risk_exposure_summary = pd.DataFrame(
        data=OrderedDict([
            ('Average Risk Factor Exposure',
             risk_exposures.mean(axis='rows')),
            ('Annualized Return', annualized_returns_by_factor),
            ('Cumulative Return', cumulative_returns_by_factor),
        ]),
        index=risk_exposures.columns,
    )

    return summary, risk_exposure_summary
def create_perf_attrib_stats(perf_attrib, risk_exposures):
    """
    Takes perf attribution data over a period of time and computes annualized
    multifactor alpha, multifactor sharpe, risk exposures.
    """
    summary = OrderedDict()
    total_returns = perf_attrib["total_returns"]
    specific_returns = perf_attrib["specific_returns"]
    common_returns = perf_attrib["common_returns"]

    summary["Annualized Specific Return"] = ep.annual_return(specific_returns)
    summary["Annualized Common Return"] = ep.annual_return(common_returns)
    summary["Annualized Total Return"] = ep.annual_return(total_returns)

    summary["Specific Sharpe Ratio"] = ep.sharpe_ratio(specific_returns)

    summary["Cumulative Specific Return"] = ep.cum_returns_final(
        specific_returns
    )
    summary["Cumulative Common Return"] = ep.cum_returns_final(common_returns)
    summary["Total Returns"] = ep.cum_returns_final(total_returns)

    summary = pd.Series(summary, name="")

    annualized_returns_by_factor = [
        ep.annual_return(perf_attrib[c]) for c in risk_exposures.columns
    ]
    cumulative_returns_by_factor = [
        ep.cum_returns_final(perf_attrib[c]) for c in risk_exposures.columns
    ]

    risk_exposure_summary = pd.DataFrame(
        data=OrderedDict(
            [
                (
                    "Average Risk Factor Exposure",
                    risk_exposures.mean(axis="rows"),
                ),
                ("Annualized Return", annualized_returns_by_factor),
                ("Cumulative Return", cumulative_returns_by_factor),
            ]
        ),
        index=risk_exposures.columns,
    )

    return summary, risk_exposure_summary
def create_perf_attrib_stats(perf_attrib, risk_exposures):
    """
    Takes perf attribution data over a period of time and computes annualized
    multifactor alpha, multifactor sharpe, risk exposures.
    """
    summary = OrderedDict()
    total_returns = perf_attrib['total_returns']
    specific_returns = perf_attrib['specific_returns']
    common_returns = perf_attrib['common_returns']

    summary['Annualized Specific Return'] =\
        ep.annual_return(specific_returns)
    summary['Annualized Common Return'] =\
        ep.annual_return(common_returns)
    summary['Annualized Total Return'] =\
        ep.annual_return(total_returns)

    summary['Specific Sharpe Ratio'] =\
        ep.sharpe_ratio(specific_returns)

    summary['Cumulative Specific Return'] =\
        ep.cum_returns_final(specific_returns)
    summary['Cumulative Common Return'] =\
        ep.cum_returns_final(common_returns)
    summary['Total Returns'] =\
        ep.cum_returns_final(total_returns)

    summary = pd.Series(summary, name='')

    annualized_returns_by_factor = [
        ep.annual_return(perf_attrib[c])
        for c in risk_exposures.columns
    ]
    cumulative_returns_by_factor = [
        ep.cum_returns_final(perf_attrib[c])
        for c in risk_exposures.columns
    ]

    risk_exposure_summary = pd.DataFrame(
        data=OrderedDict([
            ('Average Risk Factor Exposure',
             risk_exposures.mean(axis='rows')),
            ('Annualized Return', annualized_returns_by_factor),
            ('Cumulative Return', cumulative_returns_by_factor),
        ]),
        index=risk_exposures.columns,
    )

    return summary, risk_exposure_summary
def _reward(self):
    length = min(self.current_step, self.forecast_steps)
    returns = np.diff(self.net_worths[-length:])

    if np.count_nonzero(returns) < 1:
        return 0

    if self.reward_strategy == 'sortino':
        reward = sortino_ratio(returns, annualization=365 * 24)
    elif self.reward_strategy == 'sharpe':
        reward = sharpe_ratio(returns, annualization=365 * 24)
    elif self.reward_strategy == 'omega':
        reward = omega_ratio(returns, annualization=365 * 24)
    else:
        reward = returns[-1]

    return reward if np.isfinite(reward) else 0
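# Quick hedged check (synthetic data, not from the source above) of the
# annualization=365 * 24 choice in _reward(): for hourly data, empyrical
# scales the per-period Sharpe by sqrt(annualization), i.e.
# mean / std(ddof=1) * sqrt(8760).
import numpy as np
from empyrical import sharpe_ratio

rng = np.random.default_rng(1)
hourly = rng.normal(1e-4, 1e-3, 500)
manual = hourly.mean() / hourly.std(ddof=1) * np.sqrt(365 * 24)
assert np.isclose(manual, sharpe_ratio(hourly, annualization=365 * 24))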
def test_perf_attrib_regression(self):
    positions = pd.read_csv('pyfolio/tests/test_data/positions.csv',
                            index_col=0, parse_dates=True)
    positions.columns = [int(col) if col != 'cash' else col
                         for col in positions.columns]

    returns = pd.read_csv('pyfolio/tests/test_data/returns.csv',
                          index_col=0, parse_dates=True,
                          header=None, squeeze=True)

    factor_loadings = pd.read_csv(
        'pyfolio/tests/test_data/factor_loadings.csv',
        index_col=[0, 1], parse_dates=True
    )
    factor_returns = pd.read_csv(
        'pyfolio/tests/test_data/factor_returns.csv',
        index_col=0, parse_dates=True
    )

    residuals = pd.read_csv('pyfolio/tests/test_data/residuals.csv',
                            index_col=0, parse_dates=True)
    residuals.columns = [int(col) for col in residuals.columns]

    intercepts = pd.read_csv('pyfolio/tests/test_data/intercepts.csv',
                             index_col=0, header=None, squeeze=True)

    risk_exposures_portfolio, perf_attrib_output = perf_attrib(
        returns,
        positions,
        factor_returns,
        factor_loadings,
    )

    specific_returns = perf_attrib_output['specific_returns']
    common_returns = perf_attrib_output['common_returns']
    combined_returns = specific_returns + common_returns

    # since all returns are factor returns, common returns should be
    # equivalent to total returns, and specific returns should be 0
    pd.util.testing.assert_series_equal(returns, common_returns,
                                        check_names=False)
    self.assertTrue(np.isclose(specific_returns, 0).all())

    # specific and common returns combined should equal total returns
    pd.util.testing.assert_series_equal(returns, combined_returns,
                                        check_names=False)

    # check that residuals + intercepts = specific returns
    self.assertTrue(np.isclose((residuals + intercepts), 0).all())

    # check that exposure * factor returns = common returns
    expected_common_returns = risk_exposures_portfolio.multiply(
        factor_returns, axis='rows'
    ).sum(axis='columns')
    pd.util.testing.assert_series_equal(expected_common_returns,
                                        common_returns,
                                        check_names=False)

    # since factor loadings are ones, portfolio risk exposures
    # should be ones
    pd.util.testing.assert_frame_equal(
        risk_exposures_portfolio,
        pd.DataFrame(np.ones_like(risk_exposures_portfolio),
                     index=risk_exposures_portfolio.index,
                     columns=risk_exposures_portfolio.columns)
    )

    perf_attrib_summary, exposures_summary = create_perf_attrib_stats(
        perf_attrib_output, risk_exposures_portfolio
    )

    self.assertEqual(ep.annual_return(specific_returns),
                     perf_attrib_summary['Annualized Specific Return'])
    self.assertEqual(ep.annual_return(common_returns),
                     perf_attrib_summary['Annualized Common Return'])
    self.assertEqual(ep.annual_return(combined_returns),
                     perf_attrib_summary['Annualized Total Return'])
    self.assertEqual(ep.sharpe_ratio(specific_returns),
                     perf_attrib_summary['Specific Sharpe Ratio'])
    self.assertEqual(ep.cum_returns_final(specific_returns),
                     perf_attrib_summary['Cumulative Specific Return'])
    self.assertEqual(ep.cum_returns_final(common_returns),
                     perf_attrib_summary['Cumulative Common Return'])
    self.assertEqual(ep.cum_returns_final(combined_returns),
                     perf_attrib_summary['Total Returns'])

    avg_factor_exposure = risk_exposures_portfolio.mean().rename(
        'Average Risk Factor Exposure'
    )
    pd.util.testing.assert_series_equal(
        avg_factor_exposure,
        exposures_summary['Average Risk Factor Exposure']
    )

    cumulative_returns_by_factor = pd.Series(
        [ep.cum_returns_final(perf_attrib_output[c])
         for c in risk_exposures_portfolio.columns],
        name='Cumulative Return',
        index=risk_exposures_portfolio.columns
    )
    pd.util.testing.assert_series_equal(
        cumulative_returns_by_factor,
        exposures_summary['Cumulative Return']
    )

    annualized_returns_by_factor = pd.Series(
        [ep.annual_return(perf_attrib_output[c])
         for c in risk_exposures_portfolio.columns],
        name='Annualized Return',
        index=risk_exposures_portfolio.columns
    )
    pd.util.testing.assert_series_equal(
        annualized_returns_by_factor,
        exposures_summary['Annualized Return']
    )
def update(self, dt, algorithm_returns, benchmark_returns, leverage):
    # Keep track of latest dt for use in to_dict and other methods
    # that report current state.
    self.latest_dt = dt
    dt_loc = self.cont_index.get_loc(dt)
    self.latest_dt_loc = dt_loc

    self.algorithm_returns_cont[dt_loc] = algorithm_returns
    self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]
    self.num_trading_days = len(self.algorithm_returns)

    if self.create_first_day_stats:
        if len(self.algorithm_returns) == 1:
            self.algorithm_returns = np.append(0.0, self.algorithm_returns)

    self.algorithm_cumulative_returns[dt_loc] = cum_returns(
        self.algorithm_returns
    )[-1]

    algo_cumulative_returns_to_date = \
        self.algorithm_cumulative_returns[:dt_loc + 1]

    self.mean_returns_cont[dt_loc] = \
        algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days

    self.mean_returns = self.mean_returns_cont[:dt_loc + 1]

    self.annualized_mean_returns_cont[dt_loc] = \
        self.mean_returns_cont[dt_loc] * 252

    self.annualized_mean_returns = \
        self.annualized_mean_returns_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.mean_returns) == 1:
            self.mean_returns = np.append(0.0, self.mean_returns)
            self.annualized_mean_returns = np.append(
                0.0, self.annualized_mean_returns)

    self.benchmark_returns_cont[dt_loc] = benchmark_returns
    self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.benchmark_returns) == 1:
            self.benchmark_returns = np.append(0.0, self.benchmark_returns)

    self.benchmark_cumulative_returns[dt_loc] = cum_returns(
        self.benchmark_returns
    )[-1]

    benchmark_cumulative_returns_to_date = \
        self.benchmark_cumulative_returns[:dt_loc + 1]

    self.mean_benchmark_returns_cont[dt_loc] = \
        benchmark_cumulative_returns_to_date[dt_loc] / \
        self.num_trading_days

    self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt_loc]

    self.annualized_mean_benchmark_returns_cont[dt_loc] = \
        self.mean_benchmark_returns_cont[dt_loc] * 252

    self.annualized_mean_benchmark_returns = \
        self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]

    self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
    self.algorithm_cumulative_leverages = \
        self.algorithm_cumulative_leverages_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.algorithm_cumulative_leverages) == 1:
            self.algorithm_cumulative_leverages = np.append(
                0.0, self.algorithm_cumulative_leverages)

    if not len(self.algorithm_returns) and len(self.benchmark_returns):
        message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
        message = message.format(
            bm_count=len(self.benchmark_returns),
            algo_count=len(self.algorithm_returns),
            start=self.start_session,
            end=self.end_session,
            dt=dt
        )
        raise Exception(message)

    self.update_current_max()
    self.benchmark_volatility[dt_loc] = annual_volatility(
        self.benchmark_returns
    )
    self.algorithm_volatility[dt_loc] = annual_volatility(
        self.algorithm_returns
    )

    # caching the treasury rates for the minutely case is a
    # big speedup, because it avoids searching the treasury
    # curves on every minute.
    # In both minutely and daily, the daily curve is always used.
    treasury_end = dt.replace(hour=0, minute=0)
    if np.isnan(self.daily_treasury[treasury_end]):
        treasury_period_return = choose_treasury(
            self.treasury_curves,
            self.start_session,
            treasury_end,
            self.trading_calendar,
        )
        self.daily_treasury[treasury_end] = treasury_period_return
    self.treasury_period_return = self.daily_treasury[treasury_end]
    self.excess_returns[dt_loc] = (
        self.algorithm_cumulative_returns[dt_loc] -
        self.treasury_period_return)

    self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
        self.algorithm_returns,
        self.benchmark_returns,
    )
    self.sharpe[dt_loc] = sharpe_ratio(
        self.algorithm_returns,
    )
    self.downside_risk[dt_loc] = downside_risk(
        self.algorithm_returns
    )
    self.sortino[dt_loc] = sortino_ratio(
        self.algorithm_returns,
        _downside_risk=self.downside_risk[dt_loc]
    )
    self.information[dt_loc] = information_ratio(
        self.algorithm_returns,
        self.benchmark_returns,
    )
    self.max_drawdown = max_drawdown(
        self.algorithm_returns
    )
    self.max_drawdowns[dt_loc] = self.max_drawdown
    self.max_leverage = self.calculate_max_leverage()
    self.max_leverages[dt_loc] = self.max_leverage
def calculate_metrics(self):
    self.benchmark_period_returns = \
        cum_returns(self.benchmark_returns).iloc[-1]
    self.algorithm_period_returns = \
        cum_returns(self.algorithm_returns).iloc[-1]

    if not self.algorithm_returns.index.equals(
        self.benchmark_returns.index
    ):
        message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
        message = message.format(
            bm_count=len(self.benchmark_returns),
            algo_count=len(self.algorithm_returns),
            start=self._start_session,
            end=self._end_session
        )
        raise Exception(message)

    self.num_trading_days = len(self.benchmark_returns)

    self.mean_algorithm_returns = (
        self.algorithm_returns.cumsum() /
        np.arange(1, self.num_trading_days + 1, dtype=np.float64)
    )

    self.benchmark_volatility = annual_volatility(self.benchmark_returns)
    self.algorithm_volatility = annual_volatility(self.algorithm_returns)

    self.treasury_period_return = choose_treasury(
        self.treasury_curves,
        self._start_session,
        self._end_session,
        self.trading_calendar,
    )
    self.sharpe = sharpe_ratio(
        self.algorithm_returns,
    )
    # The consumer currently expects a 0.0 value for sharpe in period,
    # this differs from cumulative which was np.nan.
    # When factoring out the sharpe_ratio, the different return types
    # were collapsed into `np.nan`.
    # TODO: Either fix consumer to accept `np.nan` or make the
    # `sharpe_ratio` return type configurable.
    # In the meantime, convert nan values to 0.0
    if pd.isnull(self.sharpe):
        self.sharpe = 0.0
    self.downside_risk = downside_risk(
        self.algorithm_returns.values
    )
    self.sortino = sortino_ratio(
        self.algorithm_returns.values,
        _downside_risk=self.downside_risk,
    )
    self.information = information_ratio(
        self.algorithm_returns.values,
        self.benchmark_returns.values,
    )
    self.alpha, self.beta = alpha_beta_aligned(
        self.algorithm_returns.values,
        self.benchmark_returns.values,
    )
    self.excess_return = self.algorithm_period_returns - \
        self.treasury_period_return
    self.max_drawdown = max_drawdown(self.algorithm_returns.values)
    self.max_leverage = self.calculate_max_leverage()
def risk_metric_period(cls,
                       start_session,
                       end_session,
                       algorithm_returns,
                       benchmark_returns,
                       algorithm_leverages):
    """
    Creates a dictionary representing the state of the risk report.

    Parameters
    ----------
    start_session : pd.Timestamp
        Start of period (inclusive) to produce metrics on
    end_session : pd.Timestamp
        End of period (inclusive) to produce metrics on
    algorithm_returns : pd.Series(pd.Timestamp -> float)
        Series of algorithm returns as of the end of each session
    benchmark_returns : pd.Series(pd.Timestamp -> float)
        Series of benchmark returns as of the end of each session
    algorithm_leverages : pd.Series(pd.Timestamp -> float)
        Series of algorithm leverages as of the end of each session

    Returns
    -------
    risk_metric : dict[str, any]
        Dict of metrics with fields like:
            {
                'algorithm_period_return': 0.0,
                'benchmark_period_return': 0.0,
                'treasury_period_return': 0,
                'excess_return': 0.0,
                'alpha': 0.0,
                'beta': 0.0,
                'sharpe': 0.0,
                'sortino': 0.0,
                'period_label': '1970-01',
                'trading_days': 0,
                'algo_volatility': 0.0,
                'benchmark_volatility': 0.0,
                'max_drawdown': 0.0,
                'max_leverage': 0.0,
            }
    """
    algorithm_returns = algorithm_returns[
        (algorithm_returns.index >= start_session) &
        (algorithm_returns.index <= end_session)
    ]

    # Benchmark needs to be masked to the same dates as the algo returns
    benchmark_returns = benchmark_returns[
        (benchmark_returns.index >= start_session) &
        (benchmark_returns.index <= algorithm_returns.index[-1])
    ]

    benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
    algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]

    alpha, beta = ep.alpha_beta_aligned(
        algorithm_returns.values,
        benchmark_returns.values,
    )

    sharpe = ep.sharpe_ratio(algorithm_returns)

    # The consumer currently expects a 0.0 value for sharpe in period,
    # this differs from cumulative which was np.nan.
    # When factoring out the sharpe_ratio, the different return types
    # were collapsed into `np.nan`.
    # TODO: Either fix consumer to accept `np.nan` or make the
    # `sharpe_ratio` return type configurable.
    # In the meantime, convert nan values to 0.0
    if pd.isnull(sharpe):
        sharpe = 0.0

    sortino = ep.sortino_ratio(
        algorithm_returns.values,
        _downside_risk=ep.downside_risk(algorithm_returns.values),
    )

    rval = {
        'algorithm_period_return': algorithm_period_returns,
        'benchmark_period_return': benchmark_period_returns,
        'treasury_period_return': 0,
        'excess_return': algorithm_period_returns,
        'alpha': alpha,
        'beta': beta,
        'sharpe': sharpe,
        'sortino': sortino,
        'period_label': end_session.strftime("%Y-%m"),
        'trading_days': len(benchmark_returns),
        'algo_volatility': ep.annual_volatility(algorithm_returns),
        'benchmark_volatility': ep.annual_volatility(benchmark_returns),
        'max_drawdown': ep.max_drawdown(algorithm_returns.values),
        'max_leverage': algorithm_leverages.max(),
    }

    # check if a field in rval is nan or inf, and replace it with None
    # except period_label which is always a str
    return {
        k: (
            None
            if k != 'period_label' and not np.isfinite(v) else
            v
        )
        for k, v in iteritems(rval)
    }