def test_sortino_translation_same(self, returns, required_return, translation):
    """Sortino ratio must be invariant under a common shift of returns and MAR."""
    base = empyrical.sortino_ratio(returns, required_return)
    shifted_down = empyrical.sortino_ratio(
        returns - translation, required_return - translation)
    shifted_up = empyrical.sortino_ratio(
        returns + translation, required_return + translation)
    assert_almost_equal(base, shifted_down, DECIMAL_PLACES)
    assert_almost_equal(base, shifted_up, DECIMAL_PLACES)
def test_sortino_ratio(self, test_required_return):
    """vectorbt's sortino must match empyrical per column and as a Series."""
    expected = [
        empyrical.sortino_ratio(ret[col], required_return=test_required_return)
        for col in ('a', 'b', 'c')
    ]
    assert isclose(
        ret['a'].vbt.returns.sortino_ratio(required_return=test_required_return),
        expected[0])
    pd.testing.assert_series_equal(
        ret.vbt.returns.sortino_ratio(required_return=test_required_return),
        pd.Series(expected, index=ret.columns).rename('sortino_ratio')
    )
def test_sortino_sub_noise(self, returns, required_return):
    """Replacing sub-MAR observations with the MAR must not lower the ratio."""
    baseline = empyrical.sortino_ratio(returns, required_return)
    below_mar = returns[returns < required_return].index.tolist()
    # Promote two randomly-chosen below-MAR observations to exactly the MAR.
    chosen = random.sample(below_mar, 2)
    returns[chosen[0]] = required_return
    after_first = empyrical.sortino_ratio(returns, required_return)
    returns[chosen[1]] = required_return
    after_second = empyrical.sortino_ratio(returns, required_return)
    assert baseline <= after_first
    assert after_first <= after_second
def test_sortino_add_noise(self, returns, required_return):
    """Injecting losses at above-MAR points must strictly reduce the ratio."""
    baseline = empyrical.sortino_ratio(returns, required_return)
    above_mar = returns[returns > required_return].index.tolist()
    # Turn two randomly-chosen winning observations into outright losses.
    chosen = random.sample(above_mar, 2)
    returns[chosen[0]] = -0.01
    after_first = empyrical.sortino_ratio(returns, required_return)
    returns[chosen[1]] = -0.01
    after_second = empyrical.sortino_ratio(returns, required_return)
    assert baseline > after_first
    assert after_first > after_second
def test_sortino_translation_diff(self, returns, required_return,
                                  translation_returns, translation_required):
    """Unequal shifts of returns vs. MAR must change the Sortino ratio."""
    base = empyrical.sortino_ratio(returns, required_return)
    lowered = empyrical.sortino_ratio(
        returns - translation_returns,
        required_return - translation_required)
    raised = empyrical.sortino_ratio(
        returns + translation_returns,
        required_return + translation_required)
    assert base != lowered
    assert base != raised
def evaluation(self):
    """Compute strategy-vs-benchmark performance metrics.

    Reads per-period strategy returns (``self.df.eac_stgy_rt``) and
    benchmark returns (``self.df.eac_bcmk_rt``), builds a one-row
    DataFrame of the statistics, stores it in ``self.cls.df_output``
    and prints its transpose.
    """
    ap.sound(f'entry: create_df')
    # Risk/return metrics via empyrical, annualized with the configured factor.
    mdd = empyrical.max_drawdown(self.df.eac_stgy_rt)
    stgy_ret_an = empyrical.annual_return(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    bcmk_ret_an = empyrical.annual_return(self.df.eac_bcmk_rt, annualization=self.cls.annualization)
    stgy_vlt_an = empyrical.annual_volatility(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    bcmk_vlt_an = empyrical.annual_volatility(self.df.eac_bcmk_rt, annualization=self.cls.annualization)
    calmar = empyrical.calmar_ratio(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    omega = empyrical.omega_ratio(self.df.eac_stgy_rt, risk_free=self.cls.rf, annualization=self.cls.annualization)
    # Sharpe and alpha come from the project's own helpers (qp), not empyrical.
    sharpe = qp.sharpe_ratio(stgy_ret_an, self.df.cum_stgy_rt, self.cls.rf)
    sortino = empyrical.sortino_ratio(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    dsrk = empyrical.downside_risk(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    # NOTE(review): empyrical.information_ratio was removed in newer
    # empyrical releases — confirm the pinned version provides it.
    information = empyrical.information_ratio(self.df.eac_stgy_rt, factor_returns=self.df.eac_bcmk_rt)
    beta = empyrical.beta(self.df.eac_stgy_rt, factor_returns=self.df.eac_bcmk_rt, risk_free=self.cls.rf)
    tail_rt = empyrical.tail_ratio(self.df.eac_stgy_rt)
    alpha = qp.alpha_ratio(stgy_ret_an, bcmk_ret_an, self.cls.rf, beta)
    # Whole-period total returns: strategy from total assets, benchmark from closes.
    stgy_ttrt_rt = (self.cls.yd.ttas[-1] - self.cls.yd.ttas[0]) / self.cls.yd.ttas[0]
    bcmk_ttrt_rt = (self.cls.pc.close[-1] - self.cls.pc.close[0]) / self.cls.pc.close[0]
    # Excess (strategy minus benchmark) returns, raw and annualized.
    car_rt = stgy_ttrt_rt - bcmk_ttrt_rt
    car_rt_an = stgy_ret_an - bcmk_ret_an
    self.cls.df_output = pd.DataFrame(
        {'sgty_ttrt_rt': [stgy_ttrt_rt], 'bcmk_ttrt_rt': [bcmk_ttrt_rt], 'car_rt': [car_rt],
         'stgy_ret_an': [stgy_ret_an], 'bcmk_ret_an': [bcmk_ret_an], 'car_rt_an': [car_rt_an],
         'stgy_vlt_an': [stgy_vlt_an], 'bcmk_vlt_an': [bcmk_vlt_an], 'mdd': [mdd],
         'sharpe': [sharpe], 'alpha': [alpha], 'beta': [beta], 'information': [information],
         'tail_rt': [tail_rt], 'calmar': [calmar], 'omega': [omega], 'sortino': [sortino],
         'dsrk': [dsrk]})
    print(f'feedback: \n{self.cls.df_output.T}')
def step_act(self, action):
    """Apply a trading action, compute the reward, and return the next state.

    action: 1 = buy 10% and close any short trades; 2 = sell 10% and
    close any long trades; anything else holds.
    Returns ``(new_state, reward, done)``.
    """
    if action == 1:
        self.buy(size=.1)
        for i in self.trades:
            if i.is_short:
                i.close()
    elif action == 2:
        self.sell(size=.1)
        for i in self.trades:
            if i.is_long:
                i.close()
    self.account_history.append(self.equity)
    # Sortino ratio over the most recent equity changes is the base reward.
    length = min(self.step, self.reward_length)
    ret = np.diff(self.account_history)[-length:]
    r = sortino_ratio(ret)
    # Guard against inf/NaN from degenerate return windows.
    if abs(r) != np.inf and not np.isnan(r):
        reward = r
    else:
        reward = 0
    # Penalize repeating the same action while losing, once warmed up.
    if self.step > 5:
        if self.last_action[-1] == self.last_action[-2] and reward < 0:
            reward -= 5
    done = False
    if reward > 10:
        done = True
    c = self.data.index[-1]
    # NOTE(review): `c + 1` assumes an integer-like index — confirm with
    # the data source used by `get_state`.
    new_state = get_state(c + 1)
    return new_state, reward, done
def calculate_metrics(self):
    """Compute end-of-period performance metrics for algorithm vs benchmark.

    Populates period returns, volatilities, sharpe/sortino/information
    ratios, alpha/beta, excess return, max drawdown and max leverage on
    ``self``. Raises ``Exception`` when the algorithm and benchmark
    return indexes do not match.
    """
    self.benchmark_period_returns = \
        cum_returns(self.benchmark_returns).iloc[-1]
    self.algorithm_period_returns = \
        cum_returns(self.algorithm_returns).iloc[-1]
    if not self.algorithm_returns.index.equals(
            self.benchmark_returns.index):
        message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
        message = message.format(bm_count=len(self.benchmark_returns),
                                 algo_count=len(self.algorithm_returns),
                                 start=self._start_session,
                                 end=self._end_session)
        raise Exception(message)
    self.num_trading_days = len(self.benchmark_returns)
    # Running per-day average of cumulative summed returns.
    self.mean_algorithm_returns = (
        self.algorithm_returns.cumsum() /
        np.arange(1, self.num_trading_days + 1, dtype=np.float64))
    self.benchmark_volatility = annual_volatility(self.benchmark_returns)
    self.algorithm_volatility = annual_volatility(self.algorithm_returns)
    self.treasury_period_return = choose_treasury(
        self.treasury_curves,
        self._start_session,
        self._end_session,
        self.trading_calendar,
    )
    self.sharpe = sharpe_ratio(self.algorithm_returns, )
    # The consumer currently expects a 0.0 value for sharpe in period,
    # this differs from cumulative which was np.nan.
    # When factoring out the sharpe_ratio, the different return types
    # were collapsed into `np.nan`.
    # TODO: Either fix consumer to accept `np.nan` or make the
    # `sharpe_ratio` return type configurable.
    # In the meantime, convert nan values to 0.0
    if pd.isnull(self.sharpe):
        self.sharpe = 0.0
    self.downside_risk = downside_risk(self.algorithm_returns.values)
    # Reuse the downside risk just computed to avoid a second pass.
    self.sortino = sortino_ratio(
        self.algorithm_returns.values,
        _downside_risk=self.downside_risk,
    )
    self.information = information_ratio(
        self.algorithm_returns.values,
        self.benchmark_returns.values,
    )
    self.alpha, self.beta = alpha_beta_aligned(
        self.algorithm_returns.values,
        self.benchmark_returns.values,
    )
    self.excess_return = self.algorithm_period_returns - \
        self.treasury_period_return
    self.max_drawdown = max_drawdown(self.algorithm_returns.values)
    self.max_leverage = self.calculate_max_leverage()
def sortino_ratio(returns, required_return=0, period=DAILY):
    """
    Determines the Sortino ratio of a strategy.

    Parameters
    ----------
    returns : pd.Series or pd.DataFrame
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    required_return: float / series
        minimum acceptable return
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Can be 'monthly', 'weekly', or 'daily'.
        - Defaults to 'daily'.

    Returns
    -------
    depends on input type
    series ==> float
    DataFrame ==> np.array

        Annualized Sortino ratio.
    """
    # Fix: `period` was accepted and documented but never forwarded, so
    # non-daily data was silently annualized as daily.
    return ep.sortino_ratio(returns,
                            required_return=required_return,
                            period=period)
def getSortinoRatio(returns, required_return=0, period='daily',
                    annualization=None, _downside_risk=None):
    """Wrapper around ``empyrical.sortino_ratio``.

    Parameters mirror empyrical's: returns (pd.Series/np.ndarray),
    required_return (MAR), period, annualization override, and an
    optional precomputed downside risk.
    """
    # Fix: arguments are now passed by keyword. empyrical's signature is
    # (returns, required_return, period, annualization, out, _downside_risk),
    # so the original's fifth positional argument bound `_downside_risk`
    # to `out` instead.
    return empyrical.sortino_ratio(
        returns,
        required_return=required_return,
        period=period,
        annualization=annualization,
        _downside_risk=_downside_risk,
    )
def get_perf_att(series, bnchmark, rf=0.03 / 12, freq='monthly'):
    """Build a one-column performance table for a return series.

    series: daily or monthly returns; bnchmark: benchmark returns used
    for alpha/beta. Returns a DataFrame named after the series.
    """
    port_mean, port_std, port_sr = get_stats(series, dtime=freq)
    stats = {
        'Annualized_Mean': '{:,.2f}'.format(round(port_mean, 3)),
        'Annualized_Volatility': round(port_std, 3),
        'Sharpe Ratio': round(port_sr, 3),
        'Calmar Ratio': round(empyrical.calmar_ratio(series, period=freq), 3),
        'Alpha': round(empyrical.alpha(series, bnchmark, risk_free=rf, period=freq), 3),
        'Beta': round(empyrical.beta(series, bnchmark), 3),
        'Max Drawdown': '{:,.2%}'.format(drawdown(series, ret_='nottext')),
        'Sortino Ratio': round(
            empyrical.sortino_ratio(series, required_return=rf, period=freq),
            3),
    }
    perf = pd.Series(stats)
    perf.name = series.name
    return perf.to_frame()
def small_metrics():
    """Return the minimal metric set: raw returns plus a monthly Sortino statistic."""
    sortino_monthly = ReturnsStatistic(
        lambda returns: empyrical.sortino_ratio(returns, period='monthly'),
        'sortino monthly')
    return {Returns(), sortino_monthly}
def _get_reward(self) -> float:
    """
    Compute the reward for the last action from recent portfolio returns.

    Uses the annualized ratio named by ``self._reward_function``
    ('sortino' / 'calmar' / 'omega'); any other value falls back to the
    most recent raw return.

    :return: annualized value of the selected reward ratio; 0 when the
        window has no non-zero returns or the ratio is not finite
    """
    lookback = min(self.current_step, self._returns_lookback)
    returns = np.diff(self.portfolio[-lookback:])

    # Fix: this guard was duplicated verbatim in the original.
    # No information yet: every change in the window is zero.
    if np.count_nonzero(returns) < 1:
        return 0

    # annualization=365*24 assumes hourly steps on a 24/7 market.
    if self._reward_function == 'sortino':
        reward = sortino_ratio(returns, annualization=365 * 24)
    elif self._reward_function == 'calmar':
        reward = calmar_ratio(returns, annualization=365 * 24)
    elif self._reward_function == 'omega':
        reward = omega_ratio(returns, annualization=365 * 24)
    else:
        reward = returns[-1]

    return reward if np.isfinite(reward) else 0
def sortino_ratio(returns, required_return=0, period=DAILY):
    """
    Determines the Sortino ratio of a strategy.

    Parameters
    ----------
    returns : pd.Series or pd.DataFrame
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    required_return: float / series
        minimum acceptable return
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Can be 'monthly', 'weekly', or 'daily'.
        - Defaults to 'daily'.

    Returns
    -------
    depends on input type
    series ==> float
    DataFrame ==> np.array

        Annualized Sortino ratio.
    """
    # Fix: `period` was accepted and documented but never forwarded, so
    # non-daily data was silently annualized as daily.
    return empyrical.sortino_ratio(returns,
                                   required_return=required_return,
                                   period=period)
def get_sortino_ratio(self, data):
    """Weekly Sortino ratio of ``data.rets`` against the per-period risk-free rate."""
    # Deep-copy so the caller's frame is never mutated.
    frame = copy.deepcopy(data)
    clean_rets = frame.rets.dropna()
    return empyrical.sortino_ratio(
        clean_rets,
        required_return=self.rft_ret / self.q,
        period='weekly',
    )
def get_performance_summary(returns):
    """Summarize a return series with standard empyrical metrics as a Series."""
    metric_funcs = {
        'annualized_returns': ep.annual_return,
        'cumulative_returns': ep.cum_returns_final,
        'annual_volatility': ep.annual_volatility,
        'sharpe_ratio': ep.sharpe_ratio,
        'sortino_ratio': ep.sortino_ratio,
        'max_drawdown': ep.max_drawdown,
    }
    return pd.Series({name: fn(returns) for name, fn in metric_funcs.items()})
def test_sortino(self, returns, required_return, period, expected):
    """empyrical sortino matches expectations for scalar and array outputs."""
    actual = empyrical.sortino_ratio(
        returns, required_return=required_return, period=period)
    if isinstance(actual, float):
        assert_almost_equal(actual, expected, DECIMAL_PLACES)
    else:
        # DataFrame input yields an array; compare element-wise.
        for idx in range(actual.size):
            assert_almost_equal(actual[idx], expected[idx], DECIMAL_PLACES)
def plot(self):
    """Plot portfolio value over time, titled with summary risk metrics.

    Each metric is computed best-effort: any failure (e.g. empty or
    degenerate return series) falls back to 0 rather than aborting the
    plot.
    """
    # show a plot of portfolio vs mean market performance
    df_info = pd.DataFrame(self.infos)
    df_info.set_index('current step', inplace=True)
    # df_info.set_index('date', inplace=True)
    rn = np.asarray(df_info['portfolio return'])
    try:
        # NOTE(review): iloc[1] (second row) is used as the "start" value,
        # not iloc[0] — confirm whether row 0 is a placeholder.
        spf = df_info['portfolio value'].iloc[1]  # Start portfolio value
        epf = df_info['portfolio value'].iloc[-1]  # End portfolio value
        pr = (epf - spf) / spf
    except:
        pr = 0
    try:
        sr = sharpe_ratio(rn)
    except:
        sr = 0
    try:
        sor = sortino_ratio(rn)
    except:
        sor = 0
    try:
        mdd = max_drawdown(rn)
    except:
        mdd = 0
    try:
        cr = calmar_ratio(rn)
    except:
        cr = 0
    try:
        om = omega_ratio(rn)
    except:
        om = 0
    try:
        dr = downside_risk(rn)
    except:
        dr = 0
    print("First portfolio value: ", np.round(df_info['portfolio value'].iloc[1]))
    print("Last portfolio value: ", np.round(df_info['portfolio value'].iloc[-1]))
    title = self.strategy_name + ': ' + 'profit={: 2.2%} sharpe={: 2.2f} sortino={: 2.2f} max drawdown={: 2.2%} calmar={: 2.2f} omega={: 2.2f} downside risk={: 2.2f}'.format(
        pr, sr, sor, mdd, cr, om, dr)
    # df_info[['market value', 'portfolio value']].plot(title=title, fig=plt.gcf(), figsize=(15,10), rot=30)
    df_info[['portfolio value']].plot(title=title, fig=plt.gcf(), figsize=(15, 10), rot=30)
def _get_reward(self, current_prices, next_prices):
    """Compute the step reward selected by ``self.compute_reward``.

    'profit' mode updates portfolio values/weights in place from the
    price move and rewards weighted log-returns; the ratio modes
    (sharpe/sortino/max_drawdown/calmar/omega/downside_risk) score the
    accumulated ``self.returns`` best-effort, falling back to 0 on error.
    Array-valued rewards are reduced to their mean.
    """
    if self.compute_reward == compute_reward.profit:
        returns_rate = next_prices / current_prices
        # pip_value = self._calculate_pip_value_in_account_currency(account_currency.USD, next_prices)
        # returns_rate = np.multiply(returns_rate, pip_value)
        log_returns = np.log(returns_rate)
        last_weight = self.current_weights
        # Mark security positions to market; the last slot (cash) is untouched.
        securities_value = self.current_portfolio_values[:-1] * returns_rate
        self.current_portfolio_values[:-1] = securities_value
        self.current_weights = self.current_portfolio_values / np.sum(
            self.current_portfolio_values)
        reward = last_weight[:-1] * log_returns
    elif self.compute_reward == compute_reward.sharpe:
        try:
            sr = sharpe_ratio(np.asarray(self.returns))
        except:
            sr = 0
        reward = sr
    elif self.compute_reward == compute_reward.sortino:
        try:
            sr = sortino_ratio(np.asarray(self.returns))
        except:
            sr = 0
        reward = sr
    elif self.compute_reward == compute_reward.max_drawdown:
        try:
            mdd = max_drawdown(np.asarray(self.returns))
        except:
            mdd = 0
        reward = mdd
    elif self.compute_reward == compute_reward.calmar:
        try:
            cr = calmar_ratio(np.asarray(self.returns))
        except:
            cr = 0
        reward = cr
    elif self.compute_reward == compute_reward.omega:
        try:
            om = omega_ratio(np.asarray(self.returns))
        except:
            om = 0
        reward = om
    elif self.compute_reward == compute_reward.downside_risk:
        try:
            dr = downside_risk(np.asarray(self.returns))
        except:
            dr = 0
        reward = dr
    # Scalars have no .mean(); the except branch keeps them unchanged.
    try:
        reward = reward.mean()
    except:
        reward = reward
    return reward
def RiskRewardStats(df):
    """Build a one-column risk/reward summary table from monthly returns.

    Also rebinds the module-level ``RiskRewardList`` as a side effect.
    """
    global RiskRewardList
    RiskRewardIndex = ['Sharpe Ratio', 'Sortino Ratio', 'Omega Ratio', 'Skewness', 'Kurtosis',
                       'Correlation vs MSCI World TR Index', 'Correlation vs Bloomberg Index']
    OmegaRatio = omega_ratio(df['Monthly Return'])
    Kurtosis = df['Monthly Return'].kurt()
    Skewness = df['Monthly Return'].skew()
    SharpeRatio = sharpe_ratio(df['Monthly Return'], period='monthly')
    SortinoRatio = sortino_ratio(df['Monthly Return'], period='monthly')
    # NOTE(review): MSCIIndex and BloombergIndex are module-level globals
    # computed elsewhere — confirm they are set before this is called.
    RiskRewardList = [SharpeRatio, SortinoRatio, OmegaRatio, Skewness, Kurtosis, MSCIIndex, BloombergIndex]
    RiskRewardDf = pd.DataFrame(RiskRewardList, columns=['Value'], index=RiskRewardIndex)
    return RiskRewardDf
def sortino_ratio_calc(self, net_worths):
    """Sortino ratio over (at most) the last 100 net-worth changes; 0 if no history."""
    if not net_worths:
        return 0
    # Cap the lookback window at 100 observations.
    window = min(len(net_worths), 100)
    changes = np.diff(net_worths)[-window:]
    return sortino_ratio(returns=changes)
def _reward(self):
    """Reward for the current step: a risk ratio over recent net-worth changes.

    Falls back to the mean return when ``reward_func`` names no known
    ratio, and to 0 when the result is infinite or NaN.
    """
    length = min(self.current_step, self.reward_len)
    returns = np.diff(self.net_worths)[-length:]

    if self.reward_func == 'sortino':
        reward = sortino_ratio(returns)
    elif self.reward_func == 'calmar':
        reward = calmar_ratio(returns)
    elif self.reward_func == 'omega':
        reward = omega_ratio(returns)
    else:
        # Fix: the original read `else reward = np.mean(returns)` — a
        # missing colon, i.e. a SyntaxError.
        reward = np.mean(returns)

    return reward if abs(reward) != inf and not np.isnan(reward) else 0
def get_perf_att(series, bnchmark, rf=0.03 / 12, freq='monthly'):
    """Build a one-column performance-attribution table for a return series.

    series: daily or monthly returns; bnchmark: benchmark returns
    regressed against via OLS to obtain alpha/beta and their t-stats.
    Returns a DataFrame named after the series.
    """
    port_mean, port_std, port_sr = get_stats(series, dtime=freq)
    # OLS of strategy on benchmark: intercept = alpha, slope = beta.
    regression = sm.OLS(series, sm.add_constant(bnchmark)).fit()
    alpha, beta = regression.params
    t_alpha, t_beta = regression.tvalues
    stats = {
        'Annualized_Mean': '{:,.5f}'.format(round(port_mean, 5)),
        'Annualized_Volatility': round(port_std, 5),
        'Sharpe Ratio': round(port_sr, 3),
        'Calmar Ratio': round(empyrical.calmar_ratio(series, period=freq), 3),
        'Alpha': round(alpha, 3),
        'Beta': round(beta, 3),
        'T Value (Alpha)': round(t_alpha, 3),
        'T Value (Beta)': round(t_beta, 3),
        'Max Drawdown': '{:,.2%}'.format(drawdown(series, ret_='nottext')),
        'Sortino Ratio': round(
            empyrical.sortino_ratio(series, required_return=rf, period=freq),
            3),
    }
    perf = pd.Series(stats)
    perf.name = series.name
    return perf.to_frame()
def _reward(self):
    """Annualized risk-ratio reward over the recent net-worth window.

    Assumes hourly steps on a 24/7 market (annualization = 365 * 24).
    Returns 0 when the window is all zeros or the ratio is not finite.
    """
    window = min(self.current_step, self.forecast_len)
    deltas = np.diff(self.net_worths[-window:])

    if np.count_nonzero(deltas) < 1:
        return 0

    if self.reward_func == 'sortino':
        score = sortino_ratio(deltas, annualization=365 * 24)
    elif self.reward_func == 'calmar':
        score = calmar_ratio(deltas, annualization=365 * 24)
    elif self.reward_func == 'omega':
        score = omega_ratio(deltas, annualization=365 * 24)
    else:
        score = deltas[-1]

    return score if np.isfinite(score) else 0
def _reward(self):
    """Risk-ratio reward over the last window of net-worth changes.

    Supported ``reward_func`` values: 'sortino', 'calmar', 'omega',
    'logret'; anything else uses the latest raw change. Non-finite
    results map to 0.
    """
    window = min(self.current_step, self.window_size)
    deltas = np.diff(self.net_worths[-window:])

    if np.count_nonzero(deltas) < 1:
        return 0

    if self.reward_func == 'sortino':
        score = sortino_ratio(deltas, annualization=self.annualization)
    elif self.reward_func == 'calmar':
        score = calmar_ratio(deltas, annualization=self.annualization)
    elif self.reward_func == 'omega':
        score = omega_ratio(deltas, annualization=self.annualization)
    elif self.reward_func == "logret":
        score = np.log(deltas[-1])
    else:
        score = deltas[-1]

    return score if np.isfinite(score) else 0
def sortino_ratio(daily_returns, required_return=0, period='daily',
                  annualization=None, out=None, _downside_risk=None):
    """Sortino Ratio"""
    try:
        logger.info("Calculating Sortino Ratio...")
        return empyrical.sortino_ratio(
            daily_returns,
            required_return,
            period=period,
            annualization=annualization,
            out=out,
            _downside_risk=_downside_risk,
        )
    except Exception as exception:
        # Log a marker, then surface the original failure to the caller.
        logger.error('Oops! An Error Occurred ⚠️')
        raise exception
def _reward(self):
    """Reward from the full net-worth history: annualized Sortino or last change."""
    deltas = np.diff(self.net_worths)

    if np.count_nonzero(deltas) < 1:
        return 0

    if self.reward_func == 'sortino':
        score = sortino_ratio(deltas, annualization=self.annualization)
    else:
        score = deltas[-1]

    # NOTE: a hold-decay / margin-distance penalty was sketched here in the
    # original and left disabled; behavior is the raw score only.
    return score if np.isfinite(score) else 0
def plot_function(epoch_weights):
    """Evaluate and visualize a sequence of per-epoch portfolio weights.

    Computes net (cost-adjusted) hourly returns from the weights and the
    module-level test targets, prints a metrics summary, and saves four
    figures: cumulative return, weekly returns, weight heatmap, and
    weight-change heatmap.
    """
    # Flatten epochs into an (steps, channels) weight matrix.
    ew = np.concatenate(epoch_weights).reshape(-1, No_Channels)
    # Turnover per step drives the transaction-cost charge.
    comm = np.sum(np.abs(ew[1:] - ew[:-1]), axis=1)
    ret = np.sum(np.multiply(ew, y_test.numpy()), axis=1)[1:]
    # NOTE(review): the date range start "20180101" is hard-coded — confirm
    # it matches the test period.
    ind = pd.date_range("20180101", periods=len(ret), freq='H')
    ret = pd.DataFrame(ret - comm * cost, index=ind)
    # Daily simple returns from (assumed) hourly log returns.
    exp = np.exp(ret.resample('1D').sum()) - 1.0
    ggg = 'Drawdown:', emp.max_drawdown(exp).values[0], 'Sharpe:', emp.sharpe_ratio(exp)[0], \
        'Sortino:', emp.sortino_ratio(exp).values[0], 'Stability:', emp.stability_of_timeseries(exp), \
        'Tail:', emp.tail_ratio(exp), 'ValAtRisk:', emp.value_at_risk(exp)
    ttt = ' '.join(str(x) for x in ggg)
    print(ttt)
    plt.figure()
    np.exp(ret).cumprod().plot(figsize=(48, 12), title=ttt)
    plt.savefig('cumulative_return')
    plt.close()
    ret = ret.resample('1W').sum()
    plt.figure(figsize=(48, 12))
    pal = sns.color_palette("Greens_d", len(ret))
    rank = ret.iloc[:, 0].argsort()
    ax = sns.barplot(x=ret.index.strftime('%d-%m'), y=ret.values.reshape(-1),
                     palette=np.array(pal[::-1])[rank])
    ax.text(0.5, 1.0, ttt, horizontalalignment='center', verticalalignment='top',
            transform=ax.transAxes)
    plt.savefig('weekly_returns')
    plt.close()
    ew_df = pd.DataFrame(ew)
    plt.figure(figsize=(48, 12))
    ax = sns.heatmap(ew_df.T, cmap=cmap, center=0, xticklabels=False, robust=True)
    ax.text(0.5, 1.0, ttt, horizontalalignment='center', verticalalignment='top',
            transform=ax.transAxes)
    plt.savefig('portfolio_weights')
    plt.close()
    # Per-step weight changes (transactions).
    tr = np.diff(ew.T, axis=1)
    plt.figure(figsize=(96, 12))
    ax = sns.heatmap(tr, cmap=cmap, center=0, robust=True, yticklabels=False,
                     xticklabels=False)
    ax.text(0.5, 1.0, ttt, horizontalalignment='center', verticalalignment='top',
            transform=ax.transAxes)
    plt.savefig('transactions')
    plt.close()
def get_performance_summary(returns):
    '''
    Calculate selected performance evaluation metrics using provided returns.

    Parameters
    ------------
    returns : pd.Series
        Series of returns we want to evaluate

    Returns
    -----------
    stats : pd.Series
        The calculated performance metrics
    '''
    metric_funcs = {
        'annualized_returns': ep.annual_return,
        'cumulative_returns': ep.cum_returns_final,
        'annual_volatility': ep.annual_volatility,
        'sharpe_ratio': ep.sharpe_ratio,
        'sortino_ratio': ep.sortino_ratio,
        'max_drawdown': ep.max_drawdown,
    }
    return pd.Series({name: fn(returns) for name, fn in metric_funcs.items()})
def calculate_metrics(self):
    """Compute end-of-period performance metrics for algorithm vs benchmark.

    Populates period returns, volatilities, sharpe/sortino/information
    ratios, alpha/beta, excess return, max drawdown and max leverage on
    ``self``. Raises ``Exception`` when the algorithm and benchmark
    return indexes do not match.
    """
    self.benchmark_period_returns = \
        cum_returns(self.benchmark_returns).iloc[-1]

    self.algorithm_period_returns = \
        cum_returns(self.algorithm_returns).iloc[-1]

    if not self.algorithm_returns.index.equals(
        self.benchmark_returns.index
    ):
        message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
        message = message.format(
            bm_count=len(self.benchmark_returns),
            algo_count=len(self.algorithm_returns),
            start=self._start_session,
            end=self._end_session
        )
        raise Exception(message)

    self.num_trading_days = len(self.benchmark_returns)

    # Running per-day average of cumulative summed returns.
    self.mean_algorithm_returns = (
        self.algorithm_returns.cumsum() /
        np.arange(1, self.num_trading_days + 1, dtype=np.float64)
    )

    self.benchmark_volatility = annual_volatility(self.benchmark_returns)
    self.algorithm_volatility = annual_volatility(self.algorithm_returns)
    self.treasury_period_return = choose_treasury(
        self.treasury_curves,
        self._start_session,
        self._end_session,
        self.trading_calendar,
    )
    self.sharpe = sharpe_ratio(
        self.algorithm_returns,
    )
    # The consumer currently expects a 0.0 value for sharpe in period,
    # this differs from cumulative which was np.nan.
    # When factoring out the sharpe_ratio, the different return types
    # were collapsed into `np.nan`.
    # TODO: Either fix consumer to accept `np.nan` or make the
    # `sharpe_ratio` return type configurable.
    # In the meantime, convert nan values to 0.0
    if pd.isnull(self.sharpe):
        self.sharpe = 0.0
    self.downside_risk = downside_risk(
        self.algorithm_returns.values
    )
    # Reuse the downside risk just computed to avoid a second pass.
    self.sortino = sortino_ratio(
        self.algorithm_returns.values,
        _downside_risk=self.downside_risk,
    )
    self.information = information_ratio(
        self.algorithm_returns.values,
        self.benchmark_returns.values,
    )
    self.alpha, self.beta = alpha_beta_aligned(
        self.algorithm_returns.values,
        self.benchmark_returns.values,
    )
    self.excess_return = self.algorithm_period_returns - \
        self.treasury_period_return
    self.max_drawdown = max_drawdown(self.algorithm_returns.values)
    self.max_leverage = self.calculate_max_leverage()
def update(self, dt, algorithm_returns, benchmark_returns, leverage):
    """Fold one period's returns and leverage into the cumulative metrics.

    Writes the new observations into the continuous (preallocated)
    arrays at ``dt``'s position, re-slices the running views, and
    recomputes volatility, sharpe, sortino, alpha/beta, excess return,
    drawdown and leverage maxima up to and including ``dt``.
    """
    # Keep track of latest dt for use in to_dict and other methods
    # that report current state.
    self.latest_dt = dt
    dt_loc = self.cont_index.get_loc(dt)
    self.latest_dt_loc = dt_loc

    self.algorithm_returns_cont[dt_loc] = algorithm_returns
    self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]
    self.num_trading_days = len(self.algorithm_returns)

    # On the first day, prepend a synthetic 0.0 return so day-one stats exist.
    if self.create_first_day_stats:
        if len(self.algorithm_returns) == 1:
            self.algorithm_returns = np.append(0.0, self.algorithm_returns)

    self.algorithm_cumulative_returns[dt_loc] = cum_returns(
        self.algorithm_returns)[-1]

    algo_cumulative_returns_to_date = \
        self.algorithm_cumulative_returns[:dt_loc + 1]

    self.mean_returns_cont[dt_loc] = \
        algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days

    self.mean_returns = self.mean_returns_cont[:dt_loc + 1]

    # Annualize using 252 trading days.
    self.annualized_mean_returns_cont[dt_loc] = \
        self.mean_returns_cont[dt_loc] * 252

    self.annualized_mean_returns = \
        self.annualized_mean_returns_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.mean_returns) == 1:
            self.mean_returns = np.append(0.0, self.mean_returns)
            self.annualized_mean_returns = np.append(
                0.0, self.annualized_mean_returns)

    self.benchmark_returns_cont[dt_loc] = benchmark_returns
    self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.benchmark_returns) == 1:
            self.benchmark_returns = np.append(0.0, self.benchmark_returns)

    self.benchmark_cumulative_returns[dt_loc] = cum_returns(
        self.benchmark_returns)[-1]

    benchmark_cumulative_returns_to_date = \
        self.benchmark_cumulative_returns[:dt_loc + 1]

    self.mean_benchmark_returns_cont[dt_loc] = \
        benchmark_cumulative_returns_to_date[dt_loc] / \
        self.num_trading_days

    # NOTE(review): slice ends at dt_loc (exclusive), unlike the
    # `:dt_loc + 1` used for every other running view — confirm whether
    # this off-by-one is intentional.
    self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt_loc]

    self.annualized_mean_benchmark_returns_cont[dt_loc] = \
        self.mean_benchmark_returns_cont[dt_loc] * 252

    self.annualized_mean_benchmark_returns = \
        self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]

    self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
    self.algorithm_cumulative_leverages = \
        self.algorithm_cumulative_leverages_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.algorithm_cumulative_leverages) == 1:
            self.algorithm_cumulative_leverages = np.append(
                0.0, self.algorithm_cumulative_leverages)

    if not len(self.algorithm_returns) and len(self.benchmark_returns):
        message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
        message = message.format(bm_count=len(self.benchmark_returns),
                                 algo_count=len(self.algorithm_returns),
                                 start=self.start_session,
                                 end=self.end_session,
                                 dt=dt)
        raise Exception(message)

    self.update_current_max()
    self.benchmark_volatility[dt_loc] = annual_volatility(
        self.benchmark_returns)
    self.algorithm_volatility[dt_loc] = annual_volatility(
        self.algorithm_returns)

    # caching the treasury rates for the minutely case is a
    # big speedup, because it avoids searching the treasury
    # curves on every minute.
    # In both minutely and daily, the daily curve is always used.
    treasury_end = dt.replace(hour=0, minute=0)
    if np.isnan(self.daily_treasury[treasury_end]):
        treasury_period_return = choose_treasury(
            self.treasury_curves,
            self.start_session,
            treasury_end,
            self.trading_calendar,
        )
        self.daily_treasury[treasury_end] = treasury_period_return
    self.treasury_period_return = self.daily_treasury[treasury_end]
    self.excess_returns[dt_loc] = (
        self.algorithm_cumulative_returns[dt_loc] -
        self.treasury_period_return)

    self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
        self.algorithm_returns,
        self.benchmark_returns,
    )
    self.sharpe[dt_loc] = sharpe_ratio(self.algorithm_returns, )
    self.downside_risk[dt_loc] = downside_risk(self.algorithm_returns)
    # Reuse the downside risk just computed for this dt.
    self.sortino[dt_loc] = sortino_ratio(
        self.algorithm_returns,
        _downside_risk=self.downside_risk[dt_loc])
    self.max_drawdown = max_drawdown(self.algorithm_returns)
    self.max_drawdowns[dt_loc] = self.max_drawdown
    self.max_leverage = self.calculate_max_leverage()
    self.max_leverages[dt_loc] = self.max_leverage
# Load a fund's accumulated-worth history and print basic risk metrics.
import pandas as pd
import empyrical as emp

df = pd.read_csv('ac-worth-from-2017/002138.csv')
# Daily simple returns from the net-worth series.
df['daily_return'] = df['worth'].pct_change()

days = df['date'].count()
# NOTE(review): 3.347 looks like a calendar-days-per-trading-period factor
# used to turn the row count into an annualization period — confirm origin.
return_days = days / 3.347
# 3% annual risk-free rate converted to a per-period rate.
risk_free = 0.03 / return_days

annual_return = emp.annual_return(df['daily_return'], annualization=return_days)
max_drawdown = emp.max_drawdown(df['daily_return'])
sharpe_ratio = emp.sharpe_ratio(df['daily_return'], risk_free, annualization=return_days)
sortino_ratio = emp.sortino_ratio(df['daily_return'], risk_free, annualization=return_days)
omega_ratio = emp.omega_ratio(df['daily_return'], risk_free, annualization=return_days)

print(annual_return, max_drawdown, sharpe_ratio, sortino_ratio, omega_ratio)
def risk_metric_period(cls,
                       start_session,
                       end_session,
                       algorithm_returns,
                       benchmark_returns,
                       algorithm_leverages):
    """
    Creates a dictionary representing the state of the risk report.

    Parameters
    ----------
    start_session : pd.Timestamp
        Start of period (inclusive) to produce metrics on
    end_session : pd.Timestamp
        End of period (inclusive) to produce metrics on
    algorithm_returns : pd.Series(pd.Timestamp -> float)
        Series of algorithm returns as of the end of each session
    benchmark_returns : pd.Series(pd.Timestamp -> float)
        Series of benchmark returns as of the end of each session
    algorithm_leverages : pd.Series(pd.Timestamp -> float)
        Series of algorithm leverages as of the end of each session

    Returns
    -------
    risk_metric : dict[str, any]
        Dict of metrics that with fields like:
            {
                'algorithm_period_return': 0.0,
                'benchmark_period_return': 0.0,
                'treasury_period_return': 0,
                'excess_return': 0.0,
                'alpha': 0.0,
                'beta': 0.0,
                'sharpe': 0.0,
                'sortino': 0.0,
                'period_label': '1970-01',
                'trading_days': 0,
                'algo_volatility': 0.0,
                'benchmark_volatility': 0.0,
                'max_drawdown': 0.0,
                'max_leverage': 0.0,
            }
    """
    # Mask returns to the requested [start_session, end_session] window.
    algorithm_returns = algorithm_returns[
        (algorithm_returns.index >= start_session) &
        (algorithm_returns.index <= end_session)
    ]

    # Benchmark needs to be masked to the same dates as the algo returns
    benchmark_returns = benchmark_returns[
        (benchmark_returns.index >= start_session) &
        (benchmark_returns.index <= algorithm_returns.index[-1])
    ]

    benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
    algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]

    alpha, beta = ep.alpha_beta_aligned(
        algorithm_returns.values,
        benchmark_returns.values,
    )

    sharpe = ep.sharpe_ratio(algorithm_returns)

    # The consumer currently expects a 0.0 value for sharpe in period,
    # this differs from cumulative which was np.nan.
    # When factoring out the sharpe_ratio, the different return types
    # were collapsed into `np.nan`.
    # TODO: Either fix consumer to accept `np.nan` or make the
    # `sharpe_ratio` return type configurable.
    # In the meantime, convert nan values to 0.0
    if pd.isnull(sharpe):
        sharpe = 0.0

    sortino = ep.sortino_ratio(
        algorithm_returns.values,
        _downside_risk=ep.downside_risk(algorithm_returns.values),
    )

    rval = {
        'algorithm_period_return': algorithm_period_returns,
        'benchmark_period_return': benchmark_period_returns,
        'treasury_period_return': 0,
        'excess_return': algorithm_period_returns,
        'alpha': alpha,
        'beta': beta,
        'sharpe': sharpe,
        'sortino': sortino,
        'period_label': end_session.strftime("%Y-%m"),
        'trading_days': len(benchmark_returns),
        'algo_volatility': ep.annual_volatility(algorithm_returns),
        'benchmark_volatility': ep.annual_volatility(benchmark_returns),
        'max_drawdown': ep.max_drawdown(algorithm_returns.values),
        'max_leverage': algorithm_leverages.max(),
    }

    # check if a field in rval is nan or inf, and replace it with None
    # except period_label which is always a str
    return {
        k: (
            None
            if k != 'period_label' and not np.isfinite(v) else
            v
        )
        for k, v in iteritems(rval)
    }
def update(self, dt, algorithm_returns, benchmark_returns, leverage):
    """Fold one period's returns and leverage into the cumulative metrics.

    Writes the new observations into the continuous (preallocated)
    arrays at ``dt``'s position, re-slices the running views, and
    recomputes volatility, sharpe, sortino, information, alpha/beta,
    excess return, drawdown and leverage maxima up to ``dt``.
    """
    # Keep track of latest dt for use in to_dict and other methods
    # that report current state.
    self.latest_dt = dt
    dt_loc = self.cont_index.get_loc(dt)
    self.latest_dt_loc = dt_loc

    self.algorithm_returns_cont[dt_loc] = algorithm_returns
    self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]
    self.num_trading_days = len(self.algorithm_returns)

    # On the first day, prepend a synthetic 0.0 return so day-one stats exist.
    if self.create_first_day_stats:
        if len(self.algorithm_returns) == 1:
            self.algorithm_returns = np.append(0.0, self.algorithm_returns)

    self.algorithm_cumulative_returns[dt_loc] = cum_returns(
        self.algorithm_returns
    )[-1]

    algo_cumulative_returns_to_date = \
        self.algorithm_cumulative_returns[:dt_loc + 1]

    self.mean_returns_cont[dt_loc] = \
        algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days

    self.mean_returns = self.mean_returns_cont[:dt_loc + 1]

    # Annualize using 252 trading days.
    self.annualized_mean_returns_cont[dt_loc] = \
        self.mean_returns_cont[dt_loc] * 252

    self.annualized_mean_returns = \
        self.annualized_mean_returns_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.mean_returns) == 1:
            self.mean_returns = np.append(0.0, self.mean_returns)
            self.annualized_mean_returns = np.append(
                0.0, self.annualized_mean_returns)

    self.benchmark_returns_cont[dt_loc] = benchmark_returns
    self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.benchmark_returns) == 1:
            self.benchmark_returns = np.append(0.0, self.benchmark_returns)

    self.benchmark_cumulative_returns[dt_loc] = cum_returns(
        self.benchmark_returns
    )[-1]

    benchmark_cumulative_returns_to_date = \
        self.benchmark_cumulative_returns[:dt_loc + 1]

    self.mean_benchmark_returns_cont[dt_loc] = \
        benchmark_cumulative_returns_to_date[dt_loc] / \
        self.num_trading_days

    # NOTE(review): slice ends at dt_loc (exclusive), unlike the
    # `:dt_loc + 1` used for every other running view — confirm whether
    # this off-by-one is intentional.
    self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt_loc]

    self.annualized_mean_benchmark_returns_cont[dt_loc] = \
        self.mean_benchmark_returns_cont[dt_loc] * 252

    self.annualized_mean_benchmark_returns = \
        self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]

    self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
    self.algorithm_cumulative_leverages = \
        self.algorithm_cumulative_leverages_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.algorithm_cumulative_leverages) == 1:
            self.algorithm_cumulative_leverages = np.append(
                0.0,
                self.algorithm_cumulative_leverages)

    if not len(self.algorithm_returns) and len(self.benchmark_returns):
        message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
        message = message.format(
            bm_count=len(self.benchmark_returns),
            algo_count=len(self.algorithm_returns),
            start=self.start_session,
            end=self.end_session,
            dt=dt
        )
        raise Exception(message)

    self.update_current_max()
    self.benchmark_volatility[dt_loc] = annual_volatility(
        self.benchmark_returns
    )
    self.algorithm_volatility[dt_loc] = annual_volatility(
        self.algorithm_returns
    )

    # caching the treasury rates for the minutely case is a
    # big speedup, because it avoids searching the treasury
    # curves on every minute.
    # In both minutely and daily, the daily curve is always used.
    treasury_end = dt.replace(hour=0, minute=0)
    if np.isnan(self.daily_treasury[treasury_end]):
        treasury_period_return = choose_treasury(
            self.treasury_curves,
            self.start_session,
            treasury_end,
            self.trading_calendar,
        )
        self.daily_treasury[treasury_end] = treasury_period_return
    self.treasury_period_return = self.daily_treasury[treasury_end]
    self.excess_returns[dt_loc] = (
        self.algorithm_cumulative_returns[dt_loc] -
        self.treasury_period_return)

    self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
        self.algorithm_returns,
        self.benchmark_returns,
    )
    self.sharpe[dt_loc] = sharpe_ratio(
        self.algorithm_returns,
    )
    self.downside_risk[dt_loc] = downside_risk(
        self.algorithm_returns
    )
    # Reuse the downside risk just computed for this dt.
    self.sortino[dt_loc] = sortino_ratio(
        self.algorithm_returns,
        _downside_risk=self.downside_risk[dt_loc]
    )
    self.information[dt_loc] = information_ratio(
        self.algorithm_returns,
        self.benchmark_returns,
    )
    self.max_drawdown = max_drawdown(
        self.algorithm_returns
    )
    self.max_drawdowns[dt_loc] = self.max_drawdown
    self.max_leverage = self.calculate_max_leverage()
    self.max_leverages[dt_loc] = self.max_leverage