def create_perf_attrib_stats(perf_attrib, risk_exposures):
    """
    Takes perf attribution data over a period of time and computes annualized
    multifactor alpha, multifactor sharpe, risk exposures.
    """
    summary = OrderedDict()
    total_returns = perf_attrib['total_returns']
    specific_returns = perf_attrib['specific_returns']
    common_returns = perf_attrib['common_returns']

    summary['Annualized Specific Return'] =\
        ep.annual_return(specific_returns)
    summary['Annualized Common Return'] =\
        ep.annual_return(common_returns)
    summary['Annualized Total Return'] =\
        ep.annual_return(total_returns)

    summary['Specific Sharpe Ratio'] =\
        ep.sharpe_ratio(specific_returns)

    summary['Cumulative Specific Return'] =\
        ep.cum_returns_final(specific_returns)
    summary['Cumulative Common Return'] =\
        ep.cum_returns_final(common_returns)
    summary['Total Returns'] =\
        ep.cum_returns_final(total_returns)

    summary = pd.Series(summary, name='')

    annualized_returns_by_factor = [ep.annual_return(perf_attrib[c])
                                    for c in risk_exposures.columns]
    cumulative_returns_by_factor = [ep.cum_returns_final(perf_attrib[c])
                                    for c in risk_exposures.columns]

    risk_exposure_summary = pd.DataFrame(
        data=OrderedDict([
            (
                'Average Risk Factor Exposure',
                risk_exposures.mean(axis='rows')
            ),
            ('Annualized Return', annualized_returns_by_factor),
            ('Cumulative Return', cumulative_returns_by_factor),
        ]),
        index=risk_exposures.columns,
    )

    return summary, risk_exposure_summary
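# Usage sketch (not from the original source): a minimal, assumed example of
# calling create_perf_attrib_stats with a tiny fabricated perf_attrib frame.
# Column names follow what the function reads above; all values are made up.
import pandas as pd
import empyrical as ep
from collections import OrderedDict

dates = pd.date_range('2020-01-01', periods=4, freq='B')
perf_attrib_example = pd.DataFrame({
    'total_returns':    [0.010, -0.020, 0.015, 0.000],
    'specific_returns': [0.004, -0.010, 0.005, 0.001],
    'common_returns':   [0.006, -0.010, 0.010, -0.001],
    'momentum':         [0.006, -0.010, 0.010, -0.001],  # one column per factor
}, index=dates)
risk_exposures_example = pd.DataFrame({'momentum': 1.0}, index=dates)

summary, exposures = create_perf_attrib_stats(perf_attrib_example,
                                              risk_exposures_example)
print(summary)
print(exposures)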
def annual_return(returns, period=DAILY):
    """
    Determines the mean annual growth rate of returns.

    Parameters
    ----------
    returns : pd.Series
        Periodic returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Can be 'monthly', 'weekly', or 'daily'.
        - Defaults to 'daily'.

    Returns
    -------
    float
        Annual Return as CAGR (Compounded Annual Growth Rate).
    """
    return empyrical.annual_return(returns, period=period)
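# Worked check (assumed example): for period='daily', empyrical computes CAGR
# as (cumulative growth) ** (252 / n_obs) - 1, which the assertion verifies.
import numpy as np
import pandas as pd
import empyrical

rets = pd.Series([0.01, -0.005, 0.002, 0.007, -0.001])
manual_cagr = (1 + rets).prod() ** (252 / len(rets)) - 1
assert np.isclose(empyrical.annual_return(rets, period='daily'), manual_cagr)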
def common_sense_ratio(returns):
    """
    Common sense ratio is the multiplication of the tail ratio and the
    Gain-to-Pain-Ratio -- sum(profits) / sum(losses).

    See http://bit.ly/1ORzGBk for more information on motivation of this
    metric.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.

    Returns
    -------
    float
        common sense ratio
    """
    return empyrical.tail_ratio(returns) * \
        (1 + empyrical.annual_return(returns))
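# Sanity check (assumed example): the ratio is just the product of the two
# empyrical quantities in the return statement above.
import numpy as np
import pandas as pd
import empyrical

rets = pd.Series([0.02, -0.01, 0.015, -0.02, 0.01, 0.005])
csr = common_sense_ratio(rets)
assert np.isclose(csr, empyrical.tail_ratio(rets)
                  * (1 + empyrical.annual_return(rets)))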
def report_metrics(strategy_rets, benchmark_rets, factor_returns=0):
    """Compute common financial risk and performance metrics using the
    `empyrical`_ library.

    Args:
        strategy_rets (:py:class:`pandas.Series`): Strategy returns.
        benchmark_rets (:py:class:`pandas.Series`): Benchmark returns.
        factor_returns: Used when computing ``excess_sharpe``. For the
            strategy column, ``benchmark_rets`` is used as ``factor_returns``
            when it is available; otherwise ``factor_returns`` is used.
            See the explanation of the ``factor_returns`` parameter in
            :py:func:`empyrical.excess_sharpe`.

    Examples:
        >>> from finance_tools_py._jupyter_helper import report_metrics
        >>> import pandas as pd
        >>> rep = report_metrics(pd.Series([-0.01,0.04,0.03,-0.02]),
        ...                      pd.Series([0.04,0.05,0.06,0.07]))
        >>> print(rep)
                                  基准         策略
        最大回撤               0.000000  -0.020000
        年化收益          713630.025679  10.326756
        年度波动性             0.204939   0.467333
        夏普比率             67.629875   5.392302
        R平方                0.994780   0.614649
        盈利比率              1.650602   2.081081
        excess_sharpe       4.260282  -1.317465
        年复合增长率      713630.025679  10.326756

    Returns:
        :py:class:`pandas.DataFrame`:

    .. _empyrical: http://quantopian.github.io/empyrical/
    """
    if not benchmark_rets.empty:
        max_drawdown_benchmark = empyrical.max_drawdown(benchmark_rets)
        annual_return_benchmark = empyrical.annual_return(benchmark_rets)
        annual_volatility_benchmark = empyrical.annual_volatility(
            benchmark_rets)
        sharpe_ratio_benchmark = empyrical.sharpe_ratio(benchmark_rets)
        stability_of_timeseries_benchmark = empyrical.stability_of_timeseries(
            benchmark_rets)
        tail_ratio_benchmark = empyrical.tail_ratio(benchmark_rets)
        excess_sharpe_benchmark = empyrical.excess_sharpe(
            benchmark_rets, factor_returns)
        cagr_benchmark = empyrical.cagr(benchmark_rets)
    else:
        max_drawdown_benchmark = None
        annual_return_benchmark = None
        annual_volatility_benchmark = None
        sharpe_ratio_benchmark = None
        stability_of_timeseries_benchmark = None
        tail_ratio_benchmark = None
        excess_sharpe_benchmark = None
        cagr_benchmark = None

    max_drawdown_strategy = empyrical.max_drawdown(strategy_rets)
    annual_return_strategy = empyrical.annual_return(strategy_rets)
    annual_volatility_strategy = empyrical.annual_volatility(strategy_rets)
    sharpe_ratio_strategy = empyrical.sharpe_ratio(strategy_rets)
    stability_of_timeseries_strategy = empyrical.stability_of_timeseries(
        strategy_rets)
    tail_ratio_strategy = empyrical.tail_ratio(strategy_rets)
    excess_sharpe_strategy = empyrical.excess_sharpe(
        strategy_rets,
        benchmark_rets if not benchmark_rets.empty else factor_returns)
    cagr_strategy = empyrical.cagr(strategy_rets)

    # Column labels: 基准 = benchmark, 策略 = strategy. Row labels: max
    # drawdown, annual return, annual volatility, Sharpe ratio, R-squared
    # (stability of timeseries), tail ratio, excess_sharpe, CAGR.
    return pd.DataFrame(
        {
            '基准': [
                max_drawdown_benchmark, annual_return_benchmark,
                annual_volatility_benchmark, sharpe_ratio_benchmark,
                stability_of_timeseries_benchmark, tail_ratio_benchmark,
                excess_sharpe_benchmark, cagr_benchmark
            ],
            '策略': [
                max_drawdown_strategy, annual_return_strategy,
                annual_volatility_strategy, sharpe_ratio_strategy,
                stability_of_timeseries_strategy, tail_ratio_strategy,
                excess_sharpe_strategy, cagr_strategy
            ]
        },
        index=[
            '最大回撤', '年化收益', '年度波动性', '夏普比率', 'R平方', '盈利比率',
            'excess_sharpe', '年复合增长率'
        ])
def vizResults(slippageAdjustedReturn, returnStream, factorReturn, plotting=False):
    ##ENSURE EQUAL LENGTH
    factorReturn = factorReturn[returnStream.index[0]:]
    ##IF FACTOR DOES NOT START AT SAME SPOT CAN CREATE VERY SKEWED RESULTS

    ##CALCULATE SHARPE WITH SLIPPAGE
    sharpeDiffSlippage = empyrical.sharpe_ratio(slippageAdjustedReturn) \
        - empyrical.sharpe_ratio(factorReturn)
    relativeSharpeSlippage = sharpeDiffSlippage / empyrical.sharpe_ratio(factorReturn) \
        * (empyrical.sharpe_ratio(factorReturn) / abs(empyrical.sharpe_ratio(factorReturn)))

    alpha, beta = empyrical.alpha_beta(returnStream, factorReturn)
    alphaSlippage, betaSlippage = empyrical.alpha_beta(slippageAdjustedReturn, factorReturn)

    metrics = {
        "SHARPE": empyrical.sharpe_ratio(returnStream),
        "SHARPE SLIPPAGE": empyrical.sharpe_ratio(slippageAdjustedReturn),
        "STABILITY": empyrical.stability_of_timeseries(returnStream),
        "ALPHA": alpha,
        "ALPHA SLIPPAGE": alphaSlippage,
        "BETA": abs(beta),
        "ANNUALIZED RETURN": empyrical.annual_return(returnStream)[0],
        "ACTIVITY": np.count_nonzero(returnStream) / float(len(returnStream)),
        "TREYNOR": ((empyrical.annual_return(returnStream.values)[0]
                     - empyrical.annual_return(factorReturn.values)[0])
                    / abs(empyrical.beta(returnStream, factorReturn))),
        "RAW BETA": abs(empyrical.alpha_beta(
            returnStream.apply(lambda x: applyBinary(x), axis=0),
            factorReturn.apply(lambda x: applyBinary(x), axis=0))[1]),
        "SHARPE DIFFERENCE": empyrical.sharpe_ratio(returnStream)
                             - empyrical.sharpe_ratio(factorReturn),
        "RELATIVE SHARPE": (empyrical.sharpe_ratio(returnStream)
                            - empyrical.sharpe_ratio(factorReturn))
                           / empyrical.sharpe_ratio(factorReturn)
                           * (empyrical.sharpe_ratio(factorReturn)
                              / abs(empyrical.sharpe_ratio(factorReturn))),
        "FACTOR SHARPE": empyrical.sharpe_ratio(factorReturn),
        "SHARPE DIFFERENCE SLIPPAGE": sharpeDiffSlippage,
        "RELATIVE SHARPE SLIPPAGE": relativeSharpeSlippage,
    }

    metrics["FACTOR PROFITABILITY"] = len((factorReturn.values)[factorReturn.values > 0]) \
        / len(factorReturn.values)
    metrics["PROFITABILITY"] = len((returnStream.values)[returnStream.values > 0]) \
        / len(returnStream.values)
    metrics["PROFITABILITY DIFFERENCE"] = metrics["PROFITABILITY"] - metrics["FACTOR PROFITABILITY"]
    metrics["PROFITABILITY SLIPPAGE"] = len((slippageAdjustedReturn.values)[slippageAdjustedReturn.values > 0]) \
        / len(slippageAdjustedReturn.values)
    metrics["ACTIVE PROFITABILITY"] = len((returnStream.values)[returnStream.values > 0]) \
        / len((returnStream.values)[returnStream.values != 0])
    metrics["ACTIVE PROFITABILITY SLIPPAGE"] = len((slippageAdjustedReturn.values)[slippageAdjustedReturn.values > 0]) \
        / len((slippageAdjustedReturn.values)[slippageAdjustedReturn.values != 0])
    metrics["TOTAL DAYS SEEN"] = len(returnStream)
    metrics["SHARPE SLIPPAGE DECAY"] = metrics["SHARPE DIFFERENCE SLIPPAGE"] - metrics["SHARPE DIFFERENCE"]

    ##MEASURES BINARY STABILITY OF PREDICTIONS
    metrics["EXTREME STABILITY ROLLING 600"] = (returnStream.rolling(600, min_periods=600)
        .apply(lambda x: empyrical.stability_of_timeseries(applyBinarySkipZero(x))
               * (-1 if x[-1] - x[0] < 0 else 1))
        .dropna()).min().values[0]
    metrics["EXTREME STABILITY"] = empyrical.stability_of_timeseries(
        applyBinarySkipZero(returnStream.values))

    rollingPeriod = 252
    rollingSharpe = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod) \
        .apply(lambda x: empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe.columns = ["252 Day Rolling Sharpe"]
    rollingSharpeFactor = factorReturn.rolling(rollingPeriod, min_periods=rollingPeriod) \
        .apply(lambda x: empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe = rollingSharpe.join(rollingSharpeFactor)
    rollingSharpe.columns = ["252 Day Rolling Sharpe Algo", "252 Day Rolling Sharpe Factor"]

    if len(rollingSharpe["252 Day Rolling Sharpe Algo"].values) > 50:
        diffSharpe = pd.DataFrame(rollingSharpe.apply(lambda x: x[0] - x[1], axis=1),
                                  columns=["Sharpe Difference"])
        metrics["SHARPE DIFFERENCE MIN"] = np.percentile(diffSharpe["Sharpe Difference"].values, 1)
        metrics["SHARPE DIFFERENCE AVERAGE"] = np.percentile(diffSharpe["Sharpe Difference"].values, 50)
        difVals = diffSharpe["Sharpe Difference"].values
        metrics["SHARPE DIFFERENCE GREATER THAN 0"] = len(difVals[np.where(difVals > 0)]) / float(len(difVals))
        metrics["25TH PERCENTILE SHARPE DIFFERENCE"] = np.percentile(diffSharpe["Sharpe Difference"].values, 25)

        relDiffSharpe = pd.DataFrame(
            rollingSharpe.apply(lambda x: (x[0] - x[1]) / x[1] * (x[1] / abs(x[1])), axis=1),
            columns=["Sharpe Difference"])
        metrics["RELATIVE SHARPE DIFFERENCE MIN"] = np.percentile(relDiffSharpe["Sharpe Difference"].values, 1)
        metrics["RELATIVE SHARPE DIFFERENCE AVERAGE"] = np.percentile(relDiffSharpe["Sharpe Difference"].values, 50)
        relDifVals = relDiffSharpe["Sharpe Difference"].values
        metrics["RELATIVE SHARPE DIFFERENCE GREATER THAN 0"] = len(relDifVals[np.where(relDifVals > 0)]) / float(len(relDifVals))
        metrics["25TH PERCENTILE RELATIVE SHARPE DIFFERENCE"] = np.percentile(relDiffSharpe["Sharpe Difference"].values, 25)

        metrics["ROLLING SHARPE BETA"] = abs(empyrical.beta(
            rollingSharpe["252 Day Rolling Sharpe Algo"],
            rollingSharpe["252 Day Rolling Sharpe Factor"]))
        metrics["25TH PERCENTILE SHARPE"] = np.percentile(rollingSharpe["252 Day Rolling Sharpe Algo"].values, 25)
        metrics["MIN ROLLING SHARPE"] = np.percentile(rollingSharpe["252 Day Rolling Sharpe Algo"].values, 1)

        rollingDownside = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod) \
            .apply(lambda x: empyrical.max_drawdown(x)).dropna()
        rollingDownside.columns = ["252 Day Rolling Downside"]
        rollingDownsideFactor = factorReturn.rolling(rollingPeriod, min_periods=rollingPeriod) \
            .apply(lambda x: empyrical.max_drawdown(x)).dropna()
        rollingDownside = rollingDownside.join(rollingDownsideFactor)
        rollingDownside.columns = ["252 Day Rolling Downside Algo", "252 Day Rolling Downside Factor"]

        metrics["ROLLING SHARPE STABILITY"] = abs(stats.linregress(
            np.arange(len(rollingSharpe["252 Day Rolling Sharpe Algo"].values)),
            rollingSharpe["252 Day Rolling Sharpe Algo"].values).rvalue)

        rollingReturn = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod) \
            .apply(lambda x: empyrical.cum_returns(x)[-1]).dropna()
        rollingReturn.columns = ["ROLLING RETURN"]
        metrics["SMART INFORMATION RATIO"] = (np.percentile(rollingReturn["ROLLING RETURN"].values, 25)
                                              - empyrical.annual_return(factorReturn.values)[0]) \
            / returnStream.values.std()
        metrics["ROLLING SHARPE ERROR"] = rollingSharpe["252 Day Rolling Sharpe Algo"].std()
        metrics["ONE STD SHARPE"] = empyrical.sharpe_ratio(slippageAdjustedReturn) - metrics["ROLLING SHARPE ERROR"]

        if plotting == True:
            import matplotlib.pyplot as plt
            rollingSharpe.plot()
            rollingDownside.plot()

    rollingPeriod = 90
    rollingSharpe = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod) \
        .apply(lambda x: empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe.columns = ["90 Day Rolling Sharpe"]
    if len(rollingSharpe["90 Day Rolling Sharpe"].values) > 50:
        metrics["25TH PERCENTILE SHARPE 90"] = np.percentile(rollingSharpe["90 Day Rolling Sharpe"].values, 25)
        metrics["MIN ROLLING SHARPE 90"] = np.percentile(rollingSharpe["90 Day Rolling Sharpe"].values, 1)
        metrics["ROLLING SHARPE ERROR 90"] = rollingSharpe["90 Day Rolling Sharpe"].std()
        metrics["SHARPE TO MIN RATIO 90"] = metrics["SHARPE"] / abs(metrics["MIN ROLLING SHARPE 90"])
        metrics["MIN PROFITABILITY 90"] = np.percentile(
            returnStream.rolling(rollingPeriod, min_periods=rollingPeriod)
            .apply(lambda x: len((x)[x > 0]) / len(x)).dropna().values, 1)
        metrics["PROFITABILITY DROP 90"] = metrics["PROFITABILITY"] - metrics["MIN PROFITABILITY 90"]
        metrics["25TH PROFITABILITY 90"] = np.percentile(
            returnStream.rolling(rollingPeriod, min_periods=rollingPeriod)
            .apply(lambda x: len((x)[x > 0]) / len(x)).dropna().values, 25)
        metrics["MIN FACTOR PROFITABILITY 90"] = np.percentile(
            factorReturn.rolling(rollingPeriod, min_periods=rollingPeriod)
            .apply(lambda x: len((x)[x > 0]) / len(x)).dropna().values, 1)
        metrics["MIN PROFITABILITY DIFFERENCE 90"] = metrics["MIN PROFITABILITY 90"] - metrics["MIN FACTOR PROFITABILITY 90"]

    rollingPeriod = 45
    rollingSharpe = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod) \
        .apply(lambda x: empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe.columns = ["45 Day Rolling Sharpe"]
    if len(rollingSharpe["45 Day Rolling Sharpe"].values) > 50:
        metrics["25TH PERCENTILE SHARPE 45"] = np.percentile(rollingSharpe["45 Day Rolling Sharpe"].values, 25)
        metrics["MIN ROLLING SHARPE 45"] = np.percentile(rollingSharpe["45 Day Rolling Sharpe"].values, 1)
        metrics["ROLLING SHARPE ERROR 45"] = rollingSharpe["45 Day Rolling Sharpe"].std()
        metrics["SHARPE TO MIN RATIO 45"] = metrics["SHARPE"] / abs(metrics["MIN ROLLING SHARPE 45"])
        metrics["MIN PROFITABILITY 45"] = np.percentile(
            returnStream.rolling(rollingPeriod, min_periods=rollingPeriod)
            .apply(lambda x: len((x)[x > 0]) / len(x)).dropna().values, 1)
        metrics["PROFITABILITY DROP 45"] = metrics["PROFITABILITY"] - metrics["MIN PROFITABILITY 45"]
        metrics["25TH PROFITABILITY 45"] = np.percentile(
            returnStream.rolling(rollingPeriod, min_periods=rollingPeriod)
            .apply(lambda x: len((x)[x > 0]) / len(x)).dropna().values, 25)
        metrics["MIN FACTOR PROFITABILITY 45"] = np.percentile(
            factorReturn.rolling(rollingPeriod, min_periods=rollingPeriod)
            .apply(lambda x: len((x)[x > 0]) / len(x)).dropna().values, 1)
        metrics["MIN PROFITABILITY DIFFERENCE 45"] = metrics["MIN PROFITABILITY 45"] - metrics["MIN FACTOR PROFITABILITY 45"]

    returns = returnStream.apply(lambda x: empyrical.cum_returns(x))
    returns.columns = ["algo"]
    factorReturn = factorReturn.apply(lambda x: empyrical.cum_returns(x))
    returns = returns.join(factorReturn)
    returns.columns = ["Algo Return", "Factor Return"]

    ##FORCE SHOW
    if plotting == True:
        import matplotlib.pyplot as plt
        returns.plot()
        plt.show()

    return metrics
def work(PARAMS):
    info('work %s' % str(PARAMS))

    stock_df_dict = None
    show_df = None
    order_df = None
    PROPERTY = None

    STRATEGY = PARAMS[0]
    POS = PARAMS[1]
    N = PARAMS[2]
    K = PARAMS[3]
    M = PARAMS[4]

    global ROTATION_LIST
    ROTATION_LIST = ROTATION_LIST

    stock_df_dict = get_stock_df_dict(N, M)
    show_df, order_df, PROPERTY = run_turtle(
        ROTATION_LIST, stock_df_dict, STRATEGY, POS, N, K, M)

    df = show_df.dropna(how='any', inplace=False).copy()
    df = df.loc[start_date:end_date]
    algo = df['PROPERTY'].pct_change()
    benchmark = df.open.pct_change()

    DAYS_ALL = len(df)
    DAYS_NOFULLHOLD = len(df[df['CASH'] > (df['PROPERTY'] / POS)])

    output_str = ''
    for y in range(int(start_date.split('-')[0]),
                   int(end_date.split('-')[0]) + 1, 1):
        # info('y = %d' % y)
        y_df = df.loc['%d-01-01' % y:'%d-01-01' % (y + 1)]
        if len(y_df) == 0:
            continue
        y_algo = y_df['PROPERTY'].pct_change()
        # info(y_algo)
        y_benchmark = y_df.open.pct_change()
        # info('y_benc')
        result = '%d-%d,%.3f,%.3f,%.3f,%.3f' % (
            y, y + 1,
            emp.cum_returns(y_algo)[-1], emp.cum_returns(y_benchmark)[-1],
            emp.max_drawdown(y_algo), emp.max_drawdown(y_benchmark)
        )
        output_str += result
        output_str += ';'
    # info(output_str)

    df = order_df.copy()
    df['pro_pct'] = (df.borrow_price - df.return_price) / df.return_price
    df = df.loc[:, ['symbol', 'pro_pct']]
    df = df.groupby(by='symbol').sum()
    buy_stock_count = len(df)

    score_sr = pd.Series({
        'START': start_date,
        'END': end_date,
        'STRATEGY': STRATEGY,
        'POS': POS,
        'N': N,
        'K': K,
        'M': M,
        'ORDER': len(order_df),
        'STOCK': buy_stock_count,
        'RETURN_ALGO': emp.cum_returns(algo)[-1],
        'RETURN_BENC': emp.cum_returns(benchmark)[-1],
        'MAXDROPDOWN_ALGO': emp.max_drawdown(algo),
        'MAXDROPDOWN_BENC': emp.max_drawdown(benchmark),
        'WINRATE_ORDER': len(order_df[order_df.profit > 0]) /
                         len(order_df[order_df.profit != 0]),
        'WINRATE_YEARLY': 0,
        'ANNUAL_RETURN': emp.annual_return(algo),
        'ANNUAL_VOLATILITY': emp.annual_volatility(algo, period='daily'),
        'CALMAR_RATIO': emp.calmar_ratio(algo),
        'SHARPE_RATIO': emp.sharpe_ratio(returns=algo),
        'ALPHA': emp.alpha(returns=algo, factor_returns=benchmark,
                           risk_free=0.00),
        'BETA': emp.beta(returns=algo, factor_returns=benchmark,
                         risk_free=0.00),
        'DAYS_ALL': DAYS_ALL,
        'DAYS_NOFULLHOLD': DAYS_NOFULLHOLD,
        'RET_PER_YEAR': output_str,
    })

    YEAR_COUNT = 0
    ALGO_WIN_YEAR_COUNT = 0
    df = show_df.dropna(how='any', inplace=False).copy()
    df = df.loc[start_date:end_date]
    for y in range(int(start_date.split('-')[0]),
                   int(end_date.split('-')[0]) + 1, 1):
        y_df = df.loc['%d-01-01' % y:'%d-01-01' % (y + 1)]
        # info('y = %d' % y)
        if len(y_df) == 0:
            continue
        y_algo = y_df['PROPERTY'].pct_change()
        y_benchmark = y_df.open.pct_change()
        score_sr['RETURN_ALGO_%d' % y] = emp.cum_returns(y_algo)[-1]
        score_sr['RETURN_BENC_%d' % y] = emp.cum_returns(y_benchmark)[-1]
        YEAR_COUNT += 1
        if score_sr['RETURN_ALGO_%d' % y] > score_sr['RETURN_BENC_%d' % y]:
            ALGO_WIN_YEAR_COUNT += 1
    score_sr['WINRATE_YEARLY'] = ALGO_WIN_YEAR_COUNT / YEAR_COUNT

    return PARAMS, score_sr, order_df
    return empyrical.max_drawdown(self.returns)

def getSharpeRatio(self, period='daily', annualization=None):
    return empyrical.sharpe_ratio(self.returns, self.riskFreeRate,
                                  period, annualization)

def getAlpha(self, period='daily', annualization=None):
    # NOTE: empyrical.alpha expects benchmark returns as its second
    # (factor_returns) argument, so passing self.riskFreeRate there shifts
    # every later positional argument as well; the keyword is also spelled
    # _beta in empyrical, not beta_.
    return empyrical.alpha(self.returns, self.riskFreeRate, period,
                           annualization, _beta=None)

def getBeta(self):
    # NOTE: as above, empyrical.beta expects benchmark returns here.
    return empyrical.beta(self.returns, self.riskFreeRate)

def getCAGR(self, period='daily', annualization=None):
    return empyrical.cagr(self.returns, period, annualization)

def getAnnualReturn(self, period='daily', annualization=None):
    return empyrical.annual_return(self.returns, period, annualization)

def getCumReturns(self, starting_value=0):
    return empyrical.cum_returns(self.returns, starting_value)

def getAnnualVolatility(self, period='daily', alpha=2.0, annualization=None):
    return empyrical.annual_volatility(self.returns, period, alpha,
                                       annualization)

def getDownsideRisk(self, required_return=0, period='daily', annualization=None):
    return empyrical.downside_risk(self.returns, required_return, period,
                                   annualization)

"""
def getCalmarRatio(returns, period='daily', annualization=None):
    return empyrical.calmar_ratio(returns, period, annualization)

def getOmegaRatio(returns, risk_free=0.0, required_return=0.0, annualization=252):
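# For reference, a minimal self-contained wrapper (an assumed sketch, not the
# original class): empyrical's alpha/beta take *benchmark* returns as the
# second argument, so a benchmark series -- not the risk-free rate -- is
# passed as factor_returns here.
import empyrical

class PerformanceStats:
    def __init__(self, returns, benchmark_returns, risk_free=0.0):
        self.returns = returns                      # pd.Series of periodic returns
        self.benchmark_returns = benchmark_returns  # pd.Series on the same index
        self.risk_free = risk_free                  # scalar per-period rate

    def sharpe(self, period='daily', annualization=None):
        return empyrical.sharpe_ratio(self.returns, self.risk_free,
                                      period, annualization)

    def alpha(self, period='daily', annualization=None):
        return empyrical.alpha(self.returns, self.benchmark_returns,
                               risk_free=self.risk_free, period=period,
                               annualization=annualization)

    def beta(self):
        return empyrical.beta(self.returns, self.benchmark_returns,
                              risk_free=self.risk_free)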
def fin_funcs_port(df):
    """
    Financial calculations taken from Quantopian's empyrical library.

    :param df: dataframe containing daily returns calculated for a portfolio
        as well as for the related accounts.
    :return: Dictionary of financial ratios for both the portfolio and the
        account return streams.
    """
    returns_port = df["portfolio"]
    returns_acct = df["account"]
    risk_free_rate = 0.0

    annual_return_port = ep.annual_return(returns_port, period="daily",
                                          annualization=None)
    annual_return_acct = ep.annual_return(returns_acct, period="daily",
                                          annualization=None)
    cumm_return_port = ep.cum_returns(returns_port, starting_value=0).iloc[-1]
    cumm_return_acct = ep.cum_returns(returns_acct, starting_value=0).iloc[-1]
    cagr_port = ep.cagr(returns_port, period="daily", annualization=None)
    cagr_acct = ep.cagr(returns_acct, period="daily", annualization=None)
    sharpe_port = ep.sharpe_ratio(returns_port, risk_free=risk_free_rate,
                                  period="daily", annualization=None)
    sharpe_acct = ep.sharpe_ratio(returns_acct, risk_free=risk_free_rate,
                                  period="daily", annualization=None)
    annual_volatility_port = ep.annual_volatility(returns_port, period="daily",
                                                  alpha=2.0, annualization=None)
    annual_volatility_acct = ep.annual_volatility(returns_acct, period="daily",
                                                  alpha=2.0, annualization=None)
    max_drawdown_port = ep.max_drawdown(returns_port)
    max_drawdown_acct = ep.max_drawdown(returns_acct)
    calmar_port = ep.calmar_ratio(returns_port, period="daily",
                                  annualization=None)
    calmar_acct = ep.calmar_ratio(returns_acct, period="daily",
                                  annualization=None)
    sortino_port = ep.sortino_ratio(
        returns_port,
        required_return=0,
        period="daily",
        annualization=None,
        _downside_risk=None,
    )
    sortino_acct = ep.sortino_ratio(
        returns_acct,
        required_return=0,
        period="daily",
        annualization=None,
        _downside_risk=None,
    )
    tail_ratio_port = ep.tail_ratio(returns_port)
    tail_ratio_acct = ep.tail_ratio(returns_acct)

    financials = {
        ("return_portfolio", "annual_return"): annual_return_port,
        ("return_portfolio", "cumm_return"): cumm_return_port,
        ("return_portfolio", "cagr"): cagr_port,
        ("return_portfolio", "sharpe"): sharpe_port,
        ("return_portfolio", "annual_volatility"): annual_volatility_port,
        ("return_portfolio", "max_drawdown"): max_drawdown_port,
        ("return_portfolio", "calmar"): calmar_port,
        ("return_portfolio", "sortino"): sortino_port,
        ("return_portfolio", "tail_ratio"): tail_ratio_port,
        ("return_account", "annual_return"): annual_return_acct,
        ("return_account", "cumm_return"): cumm_return_acct,
        ("return_account", "cagr"): cagr_acct,
        ("return_account", "sharpe"): sharpe_acct,
        ("return_account", "annual_volatility"): annual_volatility_acct,
        ("return_account", "max_drawdown"): max_drawdown_acct,
        ("return_account", "calmar"): calmar_acct,
        ("return_account", "sortino"): sortino_acct,
        ("return_account", "tail_ratio"): tail_ratio_acct,
    }
    return financials
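# Usage sketch (assumed data): build a frame with 'portfolio' and 'account'
# daily-return columns, then render the tuple-keyed dict as a MultiIndex
# Series for display.
import numpy as np
import pandas as pd
import empyrical as ep

rng = np.random.default_rng(0)
idx = pd.date_range('2020-01-01', periods=252, freq='B')
rets_df = pd.DataFrame({
    'portfolio': rng.normal(0.0005, 0.010, len(idx)),
    'account':   rng.normal(0.0004, 0.012, len(idx)),
}, index=idx)

financials = fin_funcs_port(rets_df)
print(pd.Series(financials))  # index levels: (return stream, metric)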
plt.ylabel('Drawdown')
plt.title('Underwater Plot of the S&P500 index')
plt.xlabel('')
plt.savefig('SP500_underwater.pdf')
plt.show()

annual_returns = pd.DataFrame(
    ep.aggregate_returns(sp500['Returns'], 'yearly'))

ax = plt.gca()
ax.axhline(100 * annual_returns.values.mean(), color='gray',
           linestyle='--', lw=2, alpha=0.7)
(100 * annual_returns.sort_index(ascending=True)).plot(
    ax=ax, kind='bar', alpha=1)
ax.axhline(0.0, color='black', linestyle='-', lw=3)
plt.gca().set_yticklabels(
    ['{:.0f}%'.format(x) for x in plt.gca().get_yticks()])
ax.set_xlabel('')
ax.set_ylabel('Returns')
ax.set_title("Annual returns of the S&P 500 index")
ax.legend(['Mean'], frameon=True, framealpha=1)
ax.grid(b=True, axis='y')
plt.savefig('SP500_annual_ret.pdf')
plt.show()

cagr = ep.annual_return(sp500['Returns'])
sharpe = ep.sharpe_ratio(sp500['Returns'])
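# For reference (assumed toy example): ep.aggregate_returns compounds the
# return series within each calendar bucket, which is what the bar chart
# above plots for 'yearly'.
import pandas as pd
import empyrical as ep

r = pd.Series([0.10, 0.10, -0.05],
              index=pd.to_datetime(['2019-06-28', '2019-12-31', '2020-06-30']))
print(ep.aggregate_returns(r, 'yearly'))
# 2019: 1.10 * 1.10 - 1 = 0.21; 2020: -0.05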
def trades(trades_list: list, daily_balance: list):
    starting_balance = 0
    current_balance = 0

    for e in store.exchanges.storage:
        starting_balance += store.exchanges.storage[e].starting_balance
        current_balance += store.exchanges.storage[e].balance

    starting_balance = round(starting_balance, 2)
    current_balance = round(current_balance, 2)

    if len(trades_list) == 0:
        return None

    df = pd.DataFrame.from_records([t.to_dict() for t in trades_list])

    total_completed = len(df)
    winning_trades = df.loc[df['PNL'] > 0]
    losing_trades = df.loc[df['PNL'] < 0]
    win_rate = len(winning_trades) / (len(losing_trades) + len(winning_trades))
    max_R = round(df['R'].max(), 2)
    min_R = round(df['R'].min(), 2)
    mean_R = round(df['R'].mean(), 2)

    longs_count = len(df.loc[df['type'] == 'long'])
    shorts_count = len(df.loc[df['type'] == 'short'])
    longs_percentage = longs_count / (longs_count + shorts_count) * 100
    short_percentage = 100 - longs_percentage

    fee = df['fee'].sum()
    net_profit = round(df['PNL'].sum(), 2)
    net_profit_percentage = round((net_profit / starting_balance) * 100, 2)
    average_win = round(winning_trades['PNL'].mean(), 2)
    average_loss = round(abs(losing_trades['PNL'].mean()), 2)
    ratio_avg_win_loss = average_win / average_loss

    expectancy = (0 if np.isnan(average_win) else average_win) * win_rate - (
        0 if np.isnan(average_loss) else average_loss) * (1 - win_rate)
    expectancy = round(expectancy, 2)
    expectancy_percentage = round((expectancy / starting_balance) * 100, 2)
    expected_net_profit_every_100_trades = round(expectancy_percentage * 100, 2)

    average_holding_period = df['holding_period'].mean()
    average_winning_holding_period = winning_trades['holding_period'].mean()
    average_losing_holding_period = losing_trades['holding_period'].mean()

    gross_profit = round(df.loc[df['PNL'] > 0]['PNL'].sum(), 2)
    gross_loss = round(df.loc[df['PNL'] < 0]['PNL'].sum(), 2)

    daily_returns = pd.Series(daily_balance).pct_change(1).values
    max_drawdown = round(empyrical.max_drawdown(daily_returns) * 100, 2)
    annual_return = round(empyrical.annual_return(daily_returns) * 100, 2)
    sharpe_ratio = round(empyrical.sharpe_ratio(daily_returns), 2)

    total_open_trades = store.app.total_open_trades
    open_pl = store.app.total_open_pl

    return {
        'total': np.nan if np.isnan(total_completed) else total_completed,
        'starting_balance': np.nan if np.isnan(starting_balance) else starting_balance,
        'finishing_balance': np.nan if np.isnan(current_balance) else current_balance,
        'win_rate': np.nan if np.isnan(win_rate) else win_rate,
        'max_R': np.nan if np.isnan(max_R) else max_R,
        'min_R': np.nan if np.isnan(min_R) else min_R,
        'mean_R': np.nan if np.isnan(mean_R) else mean_R,
        'ratio_avg_win_loss': np.nan if np.isnan(ratio_avg_win_loss) else ratio_avg_win_loss,
        'longs_count': np.nan if np.isnan(longs_count) else longs_count,
        'longs_percentage': np.nan if np.isnan(longs_percentage) else longs_percentage,
        'short_percentage': np.nan if np.isnan(short_percentage) else short_percentage,
        'shorts_count': np.nan if np.isnan(shorts_count) else shorts_count,
        'fee': np.nan if np.isnan(fee) else fee,
        'net_profit': np.nan if np.isnan(net_profit) else net_profit,
        'net_profit_percentage': np.nan if np.isnan(net_profit_percentage) else net_profit_percentage,
        'average_win': np.nan if np.isnan(average_win) else average_win,
        'average_loss': np.nan if np.isnan(average_loss) else average_loss,
        'expectancy': np.nan if np.isnan(expectancy) else expectancy,
        'expectancy_percentage': np.nan if np.isnan(expectancy_percentage) else expectancy_percentage,
        'expected_net_profit_every_100_trades': np.nan if np.isnan(
            expected_net_profit_every_100_trades) else expected_net_profit_every_100_trades,
        'average_holding_period': average_holding_period,
        'average_winning_holding_period': average_winning_holding_period,
        'average_losing_holding_period': average_losing_holding_period,
        'gross_profit': gross_profit,
        'gross_loss': gross_loss,
        'max_drawdown': max_drawdown,
        'annual_return': annual_return,
        'sharpe_ratio': sharpe_ratio,
        'total_open_trades': total_open_trades,
        'open_pl': open_pl,
    }
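# Worked check of the expectancy arithmetic above (assumed numbers): with a
# 40% win rate, a $50 average win and a $30 average loss,
# expectancy = 0.4 * 50 - 0.6 * 30 = $2 per trade.
win_rate, average_win, average_loss = 0.40, 50.0, 30.0
expectancy = average_win * win_rate - average_loss * (1 - win_rate)
assert round(expectancy, 2) == 2.0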
def getFundData():
    historicalAllocations, realizedAllocations = getNetAllocationAcrossPortfolios()
    if historicalAllocations is None:
        return None, None
    pulledData, unused_ = dataAck.downloadTickerData(
        historicalAllocations.columns.values)
    allocationJoinedData = dataAck.joinDatasets(
        [pulledData[ticker] for ticker in pulledData])
    dataToCache = []
    for allocationForm in [historicalAllocations, realizedAllocations]:
        performanceByTicker, fundPerformance, fundTransactionCost = \
            portfolioGeneration.calculatePerformanceForAllocations(
                allocationForm, allocationJoinedData)
        if len(fundPerformance) == 0:
            dataToCache.append({})
            continue

        ##CALCULATE BETAS FOR ALL TICKERS TO FUND PERFORMANCE
        tickerAlphaBetas = []
        for ticker in allocationForm.columns.values:
            factorReturn = dataAck.getDailyFactorReturn(
                ticker, allocationJoinedData)
            alpha, beta = empyrical.alpha_beta(fundPerformance, factorReturn)
            tickerAlphaBetas.append({
                "ticker": ticker,
                "alpha": alpha * 100,
                "beta": beta
            })

        tickerCols, tickerRows = portfolioGeneration.convertTableToJSON(
            empyrical.cum_returns(performanceByTicker))
        tickerAllocationsCols, tickerAllocationsRows = \
            portfolioGeneration.convertTableToJSON(allocationForm)
        fundCols, fundRows = portfolioGeneration.convertTableToJSON(
            empyrical.cum_returns(fundPerformance))
        sharpe = empyrical.sharpe_ratio(fundPerformance)
        annualReturn = empyrical.annual_return(fundPerformance)[0]
        annualVol = empyrical.annual_volatility(fundPerformance)
        commissionCols, commissionRows = portfolioGeneration.convertTableToJSON(
            fundTransactionCost)
        dataToCache.append({
            "tickerAlphaBetas": tickerAlphaBetas,
            "tickerCols": json.dumps(tickerCols),
            "tickerRows": json.dumps(tickerRows),
            "tickerAllocationsCols": json.dumps(tickerAllocationsCols),
            "tickerAllocationsRows": json.dumps(tickerAllocationsRows),
            "fundCols": json.dumps(fundCols),
            "fundRows": json.dumps(fundRows),
            "sharpe": sharpe,
            "annualReturn": annualReturn * 100,
            "annualVol": annualVol * 100,
            "commissionCols": json.dumps(commissionCols),
            "commissionRows": json.dumps(commissionRows)
        })

    historicalData = dataToCache[0]
    realizedData = dataToCache[1]

    ##GET TODAY ALLOCATION
    if realizedData != {}:
        newRows = []
        tARows = json.loads(realizedData["tickerAllocationsRows"])
        tACols = json.loads(realizedData["tickerAllocationsCols"])
        print(tARows[-1])
        for i in range(len(tACols)):
            newRows.append([tACols[i], abs(tARows[-1][i + 1])])  ##i+1 because date
        realizedData["todayAllocation"] = json.dumps(newRows)
        print(realizedData["todayAllocation"])
    return historicalData, realizedData
def get_statistics(date):
    end_date = datetime.datetime.strptime(date, "%Y-%m-%d")
    portfolio_name, net_value, year_return, annual_return, total_return, \
        max_drawdown, sharpe, volatility = [], [], [], [], [], [], [], []

    zhiying_df = pd.read_csv(const.ZHIYING_FILE)
    zhiying_df.index = pd.to_datetime(zhiying_df['date'], format='%Y/%m/%d')
    zhiying_df = zhiying_df[zhiying_df.index <= end_date]
    if zhiying_df.shape[0] == 0:
        return

    assets = [unicode(x) for x in
              range(const.first_num_of_portfolio, const.last_num_of_portfolio + 1)
              if ((x not in const.exceptions) and (x < 24))]
    for asset in assets:
        df = zhiying_df[[asset]]
        df = df.dropna()
        df.loc[:, 'return'] = df.pct_change()
        df.loc[:, 'net value'] = (1 + df['return']).cumprod()
        df.loc[df.index[0], 'net value'] = 1
        if df.index[-1] < end_date:
            return
        portfolio_name.append(u"智盈添易一号第%s期" % (asset))  # "Zhiying Tianyi No.1, issue %s"
        net_value.append(df.loc[df.index[-1], 'net value'])
        total_return.append(df.loc[df.index[-1], 'net value'] - 1)
        annual_return.append(empyrical.annual_return(df['return'].dropna()))
        max_drawdown.append(empyrical.max_drawdown(df['return'].dropna()))
        sharpe.append(empyrical.sharpe_ratio(df['return'].dropna()))
        volatility.append(empyrical.annual_volatility(df['return'].dropna()))
        df = df[df.index >= datetime.datetime(2018, 1, 1)]
        start_value = df.loc[df.index[0], 'net value']
        end_value = df.loc[df.index[-1], 'net value']
        year_return.append((end_value - start_value) / start_value)

    # Portfolios from issue 24 onward
    assets = [x for x in
              range(const.first_num_of_portfolio, const.last_num_of_portfolio + 1)
              if ((x not in const.exceptions) and (x >= 24))]
    zhiying_df = pd.read_excel(const.ZHIYING_FILE2, index_col=0)
    zhiying_df = zhiying_df[zhiying_df.index <= end_date]
    if zhiying_df.shape[0] == 0:
        return
    for asset in assets:
        df = zhiying_df[[asset]]
        df = df.dropna()
        df.loc[:, 'return'] = df[asset] / 100.
        df.loc[:, 'net value'] = (1 + df['return']).cumprod()
        df.loc[df.index[0], 'net value'] = 1
        portfolio_name.append(u"智盈添易一号第%s期" % (asset))
        net_value.append(df.loc[df.index[-1], 'net value'])
        total_return.append(df.loc[df.index[-1], 'net value'] - 1)
        annual_return.append(empyrical.annual_return(df['return'].dropna()))
        max_drawdown.append(empyrical.max_drawdown(df['return'].dropna()))
        sharpe.append(empyrical.sharpe_ratio(df['return'].dropna()))
        volatility.append(empyrical.annual_volatility(df['return'].dropna()))
        df = df[df.index >= datetime.datetime(2018, 1, 1)]
        start_value = df.loc[df.index[0], 'net value']
        end_value = df.loc[df.index[-1], 'net value']
        year_return.append((end_value - start_value) / start_value)

    # Column labels (Chinese): portfolio, unit net value, YTD return, return
    # since inception, annualized return, max drawdown, Sharpe ratio,
    # volatility.
    ret = {
        u'组合': portfolio_name,
        u'单位净值': net_value,
        u'今年以来业绩': year_return,
        u'成立以来业绩': total_return,
        u'年化收益率': annual_return,
        u'最大回撤': max_drawdown,
        u'夏普率': sharpe,
        u'波动率': volatility
    }
    df = pd.DataFrame(ret)
    df = df[[u"组合", u'单位净值', u'今年以来业绩', u'成立以来业绩',
             u'年化收益率', u'最大回撤', u'夏普率', u'波动率']]
    df.loc[:, u'单位净值'] = df[u'单位净值'].map(lambda x: "%.4f" % x)
    df.loc[:, u'今年以来业绩'] = df[u'今年以来业绩'].map(lambda x: "%.2f%%" % (x * 100))
    df.loc[:, u"成立以来业绩"] = df[u"成立以来业绩"].map(lambda x: "%.2f%%" % (x * 100))
    df.loc[:, u"年化收益率"] = df[u"年化收益率"].map(lambda x: "%.2f%%" % (x * 100))
    df.loc[:, u"最大回撤"] = df[u"最大回撤"].map(lambda x: "%.2f%%" % (-x * 100))
    df.loc[:, u"夏普率"] = df[u"夏普率"].map(lambda x: "%.2f" % x)
    df.loc[:, u"波动率"] = df[u"波动率"].map(lambda x: "%.2f%%" % (x * 100))

    writer = pd.ExcelWriter('%s/%s.xlsx' % (DATA_DIR, date))
    df.to_excel(writer, index=False)
    writer.save()
    df = pd.read_excel('%s/%s.xlsx' % (DATA_DIR, date))
def runstrategy(ticker_list, bench_ticker):
    args = parse_args()
    print(args)

    # Create a cerebro
    cerebro = bt.Cerebro()

    # Get the dates from the args
    fromdate = datetime.datetime.strptime(args.fromdate, '%Y-%m-%d')
    todate = datetime.datetime.strptime(args.todate, '%Y-%m-%d')

    # bench = bt.feeds.YahooFinanceData(
    #     dataname=bench_ticker,
    #     fromdate=fromdate,
    #     todate=todate,
    #     buffered=True, plot=False
    # )
    bench = bt.feeds.GenericCSVData(
        dataname='/Users/joan/PycharmProjects/CSV_DB/IB/' + bench_ticker + '.csv',
        fromdate=fromdate,
        todate=todate,
        nullvalue=0.0,
        dtformat=('%Y%m%d'),
        datetime=1,
        open=2,
        high=3,
        low=4,
        close=5,
        volume=6,
        reverse=False,
        plot=False)
    cerebro.adddata(bench, name=bench_ticker)

    for i in ticker_list:
        print('Loading data: ' + i)
        # data = bt.feeds.YahooFinanceData(
        #     dataname=i,
        #     fromdate=fromdate,
        #     todate=todate,
        #     adjclose=True,
        #     buffered=True, plot=False
        # )
        data = bt.feeds.GenericCSVData(
            dataname='/Users/joan/PycharmProjects/CSV_DB/IB/' + i + '.csv',
            fromdate=fromdate,
            todate=todate,
            nullvalue=0.0,
            dtformat=('%Y%m%d'),
            datetime=1,
            open=2,
            high=3,
            low=4,
            close=5,
            volume=6,
            reverse=False,
            plot=False)
        cerebro.adddata(data, name=i)

    # Add the strategy
    cerebro.addstrategy(PairTradingStrategy,
                        period=args.period,
                        stake=args.stake)
    # cerebro.optstrategy(
    #     PairTradingStrategy,
    #     period=range(45, 75),
    # )

    cerebro.broker.setcash(args.cash)

    # Add the commission - only stocks like a for each operation
    # cerebro.broker.setcommission(commission=args.commperc)
    comminfo = FixedCommisionScheme()
    cerebro.broker.addcommissioninfo(comminfo)

    cerebro.addanalyzer(bt.analyzers.SharpeRatio, _name='sharpe_ratio')
    cerebro.addanalyzer(bt.analyzers.TradeAnalyzer, _name="ta")
    cerebro.addanalyzer(bt.analyzers.SQN, _name="sqn")
    cerebro.addanalyzer(bt.analyzers.SharpeRatio_A, _name='myysharpe',
                        riskfreerate=args.rf_rate)
    cerebro.addanalyzer(bt.analyzers.PyFolio, _name='mypyf')
    cerebro.addanalyzer(bt.analyzers.TimeReturn, timeframe=bt.TimeFrame.Days,
                        data=bench, _name='benchreturns')

    cerebro.addobserver(bt.observers.Value)
    cerebro.addobserver(bt.observers.Benchmark, plot=False)
    cerebro.addobserver(bt.observers.DrawDown)

    # And run it
    strat = cerebro.run(runonce=not args.runnext,
                        preload=not args.nopreload,
                        oldsync=args.oldsync)
    # strat = cerebro.optstrategy(
    #     PairTradingStrategy,
    #     period=range(45, 75),
    #     printlog=True)

    # Plot if requested
    if args.plot:
        cerebro.plot(style='candlestick', barup='green', bardown='red',
                     figsize=(100, 100))

    bench_returns = strat[0].analyzers.benchreturns.get_analysis()
    bench_df = pd.DataFrame.from_dict(bench_returns, orient='index',
                                      columns=['return'])
    return_df = pd.DataFrame.from_dict(
        strat[0].analyzers.mypyf.get_analysis()['returns'],
        orient='index', columns=['return'])

    # print('Sharpe Ratio(bt):', firstStrat.analyzers.myysharpe.get_analysis()['sharperatio'])
    # print('Sharpe Ratio:', empyrical.sharpe_ratio(return_df, risk_free=args.rf_rate / 252, period='daily')[0])
    # print('Sharpe Ratio Benchmark:', empyrical.sharpe_ratio(bench_df, risk_free=args.rf_rate / 252, period='daily')[0])
    # print('Sortino Ratio:', empyrical.sortino_ratio(return_df, period='daily')[0])
    # print('Sortino Ratio Benchmark:', empyrical.sortino_ratio(bench_df, period='daily')[0])
    # print('VaR:', empyrical.value_at_risk(return_df) * 100, '%')
    # print('VaR Benchmark:', empyrical.value_at_risk(bench_df) * 100, '%')
    # print('Capture:', round(empyrical.capture(return_df, bench_df, period='daily')[0] * 100), '%')
    # print('Max drawdown: ', round(empyrical.max_drawdown(return_df)[0] * 100), '%')
    # print('Max drawdown Benchmark: ', round(empyrical.max_drawdown(bench_df)[0] * 100), '%')

    alpha, beta = empyrical.alpha_beta(return_df, bench_df,
                                       risk_free=args.rf_rate)
    # print('Beta: ', beta)
    # print('Annual return:', round(empyrical.annual_return(return_df)[0] * 100), '%')
    # print('Annual Vol:', round(empyrical.annual_volatility(return_df)[0] * 100), '%')
    # print('Annual return Benchmark:', round(empyrical.annual_return(bench_df)[0] * 100), '%')
    # print('Annual Vol Benchmark:', round(empyrical.annual_volatility(bench_df)[0] * 100), '%')

    printTradeAnalysis(strat[0].analyzers.ta.get_analysis())

    dic = {
        'SQN': printSQN(strat[0].analyzers.sqn.get_analysis()),
        'sharpe': empyrical.sharpe_ratio(return_df, risk_free=args.rf_rate / 252,
                                         period='daily')[0],
        'sharpe_bm': empyrical.sharpe_ratio(bench_df, risk_free=args.rf_rate / 252,
                                            period='daily')[0],
        'sortino': empyrical.sortino_ratio(return_df, period='daily')[0],
        'sortino_bm': empyrical.sortino_ratio(bench_df, period='daily')[0],
        'VaR': empyrical.value_at_risk(return_df) * 100,
        'VaR_bm': empyrical.value_at_risk(bench_df) * 100,
        'capture': round(empyrical.capture(return_df, bench_df,
                                           period='daily')[0] * 100),
        'max_dd': round(empyrical.max_drawdown(return_df)[0] * 100, 2),
        'max_dd_bm': round(empyrical.max_drawdown(bench_df)[0] * 100, 2),
        'beta': beta,
        'return_annual': round(empyrical.annual_return(return_df)[0] * 100, 2),
        'return_annual_bm': round(empyrical.annual_return(bench_df)[0] * 100, 2),
        'vol_annual': round(empyrical.annual_volatility(return_df)[0] * 100, 2),
        'vol_annual_bm': round(empyrical.annual_volatility(bench_df)[0] * 100, 2)
    }
    df = pd.DataFrame(dic, index=[0])
    print(df)

    def calc_stats(df):
        df['perc_ret'] = (1 + df['return']).cumprod() - 1
        # print(df.tail())
        return df

    s = return_df.rolling(30).std()
    b = bench_df.rolling(30).std()

    # Get final portfolio Value
    portvalue = cerebro.broker.getvalue()

    # Print out the final result
    print('Final Portfolio Value: ${}'.format(round(portvalue, 2)),
          'PnL: ${}'.format(round(portvalue - args.cash, 2)),
          'PnL: {}%'.format(round(((portvalue / args.cash) - 1) * 100, 2)))

    # Finally plot the end results
    if args.plot:
        fig, axs = plt.subplots(2, sharex=True)
        fig.autofmt_xdate()
        axs[1].plot(s)
        axs[1].plot(b)
        axs[1].set_title('Drawdown')
        axs[1].legend(['Fund', 'Benchmark'])
        axs[0].set_title('Returns')
        axs[0].plot(calc_stats(return_df)['perc_ret'])
        axs[0].plot(calc_stats(bench_df)['perc_ret'])
        axs[0].legend(['Fund', 'Benchmark'])
        plt.show()
    # convert to daily return for different asset in t
    ind_return = pd.concat([ind_return, df[i + '-ind_return']], axis=1)

#%% Portfolio Average
# Strategy return in daily
ind_return['port_avg'] = ind_return.mean(skipna=1, axis=1)
# convert to monthly basis
strategy_month_rtns = ind_return['port_avg'].resample('BM').last().ffill()
strategy_cumm_rtns['cummulative'] = (1 + ind_return['port_avg']).cumprod()
# convert to monthly cum return
strategy_month = strategy_cumm_rtns['cummulative'].resample('BM').last().ffill()

#%% Print Results
print("Annualized Sharpe Ratio = ",
      empyrical.sharpe_ratio(ind_return['port_avg'], period='daily'))
print("Annualized Mean Returns = ",
      empyrical.annual_return(ind_return['port_avg'], period='daily'))
print("Annualized Standard Deviations = ",
      empyrical.annual_volatility(ind_return['port_avg'], period='daily'))
print("Max Drawdown (MDD) = ",
      empyrical.max_drawdown(ind_return['port_avg']))
print("Sortino ratio = ",
      empyrical.sortino_ratio(ind_return['port_avg'], period='daily'))
print("Calmar ratio = ",
      empyrical.calmar_ratio(ind_return['port_avg'], period='daily'))

#%% Visualization
# print(empyrical.sharpe_ratio(strategy_month_rtns, period='monthly'))
a = empyrical.cum_returns(ind_return['port_avg'])
# b = strategy_month
plt.plot(a, color='red', label='Raw Portfolio')
plt.title('Cumulative return in daily basis')
plt.xlabel('Time')
plt.ylabel('Cumulative return')
plt.legend()
plt.show()
target_vol = 0.15
for i in ast:
    # concatenate horizontally
    df = pd.concat([ast[i], predicted_X_t[i + "-X_t"]], axis=1)
    day_vol = df[i].ewm(ignore_na=False, adjust=True, span=60,
                        min_periods=0).std(bias=False)
    # daily return based on equation (1) for individual asset
    df[i + '-ind_return'] = df[i] * df[i + "-X_t"] * target_vol / day_vol
    # convert to daily return for different asset in t
    ind_return = pd.concat([ind_return, df[i + '-ind_return']], axis=1)

# daily return in portfolio
ind_return['port_avg'] = ind_return.mean(skipna=1, axis=1)

#%% Print Results
print("Annualized Sharpe Ratio = ",
      empyrical.sharpe_ratio(ind_return['port_avg'], period='daily'))
print("Annualized Mean Returns = ",
      empyrical.annual_return(ind_return['port_avg'], period='daily'))
print("Annualized Standard Deviations = ",
      empyrical.annual_volatility(ind_return['port_avg'], period='daily'))
print("Max Drawdown (MDD) = ",
      empyrical.max_drawdown(ind_return['port_avg']))
print("Sortino ratio = ",
      empyrical.sortino_ratio(ind_return['port_avg'], period='daily'))
print("Calmar ratio = ",
      empyrical.calmar_ratio(ind_return['port_avg'], period='daily'))
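# Self-contained sketch of the volatility-targeting rule above (assumed toy
# data, not from the original). One detail is made explicit: if target_vol is
# an *annualized* target, the daily EWM std should be annualized before
# dividing, which the snippet above leaves implicit.
import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
idx = pd.date_range('2020-01-01', periods=300, freq='B')
asset_ret = pd.Series(rng.normal(0, 0.01, len(idx)), index=idx)
signal = np.sign(pd.Series(rng.normal(size=len(idx)), index=idx))  # X_t in {-1, +1}

target_vol = 0.15                                       # annualized target
day_vol = asset_ret.ewm(span=60, min_periods=20).std()  # daily EWM std
ann_vol = day_vol * np.sqrt(252)                        # match the target's units

strat_ret = asset_ret * signal * target_vol / ann_vol
print(round(strat_ret.std() * np.sqrt(252), 3))  # realized vol, near 0.15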
def Strategy_performance(returns: pd.DataFrame,
                         mark_benchmark: str = 'benchmark',
                         periods: str = 'daily') -> pd.DataFrame:
    '''Compute risk and performance metrics.

    returns: index - date, columns - return series to evaluate
    mark_benchmark: name of the benchmark column
    periods: frequency of the data
    '''
    df: pd.DataFrame = pd.DataFrame()

    # Metric labels (Chinese): annualized return, cumulative return,
    # volatility, Sharpe, max drawdown, Sortino, Calmar.
    df['年化收益率'] = ep.annual_return(returns, period=periods)
    df['累计收益'] = returns.apply(lambda x: ep.cum_returns(x).iloc[-1])
    df['波动率'] = returns.apply(
        lambda x: ep.annual_volatility(x, period=periods))
    df['夏普'] = returns.apply(ep.sharpe_ratio, period=periods)
    df['最大回撤'] = returns.apply(lambda x: ep.max_drawdown(x))
    df['索提诺比率'] = returns.apply(lambda x: ep.sortino_ratio(x, period=periods))
    df['Calmar'] = returns.apply(lambda x: ep.calmar_ratio(x, period=periods))

    # Relative metrics against the benchmark column
    if mark_benchmark in returns.columns:
        select_col = [col for col in returns.columns if col != mark_benchmark]
        df['IR'] = returns[select_col].apply(
            lambda x: information_ratio(x, returns[mark_benchmark]))
        df['Alpha'] = returns[select_col].apply(
            lambda x: ep.alpha(x, returns[mark_benchmark], period=periods))
        df['Beta'] = returns[select_col].apply(
            lambda x: ep.beta(x, returns[mark_benchmark]))
        # Annualized excess return over the benchmark
        df['超额收益率'] = df['年化收益率'] - \
            df.loc[mark_benchmark, '年化收益率']

    return df.T

# def show_worst_drawdown_periods(returns: pd.Series,
#                                 benchmark_code: str = "000300.SH",
#                                 top: int = 5):
#     """
#     Prints information about the worst drawdown periods.
#
#     Prints peak dates, valley dates, recovery dates, and net drawdowns.
#
#     Parameters
#     ----------
#     returns : pd.Series
#         Daily returns of the strategy, noncumulative.
#         - See full explanation in tears.create_full_tear_sheet.
#     top : int, optional
#         Amount of top drawdowns periods to plot (default 5).
#     """
#     drawdown_df = ts.gen_drawdown_table(returns, top=top)
#     drawdown_df.index = list(range(1, len(drawdown_df) + 1))
#     phase_change = compare_phase_change(returns, benchmark_code, top)
#     df = pd.concat((drawdown_df, phase_change), axis=1)
#     # print_table(
#     #     df.sort_values('区间最大回撤 %', ascending=False),
#     #     name='序号',
#     #     float_format='{0:.2f}'.format,
#     # )
#     return df

# def compare_phase_change(returns: pd.Series,
#                          benchmark_code: str,
#                          top: int = 5) -> pd.DataFrame:
#     '''Compare strategy and benchmark returns over each drawdown window.
#
#     returns: strategy NAV returns
#     benchmark_code: benchmark ticker
#     '''
#     beginDt = returns.index.min()
#     endDt = returns.index.max()
#     benchmark = get_wsd_data(benchmark_code,
#                              'pct_chg',
#                              beginDt,
#                              endDt,
#                              'priceAdj=B',
#                              usedf=True)
#     benchmark = benchmark['PCT_CHG'] / 100
#     df = pd.DataFrame(columns=['策略收益%', '基准收益%'],
#                       index=list(range(1, top + 1)))
#     drawdowns_list = ts.get_top_drawdowns(returns, top=top)
#     for i, v in enumerate(drawdowns_list):
#         peak_date, _, recovery_date = v
#         if pd.isnull(recovery_date):
#             df.loc[i + 1, '策略收益%'] = np.nan
#             df.loc[i + 1, '基准收益'] = np.nan
#         else:
#             df.loc[i + 1, '策略收益%'] = ep.cum_returns(
#                 returns.loc[peak_date:recovery_date]).iloc[-1]
#             df.loc[i + 1, '基准收益%'] = ep.cum_returns(
#                 benchmark.loc[peak_date:recovery_date])[-1]
#     return df
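# Usage sketch (assumed data): without a 'benchmark' column the relative-
# metric branch (which needs the external information_ratio helper) is
# skipped, so this runs with empyrical alone.
import numpy as np
import pandas as pd
import empyrical as ep

rng = np.random.default_rng(7)
idx = pd.date_range('2021-01-01', periods=252, freq='B')
rets = pd.DataFrame({'strategy': rng.normal(0.0006, 0.01, len(idx))}, index=idx)
print(Strategy_performance(rets))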
import pandas as pd
import empyrical as emp
import datetime

df = pd.read_csv('ac-worth-history/eastmoney-fund-000835.csv')
df['daily_return'] = df['worth'].pct_change()

days = df['date'].count()
all_days = (datetime.date.fromisoformat(df['date'].max())
            - datetime.date.fromisoformat(df['date'].min())).days
return_days = 365 * days / all_days
risk_free = 0.03 / return_days

annual_return = emp.annual_return(df['daily_return'], annualization=return_days)
max_drawdown = emp.max_drawdown(df['daily_return'])
sharpe_ratio = emp.sharpe_ratio(df['daily_return'], risk_free,
                                annualization=return_days)
sortino_ratio = emp.sortino_ratio(df['daily_return'], risk_free,
                                  annualization=return_days)
omega_ratio = emp.omega_ratio(df['daily_return'], risk_free,
                              annualization=return_days)
print(annual_return, max_drawdown, sharpe_ratio, sortino_ratio, omega_ratio)
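# Worked check of the annualization factor above (assumed numbers): with 1000
# NAV observations spanning 1460 calendar days,
# return_days = 365 * 1000 / 1460 = 250, i.e. about 250 observed periods per
# year, which is what the empyrical calls annualize by.
days, all_days = 1000, 1460
assert 365 * days / all_days == 250.0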
def fin_funcs(df):
    """
    Financial calculations taken from Quantopian's empyrical library.

    :param df: dataframe containing daily returns calculated on a percentage
        change and also by log scale.
    :return: Dictionary of financial ratios both for percent change returns
        and log returns.
    """
    returns_pct = df["pct_change"]
    risk_free_rate = 0.0

    annual_return_pct = ep.annual_return(returns_pct, period="daily",
                                         annualization=None)
    cumm_return_pct = ep.cum_returns(returns_pct, starting_value=0).iloc[-1]
    cagr_pct = ep.cagr(returns_pct, period="daily", annualization=None)
    sharpe_pct = ep.sharpe_ratio(returns_pct, risk_free=risk_free_rate,
                                 period="daily", annualization=None)
    annual_volatility_pct = ep.annual_volatility(returns_pct, period="daily",
                                                 alpha=2.0, annualization=None)
    max_drawdown_pct = ep.max_drawdown(returns_pct)
    calmar_pct = ep.calmar_ratio(returns_pct, period="daily",
                                 annualization=None)
    sortino_pct = ep.sortino_ratio(
        returns_pct,
        required_return=0,
        period="daily",
        annualization=None,
        _downside_risk=None,
    )
    tail_ratio_pct = ep.tail_ratio(returns_pct)

    financials = {
        "annual_return": annual_return_pct,
        "cumm_return": cumm_return_pct,
        "cagr": cagr_pct,
        "sharpe": sharpe_pct,
        "annual_volatility": annual_volatility_pct,
        "max_drawdown": max_drawdown_pct,
        "calmar": calmar_pct,
        "sortino": sortino_pct,
        "tail_ratio": tail_ratio_pct,
    }

    # Originally set up the program to analyse both pct_change and log
    # returns, but the difference between log and pct_change was not material
    # to the final analysis. Consequently pct_change is used exclusively. The
    # code below is left intact should log returns at the account level be
    # desired.
    # returns_log = df["log_ret"]  # Log returns not used in final scenario.
    # annual_return_log = ep.annual_return(
    #     returns_log, period="daily", annualization=None
    # )
    # cumm_return_log = ep.cum_returns(returns_log, starting_value=0).iloc[-1]
    # cagr_log = ep.cagr(returns_log, period="daily", annualization=None)
    # sharpe_log = ep.sharpe_ratio(
    #     returns_log, risk_free=risk_free_rate, period="daily", annualization=None
    # )
    # annual_volatility_log = ep.annual_volatility(
    #     returns_log, period="daily", alpha=2.0, annualization=None
    # )
    # max_drawdown_log = ep.max_drawdown(returns_log)
    # calmar_log = ep.calmar_ratio(returns_log, period="daily", annualization=None)
    # sortino_log = ep.sortino_ratio(
    #     returns_log,
    #     required_return=0,
    #     period="daily",
    #     annualization=None,
    #     _downside_risk=None,
    # )
    # tail_ratio_log = ep.tail_ratio(returns_log)
    # financials = {
    #     ("return_percent_change", "annual_return"): annual_return_pct,
    #     ("return_percent_change", "cumm_return"): cumm_return_pct,
    #     ("return_percent_change", "cagr"): cagr_pct,
    #     ("return_percent_change", "sharpe"): sharpe_pct,
    #     ("return_percent_change", "annual_volatility"): annual_volatility_pct,
    #     ("return_percent_change", "max_drawdown"): max_drawdown_pct,
    #     ("return_percent_change", "calmar"): calmar_pct,
    #     ("return_percent_change", "sortino"): sortino_pct,
    #     ("return_percent_change", "tail_ratio"): tail_ratio_pct,
    #     ("return_log", "annual_return"): annual_return_log,
    #     ("return_log", "cumm_return"): cumm_return_log,
    #     ("return_log", "cagr"): cagr_log,
    #     ("return_log", "sharpe"): sharpe_log,
    #     ("return_log", "annual_volatility"): annual_volatility_log,
    #     ("return_log", "max_drawdown"): max_drawdown_log,
    #     ("return_log", "calmar"): calmar_log,
    #     ("return_log", "sortino"): sortino_log,
    #     ("return_log", "tail_ratio"): tail_ratio_log,
    # }

    return financials
def describer(self):
    tot_cnt = len(self.df)
    missed = self.df[(np.sign(self.df['return_close']) != self.df['signal'])
                     & (self.df['signal'] == 0)]['strat_return'].describe()
    wrong = self.df[(np.sign(self.df['return_close']) == self.df['signal'] * (-1))
                    & (self.df['signal'] != 0)]['strat_return'].describe()
    jackpot = self.df[(np.sign(self.df['return_close'])
                       == self.df['signal'])]['strat_return'].describe()
    desc = pd.DataFrame([jackpot, wrong, missed],
                        index=['jackpot', 'wrong', 'missed']).T.to_markdown()
    trans_fee_tot = self.df['transfee'].sum()

    return_ret, return_bench = {}, {}
    self.df['date'] = self.df.index.date
    for (k, v) in self.df.groupby('date'):
        return_ret[k] = v['strat_return'].sum()
        return_bench[k] = v['bench_return'].sum()
    rret = pd.Series(list(return_ret.values()),
                     index=list(return_ret.keys()), name='rret')
    rbench = pd.Series(list(return_bench.values()),
                       index=list(return_bench.keys()), name='rbench')

    (alpha, beta) = alpha_beta(rret, rbench, period='daily')
    sharpe = sharpe_ratio(rret, period='daily')
    max_down = max_drawdown(rret)
    ann_return_strat = annual_return(rret, period='daily')
    ann_return_bench = annual_return(rbench, period='daily')
    t_r = tail_ratio(rret)

    returns = f"Strategy Return: {round(self.pnl * 100, 2)}% | " \
              f"Strategy Annualized Return: {round(ann_return_strat * 100, 2)}%. \n" \
              f"BenchMark return: {round(self.df['bench_return'].sum() * 100, 2)}% | " \
              f"BenchMark Annualized Return: {round(ann_return_bench * 100, 2)}%.\n"
    desc_ = f"Strategy: {self.func} \n" \
            f"Transaction Fee Percentage: {self.trans_fee_pctg}\n" \
            f"Intraday Closing Time: {self.trade_flag}\n" \
            f"Params: {self.signal_params}\n" \
            f"Test Period: {self.start_dt} - {self.end_dt}\n" \
            f"-- {self.id} --\n" \
            f"-- {self.timeframe} -- \n" \
            f"-- Position: {self.pos_} --\n" \
            f"-- Barly Stoploss: {self.stop_loss} --\n" \
            f"-- Action on Sig0: {self.action_on_0} --\n" \
            f"-- Signal Shift: {self.sig_shift} --\n" \
            f"Transaction Fee Total: {round(trans_fee_tot * 100, 2)}%\n" \
            f"Signal Ratio: {round(self.signal_ratio * 100, 2)}%\n" \
            f"Open Position: {self.open_t} times; Close Position: {self.close_t} times\n" \
            f"Sharpe Ratio: {round(sharpe, 2)} \n" \
            f"Tail Ratio: {round(t_r, 2)}\n" \
            f"Alpha: {round(alpha * 100, 2)}% | Beta: {round(beta * 100, 2)}% \n" \
            f"Max Drawdown: {round(max_down * 100, 2)}% \n" \
            f"Max Daily Drawdown: {round(rret.min() * 100, 2)}% \n" \
            f"Total Win: {self.winner} | Total Loss: {self.loser} | " \
            f"W/L Ratio: {round(self.winner / self.loser, 2) if self.loser != 0 else 0}\n"

    source_code = "\n\n".join([inspect.getsource(f) for f in self.func
                               ]) if self.func is not None else " "
    source_code_neut = "\n\n".join(
        [inspect.getsource(f)
         for f in self.neut_func]) if self.neut_func is not None else " "

    t_stamp = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    logger.info(f"-- {t_stamp} --\n{desc_}{returns}\n")

    # Every backtest is worth recording
    path_ = os.path.join(
        fp, f"../../docs/backtest/"
        f"{self.id}-{self.timeframe}-Sharpe{round(sharpe, 2)}-{datetime.now().strftime('%y%m%d-%H:%M:%S')}/")
    os.mkdir(path_)
    plot = self.df[['close', 'strat_ret_cumsum',
                    'bench_ret_cumsum']].plot(figsize=(16, 9),
                                              secondary_y='close')
    fig = plot.get_figure()
    fig_path = os.path.join(path_, f"return_curve.png")
    fig.savefig(fig_path)
    rec_path = os.path.join(path_, "trade_record.csv")
    self.df.to_csv(rec_path)
    desc_path = os.path.join(path_, "desc.txt")
    with open(desc_path, mode="w+", encoding='utf8') as f:
        f.write(desc_ + returns + '\n' + f'\nTotal Bars: {tot_cnt} \n' +
                '\nStatistics Desc: \n' + desc +
                '\n* NOTE: THIS DESCRIPTION DIFFERS FROM W/L RATIO ABOVE '
                'BECAUSE ONLY SIGNAL DIRECTION CORRECTNESS IS CONSIDERED HERE.\n' +
                '\n\nBias_factors: \n' + source_code +
                '\nNeut_factors: \n' + source_code_neut)

    # But only high-Sharpe, low-drawdown backtests earn the rich visualization
    if (sharpe > 1.5) & (max_down >= -0.2 * self.pos_):
        rich_visual_path = os.path.join(path_, "rich_visual.html")
        kline = rv.draw_kline_with_yield_and_signal(self.df)
        scatters_fr = rv.draw_factor_return_eval(self.bias_factor_df)
        scatters_ff = rv.draw_factor_eval(self.bias_factor_df)
        res_charts = [kline, *scatters_fr, *scatters_ff]
        if self.neut_factor_df is not None:
            sca_neut_fr = rv.draw_factor_return_eval(self.neut_factor_df)
            sca_neut_ff = rv.draw_factor_eval(self.neut_factor_df)
            res_charts += [*sca_neut_fr, *sca_neut_ff]
        rv.form_page(res_charts, rich_visual_path)
def make_empirical(self):
    statistics_dict = {}
    for item in self.df_list:
        df = item[0]
        df_name = item[1]
        daily_profit = df["daily_profit"]

        # sharpe ratio
        statistics_dict['sharpe ratio'] = round(
            emp.sharpe_ratio(daily_profit), 4)
        plt.plot(statistics_dict["sharpe ratio"], '-', label=df_name, marker="")
        plt.title("sharpe ratio")
        sr_figname = os.path.join(self.output,
                                  self.plot_name + "_sharpe_ratio.png")
        plt.savefig(sr_figname, dpi=200)
        plt.close()

        # annual return
        statistics_dict["annual returns"] = round(
            emp.annual_return(daily_profit), 4)
        plt.plot(statistics_dict["annual returns"], '-', label=df_name, marker="")
        plt.title("annual return")
        ar_figname = os.path.join(self.output,
                                  self.plot_name + "_annual_ret.png")
        plt.savefig(ar_figname, dpi=200)
        plt.close()

        # mean return
        statistics_dict['mean returns'] = round(daily_profit.mean(), 4)
        plt.plot(statistics_dict["mean returns"], '-', label=df_name, marker="")
        plt.title("mean returns")
        ar_figname = os.path.join(self.output,
                                  self.plot_name + "_mean_ret.png")
        plt.savefig(ar_figname, dpi=200)
        plt.close()

        # standard dev p.a.  ###### ALERT #######
        statistics_dict['Standard dev p.a.'] = round(
            emp.annual_volatility(daily_profit), 4)
        # plt.plot(statistics_dict["Standard dev p.a."], '-', label=df_name, marker="")
        # plt.title("Standard dev p.a.")
        # ar_figname = os.path.join(self.output, self.plot_name + "_standard_dev.png")
        # plt.savefig(ar_figname, dpi=200)
        # plt.close()

        # sortino  ###### ALERT #######
        statistics_dict['Sortino Ratio'] = round(
            emp.sortino_ratio(daily_profit), 4)
        # plt.plot(statistics_dict["Sortino Ratio"], '-', label=df_name, marker="")
        # plt.title("Sortino Ratio")
        # ar_figname = os.path.join(self.output, self.plot_name + "_sortino_ratio.png")
        # plt.savefig(ar_figname, dpi=200)
        # plt.close()

        # max dd  ###### ALERT #######
        statistics_dict['MaxDD'] = round(emp.max_drawdown(daily_profit), 4)
        # plt.plot(statistics_dict["MaxDD"], '-', label=df_name, marker="")
        # plt.title("MaxDD")
        # ar_figname = os.path.join(self.output, self.plot_name + "_maxdd.png")
        # plt.savefig(ar_figname, dpi=200)
        # plt.close()

    return statistics_dict
def test_perf_attrib_regression(self):

    positions = pd.read_csv('pyfolio/tests/test_data/positions.csv',
                            index_col=0, parse_dates=True)
    positions.columns = [int(col) if col != 'cash' else col
                         for col in positions.columns]

    returns = pd.read_csv('pyfolio/tests/test_data/returns.csv',
                          index_col=0, parse_dates=True,
                          header=None, squeeze=True)

    factor_loadings = pd.read_csv(
        'pyfolio/tests/test_data/factor_loadings.csv',
        index_col=[0, 1], parse_dates=True)

    factor_returns = pd.read_csv(
        'pyfolio/tests/test_data/factor_returns.csv',
        index_col=0, parse_dates=True)

    residuals = pd.read_csv('pyfolio/tests/test_data/residuals.csv',
                            index_col=0, parse_dates=True)
    residuals.columns = [int(col) for col in residuals.columns]

    intercepts = pd.read_csv('pyfolio/tests/test_data/intercepts.csv',
                             index_col=0, header=None, squeeze=True)

    risk_exposures_portfolio, perf_attrib_output = perf_attrib(
        returns,
        positions,
        factor_returns,
        factor_loadings,
    )

    specific_returns = perf_attrib_output['specific_returns']
    common_returns = perf_attrib_output['common_returns']
    combined_returns = specific_returns + common_returns

    # since all returns are factor returns, common returns should be
    # equivalent to total returns, and specific returns should be 0
    pd.util.testing.assert_series_equal(returns, common_returns,
                                        check_names=False)
    self.assertTrue(np.isclose(specific_returns, 0).all())

    # specific and common returns combined should equal total returns
    pd.util.testing.assert_series_equal(returns, combined_returns,
                                        check_names=False)

    # check that residuals + intercepts = specific returns
    self.assertTrue(np.isclose((residuals + intercepts), 0).all())

    # check that exposure * factor returns = common returns
    expected_common_returns = risk_exposures_portfolio.multiply(
        factor_returns, axis='rows').sum(axis='columns')
    pd.util.testing.assert_series_equal(expected_common_returns,
                                        common_returns, check_names=False)

    # since factor loadings are ones, portfolio risk exposures
    # should be ones
    pd.util.testing.assert_frame_equal(
        risk_exposures_portfolio,
        pd.DataFrame(np.ones_like(risk_exposures_portfolio),
                     index=risk_exposures_portfolio.index,
                     columns=risk_exposures_portfolio.columns))

    perf_attrib_summary, exposures_summary = create_perf_attrib_stats(
        perf_attrib_output, risk_exposures_portfolio)

    self.assertEqual(ep.annual_return(specific_returns),
                     perf_attrib_summary['Annualized Specific Return'])
    self.assertEqual(ep.annual_return(common_returns),
                     perf_attrib_summary['Annualized Common Return'])
    self.assertEqual(ep.annual_return(combined_returns),
                     perf_attrib_summary['Annualized Total Return'])

    self.assertEqual(ep.sharpe_ratio(specific_returns),
                     perf_attrib_summary['Specific Sharpe Ratio'])

    self.assertEqual(ep.cum_returns_final(specific_returns),
                     perf_attrib_summary['Cumulative Specific Return'])
    self.assertEqual(ep.cum_returns_final(common_returns),
                     perf_attrib_summary['Cumulative Common Return'])
    self.assertEqual(ep.cum_returns_final(combined_returns),
                     perf_attrib_summary['Total Returns'])

    avg_factor_exposure = risk_exposures_portfolio.mean().rename(
        'Average Risk Factor Exposure')
    pd.util.testing.assert_series_equal(
        avg_factor_exposure,
        exposures_summary['Average Risk Factor Exposure'])

    cumulative_returns_by_factor = pd.Series(
        [ep.cum_returns_final(perf_attrib_output[c])
         for c in risk_exposures_portfolio.columns],
        name='Cumulative Return',
        index=risk_exposures_portfolio.columns)
    pd.util.testing.assert_series_equal(
        cumulative_returns_by_factor,
        exposures_summary['Cumulative Return'])

    annualized_returns_by_factor = pd.Series(
        [ep.annual_return(perf_attrib_output[c])
         for c in risk_exposures_portfolio.columns],
        name='Annualized Return',
        index=risk_exposures_portfolio.columns)
    pd.util.testing.assert_series_equal(
        annualized_returns_by_factor,
        exposures_summary['Annualized Return'])
def runModelsChunksSkipMP(self, dataOfInterest, daysToCheck=None):
    xVals, yVals, yIndex, xToday = self.walkForward.generateWindows(dataOfInterest)
    mpEngine = mp.get_context('fork')
    with mpEngine.Manager() as manager:
        returnDict = manager.dict()
        identifiersToCheck = []
        # 44 is the prediction lag; predictions must not overlap or the
        # validity of the walk-forward optimization is ruined.
        for i in range(len(xVals) - 44):
            if i < 600:  # minimum training window
                continue
            identifiersToCheck.append(str(i))
        if daysToCheck is not None:
            identifiersToCheck = identifiersToCheck[-daysToCheck:]

        # Evaluate the first 252 identifiers (roughly one trading year),
        # then continue in widening windows (to 600, 900, 1200, and the
        # remainder) only while the checkpoint tests below keep passing.
        identifierWindows = [identifiersToCheck[:252],
                             identifiersToCheck[252:600],
                             identifiersToCheck[600:900],
                             identifiersToCheck[900:1200],
                             identifiersToCheck[1200:]]
        returnStream = None
        factorReturn = None
        predictions = None
        slippageAdjustedReturn = None
        shortSeen = 0
        for clippedIdentifiers in identifierWindows:
            splitIdentifiers = np.array_split(np.array(clippedIdentifiers), 16)
            runningP = []
            k = 0
            for identifiers in splitIdentifiers:
                p = mpEngine.Process(target=endToEnd.runDayChunking,
                                     args=(self, xVals, yVals, identifiers, returnDict, k))
                p.start()
                runningP.append(p)
                k += 1
            # Wait for all worker processes to finish.
            while len(runningP) > 0:
                newP = []
                for p in runningP:
                    if p.is_alive():
                        newP.append(p)
                    else:
                        p.join()
                runningP = newP

            preds = []
            actuals = []
            days = []
            for i in clippedIdentifiers:
                preds.append(returnDict[i])
                actuals.append(yVals[int(i) + 44])
                days.append(yIndex[int(i) + 44])

            binaryActuals = np.array(endToEnd.transformTargetArr(np.array(actuals), self.threshold))
            loss = log_loss(binaryActuals, np.array(preds))
            roc_auc = roc_auc_score(binaryActuals, np.array(preds))
            accuracy = accuracy_score(binaryActuals, np.array(preds).round())
            print(loss, roc_auc, accuracy)

            # Blend predictions accurately across overlapping days.
            predsTable = pd.DataFrame(preds, index=days, columns=["Predictions"])
            i = 1
            tablesToJoin = []
            while i < self.walkForward.predictionPeriod:
                thisTable = predsTable.shift(i)
                thisTable.columns = ["Predictions_" + str(i)]
                tablesToJoin.append(thisTable)
                i += 1
            predsTable = predsTable.join(tablesToJoin)

            transformedPreds = pd.DataFrame(predsTable.apply(computePosition, axis=1),
                                            columns=["Predictions"]).dropna()
            dailyFactorReturn = getDailyFactorReturn(self.walkForward.targetTicker, dataOfInterest)
            transformedPreds = transformedPreds.join(dailyFactorReturn).dropna()

            algoReturn = pd.DataFrame(transformedPreds.apply(lambda x: x[0] * x[1], axis=1),
                                      columns=["Algo Return"])
            returnStream = algoReturn if returnStream is None \
                else pd.concat([returnStream, algoReturn])
            factorReturn = pd.DataFrame(transformedPreds[["Factor Return"]]) if factorReturn is None \
                else pd.concat([factorReturn, pd.DataFrame(transformedPreds[["Factor Return"]])])
            predictions = pd.DataFrame(transformedPreds[["Predictions"]]) if predictions is None \
                else pd.concat([predictions, pd.DataFrame(transformedPreds[["Predictions"]])])

            alpha, beta = empyrical.alpha_beta(returnStream, factorReturn)
            rawBeta = abs(empyrical.alpha_beta(returnStream.apply(applyBinary, axis=0),
                                               factorReturn.apply(applyBinary, axis=0))[1])
            shortSharpe = empyrical.sharpe_ratio(returnStream)
            activity = np.count_nonzero(returnStream) / float(len(returnStream))
            algoAnnualReturn = empyrical.annual_return(returnStream.values)[0]
            algoVol = empyrical.annual_volatility(returnStream.values)
factorAnnualReturn = empyrical.annual_return(factorReturn.values)[0] factorVol = empyrical.annual_volatility(factorReturn.values) treynor = ((empyrical.annual_return(returnStream.values)[0] - empyrical.annual_return(factorReturn.values)[0]) \ / abs(empyrical.beta(returnStream, factorReturn))) sharpeDiff = empyrical.sharpe_ratio(returnStream) - empyrical.sharpe_ratio(factorReturn) relativeSharpe = sharpeDiff / empyrical.sharpe_ratio(factorReturn) * (empyrical.sharpe_ratio(factorReturn)/abs(empyrical.sharpe_ratio(factorReturn))) stability = empyrical.stability_of_timeseries(returnStream) ##CALCULATE SHARPE WITH SLIPPAGE estimatedSlippageLoss = portfolioGeneration.estimateTransactionCost(predictions) estimatedSlippageLoss.columns = returnStream.columns slippageAdjustedReturn = (returnStream - estimatedSlippageLoss).dropna() slippageSharpe = empyrical.sharpe_ratio(slippageAdjustedReturn) sharpeDiffSlippage = empyrical.sharpe_ratio(slippageAdjustedReturn) - empyrical.sharpe_ratio(factorReturn) relativeSharpeSlippage = sharpeDiffSlippage / empyrical.sharpe_ratio(factorReturn) * (empyrical.sharpe_ratio(factorReturn)/abs(empyrical.sharpe_ratio(factorReturn))) if (empyrical.sharpe_ratio(returnStream) < 0.0 or abs(beta) > 0.7 or activity < 0.5 or accuracy < 0.45) and shortSeen == 0: return None, { "sharpe":shortSharpe, ##OVERLOADED IN FAIL "factorSharpe":empyrical.sharpe_ratio(factorReturn), "sharpeSlippage":slippageSharpe, "beta":abs(beta), "alpha":alpha, "activity":activity, "treynor":treynor, "period":"first 252 days", "algoReturn":algoAnnualReturn, "algoVol":algoVol, "factorReturn":factorAnnualReturn, "factorVol":factorVol, "sharpeDiff":sharpeDiff, "relativeSharpe":relativeSharpe, "sharpeDiffSlippage":sharpeDiffSlippage, "relativeSharpeSlippage":relativeSharpeSlippage, "rawBeta":rawBeta, "stability":stability, "loss":loss, "roc_auc":roc_auc, "accuracy":accuracy }, None, None elif (((empyrical.sharpe_ratio(returnStream) < 0.25 or slippageSharpe < 0.0) and shortSeen == 1) or ((empyrical.sharpe_ratio(returnStream) < 0.25 or slippageSharpe < 0.0) and (shortSeen == 2 or shortSeen == 3)) or abs(beta) > 0.6 or activity < 0.6 or stability < 0.4 or accuracy < 0.45) and (shortSeen == 1 or shortSeen == 2 or shortSeen == 3): periodName = "first 600 days" if shortSeen == 2: periodName = "first 900 days" elif shortSeen == 3: periodName = "first 1200 days" return None, { "sharpe":shortSharpe, ##OVERLOADED IN FAIL "factorSharpe":empyrical.sharpe_ratio(factorReturn), "sharpeSlippage":slippageSharpe, "alpha":alpha, "beta":abs(beta), "activity":activity, "treynor":treynor, "period":periodName, "algoReturn":algoAnnualReturn, "algoVol":algoVol, "factorReturn":factorAnnualReturn, "factorVol":factorVol, "sharpeDiff":sharpeDiff, "relativeSharpe":relativeSharpe, "sharpeDiffSlippage":sharpeDiffSlippage, "relativeSharpeSlippage":relativeSharpeSlippage, "rawBeta":rawBeta, "stability":stability, "loss":loss, "roc_auc":roc_auc, "accuracy":accuracy }, None, None elif shortSeen < 4: print("CONTINUING", "SHARPE:", shortSharpe, "SHARPE DIFF:", sharpeDiff, "RAW BETA:", rawBeta, "TREYNOR:", treynor) shortSeen += 1 return returnStream, factorReturn, predictions, slippageAdjustedReturn
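The Treynor ratio above is the strategy's annualized return in excess of the factor, scaled by the magnitude of its beta. Note that on a 2D input such as `returnStream.values`, empyrical returns one value per column, hence the `[0]` indexing; on a plain Series it returns a scalar. A small sketch of the same calculation on hypothetical Series data:

import pandas as pd
import empyrical

algo_returns = pd.Series([0.004, -0.002, 0.006, 0.001, -0.003])
factor_returns = pd.Series([0.003, -0.001, 0.004, 0.002, -0.002])

# Annualized excess return per unit of systematic (beta) risk.
treynor = ((empyrical.annual_return(algo_returns)
            - empyrical.annual_return(factor_returns))
           / abs(empyrical.beta(algo_returns, factor_returns)))
print(treynor)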
from empyrical import (
    cum_returns, annual_return, annual_volatility, downside_risk,
    alpha, beta, max_drawdown, sharpe_ratio, sortino_ratio,
    calmar_ratio, omega_ratio, tail_ratio
)
import pandas as pd

returns = pd.Series(
    index=pd.date_range('2017-03-10', '2017-03-19'),
    data=(-0.012143, 0.045350, 0.030957, 0.004902, 0.002341, -0.02103,
          0.00148, 0.004820, -0.00023, 0.01201)
)
benchmark_returns = pd.Series(
    index=pd.date_range('2017-03-10', '2017-03-19'),
    data=(-0.031940, 0.025350, -0.020957, -0.000902, 0.007341, -0.01103,
          0.00248, 0.008820, -0.00123, 0.01091)
)

creturns = cum_returns(returns)
max_drawdown(returns)
annual_return(returns)
annual_volatility(returns, period='daily')
calmar_ratio(returns)
omega_ratio(returns=returns, risk_free=0.01)
sharpe_ratio(returns=returns, risk_free=0.01)
sortino_ratio(returns=returns)
downside_risk(returns=returns)
alpha(returns=returns, factor_returns=benchmark_returns, risk_free=0.01)
beta(returns=returns, factor_returns=benchmark_returns, risk_free=0.01)
tail_ratio(returns=returns)
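`annual_return` is just the compounded growth rate of the series re-expressed at an annual frequency. A sketch verifying that against a manual CAGR computation, assuming the default daily annualization factor of 252:

import numpy as np
import pandas as pd
from empyrical import annual_return, cum_returns_final

returns = pd.Series(
    index=pd.date_range('2017-03-10', '2017-03-19'),
    data=(-0.012143, 0.045350, 0.030957, 0.004902, 0.002341, -0.02103,
          0.00148, 0.004820, -0.00023, 0.01201)
)

# Compound the period returns, then re-express the total growth at an
# annual rate: (1 + total_return) ** (1 / num_years) - 1.
num_years = len(returns) / 252
manual_cagr = (1 + cum_returns_final(returns)) ** (1 / num_years) - 1

assert np.isclose(manual_cagr, annual_return(returns))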
plt.title("return rate of AAPL and SPX") plt.xlabel('time') plt.ylabel('return rate') plt.show() if __name__ == '__main__': # BenchmarkReturnsAndVolatility().start_of_simulation() perf = NocodeBacktestZiplineStrategy().run_algorithm() print(perf) print("========") # 画出收益曲线图 draw_return_rate_line(perf) return_list = perf['returns'] # 计算年化收益率 ann_return = annual_return(return_list) # 计算累计收益率 cum_return_list = cum_returns(return_list) # 计算sharp ratio sharp = sharpe_ratio(return_list) # 最大回撤 max_drawdown_ratio = max_drawdown(return_list) print( "年化收益率 = {:.2%}, 累计收益率 = {:.2%}, 最大回撤 = {:.2%}, 夏普比率 = {:.2f} ".format( ann_return, cum_return_list[-1], max_drawdown_ratio, sharp)) returns = pd.Series(index=pd.date_range('2017-03-10', '2017-03-19'), data=(-0.012143, 0.045350, 0.030957, 0.004902, 0.002341, -0.02103, 0.00148, 0.004820, -0.00023, 0.01201)) benchmark_returns = pd.Series(index=pd.date_range('2017-03-10',
def step(self, action):
    # Normalise the action so portfolio weights sum to 1 in absolute value.
    normalised_action = action / np.sum(np.abs(action))
    done = False

    # Rebalance the portfolio at the open, using the log return of the
    # following day's open price.
    next_day_log_return = self.pricedata[self.index + 1, 0, :]

    # Transaction cost of moving from the previous position to the new one.
    transaction_cost = self.transaction_cost(normalised_action,
                                             self.position_series[-1])

    # Rebalancing
    self.position_series = np.append(self.position_series,
                                     [normalised_action], axis=0)
    today_portfolio_return = np.sum(
        normalised_action[:-1] * next_day_log_return) + np.sum(transaction_cost)
    self.log_return_series = np.append(self.log_return_series,
                                       [today_portfolio_return], axis=0)

    # Calculate the reward. The log returns are cast to a pd.Series so the
    # empyrical functions can be used; the drawdown penalty is phased in
    # linearly over a burn-in period.
    live_days = self.index - self.lookback
    burnin = 250
    whole_series = pd.Series(self.log_return_series)
    if live_days > burnin:
        self.metric = annual_return(whole_series) + 0.5 * max_drawdown(whole_series)
    else:
        self.metric = (annual_return(whole_series)
                       + 0.5 * max_drawdown(whole_series) * live_days / burnin)
    reward = self.metric - self.metric_series[-1]
    self.metric_series = np.append(self.metric_series, [self.metric], axis=0)

    # Check for the end of the backtest.
    if self.index >= self.pricedata.shape[0] - 2:
        done = True

    # Prepare the observation for the next day.
    self.index += 1
    price_lookback = self.pricedata[
        self.index - self.lookback:self.index, :, :].reshape(self.lookback, -1)
    metrics = np.vstack((
        self.log_return_series[self.index - self.lookback:self.index],
        self.metric_series[self.index - self.lookback:self.index],
    )).transpose()
    self.observation = np.concatenate(
        (
            price_lookback,
            metrics,
            self.position_series[self.index - self.lookback:self.index],
        ),
        axis=1,
    )
    return self.observation, reward, done, {}
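The reward metric above trades growth against risk: `max_drawdown` is non-positive, so adding half of it penalizes strategies with deep drawdowns. A minimal sketch of the composite metric on a toy series:

import pandas as pd
from empyrical import annual_return, max_drawdown

log_returns = pd.Series([0.002, -0.015, 0.004, 0.009, -0.003, 0.001])

# max_drawdown(...) <= 0, so the 0.5 weight subtracts a drawdown penalty
# from the annualized growth term.
metric = annual_return(log_returns) + 0.5 * max_drawdown(log_returns)
print(metric)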
def test_perf_attrib_regression(self): positions = pd.read_csv('pyfolio/tests/test_data/positions.csv', index_col=0, parse_dates=True) positions.columns = [int(col) if col != 'cash' else col for col in positions.columns] returns = pd.read_csv('pyfolio/tests/test_data/returns.csv', index_col=0, parse_dates=True, header=None, squeeze=True) factor_loadings = pd.read_csv( 'pyfolio/tests/test_data/factor_loadings.csv', index_col=[0, 1], parse_dates=True ) factor_returns = pd.read_csv( 'pyfolio/tests/test_data/factor_returns.csv', index_col=0, parse_dates=True ) residuals = pd.read_csv('pyfolio/tests/test_data/residuals.csv', index_col=0, parse_dates=True) residuals.columns = [int(col) for col in residuals.columns] intercepts = pd.read_csv('pyfolio/tests/test_data/intercepts.csv', index_col=0, header=None, squeeze=True) risk_exposures_portfolio, perf_attrib_output = perf_attrib( returns, positions, factor_returns, factor_loadings, ) specific_returns = perf_attrib_output['specific_returns'] common_returns = perf_attrib_output['common_returns'] combined_returns = specific_returns + common_returns # since all returns are factor returns, common returns should be # equivalent to total returns, and specific returns should be 0 pd.util.testing.assert_series_equal(returns, common_returns, check_names=False) self.assertTrue(np.isclose(specific_returns, 0).all()) # specific and common returns combined should equal total returns pd.util.testing.assert_series_equal(returns, combined_returns, check_names=False) # check that residuals + intercepts = specific returns self.assertTrue(np.isclose((residuals + intercepts), 0).all()) # check that exposure * factor returns = common returns expected_common_returns = risk_exposures_portfolio.multiply( factor_returns, axis='rows' ).sum(axis='columns') pd.util.testing.assert_series_equal(expected_common_returns, common_returns, check_names=False) # since factor loadings are ones, portfolio risk exposures # should be ones pd.util.testing.assert_frame_equal( risk_exposures_portfolio, pd.DataFrame(np.ones_like(risk_exposures_portfolio), index=risk_exposures_portfolio.index, columns=risk_exposures_portfolio.columns) ) perf_attrib_summary, exposures_summary = create_perf_attrib_stats( perf_attrib_output, risk_exposures_portfolio ) self.assertEqual(ep.annual_return(specific_returns), perf_attrib_summary['Annualized Specific Return']) self.assertEqual(ep.annual_return(common_returns), perf_attrib_summary['Annualized Common Return']) self.assertEqual(ep.annual_return(combined_returns), perf_attrib_summary['Annualized Total Return']) self.assertEqual(ep.sharpe_ratio(specific_returns), perf_attrib_summary['Specific Sharpe Ratio']) self.assertEqual(ep.cum_returns_final(specific_returns), perf_attrib_summary['Cumulative Specific Return']) self.assertEqual(ep.cum_returns_final(common_returns), perf_attrib_summary['Cumulative Common Return']) self.assertEqual(ep.cum_returns_final(combined_returns), perf_attrib_summary['Total Returns']) avg_factor_exposure = risk_exposures_portfolio.mean().rename( 'Average Risk Factor Exposure' ) pd.util.testing.assert_series_equal( avg_factor_exposure, exposures_summary['Average Risk Factor Exposure'] ) cumulative_returns_by_factor = pd.Series( [ep.cum_returns_final(perf_attrib_output[c]) for c in risk_exposures_portfolio.columns], name='Cumulative Return', index=risk_exposures_portfolio.columns ) pd.util.testing.assert_series_equal( cumulative_returns_by_factor, exposures_summary['Cumulative Return'] ) annualized_returns_by_factor = pd.Series( 
[ep.annual_return(perf_attrib_output[c]) for c in risk_exposures_portfolio.columns], name='Annualized Return', index=risk_exposures_portfolio.columns ) pd.util.testing.assert_series_equal( annualized_returns_by_factor, exposures_summary['Annualized Return'] )
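The identities this test asserts are easy to reproduce by hand: common returns are the exposure-weighted factor returns, and specific returns are whatever the factors do not explain. A sketch with synthetic two-factor data (factor names are illustrative):

import numpy as np
import pandas as pd

dates = pd.date_range('2017-03-10', periods=5)
factor_returns = pd.DataFrame(
    {'momentum': [0.001, -0.002, 0.003, 0.000, 0.001],
     'value':    [0.002, 0.001, -0.001, 0.002, 0.000]},
    index=dates)
# Unit exposures, mirroring the all-ones factor loadings in the test.
risk_exposures = pd.DataFrame(1.0, index=dates, columns=factor_returns.columns)
# A portfolio whose returns are fully explained by the factors.
total_returns = factor_returns.sum(axis='columns')

common_returns = risk_exposures.multiply(factor_returns).sum(axis='columns')
specific_returns = total_returns - common_returns

# All returns are factor-driven here, so the specific part vanishes.
assert np.isclose(specific_returns, 0).all()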
def getAnnualReturn(self, period='daily', annualization=None):
    return empyrical.annual_return(self.returns, period=period,
                                   annualization=annualization)
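A possible usage of such a wrapper, with a hypothetical minimal `Portfolio` holder class standing in for the original:

import pandas as pd
import empyrical

# Hypothetical holder class; `returns` is a pd.Series of periodic,
# non-cumulative returns.
class Portfolio:
    def __init__(self, returns):
        self.returns = returns

    def getAnnualReturn(self, period='daily', annualization=None):
        return empyrical.annual_return(self.returns, period=period,
                                       annualization=annualization)

p = Portfolio(pd.Series([0.001, -0.004, 0.012, 0.003]))
print(p.getAnnualReturn())                 # daily data: 252 periods per year
print(p.getAnnualReturn(period='weekly'))  # weekly data: 52 periods per year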