def stability_of_timeseries(returns):
    """R-squared of an ordinary least squares linear fit to the
    cumulative log returns.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.

    Returns
    -------
    float
        R-squared of the linear fit.
    """
    # Thin delegation to empyrical's implementation.
    r_squared = ep.stability_of_timeseries(returns)
    return r_squared
def stability_of_timeseries(returns):
    """R-squared of an ordinary least squares linear fit to the
    cumulative log returns.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.

    Returns
    -------
    float
        R-squared of the linear fit.
    """
    # Thin delegation to empyrical's implementation.
    r_squared = empyrical.stability_of_timeseries(returns)
    return r_squared
def plot_function(epoch_weights):
    """Summarize and plot the performance of a sequence of portfolio weights.

    Builds an hourly P&L series from the per-epoch weights, prints a one-line
    summary of empyrical risk metrics, and writes four PNG files:
    'cumulative_return', 'weekly_returns', 'portfolio_weights', 'transactions'.

    Relies on module-level globals: ``No_Channels`` (number of assets),
    ``y_test`` (a tensor of realized returns — presumably aligned with the
    weights; TODO confirm), ``cost`` (per-unit transaction cost), and
    ``cmap`` (heatmap colormap).

    Parameters
    ----------
    epoch_weights : sequence of array-like
        Portfolio weights recorded per epoch; concatenated and reshaped to
        (-1, No_Channels).
    """
    # Flatten the epoch weights into one (time, channels) matrix.
    ew = np.concatenate(epoch_weights).reshape(-1, No_Channels)
    # Turnover per step: L1 distance between consecutive weight vectors.
    comm = np.sum(np.abs(ew[1:] - ew[:-1]), axis=1)
    # Gross portfolio return per step; drop the first step to align with comm.
    ret = np.sum(np.multiply(ew, y_test.numpy()), axis=1)[1:]
    # Synthetic hourly index starting 2018-01-01 (arbitrary anchor date).
    ind = pd.date_range("20180101", periods=len(ret), freq='H')
    # Net return after transaction costs.
    ret = pd.DataFrame(ret - comm * cost, index = ind)
    # Daily simple returns from (assumed) log returns — TODO confirm ret is in log space.
    exp = np.exp(ret.resample('1D').sum()) - 1.0
    ggg = 'Drawdown:', emp.max_drawdown(exp).values[0], 'Sharpe:', emp.sharpe_ratio(exp)[0], \
        'Sortino:', emp.sortino_ratio(exp).values[0], 'Stability:', emp.stability_of_timeseries(exp), \
        'Tail:', emp.tail_ratio(exp), 'ValAtRisk:', emp.value_at_risk(exp)
    ttt = ' '.join(str(x) for x in ggg)
    print(ttt)
    # 1) Cumulative return curve.
    plt.figure()
    np.exp(ret).cumprod().plot(figsize=(48, 12), title=ttt)
    plt.savefig('cumulative_return')
    plt.close()
    # 2) Weekly return bars, shaded by rank (darker = larger).
    ret = ret.resample('1W').sum()
    plt.figure(figsize=(48, 12))
    pal = sns.color_palette("Greens_d", len(ret))
    rank = ret.iloc[:,0].argsort()
    ax = sns.barplot(x=ret.index.strftime('%d-%m'), y=ret.values.reshape(-1), palette=np.array(pal[::-1])[rank])
    ax.text(0.5, 1.0, ttt, horizontalalignment='center', verticalalignment='top', transform=ax.transAxes)
    plt.savefig('weekly_returns')
    plt.close()
    # 3) Heatmap of portfolio weights over time (channels x time).
    ew_df = pd.DataFrame(ew)
    plt.figure(figsize=(48, 12))
    ax = sns.heatmap(ew_df.T, cmap=cmap, center=0, xticklabels=False, robust=True)
    ax.text(0.5, 1.0, ttt, horizontalalignment='center', verticalalignment='top', transform=ax.transAxes)
    plt.savefig('portfolio_weights')
    plt.close()
    # 4) Heatmap of weight changes (transactions) between consecutive steps.
    tr = np.diff(ew.T, axis=1)
    plt.figure(figsize=(96, 12))
    ax = sns.heatmap(tr, cmap=cmap, center=0, robust=True, yticklabels=False, xticklabels=False)
    ax.text(0.5, 1.0, ttt, horizontalalignment='center', verticalalignment='top', transform=ax.transAxes)
    plt.savefig('transactions')
    plt.close()
def test_stability_of_timeseries(self, returns, expected):
    """Check empyrical.stability_of_timeseries against the expected value."""
    observed = empyrical.stability_of_timeseries(returns)
    assert_almost_equal(observed, expected, DECIMAL_PLACES)
def getLimitedDataForPortfolio(historicalWeights, historicalPredictions, modelsUsed, factorToTrade, joinedData):
    """Evaluate a historical portfolio against a benchmark factor.

    Converts historical model weights/predictions into ticker allocations,
    simulates performance (slippage included downstream), and summarizes
    risk/return statistics relative to ``factorToTrade``.

    Parameters
    ----------
    historicalWeights, historicalPredictions : project-specific structures
        Inputs consumed by ``historicalWeightsToTickerAllocations`` —
        schema defined elsewhere; confirm against that helper.
    modelsUsed : iterable
        Model objects; each exposes a ``targetTicker`` attribute.
    factorToTrade : str
        Benchmark ticker used for alpha/beta and sharpe comparisons.
    joinedData : pd.DataFrame (presumably joined price data — TODO confirm)

    Returns
    -------
    (dict, pd.DataFrame)
        Summary statistics dict and the (scaled, NaN-filled) ticker
        allocations table.
    """
    normalTickerAllocationsTable, scaledTickerAllocationsTable = historicalWeightsToTickerAllocations(historicalWeights, historicalPredictions, modelsUsed)
    # capitalUsed = pd.DataFrame(normalTickerAllocationsTable.apply(lambda x: sum([abs(item) for item in x]), axis=1))
    # print(capitalUsed)
    # The scaled table is used; the "normal" table is only kept for the
    # commented-out capital-usage diagnostic above.
    tickerAllocationsTable = scaledTickerAllocationsTable #scaledTickerAllocationsTable
    tickerAllocationsTable = tickerAllocationsTable.fillna(0)
    tickerPerformance, algoPerformance, algoTransactionCost = portfolioGeneration.calculatePerformanceForAllocations(tickerAllocationsTable, joinedData)
    benchmark = factorToTrade
    factorReturn = dataAck.getDailyFactorReturn(benchmark, joinedData)
    factorReturn.columns = ["Factor Return (" + benchmark + ")"]
    algoPerformance.columns = ["Algo Return"]
    # Rolling cumulative returns over ~1 week / 1 month / 1 year of trading days.
    algoPerformanceRollingWeekly = algoPerformance.rolling(5, min_periods=5).apply(lambda x:empyrical.cum_returns(x)[-1]).dropna()
    algoPerformanceRollingWeekly.columns = ["Weekly Rolling Performance"]
    algoPerformanceRollingMonthly = algoPerformance.rolling(22, min_periods=22).apply(lambda x:empyrical.cum_returns(x)[-1]).dropna()
    algoPerformanceRollingMonthly.columns = ["Monthly Rolling Performance"]
    algoPerformanceRollingYearly = algoPerformance.rolling(252, min_periods=252).apply(lambda x:empyrical.cum_returns(x)[-1]).dropna()
    algoPerformanceRollingYearly.columns = ["Yearly Rolling Performance"]
    tickersUsed = []
    for mod in modelsUsed:
        tickersUsed.append(mod.targetTicker)
    # for ticker in tickersUsed:
    #     thisFactorReturn = dataAck.getDailyFactorReturn(ticker, joinedData)
    #     thisFactorReturn.columns = ["Factor Return (" + ticker + ")"]
    #     alpha, beta = empyrical.alpha_beta(algoPerformance, thisFactorReturn)
    #     print(ticker, beta)
    alpha, beta = empyrical.alpha_beta(algoPerformance, factorReturn)
    sharpe_difference = empyrical.sharpe_ratio(algoPerformance) - empyrical.sharpe_ratio(factorReturn)
    annualizedReturn = empyrical.annual_return(algoPerformance)[0]
    annualizedVolatility = empyrical.annual_volatility(algoPerformance)
    stability = empyrical.stability_of_timeseries(algoPerformance)
    # Fraction of days with a strictly positive return.
    profitability = len((algoPerformance.values)[algoPerformance.values > 0])/len(algoPerformance.values)
    rollingSharpe = algoPerformance.rolling(252, min_periods=252).apply(lambda x:empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe.columns = ["252 Day Rolling Sharpe"]
    rollingSharpeError = rollingSharpe["252 Day Rolling Sharpe"].std()
    # 1st percentile of the rolling sharpe — near-worst-case sharpe.
    rollingSharpeMinimum = np.percentile(rollingSharpe["252 Day Rolling Sharpe"].values, 1)
    ##AUTOMATICALLY TAKES SLIPPAGE INTO ACCOUNT
    return {
        "benchmark":factorToTrade,
        "alpha":alpha,
        "beta":abs(beta),
        "sharpe difference":sharpe_difference,
        "annualizedReturn":annualizedReturn,
        "annualizedVolatility":annualizedVolatility,
        "sharpe":empyrical.sharpe_ratio(algoPerformance),
        "free return":annualizedReturn - annualizedVolatility,
        "stability":stability,
        "profitability":profitability,
        "rollingSharpeError":rollingSharpeError,
        "rollingSharpeMinimum":rollingSharpeMinimum,
        "weeklyMinimum":algoPerformanceRollingWeekly.min().values[0],
        "monthlyMinimum":algoPerformanceRollingMonthly.min().values[0],
        "yearlyMinimum":algoPerformanceRollingYearly.min().values[0]
    }, tickerAllocationsTable
def calculate_statistics(self, df: DataFrame = None, output=True):
    """Compute backtest performance statistics from daily results.

    Parameters
    ----------
    df : DataFrame, optional
        Daily results with columns ``net_pnl``, ``commission``, ``slippage``,
        ``turnover``, ``trade_count`` indexed by date. Falls back to
        ``self.daily_df``; if both are None every statistic is zeroed.
    output : bool
        When True, log a human-readable summary via ``self.output``.

    Returns
    -------
    dict
        Statistics keyed by name; infinities are zeroed and NaNs replaced.
    """
    self.output("开始计算策略统计指标")
    # Check DataFrame input exterior
    if df is None:
        df = self.daily_df
    # Check for init DataFrame
    if df is None:
        # Set all statistics to 0 if no trade.
        start_date = ""
        end_date = ""
        total_days = 0
        profit_days = 0
        loss_days = 0
        end_balance = 0
        max_drawdown = 0
        max_ddpercent = 0
        max_drawdown_duration = 0
        max_drawdown_end = 0
        total_net_pnl = 0
        daily_net_pnl = 0
        total_commission = 0
        daily_commission = 0
        total_slippage = 0
        daily_slippage = 0
        total_turnover = 0
        daily_turnover = 0
        total_trade_count = 0
        daily_trade_count = 0
        total_return = 0
        annual_return = 0
        daily_return = 0
        return_std = 0
        sharpe_ratio = 0
        sortino_info = 0
        win_ratio = 0
        return_drawdown_ratio = 0
        tail_ratio_info = 0
        stability_return = 0
        win_loss_pnl_ratio = 0
        pnl_medio = 0
        duration_medio = 0
        calmar_ratio = 0
    else:
        # Calculate balance related time series data
        df["balance"] = df["net_pnl"].cumsum() + self.capital
        # Daily log returns of the equity curve; first day filled with 0.
        df["return"] = np.log(df["balance"] / df["balance"].shift(1)).fillna(0)
        # Running maximum of the balance (high-water mark).
        df["highlevel"] = (df["balance"].rolling(min_periods=1, window=len(df), center=False).max())
        df["drawdown"] = df["balance"] - df["highlevel"]
        df["ddpercent"] = df["drawdown"] / df["highlevel"] * 100
        # Calculate statistics value
        start_date = df.index[0]
        end_date = df.index[-1]
        total_days = len(df)
        profit_days = len(df[df["net_pnl"] > 0])
        loss_days = len(df[df["net_pnl"] < 0])
        end_balance = df["balance"].iloc[-1]
        max_drawdown = df["drawdown"].min()
        max_ddpercent = df["ddpercent"].min()
        max_drawdown_end = df["drawdown"].idxmin()
        # Duration is only computable when the index holds dates.
        if isinstance(max_drawdown_end, date):
            max_drawdown_start = df["balance"][:max_drawdown_end].idxmax()
            max_drawdown_duration = (max_drawdown_end - max_drawdown_start).days
        else:
            max_drawdown_duration = 0
        total_net_pnl = df["net_pnl"].sum()
        daily_net_pnl = total_net_pnl / total_days
        win = df[df["net_pnl"] > 0]
        win_amount = win["net_pnl"].sum()
        win_pnl_medio = win["net_pnl"].mean()
        # win_duration_medio = win["duration"].mean().total_seconds()/3600
        win_count = win["trade_count"].sum()
        pnl_medio = df["net_pnl"].mean()
        # duration_medio = df["duration"].mean().total_seconds()/3600
        loss = df[df["net_pnl"] < 0]
        loss_amount = loss["net_pnl"].sum()
        loss_pnl_medio = loss["net_pnl"].mean()
        # loss_duration_medio = loss["duration"].mean().total_seconds()/3600
        total_commission = df["commission"].sum()
        daily_commission = total_commission / total_days
        total_slippage = df["slippage"].sum()
        daily_slippage = total_slippage / total_days
        total_turnover = df["turnover"].sum()
        daily_turnover = total_turnover / total_days
        total_trade_count = df["trade_count"].sum()
        win_ratio = (win_count / total_trade_count) * 100
        # Average win / average loss (sign-flipped so the ratio is positive).
        win_loss_pnl_ratio = -win_pnl_medio / loss_pnl_medio
        daily_trade_count = total_trade_count / total_days
        total_return = (end_balance / self.capital - 1) * 100
        # Annualized assuming 240 trading days per year.
        annual_return = total_return / total_days * 240
        daily_return = df["return"].mean() * 100
        return_std = df["return"].std() * 100
        if return_std:
            sharpe_ratio = daily_return / return_std * np.sqrt(240)
        else:
            sharpe_ratio = 0
        return_drawdown_ratio = -total_return / max_ddpercent
        # calmar_ratio: annual return divided by the historical max drawdown.
        calmar_ratio = annual_return / abs(max_ddpercent)
        # Downside-deviation-based risk-adjusted return.
        sortino_info = sortino_ratio(df['return'])
        omega_info = omega_ratio(df['return'])
        # Annualized volatility.
        annual_volatility_info = annual_volatility(df['return'])
        # Compound annual growth rate.
        cagr_info = cagr(df['return'])
        # Annualized downside risk.
        annual_downside_risk = downside_risk(df['return'])
        """CVaR即条件风险价值,其含义为在投资组合的损失超过某个给定VaR值的条件下,该投资组合的平均损失值。"""
        c_var = conditional_value_at_risk(df['return'])
        """风险价值(VaR)是对投资损失风险的一种度量。它估计在正常的市场条件下,在设定的时间段(例如一天)中, 一组投资可能(以给定的概率)损失多少。金融业中的公司和监管机构通常使用VaR来衡量弥补可能损失所需的资产数量"""
        var_info = value_at_risk(df['return'])
        # Stability of returns (R-squared of linear fit to cumulative returns).
        stability_return = stability_of_timeseries(df['return'])
        # Tail ratio, e.g. 0.25 == 1/4: gain 1 vs. risk 4.
        tail_ratio_info = tail_ratio(df['return'])
    # Output
    if output:
        self.output("-" * 30)
        self.output(f"首个交易日:\t{start_date}")
        self.output(f"最后交易日:\t{end_date}")
        self.output(f"总交易日:\t{total_days}")
        self.output(f"盈利交易日:\t{profit_days}")
        self.output(f"亏损交易日:\t{loss_days}")
        self.output(f"起始资金:\t{self.capital:,.2f}")
        self.output(f"结束资金:\t{end_balance:,.2f}")
        self.output(f"总收益率:\t{total_return:,.2f}%")
        self.output(f"年化收益:\t{annual_return:,.2f}%")
        self.output(f"最大回撤: \t{max_drawdown:,.2f}")
        self.output(f"百分比最大回撤: {max_ddpercent:,.2f}%")
        self.output(f"最长回撤天数: \t{max_drawdown_duration}")
        self.output(f"总盈亏:\t{total_net_pnl:,.2f}")
        self.output(f"总手续费:\t{total_commission:,.2f}")
        self.output(f"总滑点:\t{total_slippage:,.2f}")
        self.output(f"总成交金额:\t{total_turnover:,.2f}")
        self.output(f"总成交笔数:\t{total_trade_count}")
        self.output(f"日均盈亏:\t{daily_net_pnl:,.2f}")
        self.output(f"日均手续费:\t{daily_commission:,.2f}")
        self.output(f"日均滑点:\t{daily_slippage:,.2f}")
        self.output(f"日均成交金额:\t{daily_turnover:,.2f}")
        self.output(f"日均成交笔数:\t{daily_trade_count}")
        self.output(f"日均收益率:\t{daily_return:,.2f}%")
        self.output(f"收益标准差:\t{return_std:,.2f}%")
        self.output(f"胜率:\t{win_ratio:,.2f}")
        self.output(f"盈亏比:\t\t{win_loss_pnl_ratio:,.2f}")
        self.output(f"平均每笔盈亏:\t{pnl_medio:,.2f}")
        self.output(f"calmar_ratio:\t{calmar_ratio:,.3f}")
        # self.output(f"平均持仓小时:\t{duration_medio:,.2f}")
        self.output(f"Sharpe Ratio:\t{sharpe_ratio:,.2f}")
        self.output(f"sortino Ratio:\t{sortino_info:,.3f}")
        self.output(f"收益回撤比:\t{return_drawdown_ratio:,.2f}")
    statistics = {
        "start_date": start_date,
        "end_date": end_date,
        "total_days": total_days,
        "profit_days": profit_days,
        "loss_days": loss_days,
        "capital": self.capital,
        "end_balance": end_balance,
        "max_drawdown": max_drawdown,
        "max_ddpercent": max_ddpercent,
        "max_drawdown_end": max_drawdown_end,
        "max_drawdown_duration": max_drawdown_duration,
        "total_net_pnl": total_net_pnl,
        "daily_net_pnl": daily_net_pnl,
        "total_commission": total_commission,
        "daily_commission": daily_commission,
        "total_slippage": total_slippage,
        "daily_slippage": daily_slippage,
        "total_turnover": total_turnover,
        "daily_turnover": daily_turnover,
        "total_trade_count": total_trade_count,
        "daily_trade_count": daily_trade_count,
        "total_return": total_return,
        "annual_return": annual_return,
        "daily_return": daily_return,
        "return_std": return_std,
        "sharpe_ratio": sharpe_ratio,
        'sortino_info': sortino_info,
        "win_ratio": win_ratio,
        "return_drawdown_ratio": return_drawdown_ratio,
        "tail_ratio_info": tail_ratio_info,
        "stability_return": stability_return,
        "win_loss_pnl_ratio": win_loss_pnl_ratio,
        "pnl_medio": pnl_medio,
        "calmar_ratio": calmar_ratio
    }
    # Filter potential error infinite value
    for key, value in statistics.items():
        if value in (np.inf, -np.inf):
            value = 0
        statistics[key] = np.nan_to_num(value)
    self.output("策略统计指标计算完成")
    return statistics
def stability(portfolio_daily_returns):
    """Return the R-squared of a linear fit to the cumulative log returns
    of the given daily portfolio return series."""
    fit_quality = ep.stability_of_timeseries(portfolio_daily_returns)
    return fit_quality
def vizResults(slippageAdjustedReturn, returnStream, factorReturn, plotting = False):
    """Compute a large dictionary of performance metrics for an algo return
    stream versus a benchmark factor, optionally plotting diagnostics.

    Parameters
    ----------
    slippageAdjustedReturn : pd.DataFrame
        Algo daily returns net of estimated transaction costs.
    returnStream : pd.DataFrame
        Raw algo daily returns (single column).
    factorReturn : pd.DataFrame
        Benchmark factor daily returns; truncated to start with returnStream.
    plotting : bool
        When True, show rolling-sharpe/downside and cumulative-return plots.

    Returns
    -------
    dict
        Metric name -> value. Rolling metrics at 252/90/45 days are only
        added when at least 50 rolling observations exist.
    """
    ##ENSURE EQUAL LENGTH
    factorReturn = factorReturn[returnStream.index[0]:] ##IF FACTOR DOES NOT START AT SAME SPOT CAN CREATE VERY SKEWED RESULTS
    ##CALCULATE SHARPE WITH SLIPPAGE
    sharpeDiffSlippage = empyrical.sharpe_ratio(slippageAdjustedReturn) - empyrical.sharpe_ratio(factorReturn)
    # The trailing term is sign(factor sharpe): flips the relative difference
    # so it stays meaningful when the factor sharpe is negative.
    relativeSharpeSlippage = sharpeDiffSlippage / empyrical.sharpe_ratio(factorReturn) * (empyrical.sharpe_ratio(factorReturn)/abs(empyrical.sharpe_ratio(factorReturn)))
    alpha, beta = empyrical.alpha_beta(returnStream, factorReturn)
    alphaSlippage, betaSlippage = empyrical.alpha_beta(slippageAdjustedReturn, factorReturn)
    metrics = {"SHARPE": empyrical.sharpe_ratio(returnStream),
               "SHARPE SLIPPAGE":empyrical.sharpe_ratio(slippageAdjustedReturn),
               "STABILITY": empyrical.stability_of_timeseries(returnStream),
               "ALPHA":alpha,
               "ALPHA SLIPPAGE":alphaSlippage,
               "BETA":abs(beta),
               "ANNUALIZED RETURN": empyrical.annual_return(returnStream)[0],
               # Fraction of days with a nonzero position/return.
               "ACTIVITY": np.count_nonzero(returnStream)/float(len(returnStream)),
               "TREYNOR": ((empyrical.annual_return(returnStream.values)[0] - empyrical.annual_return(factorReturn.values)[0]) \
                           / abs(empyrical.beta(returnStream, factorReturn))),
               # Beta of the sign (binary) of returns vs. factor sign.
               "RAW BETA":abs(empyrical.alpha_beta(returnStream.apply(lambda x:applyBinary(x), axis=0), factorReturn.apply(lambda x:applyBinary(x), axis=0))[1]),
               "SHARPE DIFFERENCE": empyrical.sharpe_ratio(returnStream) - empyrical.sharpe_ratio(factorReturn),
               "RELATIVE SHARPE": (empyrical.sharpe_ratio(returnStream) - empyrical.sharpe_ratio(factorReturn))/empyrical.sharpe_ratio(factorReturn) * (empyrical.sharpe_ratio(factorReturn)/abs(empyrical.sharpe_ratio(factorReturn))),
               "FACTOR SHARPE": empyrical.sharpe_ratio(factorReturn),
               "SHARPE DIFFERENCE SLIPPAGE":sharpeDiffSlippage,
               "RELATIVE SHARPE SLIPPAGE":relativeSharpeSlippage,
               }
    metrics["FACTOR PROFITABILITY"] = len((factorReturn.values)[factorReturn.values > 0])/len(factorReturn.values)
    metrics["PROFITABILITY"] = len((returnStream.values)[returnStream.values > 0])/len(returnStream.values)
    metrics["PROFITABILITY DIFFERENCE"] = metrics["PROFITABILITY"] - metrics["FACTOR PROFITABILITY"]
    metrics["PROFITABILITY SLIPPAGE"] = len((slippageAdjustedReturn.values)[slippageAdjustedReturn.values > 0])/len(slippageAdjustedReturn.values)
    # "Active" variants only count days with a nonzero return in the denominator.
    metrics["ACTIVE PROFITABILITY"] = len((returnStream.values)[returnStream.values > 0])/len((returnStream.values)[returnStream.values != 0])
    metrics["ACTIVE PROFITABILITY SLIPPAGE"] = len((slippageAdjustedReturn.values)[slippageAdjustedReturn.values > 0])/len((slippageAdjustedReturn.values)[slippageAdjustedReturn.values != 0])
    metrics["TOTAL DAYS SEEN"] = len(returnStream)
    metrics["SHARPE SLIPPAGE DECAY"] = metrics["SHARPE DIFFERENCE SLIPPAGE"] - metrics["SHARPE DIFFERENCE"]
    ##MEASURES BINARY STABILITY OF PREDICTIONS
    metrics["EXTREME STABILITY ROLLING 600"] = (returnStream.rolling(600, min_periods=600).apply(lambda x:empyrical.stability_of_timeseries(applyBinarySkipZero(x)) * (-1 if x[-1] - x[0] < 0 else 1)).dropna()).min().values[0]
    metrics["EXTREME STABILITY"] = empyrical.stability_of_timeseries(applyBinarySkipZero(returnStream.values))
    rollingPeriod = 252
    rollingSharpe = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x:empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe.columns = ["252 Day Rolling Sharpe"]
    rollingSharpeFactor = factorReturn.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x:empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe = rollingSharpe.join(rollingSharpeFactor)
    rollingSharpe.columns = ["252 Day Rolling Sharpe Algo", "252 Day Rolling Sharpe Factor"]
    # Rolling statistics need a minimum sample to be meaningful.
    if len(rollingSharpe["252 Day Rolling Sharpe Algo"].values) > 50:
        diffSharpe = pd.DataFrame(rollingSharpe.apply(lambda x: x[0] - x[1], axis=1), columns=["Sharpe Difference"])
        metrics["SHARPE DIFFERENCE MIN"] = np.percentile(diffSharpe["Sharpe Difference"].values, 1)
        metrics["SHARPE DIFFERENCE AVERAGE"] = np.percentile(diffSharpe["Sharpe Difference"].values, 50)
        difVals = diffSharpe["Sharpe Difference"].values
        metrics["SHARPE DIFFERENCE GREATER THAN 0"] = len(difVals[np.where(difVals > 0)])/float(len(difVals))
        metrics["25TH PERCENTILE SHARPE DIFFERENCE"] = np.percentile(diffSharpe["Sharpe Difference"].values, 25)
        ###
        relDiffSharpe = pd.DataFrame(rollingSharpe.apply(lambda x: (x[0] - x[1])/x[1] * (x[1]/abs(x[1])), axis=1), columns=["Sharpe Difference"])
        metrics["RELATIVE SHARPE DIFFERENCE MIN"] = np.percentile(relDiffSharpe["Sharpe Difference"].values, 1)
        metrics["RELATIVE SHARPE DIFFERENCE AVERAGE"] = np.percentile(relDiffSharpe["Sharpe Difference"].values, 50)
        relDifVals = relDiffSharpe["Sharpe Difference"].values
        metrics["RELATIVE SHARPE DIFFERENCE GREATER THAN 0"] = len(relDifVals[np.where(relDifVals > 0)])/float(len(relDifVals))
        metrics["25TH PERCENTILE RELATIVE SHARPE DIFFERENCE"] = np.percentile(relDiffSharpe["Sharpe Difference"].values, 25)
        ###
        metrics["ROLLING SHARPE BETA"] = abs(empyrical.beta(rollingSharpe["252 Day Rolling Sharpe Algo"], rollingSharpe["252 Day Rolling Sharpe Factor"]))
        metrics["25TH PERCENTILE SHARPE"] = np.percentile(rollingSharpe["252 Day Rolling Sharpe Algo"].values, 25)
        metrics["MIN ROLLING SHARPE"] = np.percentile(rollingSharpe["252 Day Rolling Sharpe Algo"].values, 1)
        rollingDownside = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x:empyrical.max_drawdown(x)).dropna()
        rollingDownside.columns = ["252 Day Rolling Downside"]
        rollingDownsideFactor = factorReturn.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x:empyrical.max_drawdown(x)).dropna()
        rollingDownside = rollingDownside.join(rollingDownsideFactor)
        rollingDownside.columns = ["252 Day Rolling Downside Algo", "252 Day Rolling Downside Factor"]
        # |r-value| of a linear fit to the rolling sharpe: trend consistency.
        metrics["ROLLING SHARPE STABILITY"] = abs(stats.linregress(np.arange(len(rollingSharpe["252 Day Rolling Sharpe Algo"].values)), rollingSharpe["252 Day Rolling Sharpe Algo"].values).rvalue)
        rollingReturn = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x:empyrical.cum_returns(x)[-1]).dropna()
        rollingReturn.columns = ["ROLLING RETURN"]
        # NOTE(review): factorReturn.values[0] passes only the FIRST row to
        # annual_return — possibly should be factorReturn.values; confirm.
        metrics["SMART INFORMATION RATIO"] = (np.percentile(rollingReturn["ROLLING RETURN"].values, 25) - empyrical.annual_return(factorReturn.values[0]))\
            / returnStream.values.std()
        metrics["ROLLING SHARPE ERROR"] = rollingSharpe["252 Day Rolling Sharpe Algo"].std()
        metrics["ONE STD SHARPE"] = empyrical.sharpe_ratio(slippageAdjustedReturn) - metrics["ROLLING SHARPE ERROR"]
        if plotting == True:
            import matplotlib.pyplot as plt
            rollingSharpe.plot()
            rollingDownside.plot()
    # Repeat rolling diagnostics at a 90-day horizon.
    rollingPeriod = 90
    rollingSharpe = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x:empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe.columns = ["90 Day Rolling Sharpe"]
    if len(rollingSharpe["90 Day Rolling Sharpe"].values) > 50:
        metrics["25TH PERCENTILE SHARPE 90"] = np.percentile(rollingSharpe["90 Day Rolling Sharpe"].values, 25)
        metrics["MIN ROLLING SHARPE 90"] = np.percentile(rollingSharpe["90 Day Rolling Sharpe"].values, 1)
        metrics["ROLLING SHARPE ERROR 90"] = rollingSharpe["90 Day Rolling Sharpe"].std()
        metrics["SHARPE TO MIN RATIO 90"] = metrics["SHARPE"] / abs(metrics["MIN ROLLING SHARPE 90"])
        metrics["MIN PROFITABILITY 90"] = np.percentile(returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x:len((x)[x > 0])/len(x)).dropna().values, 1)
        metrics["PROFITABILITY DROP 90"] = metrics["PROFITABILITY"] - metrics["MIN PROFITABILITY 90"]
        metrics["25TH PROFITABILITY 90"] = np.percentile(returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x:len((x)[x > 0])/len(x)).dropna().values, 25)
        metrics["MIN FACTOR PROFITABILITY 90"] = np.percentile(factorReturn.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x:len((x)[x > 0])/len(x)).dropna().values, 1)
        metrics["MIN PROFITABILITY DIFFERENCE 90"] = metrics["MIN PROFITABILITY 90"] - metrics["MIN FACTOR PROFITABILITY 90"]
    # And again at a 45-day horizon.
    rollingPeriod = 45
    rollingSharpe = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x:empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe.columns = ["45 Day Rolling Sharpe"]
    if len(rollingSharpe["45 Day Rolling Sharpe"].values) > 50:
        metrics["25TH PERCENTILE SHARPE 45"] = np.percentile(rollingSharpe["45 Day Rolling Sharpe"].values, 25)
        metrics["MIN ROLLING SHARPE 45"] = np.percentile(rollingSharpe["45 Day Rolling Sharpe"].values, 1)
        metrics["ROLLING SHARPE ERROR 45"] = rollingSharpe["45 Day Rolling Sharpe"].std()
        metrics["SHARPE TO MIN RATIO 45"] = metrics["SHARPE"] / abs(metrics["MIN ROLLING SHARPE 45"])
        metrics["MIN PROFITABILITY 45"] = np.percentile(returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x:len((x)[x > 0])/len(x)).dropna().values, 1)
        metrics["PROFITABILITY DROP 45"] = metrics["PROFITABILITY"] - metrics["MIN PROFITABILITY 45"]
        metrics["25TH PROFITABILITY 45"] = np.percentile(returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x:len((x)[x > 0])/len(x)).dropna().values, 25)
        metrics["MIN FACTOR PROFITABILITY 45"] = np.percentile(factorReturn.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x:len((x)[x > 0])/len(x)).dropna().values, 1)
        metrics["MIN PROFITABILITY DIFFERENCE 45"] = metrics["MIN PROFITABILITY 45"] - metrics["MIN FACTOR PROFITABILITY 45"]
    # Build cumulative return curves for plotting.
    returns = returnStream.apply(lambda x:empyrical.cum_returns(x))
    returns.columns = ["algo"]
    factorReturn = factorReturn.apply(lambda x:empyrical.cum_returns(x))
    returns = returns.join(factorReturn)
    returns.columns = ["Algo Return", "Factor Return"]
    ##FORCE SHOW
    if plotting == True:
        import matplotlib.pyplot as plt
        returns.plot()
        plt.show()
    return metrics
def runModelsChunksSkipMP(self, dataOfInterest, daysToCheck = None):
    """Run walk-forward model evaluation in parallel chunks with early abort.

    Predictions are produced day-by-day via forked worker processes, blended
    across the prediction period, and converted into a return stream. After
    each window (252/600/900/1200 days) the accumulated performance is
    checked against quality gates; failing any gate aborts early and returns
    the diagnostics dict in the second slot.

    Parameters
    ----------
    dataOfInterest : project-specific data structure consumed by
        ``self.walkForward.generateWindows`` — schema defined elsewhere.
    daysToCheck : int, optional
        When given, only the most recent ``daysToCheck`` identifiers are run.

    Returns
    -------
    tuple
        On success: (returnStream, factorReturn, predictions,
        slippageAdjustedReturn). On early abort: (None, diagnostics dict,
        None, None).
    """
    xVals, yVals, yIndex, xToday = self.walkForward.generateWindows(dataOfInterest)
    # 'fork' start method so workers inherit the arrays without pickling self.
    mpEngine = mp.get_context('fork')
    with mpEngine.Manager() as manager:
        # Shared dict collecting per-identifier predictions from workers.
        returnDict = manager.dict()
        identifiersToCheck = []
        for i in range(len(xVals) - 44): ##44 is lag...should not overlap with any other predictions or will ruin validity of walkforward optimization
            if i < 600: ##MIN TRAINING
                continue
            identifiersToCheck.append(str(i))
        if daysToCheck is not None:
            identifiersToCheck = identifiersToCheck[-daysToCheck:]
        ##FIRST CHECK FIRST 500 IDENTIFIERS AND THEN IF GOOD CONTINUE
        identifierWindows = [identifiersToCheck[:252], identifiersToCheck[252:600], identifiersToCheck[600:900], identifiersToCheck[900:1200], identifiersToCheck[1200:]] ##EXACTLY TWO YEARS
        returnStream = None
        factorReturn = None
        predictions = None
        slippageAdjustedReturn = None
        # Counts how many windows have passed the quality gates so far.
        shortSeen = 0
        for clippedIdentifiers in identifierWindows:
            # Fan out this window's identifiers across 16 worker processes.
            splitIdentifiers = np.array_split(np.array(clippedIdentifiers), 16)
            runningP = []
            k = 0
            for identifiers in splitIdentifiers:
                p = mpEngine.Process(target=endToEnd.runDayChunking, args=(self, xVals, yVals, identifiers, returnDict,k))
                p.start()
                runningP.append(p)
                k += 1
            # Busy-wait until all workers have finished, joining as they exit.
            while len(runningP) > 0:
                newP = []
                for p in runningP:
                    if p.is_alive() == True:
                        newP.append(p)
                    else:
                        p.join()
                runningP = newP
            preds = []
            actuals = []
            days = []
            # Re-assemble predictions in identifier order; +44 realigns to the lag.
            for i in clippedIdentifiers:
                preds.append(returnDict[i])
                actuals.append(yVals[int(i) + 44])
                days.append(yIndex[int(i) + 44])
            loss = log_loss(np.array(endToEnd.transformTargetArr(np.array(actuals), self.threshold)), np.array(preds))
            roc_auc = roc_auc_score(np.array(endToEnd.transformTargetArr(np.array(actuals), self.threshold)), np.array(preds))
            accuracy = accuracy_score(np.array(endToEnd.transformTargetArr(np.array(actuals), self.threshold)), np.array(preds).round())
            print(loss, roc_auc, accuracy)
            ##CREATE ACCURATE BLENDING ACROSS DAYS
            predsTable = pd.DataFrame(preds, index=days, columns=["Predictions"])
            i = 1
            tablesToJoin = []
            # Join lagged copies so each row sees the full prediction window.
            while i < self.walkForward.predictionPeriod:
                thisTable = predsTable.shift(i)
                thisTable.columns = ["Predictions_" + str(i)]
                tablesToJoin.append(thisTable)
                i += 1
            predsTable = predsTable.join(tablesToJoin)
            transformedPreds = pd.DataFrame(predsTable.apply(lambda x:computePosition(x), axis=1), columns=["Predictions"]).dropna()
            dailyFactorReturn = getDailyFactorReturn(self.walkForward.targetTicker, dataOfInterest)
            transformedPreds = transformedPreds.join(dailyFactorReturn).dropna()
            # Algo return = position * factor return; accumulate across windows.
            returnStream = pd.DataFrame(transformedPreds.apply(lambda x:x[0] * x[1], axis=1), columns=["Algo Return"]) if returnStream is None else pd.concat([returnStream, pd.DataFrame(transformedPreds.apply(lambda x:x[0] * x[1], axis=1), columns=["Algo Return"])])
            factorReturn = pd.DataFrame(transformedPreds[["Factor Return"]]) if factorReturn is None else pd.concat([factorReturn, pd.DataFrame(transformedPreds[["Factor Return"]])])
            predictions = pd.DataFrame(transformedPreds[["Predictions"]]) if predictions is None else pd.concat([predictions, pd.DataFrame(transformedPreds[["Predictions"]])])
            alpha, beta = empyrical.alpha_beta(returnStream, factorReturn)
            # Beta of the signed (binary) returns against the signed factor.
            rawBeta = abs(empyrical.alpha_beta(returnStream.apply(lambda x:applyBinary(x), axis=0), factorReturn.apply(lambda x:applyBinary(x), axis=0))[1])
            shortSharpe = empyrical.sharpe_ratio(returnStream)
            activity = np.count_nonzero(returnStream)/float(len(returnStream))
            algoAnnualReturn = empyrical.annual_return(returnStream.values)[0]
            algoVol = empyrical.annual_volatility(returnStream.values)
            factorAnnualReturn = empyrical.annual_return(factorReturn.values)[0]
            factorVol = empyrical.annual_volatility(factorReturn.values)
            treynor = ((empyrical.annual_return(returnStream.values)[0] - empyrical.annual_return(factorReturn.values)[0]) \
                       / abs(empyrical.beta(returnStream, factorReturn)))
            sharpeDiff = empyrical.sharpe_ratio(returnStream) - empyrical.sharpe_ratio(factorReturn)
            # Trailing term is sign(factor sharpe) to keep the ratio meaningful
            # when the factor sharpe is negative.
            relativeSharpe = sharpeDiff / empyrical.sharpe_ratio(factorReturn) * (empyrical.sharpe_ratio(factorReturn)/abs(empyrical.sharpe_ratio(factorReturn)))
            stability = empyrical.stability_of_timeseries(returnStream)
            ##CALCULATE SHARPE WITH SLIPPAGE
            estimatedSlippageLoss = portfolioGeneration.estimateTransactionCost(predictions)
            estimatedSlippageLoss.columns = returnStream.columns
            slippageAdjustedReturn = (returnStream - estimatedSlippageLoss).dropna()
            slippageSharpe = empyrical.sharpe_ratio(slippageAdjustedReturn)
            sharpeDiffSlippage = empyrical.sharpe_ratio(slippageAdjustedReturn) - empyrical.sharpe_ratio(factorReturn)
            relativeSharpeSlippage = sharpeDiffSlippage / empyrical.sharpe_ratio(factorReturn) * (empyrical.sharpe_ratio(factorReturn)/abs(empyrical.sharpe_ratio(factorReturn)))
            # Gate 1: abort after the first window on weak sharpe/beta/activity/accuracy.
            if (empyrical.sharpe_ratio(returnStream) < 0.0 or abs(beta) > 0.7 or activity < 0.5 or accuracy < 0.45) and shortSeen == 0:
                return None, {
                    "sharpe":shortSharpe, ##OVERLOADED IN FAIL
                    "factorSharpe":empyrical.sharpe_ratio(factorReturn),
                    "sharpeSlippage":slippageSharpe,
                    "beta":abs(beta),
                    "alpha":alpha,
                    "activity":activity,
                    "treynor":treynor,
                    "period":"first 252 days",
                    "algoReturn":algoAnnualReturn,
                    "algoVol":algoVol,
                    "factorReturn":factorAnnualReturn,
                    "factorVol":factorVol,
                    "sharpeDiff":sharpeDiff,
                    "relativeSharpe":relativeSharpe,
                    "sharpeDiffSlippage":sharpeDiffSlippage,
                    "relativeSharpeSlippage":relativeSharpeSlippage,
                    "rawBeta":rawBeta,
                    "stability":stability,
                    "loss":loss,
                    "roc_auc":roc_auc,
                    "accuracy":accuracy
                }, None, None
            # Gate 2: stricter thresholds for the later 600/900/1200-day windows.
            elif (((empyrical.sharpe_ratio(returnStream) < 0.25 or slippageSharpe < 0.0) and shortSeen == 1) or ((empyrical.sharpe_ratio(returnStream) < 0.25 or slippageSharpe < 0.0) and (shortSeen == 2 or shortSeen == 3)) or abs(beta) > 0.6 or activity < 0.6 or stability < 0.4 or accuracy < 0.45) and (shortSeen == 1 or shortSeen == 2 or shortSeen == 3):
                periodName = "first 600 days"
                if shortSeen == 2:
                    periodName = "first 900 days"
                elif shortSeen == 3:
                    periodName = "first 1200 days"
                return None, {
                    "sharpe":shortSharpe, ##OVERLOADED IN FAIL
                    "factorSharpe":empyrical.sharpe_ratio(factorReturn),
                    "sharpeSlippage":slippageSharpe,
                    "alpha":alpha,
                    "beta":abs(beta),
                    "activity":activity,
                    "treynor":treynor,
                    "period":periodName,
                    "algoReturn":algoAnnualReturn,
                    "algoVol":algoVol,
                    "factorReturn":factorAnnualReturn,
                    "factorVol":factorVol,
                    "sharpeDiff":sharpeDiff,
                    "relativeSharpe":relativeSharpe,
                    "sharpeDiffSlippage":sharpeDiffSlippage,
                    "relativeSharpeSlippage":relativeSharpeSlippage,
                    "rawBeta":rawBeta,
                    "stability":stability,
                    "loss":loss,
                    "roc_auc":roc_auc,
                    "accuracy":accuracy
                }, None, None
            elif shortSeen < 4:
                print("CONTINUING", "SHARPE:", shortSharpe, "SHARPE DIFF:", sharpeDiff, "RAW BETA:", rawBeta, "TREYNOR:", treynor)
                shortSeen += 1
        return returnStream, factorReturn, predictions, slippageAdjustedReturn
def get_report(my_portfolio, rf=0.0, sigma_value=1, confidence_value=0.95, filename: str = "report.pdf"):
    """Build a PDF performance report for ``my_portfolio`` and write it to ``filename``.

    Computes the daily return series (rebalance-aware when the portfolio
    carries a rebalance schedule, optionally truncated by a risk-manager
    rule), derives a battery of performance/risk metrics, renders an
    allocation pie plus several quantstats charts to PNG files in the
    working directory, and assembles everything into a PDF via FPDF.

    Parameters
    ----------
    my_portfolio :
        Portfolio object exposing ``portfolio``, ``weights``, ``start_date``,
        ``end_date`` and ``benchmark``; optionally ``rebalance``, ``data``
        and ``risk_manager``.  NOTE(review): ``end_date`` may be overwritten
        in place when a risk-manager rule triggers.
    rf : float
        Risk-free rate forwarded to ``qs.stats.sharpe`` and ``alpha_beta``.
    sigma_value, confidence_value :
        Forwarded to ``qs.stats.value_at_risk``.
    filename : str
        Destination path of the generated PDF (written by ``pdf.output``).
    """
    try:
        # Rebalance path: accessing ``my_portfolio.rebalance`` raises
        # AttributeError when no schedule exists (handled below).
        rebalance_schedule = my_portfolio.rebalance
        # Normalise the schedule's column labels to plain YYYY-MM-DD strings.
        columns = []
        for date in rebalance_schedule.columns:
            date = date[0:10]
            columns.append(date)
        rebalance_schedule.columns = columns
        # The date grid starts at the portfolio start date...
        dates = [my_portfolio.start_date]
        # ...followed by every rebalancing date.
        dates = dates + rebalance_schedule.columns.to_list()
        datess = []
        for date in dates:
            date = date[0:10]
            datess.append(date)
        dates = datess
        # Accumulates the daily returns of every rebalancing window.
        returns = pd.Series()
        # Walk consecutive date pairs; the weights effective over a window
        # are the ones scheduled at the window's *end* date.
        for i in range(len(dates) - 1):
            weights = rebalance_schedule[str(dates[i + 1])]
            add_returns = get_returns(
                my_portfolio.portfolio,
                weights,
                start_date=dates[i],
                end_date=dates[i + 1],
            )
            returns = returns.append(add_returns)
    except AttributeError:
        # No rebalance schedule: static weights, preferring pre-loaded data.
        try:
            returns = get_returns_from_data(my_portfolio.data, my_portfolio.weights)
        except AttributeError:
            returns = get_returns(
                my_portfolio.portfolio,
                my_portfolio.weights,
                start_date=my_portfolio.start_date,
                end_date=my_portfolio.end_date,
            )

    # Cumulative growth of one unit, used by the risk-manager rules below.
    creturns = (returns + 1).cumprod()

    # risk manager: optionally truncate the backtest at the first day the
    # configured rule triggers.  Only the FIRST key of the dict is checked.
    try:
        if list(my_portfolio.risk_manager.keys())[0] == "Stop Loss":
            # Collect every cumulative value at or below the stop level.
            values = []
            for r in creturns:
                if r <= 1 + my_portfolio.risk_manager["Stop Loss"]:
                    values.append(r)
                else:
                    pass
            try:
                # The first breach date becomes the new end date.
                date = creturns[creturns == values[0]].index[0]
                date = str(date.to_pydatetime())
                my_portfolio.end_date = date[0:10]
                returns = returns[:my_portfolio.end_date]
            except Exception as e:
                # No breach occurred (``values`` empty) - keep full series.
                pass
        if list(my_portfolio.risk_manager.keys())[0] == "Take Profit":
            values = []
            for r in creturns:
                if r >= 1 + my_portfolio.risk_manager["Take Profit"]:
                    values.append(r)
                else:
                    pass
            try:
                date = creturns[creturns == values[0]].index[0]
                date = str(date.to_pydatetime())
                my_portfolio.end_date = date[0:10]
                returns = returns[:my_portfolio.end_date]
            except Exception as e:
                pass
        if list(my_portfolio.risk_manager.keys())[0] == "Max Drawdown":
            drawdown = qs.stats.to_drawdown_series(returns)
            values = []
            for r in drawdown:
                if r <= my_portfolio.risk_manager["Max Drawdown"]:
                    values.append(r)
                else:
                    pass
            try:
                date = drawdown[drawdown == values[0]].index[0]
                date = str(date.to_pydatetime())
                my_portfolio.end_date = date[0:10]
                returns = returns[:my_portfolio.end_date]
            except Exception as e:
                pass
    except Exception as e:
        # No ``risk_manager`` attribute (or empty dict) - nothing to apply.
        pass

    # --- Allocation pie chart -------------------------------------------
    fig1, ax1 = plt.subplots()
    fig1.set_size_inches(5, 5)
    # defining colors for the allocation pie
    cs = [
        "#ff9999",
        "#66b3ff",
        "#99ff99",
        "#ffcc99",
        "#f6c9ff",
        "#a6fff6",
        "#fffeb8",
        "#ffe1d4",
        "#cccdff",
        "#fad6ff",
    ]
    # Work on deep copies so the portfolio object itself is not mutated,
    # and drop zero-weight assets together with their labels.
    wts = copy.deepcopy(my_portfolio.weights)
    port = copy.deepcopy(my_portfolio.portfolio)
    indices = [i for i, x in enumerate(wts) if x == 0.0]
    while 0.0 in wts:
        wts.remove(0.0)
    for i in sorted(indices, reverse=True):
        del port[i]
    ax1.pie(wts, labels=port, autopct="%1.1f%%", shadow=False, colors=cs)
    ax1.axis(
        "equal")  # Equal aspect ratio ensures that pie is drawn as a circle.
    plt.rcParams["font.size"] = 12
    plt.close(fig1)
    fig1.savefig("allocation.png")

    # --- PDF scaffold ----------------------------------------------------
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("arial", "B", 14)
    # Project logo, linked back to the Empyrial repository.
    pdf.image(
        "https://user-images.githubusercontent.com/61618641/120909011-98f8a180-c670-11eb-8844-2d423ba3fa9c.png",
        x=None,
        y=None,
        w=45,
        h=5,
        type="",
        link="https://github.com/ssantoshp/Empyrial",
    )
    pdf.cell(20, 15, f"Report", ln=1)
    pdf.set_font("arial", size=11)
    pdf.image("allocation.png", x=135, y=0, w=70, h=70, type="", link="")
    pdf.cell(20, 7, f"Start date: " + str(my_portfolio.start_date), ln=1)
    pdf.cell(20, 7, f"End date: " + str(my_portfolio.end_date), ln=1)

    # Benchmark series over the (possibly truncated) backtest window.
    benchmark = get_returns(
        my_portfolio.benchmark,
        wts=[1],
        start_date=my_portfolio.start_date,
        end_date=my_portfolio.end_date,
    )

    # --- Metrics, each formatted into a display string -------------------
    CAGR = cagr(returns, period='daily', annualization=None)
    # CAGR = round(CAGR, 2)
    # CAGR = CAGR.tolist()
    CAGR = str(round(CAGR * 100, 2)) + "%"
    CUM = cum_returns(returns, starting_value=0, out=None) * 100
    CUM = CUM.iloc[-1]
    CUM = CUM.tolist()
    CUM = str(round(CUM, 2)) + "%"
    VOL = qs.stats.volatility(returns, annualize=True)
    VOL = VOL.tolist()
    VOL = str(round(VOL * 100, 2)) + " %"
    SR = qs.stats.sharpe(returns, rf=rf)
    SR = np.round(SR, decimals=2)
    SR = str(SR)
    # Results are also published as attributes of the ``empyrial`` function.
    empyrial.SR = SR
    CR = qs.stats.calmar(returns)
    CR = CR.tolist()
    CR = str(round(CR, 2))
    empyrial.CR = CR
    STABILITY = stability_of_timeseries(returns)
    STABILITY = round(STABILITY, 2)
    STABILITY = str(STABILITY)
    MD = max_drawdown(returns, out=None)
    MD = str(round(MD * 100, 2)) + " %"
    """OR = omega_ratio(returns, risk_free=0.0, required_return=0.0)
    OR = round(OR,2)
    OR = str(OR)
    print(OR)"""
    SOR = sortino_ratio(returns, required_return=0, period='daily')
    SOR = round(SOR, 2)
    SOR = str(SOR)
    SK = qs.stats.skew(returns)
    SK = round(SK, 2)
    SK = SK.tolist()
    SK = str(SK)
    KU = qs.stats.kurtosis(returns)
    KU = round(KU, 2)
    KU = KU.tolist()
    KU = str(KU)
    TA = tail_ratio(returns)
    TA = round(TA, 2)
    TA = str(TA)
    CSR = qs.stats.common_sense_ratio(returns)
    CSR = round(CSR, 2)
    CSR = CSR.tolist()
    CSR = str(CSR)
    VAR = qs.stats.value_at_risk(returns, sigma=sigma_value, confidence=confidence_value)
    VAR = np.round(VAR, decimals=2)
    VAR = str(VAR * 100) + " %"
    alpha, beta = alpha_beta(returns, benchmark, risk_free=rf)
    AL = round(alpha, 2)
    BTA = round(beta, 2)

    def condition(x):
        # A "winning" day is any day with a strictly positive return.
        return x > 0

    win = sum(condition(x) for x in returns)
    total = len(returns)
    win_ratio = win / total
    win_ratio = win_ratio * 100
    win_ratio = round(win_ratio, 2)
    IR = calculate_information_ratio(returns, benchmark.iloc[:, 0])
    IR = round(IR, 2)

    # Metric table; mirrored onto ``empyrial.df`` for programmatic access.
    data = {
        "": [
            "Annual return",
            "Cumulative return",
            "Annual volatility",
            "Winning day ratio",
            "Sharpe ratio",
            "Calmar ratio",
            "Information ratio",
            "Stability",
            "Max Drawdown",
            "Sortino ratio",
            "Skew",
            "Kurtosis",
            "Tail Ratio",
            "Common sense ratio",
            "Daily value at risk",
            "Alpha",
            "Beta",
        ],
        "Backtest": [
            CAGR,
            CUM,
            VOL,
            f"{win_ratio}%",
            SR,
            CR,
            IR,
            STABILITY,
            MD,
            SOR,
            SK,
            KU,
            TA,
            CSR,
            VAR,
            AL,
            BTA,
        ],
    }

    # Create DataFrame
    df = pd.DataFrame(data)
    df.set_index("", inplace=True)
    df.style.set_properties(**{
        "background-color": "white",
        "color": "black",
        "border-color": "black"
    })
    empyrial.df = data

    # --- Daily returns bar chart ----------------------------------------
    y = []
    for x in returns:
        y.append(x)
    arr = np.array(y)
    # arr
    # returns.index
    # Positive days blue, negative days grey.
    my_color = np.where(arr >= 0, "blue", "grey")
    ret = plt.figure(figsize=(30, 8))
    plt.vlines(x=returns.index, ymin=0, ymax=arr, color=my_color, alpha=0.4)
    plt.title("Returns")
    plt.close(ret)
    ret.savefig("ret.png")

    # --- Metric lines in the PDF ----------------------------------------
    pdf.cell(20, 7, f"", ln=1)
    pdf.cell(20, 7, f"Annual return: " + str(CAGR), ln=1)
    pdf.cell(20, 7, f"Cumulative return: " + str(CUM), ln=1)
    pdf.cell(20, 7, f"Annual volatility: " + str(VOL), ln=1)
    pdf.cell(20, 7, f"Winning day ratio: " + str(win_ratio), ln=1)
    pdf.cell(20, 7, f"Sharpe ratio: " + str(SR), ln=1)
    pdf.cell(20, 7, f"Calmar ratio: " + str(CR), ln=1)
    pdf.cell(20, 7, f"Information ratio: " + str(IR), ln=1)
    pdf.cell(20, 7, f"Stability: " + str(STABILITY), ln=1)
    pdf.cell(20, 7, f"Max drawdown: " + str(MD), ln=1)
    pdf.cell(20, 7, f"Sortino ratio: " + str(SOR), ln=1)
    pdf.cell(20, 7, f"Skew: " + str(SK), ln=1)
    pdf.cell(20, 7, f"Kurtosis: " + str(KU), ln=1)
    pdf.cell(20, 7, f"Tail ratio: " + str(TA), ln=1)
    pdf.cell(20, 7, f"Common sense ratio: " + str(CSR), ln=1)
    pdf.cell(20, 7, f"Daily value at risk: " + str(VAR), ln=1)
    pdf.cell(20, 7, f"Alpha: " + str(AL), ln=1)
    pdf.cell(20, 7, f"Beta: " + str(BTA), ln=1)

    # --- quantstats charts, saved to PNG then embedded in the PDF --------
    qs.plots.returns(returns, benchmark, cumulative=True, savefig="retbench.png", show=False)
    qs.plots.yearly_returns(returns, benchmark, savefig="y_returns.png", show=False),
    qs.plots.monthly_heatmap(returns, savefig="heatmap.png", show=False)
    qs.plots.drawdown(returns, savefig="drawdown.png", show=False)
    qs.plots.drawdowns_periods(returns, savefig="d_periods.png", show=False)
    qs.plots.rolling_volatility(returns, savefig="rvol.png", show=False)
    qs.plots.rolling_sharpe(returns, savefig="rsharpe.png", show=False)
    qs.plots.rolling_beta(returns, benchmark, savefig="rbeta.png", show=False)
    pdf.image("ret.png", x=-20, y=None, w=250, h=80, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("y_returns.png", x=-20, y=None, w=200, h=100, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("retbench.png", x=None, y=None, w=200, h=100, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("heatmap.png", x=None, y=None, w=200, h=80, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("drawdown.png", x=None, y=None, w=200, h=80, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("d_periods.png", x=None, y=None, w=200, h=80, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("rvol.png", x=None, y=None, w=190, h=80, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("rsharpe.png", x=None, y=None, w=190, h=80, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("rbeta.png", x=None, y=None, w=190, h=80, type="", link="")

    # Write the assembled report to disk.
    pdf.output(dest="F", name=filename)
def empyrial(my_portfolio, rf=0.0, sigma_value=1, confidence_value=0.95):
    """Run the interactive (notebook) performance analysis for ``my_portfolio``.

    Computes the daily return series (rebalance-aware, optionally truncated
    by a risk-manager rule), prints the backtest window, ``display``s a
    metric table, draws a daily-returns chart, publishes every result as an
    attribute on this function object (``empyrial.SR`` etc.), and returns a
    tuple of quantstats plot calls plus the allocation graph.

    Parameters
    ----------
    my_portfolio :
        Portfolio object exposing ``portfolio``, ``weights``, ``start_date``,
        ``end_date`` and ``benchmark``; optionally ``rebalance``, ``data``
        and ``risk_manager``.  NOTE(review): ``end_date`` and ``portfolio``
        may be mutated in place (see the zero-weight cleanup at the end).
    rf : float
        Risk-free rate forwarded to ``qs.stats.sharpe`` and ``alpha_beta``.
    sigma_value, confidence_value :
        Forwarded to ``qs.stats.value_at_risk``.
    """
    try:
        # Rebalance path: accessing ``my_portfolio.rebalance`` raises
        # AttributeError when no schedule exists (handled below).
        rebalance_schedule = my_portfolio.rebalance
        # Normalise the schedule's column labels to plain YYYY-MM-DD strings.
        columns = []
        for date in rebalance_schedule.columns:
            date = date[0:10]
            columns.append(date)
        rebalance_schedule.columns = columns
        # The date grid starts at the portfolio start date...
        dates = [my_portfolio.start_date]
        # ...followed by every rebalancing date.
        dates = dates + rebalance_schedule.columns.to_list()
        datess = []
        for date in dates:
            date = date[0:10]
            datess.append(date)
        dates = datess
        # Accumulates the daily returns of every rebalancing window.
        returns = pd.Series()
        # Walk consecutive date pairs; the weights effective over a window
        # are the ones scheduled at the window's *end* date.
        for i in range(len(dates) - 1):
            weights = rebalance_schedule[str(dates[i + 1])]
            add_returns = get_returns(
                my_portfolio.portfolio,
                weights,
                start_date=dates[i],
                end_date=dates[i + 1],
            )
            returns = returns.append(add_returns)
    except AttributeError:
        # No rebalance schedule: static weights, preferring pre-loaded data.
        try:
            returns = get_returns_from_data(my_portfolio.data, my_portfolio.weights)
        except AttributeError:
            returns = get_returns(
                my_portfolio.portfolio,
                my_portfolio.weights,
                start_date=my_portfolio.start_date,
                end_date=my_portfolio.end_date,
            )

    # Cumulative growth of one unit, used by the risk-manager rules below.
    creturns = (returns + 1).cumprod()

    # risk manager: optionally truncate the backtest at the first day the
    # configured rule triggers.  Only the FIRST key of the dict is checked.
    try:
        if list(my_portfolio.risk_manager.keys())[0] == "Stop Loss":
            # Collect every cumulative value at or below the stop level.
            values = []
            for r in creturns:
                if r <= 1 + my_portfolio.risk_manager["Stop Loss"]:
                    values.append(r)
                else:
                    pass
            try:
                # The first breach date becomes the new end date.
                date = creturns[creturns == values[0]].index[0]
                date = str(date.to_pydatetime())
                my_portfolio.end_date = date[0:10]
                returns = returns[:my_portfolio.end_date]
            except Exception as e:
                # No breach occurred (``values`` empty) - keep full series.
                pass
        if list(my_portfolio.risk_manager.keys())[0] == "Take Profit":
            values = []
            for r in creturns:
                if r >= 1 + my_portfolio.risk_manager["Take Profit"]:
                    values.append(r)
                else:
                    pass
            try:
                date = creturns[creturns == values[0]].index[0]
                date = str(date.to_pydatetime())
                my_portfolio.end_date = date[0:10]
                returns = returns[:my_portfolio.end_date]
            except Exception as e:
                pass
        if list(my_portfolio.risk_manager.keys())[0] == "Max Drawdown":
            drawdown = qs.stats.to_drawdown_series(returns)
            values = []
            for r in drawdown:
                if r <= my_portfolio.risk_manager["Max Drawdown"]:
                    values.append(r)
                else:
                    pass
            try:
                date = drawdown[drawdown == values[0]].index[0]
                date = str(date.to_pydatetime())
                my_portfolio.end_date = date[0:10]
                returns = returns[:my_portfolio.end_date]
            except Exception as e:
                pass
    except Exception as e:
        # No ``risk_manager`` attribute (or empty dict) - nothing to apply.
        pass

    print("Start date: " + str(my_portfolio.start_date))
    print("End date: " + str(my_portfolio.end_date))

    # Benchmark series over the (possibly truncated) backtest window.
    benchmark = get_returns(
        my_portfolio.benchmark,
        wts=[1],
        start_date=my_portfolio.start_date,
        end_date=my_portfolio.end_date,
    )

    # --- Metrics, each formatted into a display string -------------------
    CAGR = cagr(returns, period='daily', annualization=None)
    # CAGR = round(CAGR, 2)
    # CAGR = CAGR.tolist()
    CAGR = str(round(CAGR * 100, 2)) + "%"
    CUM = cum_returns(returns, starting_value=0, out=None) * 100
    CUM = CUM.iloc[-1]
    CUM = CUM.tolist()
    CUM = str(round(CUM, 2)) + "%"
    VOL = qs.stats.volatility(returns, annualize=True)
    VOL = VOL.tolist()
    VOL = str(round(VOL * 100, 2)) + " %"
    SR = qs.stats.sharpe(returns, rf=rf)
    SR = np.round(SR, decimals=2)
    SR = str(SR)
    empyrial.SR = SR
    CR = qs.stats.calmar(returns)
    CR = CR.tolist()
    CR = str(round(CR, 2))
    empyrial.CR = CR
    STABILITY = stability_of_timeseries(returns)
    STABILITY = round(STABILITY, 2)
    STABILITY = str(STABILITY)
    MD = max_drawdown(returns, out=None)
    MD = str(round(MD * 100, 2)) + " %"
    """OR = omega_ratio(returns, risk_free=0.0, required_return=0.0)
    OR = round(OR,2)
    OR = str(OR)
    print(OR)"""
    SOR = sortino_ratio(returns, required_return=0, period='daily')
    SOR = round(SOR, 2)
    SOR = str(SOR)
    SK = qs.stats.skew(returns)
    SK = round(SK, 2)
    SK = SK.tolist()
    SK = str(SK)
    KU = qs.stats.kurtosis(returns)
    KU = round(KU, 2)
    KU = KU.tolist()
    KU = str(KU)
    TA = tail_ratio(returns)
    TA = round(TA, 2)
    TA = str(TA)
    CSR = qs.stats.common_sense_ratio(returns)
    CSR = round(CSR, 2)
    CSR = CSR.tolist()
    CSR = str(CSR)
    VAR = qs.stats.value_at_risk(returns, sigma=sigma_value, confidence=confidence_value)
    VAR = np.round(VAR, decimals=2)
    VAR = str(VAR * 100) + " %"
    alpha, beta = alpha_beta(returns, benchmark, risk_free=rf)
    AL = round(alpha, 2)
    BTA = round(beta, 2)

    def condition(x):
        # A "winning" day is any day with a strictly positive return.
        return x > 0

    win = sum(condition(x) for x in returns)
    total = len(returns)
    win_ratio = win / total
    win_ratio = win_ratio * 100
    win_ratio = round(win_ratio, 2)
    IR = calculate_information_ratio(returns, benchmark.iloc[:, 0])
    IR = round(IR, 2)

    # Metric table; mirrored onto ``empyrial.df`` for programmatic access.
    data = {
        "": [
            "Annual return",
            "Cumulative return",
            "Annual volatility",
            "Winning day ratio",
            "Sharpe ratio",
            "Calmar ratio",
            "Information ratio",
            "Stability",
            "Max Drawdown",
            "Sortino ratio",
            "Skew",
            "Kurtosis",
            "Tail Ratio",
            "Common sense ratio",
            "Daily value at risk",
            "Alpha",
            "Beta",
        ],
        "Backtest": [
            CAGR,
            CUM,
            VOL,
            f"{win_ratio}%",
            SR,
            CR,
            IR,
            STABILITY,
            MD,
            SOR,
            SK,
            KU,
            TA,
            CSR,
            VAR,
            AL,
            BTA,
        ],
    }

    # Create DataFrame
    df = pd.DataFrame(data)
    df.set_index("", inplace=True)
    df.style.set_properties(**{
        "background-color": "white",
        "color": "black",
        "border-color": "black"
    })
    display(df)
    empyrial.df = data

    # --- Daily returns bar chart ----------------------------------------
    y = []
    for x in returns:
        y.append(x)
    arr = np.array(y)
    # arr
    # returns.index
    # Positive days blue, negative days grey.
    my_color = np.where(arr >= 0, "blue", "grey")
    plt.figure(figsize=(30, 8))
    plt.vlines(x=returns.index, ymin=0, ymax=arr, color=my_color, alpha=0.4)
    plt.title("Returns")

    # Publish every result as an attribute on this function object so
    # callers can read e.g. ``empyrial.SR`` after the call.
    empyrial.returns = returns
    empyrial.creturns = creturns
    empyrial.benchmark = benchmark
    empyrial.CAGR = CAGR
    empyrial.CUM = CUM
    empyrial.VOL = VOL
    empyrial.SR = SR
    empyrial.win_ratio = win_ratio
    empyrial.CR = CR
    empyrial.IR = IR
    empyrial.STABILITY = STABILITY
    empyrial.MD = MD
    empyrial.SOR = SOR
    empyrial.SK = SK
    empyrial.KU = KU
    empyrial.TA = TA
    empyrial.CSR = CSR
    empyrial.VAR = VAR
    empyrial.AL = AL
    empyrial.BTA = BTA

    # Prefer the rebalance order book when one was produced; otherwise
    # synthesize a static one from the portfolio's own weights.
    try:
        empyrial.orderbook = make_rebalance.output
    except Exception as e:
        OrderBook = pd.DataFrame({
            "Assets": my_portfolio.portfolio,
            "Allocation": my_portfolio.weights,
        })
        empyrial.orderbook = OrderBook.T

    # Drop zero-weight assets before plotting the allocation graph.
    # NOTE(review): unlike get_report, this deletes from
    # ``my_portfolio.portfolio`` itself, mutating the caller's object.
    wts = copy.deepcopy(my_portfolio.weights)
    indices = [i for i, x in enumerate(wts) if x == 0.0]
    while 0.0 in wts:
        wts.remove(0.0)
    for i in sorted(indices, reverse=True):
        del my_portfolio.portfolio[i]

    return (
        qs.plots.returns(returns, benchmark, cumulative=True),
        qs.plots.yearly_returns(returns, benchmark),
        qs.plots.monthly_heatmap(returns),
        qs.plots.drawdown(returns),
        qs.plots.drawdowns_periods(returns),
        qs.plots.rolling_volatility(returns),
        qs.plots.rolling_sharpe(returns),
        qs.plots.rolling_beta(returns, benchmark),
        graph_opt(my_portfolio.portfolio, wts, pie_size=7, font_size=14),
    )
def report_metrics(strategy_rets, benchmark_rets, factor_returns=0):
    """Compute common financial risk/performance metrics via the `empyrical`_ library.

    The same eight metrics are computed for the benchmark and for the
    strategy (previously duplicated as sixteen hand-written call sites;
    now factored into one helper so the metric list and the row index
    cannot drift apart).

    Args:
        strategy_rets (:py:class:`pandas.Series`): Strategy returns.
        benchmark_rets (:py:class:`pandas.Series`): Benchmark returns.
            When empty, the benchmark column is filled with ``None``.
        factor_returns: Factor used for ``excess_sharpe``.  The benchmark
            column always uses ``factor_returns``; the strategy column uses
            ``benchmark_rets`` when it is non-empty and falls back to
            ``factor_returns`` otherwise.  See the ``factor_returns``
            parameter of :py:func:`empyrical.excess_sharpe`.

    Examples:
        >>> from finance_tools_py._jupyter_helper import report_metrics
        >>> import pandas as pd
        >>> rep = report_metrics(pd.Series([-0.01,0.04,0.03,-0.02]),
        ...                      pd.Series([0.04,0.05,0.06,0.07]))
        >>> print(rep)
                          基准         策略
        最大回撤        0.000000  -0.020000
        年化收益   713630.025679  10.326756
        年度波动性       0.204939   0.467333
        夏普比率       67.629875   5.392302
        R平方          0.994780   0.614649
        盈利比率        1.650602   2.081081
        excess_sharpe   4.260282  -1.317465
        年复合增长率 713630.025679  10.326756

    Returns:
        :py:class:`pandas.DataFrame`: One row per metric ('基准' = benchmark
        column, '策略' = strategy column).

    .. _empyrical: http://quantopian.github.io/empyrical/
    """

    def _metric_column(rets, excess_factor):
        # One value per row, in the exact order of the index below
        # (max drawdown, annual return, annual volatility, Sharpe,
        # stability/R-squared, tail ratio, excess Sharpe, CAGR).
        return [
            empyrical.max_drawdown(rets),
            empyrical.annual_return(rets),
            empyrical.annual_volatility(rets),
            empyrical.sharpe_ratio(rets),
            empyrical.stability_of_timeseries(rets),
            empyrical.tail_ratio(rets),
            empyrical.excess_sharpe(rets, excess_factor),
            empyrical.cagr(rets),
        ]

    if not benchmark_rets.empty:
        benchmark_column = _metric_column(benchmark_rets, factor_returns)
        strategy_factor = benchmark_rets
    else:
        # No benchmark available: leave its column empty and fall back to
        # ``factor_returns`` for the strategy's excess Sharpe.
        benchmark_column = [None] * 8
        strategy_factor = factor_returns
    strategy_column = _metric_column(strategy_rets, strategy_factor)

    return pd.DataFrame(
        {
            '基准': benchmark_column,
            '策略': strategy_column,
        },
        index=[
            '最大回撤', '年化收益', '年度波动性', '夏普比率', 'R平方', '盈利比率',
            'excess_sharpe', '年复合增长率'
        ])