def evaluation(self):
    ap.sound(f'entry: create_df')
    mdd = empyrical.max_drawdown(self.df.eac_stgy_rt)
    stgy_ret_an = empyrical.annual_return(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    bcmk_ret_an = empyrical.annual_return(self.df.eac_bcmk_rt, annualization=self.cls.annualization)
    stgy_vlt_an = empyrical.annual_volatility(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    bcmk_vlt_an = empyrical.annual_volatility(self.df.eac_bcmk_rt, annualization=self.cls.annualization)
    calmar = empyrical.calmar_ratio(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    omega = empyrical.omega_ratio(self.df.eac_stgy_rt, risk_free=self.cls.rf, annualization=self.cls.annualization)
    sharpe = qp.sharpe_ratio(stgy_ret_an, self.df.cum_stgy_rt, self.cls.rf)
    sortino = empyrical.sortino_ratio(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    dsrk = empyrical.downside_risk(self.df.eac_stgy_rt, annualization=self.cls.annualization)
    information = empyrical.information_ratio(self.df.eac_stgy_rt, factor_returns=self.df.eac_bcmk_rt)
    beta = empyrical.beta(self.df.eac_stgy_rt, factor_returns=self.df.eac_bcmk_rt, risk_free=self.cls.rf)
    tail_rt = empyrical.tail_ratio(self.df.eac_stgy_rt)
    alpha = qp.alpha_ratio(stgy_ret_an, bcmk_ret_an, self.cls.rf, beta)
    stgy_ttrt_rt = (self.cls.yd.ttas[-1] - self.cls.yd.ttas[0]) / self.cls.yd.ttas[0]
    bcmk_ttrt_rt = (self.cls.pc.close[-1] - self.cls.pc.close[0]) / self.cls.pc.close[0]
    car_rt = stgy_ttrt_rt - bcmk_ttrt_rt
    car_rt_an = stgy_ret_an - bcmk_ret_an
    self.cls.df_output = pd.DataFrame(
        {'stgy_ttrt_rt': [stgy_ttrt_rt], 'bcmk_ttrt_rt': [bcmk_ttrt_rt], 'car_rt': [car_rt],
         'stgy_ret_an': [stgy_ret_an], 'bcmk_ret_an': [bcmk_ret_an], 'car_rt_an': [car_rt_an],
         'stgy_vlt_an': [stgy_vlt_an], 'bcmk_vlt_an': [bcmk_vlt_an], 'mdd': [mdd],
         'sharpe': [sharpe], 'alpha': [alpha], 'beta': [beta], 'information': [information],
         'tail_rt': [tail_rt], 'calmar': [calmar], 'omega': [omega], 'sortino': [sortino],
         'dsrk': [dsrk]})
    print(f'feedback: \n{self.cls.df_output.T}')
def calculate_metrics(self):
    self.benchmark_period_returns = \
        cum_returns(self.benchmark_returns).iloc[-1]

    self.algorithm_period_returns = \
        cum_returns(self.algorithm_returns).iloc[-1]

    if not self.algorithm_returns.index.equals(
        self.benchmark_returns.index
    ):
        message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
        message = message.format(
            bm_count=len(self.benchmark_returns),
            algo_count=len(self.algorithm_returns),
            start=self._start_session,
            end=self._end_session,
        )
        raise Exception(message)

    self.num_trading_days = len(self.benchmark_returns)

    self.mean_algorithm_returns = (
        self.algorithm_returns.cumsum() /
        np.arange(1, self.num_trading_days + 1, dtype=np.float64)
    )

    self.benchmark_volatility = annual_volatility(self.benchmark_returns)
    self.algorithm_volatility = annual_volatility(self.algorithm_returns)

    self.treasury_period_return = choose_treasury(
        self.treasury_curves,
        self._start_session,
        self._end_session,
        self.trading_calendar,
    )
    self.sharpe = sharpe_ratio(self.algorithm_returns)
    # The consumer currently expects a 0.0 value for sharpe in period,
    # this differs from cumulative which was np.nan.
    # When factoring out the sharpe_ratio, the different return types
    # were collapsed into `np.nan`.
    # TODO: Either fix consumer to accept `np.nan` or make the
    # `sharpe_ratio` return type configurable.
    # In the meantime, convert nan values to 0.0
    if pd.isnull(self.sharpe):
        self.sharpe = 0.0
    self.downside_risk = downside_risk(self.algorithm_returns.values)
    self.sortino = sortino_ratio(
        self.algorithm_returns.values,
        _downside_risk=self.downside_risk,
    )
    self.information = information_ratio(
        self.algorithm_returns.values,
        self.benchmark_returns.values,
    )
    self.alpha, self.beta = alpha_beta_aligned(
        self.algorithm_returns.values,
        self.benchmark_returns.values,
    )
    self.excess_return = self.algorithm_period_returns - \
        self.treasury_period_return
    self.max_drawdown = max_drawdown(self.algorithm_returns.values)
    self.max_leverage = self.calculate_max_leverage()
def test_annualized_volatility(self, test_alpha):
    res_a = empyrical.annual_volatility(ret['a'], alpha=test_alpha)
    res_b = empyrical.annual_volatility(ret['b'], alpha=test_alpha)
    res_c = empyrical.annual_volatility(ret['c'], alpha=test_alpha)
    assert isclose(ret['a'].vbt.returns.annualized_volatility(levy_alpha=test_alpha), res_a)
    pd.testing.assert_series_equal(
        ret.vbt.returns.annualized_volatility(levy_alpha=test_alpha),
        pd.Series([res_a, res_b, res_c], index=ret.columns).rename('annualized_volatility')
    )
def compute_stats(portfolio, benchmark):
    '''Compute statistics for the current portfolio.'''
    stats = {}
    grp_by_year = portfolio.groupby(lambda x: x.year)
    stats['1yr_highest'] = grp_by_year.max().iloc[-1]
    stats['1yr_lowest'] = grp_by_year.min().iloc[-1]

    portfolio_return = simple_returns(portfolio)
    # benchmark_return = simple_returns(benchmark)
    stats['wtd_return'] = aggregate_returns(portfolio_return, 'weekly').iloc[-1]
    stats['mtd_return'] = aggregate_returns(portfolio_return, 'monthly').iloc[-1]
    stats['ytd_return'] = aggregate_returns(portfolio_return, 'yearly').iloc[-1]
    stats['max_drawdown'] = max_drawdown(portfolio_return)
    # stats['annual_return'] = annual_return(portfolio_return, period='daily')
    stats['annual_volatility'] = annual_volatility(portfolio_return, period='daily', alpha=2.0)
    # stats['calmar_ratio'] = calmar_ratio(portfolio_return, period='daily')
    # stats['omega_ratio'] = omega_ratio(portfolio_return, risk_free=0.0)
    stats['sharpe_ratio_1yr'] = sharpe_ratio(portfolio_return, risk_free=0.0, period='daily')
    # stats['alpha'], stats['beta'] = alpha_beta(portfolio_return, benchmark_return,
    #                                            risk_free=0.0, period='daily')
    stats['tail_ratio'] = tail_ratio(portfolio_return)
    # stats['capture_ratio'] = capture(portfolio_return, benchmark_return, period='daily')
    return stats
def Analysis(results):
    """
    Technical indicator analyzer.
    :param results: {
        'returns': [0.1, 0.1, 0.1],
        'benchmark': [0.1, 0.1, 0.1],
        'trades': [['2020.01.01 01:00:00', 'BUY', 6234.10, 1]]
    }
    :return:
    """
    res = pnl_res(results["returns"])
    bres = pnl_res(results["benchmark"])

    return_ratio = empyrical.cum_returns_final(res)
    annual_return_ratio = empyrical.annual_return(res)
    sharp_ratio = empyrical.sharpe_ratio(res, 0.035 / 252)
    return_volatility = empyrical.annual_volatility(res)
    max_drawdown = empyrical.max_drawdown(res)
    alpha, beta = empyrical.alpha_beta_aligned(res, bres)
    pls, wr = pls_ws(results["trades"])
    return {
        'pls': pls,
        'wr': wr,
        'return_ratio': return_ratio,
        'annual_return_ratio': annual_return_ratio,
        'beta': beta,
        'alpha': alpha,
        'sharp_ratio': sharp_ratio,
        'return_volatility': return_volatility,
        'max_drawdown': max_drawdown,
    }
def portfolioAnalysis(return_data):
    non_cum_return = getNonCumReturn(return_data)
    # Annualized return:
    annual_return = empyrical.annual_return(non_cum_return, period='daily')
    # Annualized volatility:
    annual_volatility = empyrical.annual_volatility(non_cum_return, period='daily')
    # Maximum drawdown:
    max_drawdown = empyrical.max_drawdown(non_cum_return)
    # Sharpe ratio (annual 3% risk-free rate converted to a daily rate):
    sharpe_ratio = empyrical.sharpe_ratio(
        non_cum_return,
        risk_free=math.pow(1 + 0.03, 1 / 250) - 1,
        period='daily')
    # Yearly breakdown:
    aggr_returns = empyrical.aggregate_returns(non_cum_return, convert_to="yearly")

    print("Annualized return: %f" % annual_return)
    print("Annualized volatility: %f" % annual_volatility)
    print("Maximum drawdown: %f" % max_drawdown)
    print("Sharpe ratio: %f" % sharpe_ratio)
    print("Yearly returns:")
    print(aggr_returns)
    data = [annual_return, annual_volatility, max_drawdown, sharpe_ratio]
    return pd.Series(data, index=["annual_return", "annual_volatility", "max_drawdown", "sharpe_ratio"])
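A note on the risk_free argument above: empyrical.sharpe_ratio expects the risk-free rate per period, so the annual 3% rate is de-annualized to a daily rate before being passed in. A minimal standalone sketch of that conversion (the 3% rate and the 250-day year are taken from the snippet above):

import math

annual_rf = 0.03
periods_per_year = 250
# De-annualize: daily rate that compounds to the 3% annual rate.
daily_rf = math.pow(1 + annual_rf, 1 / periods_per_year) - 1
# Sanity check: compounding the daily rate back over one year recovers ~3%.
assert abs((1 + daily_rf) ** periods_per_year - (1 + annual_rf)) < 1e-12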
def comp_analysis(start_date='2017-07-01'):
    '''Analyze fund-company returns, volatility, beta, and position changes over the window.'''
    comp_ret = pd.read_excel('%s/comp_ret.xlsx' % (const.FOF_DIR), index_col=0)
    comp_pos = pd.read_excel('%s/comp_position.xlsx' % (const.FOF_DIR), index_col=0)
    wdf = pd.read_csv('%s/881001.WI.csv' % (const.INDEX_DIR), index_col=1)
    wseries = wdf.pct_change()[wdf.index >= start_date]['close']
    wseries.index = pd.to_datetime(wseries.index)

    df = pd.DataFrame(index=comp_ret.columns, columns=['ret', 'vol', 'beta', 'pos'])
    for c in df.index:
        series = comp_ret[c]
        series = series[series.index >= start_date]
        df.loc[c, 'ret'] = (1 + series).cumprod()[-1] - 1
        df.loc[c, 'vol'] = empyrical.annual_volatility(series)
        wseries = wseries[(wseries.index >= series.index[0]) & (wseries.index <= series.index[-1])]
        df.loc[c, 'beta'] = empyrical.beta(series, wseries)
        if comp_pos[c].shape[0] > 1 and comp_pos[c][-2] != 0:
            df.loc[c, 'pos'] = (comp_pos[c][-1] - comp_pos[c][-2]) / comp_pos[c][-2]
    df = df.sort_values('ret', ascending=False)
    df = df.dropna()
    df.to_excel('%s/comp_analysis.xlsx' % (const.FOF_DIR))
def get_performance_summary(returns):
    stats = {'annualized_returns': ep.annual_return(returns),
             'cumulative_returns': ep.cum_returns_final(returns),
             'annual_volatility': ep.annual_volatility(returns),
             'sharpe_ratio': ep.sharpe_ratio(returns),
             'sortino_ratio': ep.sortino_ratio(returns),
             'max_drawdown': ep.max_drawdown(returns)}
    return pd.Series(stats)
def getStats(cls, returns, benchmark_returns):
    _alpha, _beta = alpha_beta_aligned(
        returns,
        benchmark_returns,
    )
    _sharpe = sharpe_ratio(returns)
    _downside_risk = downside_risk(returns)
    _max_drawdown = max_drawdown(returns)
    _annual_volatility = annual_volatility(returns)
    _benchmark_volatility = annual_volatility(benchmark_returns)
    _annual_return = annual_return(returns)
    _cum_return = cum_returns(returns)

    return {
        'cum_return': _cum_return,
        'annual_return': _annual_return,
        'annual_volatility': _annual_volatility,
        'benchmark_volatility': _benchmark_volatility,
        'max_drawdown': _max_drawdown,
        'downside_risk': _downside_risk,
        'sharpe ratio': _sharpe,
        'alpha': _alpha,
        'beta': _beta,
    }
def test_annual_volatility(self, returns, period, expected):
    assert_almost_equal(
        empyrical.annual_volatility(
            returns,
            period=period
        ),
        expected,
        DECIMAL_PLACES
    )
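For reference, empyrical.annual_volatility scales the sample standard deviation of per-period returns by annualization ** (1 / alpha); with the default alpha=2.0 and daily data that factor is sqrt(252). A minimal sketch checking this identity on synthetic returns (the series below is made up for illustration):

import numpy as np
import pandas as pd
import empyrical

returns = pd.Series(np.random.default_rng(0).normal(0.0005, 0.01, 252))
# Daily sample std (ddof=1) scaled by sqrt(252) should match empyrical.
expected = returns.std(ddof=1) * np.sqrt(252)
assert np.isclose(empyrical.annual_volatility(returns), expected)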
def _get_backtest_performance_metrics(ret, benchmark_ret):
    metrics = {
        'alpha': empyrical.alpha(ret, benchmark_ret),
        'beta': empyrical.beta(ret, benchmark_ret),
        'return': empyrical.cum_returns_final(ret),
        'cagr': empyrical.cagr(ret),
        'sharpe': empyrical.sharpe_ratio(ret),
        'max_drawdown': empyrical.max_drawdown(ret),
        'var': empyrical.value_at_risk(ret),
        'volatility': empyrical.annual_volatility(ret),
    }
    return metrics
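A possible usage sketch for the helper above, on synthetic daily return series; the dates, seed, and distribution parameters are illustrative assumptions, and the `empyrical` import is assumed from the snippet:

import numpy as np
import pandas as pd

# Two aligned business-day return series: strategy and benchmark.
idx = pd.date_range('2020-01-01', periods=252, freq='B')
rng = np.random.default_rng(42)
ret = pd.Series(rng.normal(0.0005, 0.010, len(idx)), index=idx)
benchmark_ret = pd.Series(rng.normal(0.0003, 0.008, len(idx)), index=idx)
print(_get_backtest_performance_metrics(ret, benchmark_ret))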
def _func(_p):
    _cov = self._cov[_p._id].sum()
    _ret = self._returns[_p._id]
    _risk = ep.annual_volatility(_ret, period='daily')
    risk = _risk + _cov
    _p.__log__('Risk: %s' % risk)
    self._risks[_p._id] = risk
    v = risk * self._weights[_p._id]
    return v
def indexAnalysis(index_data):
    assets = index_data.columns
    n_asset = len(assets)
    data = np.zeros((4, n_asset))
    for i in range(n_asset):
        data[0][i] = empyrical.annual_return(index_data.iloc[:, i], period='daily')
        data[1][i] = empyrical.annual_volatility(index_data.iloc[:, i], period='daily')
        data[2][i] = empyrical.max_drawdown(index_data.iloc[:, i])
        data[3][i] = empyrical.sharpe_ratio(
            index_data.iloc[:, i],
            risk_free=math.pow(1 + 0.03, 1 / 250) - 1,
            period='daily')
    return pd.DataFrame(data,
                        index=["annual_return", "annual_volatility", "max_drawdown", "sharpe_ratio"],
                        columns=assets)
def result_stats(perf, verbose=False):
    if isinstance(perf, str):
        perf = pd.read_pickle(perf)
    prets = perf['returns']
    asr = sharpe_ratio(returns=prets)
    aret = annual_return(returns=prets, period='daily')
    avol = annual_volatility(returns=prets, period='daily')
    maxdd = max_drawdown(prets)  # perf['max_drawdown']

    txns = perf['weight']  # perf['transactions']
    tdf = pd.DataFrame()
    for index, value in txns.items():
        # if verbose: print(index, value)
        if isinstance(value, dict):
            for k, v in value.items():
                if verbose == 2:
                    print(k, v)
                tdf = tdf.append(
                    pd.DataFrame({
                        'ticker': [k],
                        'dt': [index],
                        'weight': [v]
                    }))
    # tdf.set_index('dt', inplace=True)
    # tdf.sort_index(inplace=True)
    num_of_txns = 0
    if not tdf.empty:
        tdf.sort_values(by=['dt'], inplace=True)
        tdf.reset_index(inplace=True)
        # tdf.to_csv('/tmp/tdf.csv')
        a = np.sign(tdf['weight'])
        num_of_txns = len(np.where(np.diff(np.sign(a)))[0])
    # num_of_txns = perf['transactions'].size
    if verbose:
        print('asr', asr)
        print('aret', aret)
        print('avol', avol)
        print('maxdd', maxdd)  # , get_max_dd(perf['portfolio_value'])
        print('num_of_txns', num_of_txns)
    return asr, aret, avol, maxdd, num_of_txns
def annual_volatility(returns, period=DAILY):
    """
    Determines the annual volatility of a strategy.

    Parameters
    ----------
    returns : pd.Series
        Periodic returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing volatility. Can be 'monthly' or 'weekly' or 'daily'.
        - Defaults to 'daily'.

    Returns
    -------
    float
        Annual volatility.
    """
    return empyrical.annual_volatility(returns, period=period)
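Since the wrapper forwards directly to empyrical, the `period` argument only selects the annualization factor (252 for 'daily', 52 for 'weekly', 12 for 'monthly'). A small sketch with made-up monthly returns, assuming the wrapper above is in scope:

import pandas as pd

monthly_returns = pd.Series([0.012, -0.020, 0.015, 0.003, -0.007, 0.011])
# Scales the monthly sample std by sqrt(12) instead of sqrt(252).
vol = annual_volatility(monthly_returns, period='monthly')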
def get_statistics(ticker, end_date, k):
    """
    For one fund, over the k trading days before end_date, compute:
    1. Return
    2. Volatility
    3. Maximum drawdown
    4. Sharpe
    5. Calmar
    """
    fname = '%s/history/%s.xlsx' % (const.DATA_DIR, ticker)
    df = pd.read_excel(fname, index_col=0)
    df = df[df.index <= end_date]
    if k > df.shape[0]:
        return 0, 0, 0, 0, 0
    df = df[df.index >= df.index[-k]]
    df.loc[:, 'return'] = df.loc[:, 'nav_adj'].pct_change()
    returns = (df['nav_adj'].iloc[-1] - df['nav_adj'].iloc[0]) / df['nav_adj'].iloc[0]
    volatility = empyrical.annual_volatility(df['return'])
    max_drawdown = empyrical.max_drawdown(df['return'])
    sharpe = empyrical.sharpe_ratio(df['return'])
    calmar = empyrical.calmar_ratio(df['return'])
    return returns, volatility, max_drawdown, sharpe, calmar
def get_performance_summary(returns):
    '''
    Calculate selected performance evaluation metrics using provided returns.

    Parameters
    ------------
    returns : pd.Series
        Series of returns we want to evaluate

    Returns
    -----------
    stats : pd.Series
        The calculated performance metrics
    '''
    stats = {
        'annualized_returns': ep.annual_return(returns),
        'cumulative_returns': ep.cum_returns_final(returns),
        'annual_volatility': ep.annual_volatility(returns),
        'sharpe_ratio': ep.sharpe_ratio(returns),
        'sortino_ratio': ep.sortino_ratio(returns),
        'max_drawdown': ep.max_drawdown(returns)
    }
    return pd.Series(stats)
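A usage sketch for get_performance_summary on a synthetic daily return series (the values, dates, and seed are illustrative only; `ep` is assumed to be `import empyrical as ep` as in the snippet):

import numpy as np
import pandas as pd

rets = pd.Series(
    np.random.default_rng(7).normal(0.0004, 0.012, 252),
    index=pd.date_range('2021-01-04', periods=252, freq='B'),
)
print(get_performance_summary(rets))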
def get_performance_table(return_df: pd.DataFrame,
                          benchmark_name: str = None,
                          periods: str = 'daily') -> pd.DataFrame:
    """Performance metrics table.

    Args:
        return_df (pd.DataFrame): table of returns
        benchmark_name (str): column name of the benchmark
        periods (str, optional): frequency. Defaults to 'daily'.

    Returns:
        pd.DataFrame
    """
    ser: pd.DataFrame = pd.DataFrame()
    ser['annual_return'] = ep.annual_return(return_df, period=periods)
    ser['cumulative_return'] = ep.cum_returns(return_df).iloc[-1]
    ser['annual_volatility'] = return_df.apply(
        lambda x: ep.annual_volatility(x, period=periods))
    ser['sharpe'] = return_df.apply(ep.sharpe_ratio, period=periods)
    ser['max_drawdown'] = return_df.apply(lambda x: ep.max_drawdown(x))

    if benchmark_name is not None:
        select_col = [
            col for col in return_df.columns if col != benchmark_name
        ]
        ser['IR'] = return_df[select_col].apply(
            lambda x: information_ratio(x, return_df[benchmark_name]))
        ser['Alpha'] = return_df[select_col].apply(
            lambda x: ep.alpha(x, return_df[benchmark_name], period=periods))
        ser['excess_return'] = ser['annual_return'] - ser.loc[benchmark_name, 'annual_return']
        # TODO: compute the relative annualized volatility
    return ser.T
from scipy import stats
import warnings
warnings.filterwarnings("ignore")
import empyrical as ep
# with warnings.catch_warnings():
#     warnings.filterwarnings("ignore", category=DeprecationWarning)

backtest_results = pd.read_csv('../data_files/backtest_results.csv',
                               index_col=0, parse_dates=True)
start = backtest_results.iloc[0]
end = backtest_results.iloc[-1]
# print(backtest_results)
# print(cumulative_return(start.portfolio_value, end.portfolio_value))
# print(annualized_return(start.portfolio_value, end.portfolio_value, 2 * 365))

prices = pd.read_csv('../data_files/prices.csv', index_col=0)
# print(np.log(prices.iloc[1] / prices.iloc[0]))
# print(np.log(prices.shift(-1) / prices))
log_returns = log_returns(prices)
# print(sharpe_ratio(log_returns, 0))
# print(ep.sharpe_ratio(log_returns))
print(ep.annual_volatility(log_returns))
def getDataForPortfolio(portfolioKey, factorToTrade, joinedData, availableStartDate):
    modelHashes = portfolio.getPortfolioModels(portfolioKey)
    models = getModelsByKey(modelHashes)
    for model in models:
        print(model.describe())

    ## GENERATE RETURNS FOR PORTFOLIO
    portfolioAllocations = portfolio.getPortfolioAllocations(portfolioKey)

    predsTable = pd.DataFrame([])
    weightsTable = pd.DataFrame([])
    tickerAllocationsTable = pd.DataFrame([])
    scaledTickerAllocationsTable = pd.DataFrame([])
    for allocation in portfolioAllocations:
        colsAlgo = []
        valsAlgo = []
        colsAlgoWeight = []
        valsAlgoWeight = []
        colsTicker = []
        valsTicker = []
        colsTickerScaled = []
        valsTickerScaled = []

        for key in allocation:
            if key.startswith("ticker_"):
                colsTicker.append(key[len("ticker_"):])
                valsTicker.append(allocation[key])
            if key.startswith("scaled_ticker_"):
                colsTickerScaled.append(key[len("scaled_ticker_"):])
                valsTickerScaled.append(
                    abs(allocation[key]) if np.isnan(allocation[key]) == False else 0.0)
            if key.startswith("algo_") and not key.startswith("algo_weight_"):
                colsAlgo.append(key[len("algo_"):])
                valsAlgo.append(allocation[key])
            if key.startswith("algo_weight_"):
                colsAlgoWeight.append(key[len("algo_weight_"):])
                valsAlgoWeight.append(allocation[key])

        predsTable = pd.concat([
            predsTable,
            pd.DataFrame([valsAlgo],
                         index=[allocation["predictionDay"]],
                         columns=colsAlgo).tz_localize(None)
        ])
        weightsTable = pd.concat([
            weightsTable,
            pd.DataFrame([valsAlgoWeight],
                         index=[allocation["predictionDay"]],
                         columns=colsAlgoWeight).tz_localize(None)
        ])
        tickerAllocationsTable = pd.concat([
            tickerAllocationsTable,
            pd.DataFrame([valsTicker],
                         index=[allocation["predictionDay"]],
                         columns=colsTicker).tz_localize(None)
        ])
        scaledTickerAllocationsTable = pd.concat([
            scaledTickerAllocationsTable,
            pd.DataFrame([valsTickerScaled],
                         index=[allocation["predictionDay"]],
                         columns=colsTickerScaled).tz_localize(None)
        ])

    predsTable = predsTable.sort_index()
    weightsTable = weightsTable.sort_index().fillna(0)
    tickerAllocationsTable = tickerAllocationsTable.sort_index().fillna(0)
    scaledTickerAllocationsTable = scaledTickerAllocationsTable.sort_index().fillna(0)

    rawTickerPerformance = portfolioGeneration.calculatePerformanceForTable(
        tickerAllocationsTable, tickerAllocationsTable.columns, joinedData)
    rawAlgoPerformance = pd.DataFrame(
        rawTickerPerformance.apply(lambda x: sum(x), axis=1),
        columns=["Algo Return Without Commissions"])

    tickerPerformance, algoPerformance, algoTransactionCost = \
        portfolioGeneration.calculatePerformanceForAllocations(
            tickerAllocationsTable, joinedData)

    benchmark = portfolio.getPortfolioByKey(portfolioKey)["benchmark"]
    factorReturn = dataAck.getDailyFactorReturn(benchmark, joinedData)
    factorReturn.columns = ["Factor Return (" + benchmark + ")"]
    algoPerformance.columns = ["Algo Return"]
    algoVsBenchmark = factorReturn.join(algoPerformance).fillna(0)
    algoVsBenchmark = algoVsBenchmark.join(rawAlgoPerformance).dropna()

    tickerAlphaBetas = []
    for ticker in tickerAllocationsTable.columns.values:
        thisFactorReturn = dataAck.getDailyFactorReturn(ticker, joinedData)
        alpha, beta = empyrical.alpha_beta(algoPerformance, thisFactorReturn)
        tickerAlphaBetas.append({"ticker": ticker, "alpha": alpha * 100, "beta": beta})

    ## GET SCALED PERFORMANCE [FULL CAPITAL USED EACH DAY]
    rawTickerPerformanceScaled = portfolioGeneration.calculatePerformanceForTable(
        scaledTickerAllocationsTable, scaledTickerAllocationsTable.columns, joinedData)
    rawAlgoPerformanceScaled = pd.DataFrame(
        rawTickerPerformanceScaled.apply(lambda x: sum(x), axis=1),
        columns=["Algo Return Without Commissions"])

    unused, algoPerformanceScaled, algoTransactionCostScaled = \
        portfolioGeneration.calculatePerformanceForAllocations(
            scaledTickerAllocationsTable, joinedData)
    algoPerformanceScaled.columns = ["Algo Return"]
    algoVsBenchmarkScaled = factorReturn.join(algoPerformanceScaled).fillna(0)
    algoVsBenchmarkScaled = algoVsBenchmarkScaled.join(rawAlgoPerformanceScaled).dropna()

    ## FORM HASH TO TICKER
    hashToTicker = {}
    for model in models:
        hashToTicker[model.getHash()] = model.targetTicker
    print(hashToTicker)

    individualAlgoPerformance = portfolioGeneration.calculatePerformanceForTable(
        predsTable,
        [hashToTicker[modelHash] for modelHash in predsTable.columns],
        joinedData)

    ## CONVERT TO USABLE OBJECTS
    tickerCols, tickerRows = portfolioGeneration.convertTableToJSON(
        empyrical.cum_returns(tickerPerformance))
    tickerAllocationsCols, tickerAllocationsRows = portfolioGeneration.convertTableToJSON(
        tickerAllocationsTable[-10:])
    algoCols, algoRows = portfolioGeneration.convertTableToJSON(
        empyrical.cum_returns(algoPerformance))
    algoVsBenchmarkCols, algoVsBenchmarkRows = portfolioGeneration.convertTableToJSON(
        empyrical.cum_returns(algoVsBenchmark))
    individualAlgoPerformanceCols, individualAlgoPerformanceRows = \
        portfolioGeneration.convertTableToJSON(
            empyrical.cum_returns(individualAlgoPerformance))
    scaledAllocationCols, scaledAllocationRows = portfolioGeneration.convertTableToJSON(
        scaledTickerAllocationsTable)
    weightsCols, weightsRows = portfolioGeneration.convertTableToJSON(weightsTable)

    alpha, beta = empyrical.alpha_beta(algoPerformance, factorReturn)
    recentAlpha, recentBeta = empyrical.alpha_beta(algoPerformance[-100:], factorReturn[-100:])
    recentSharpe = empyrical.sharpe_ratio(algoPerformance[-100:])
    recentReturn = empyrical.cum_returns(algoPerformance[-100:]).values[-1][0] * 100
    algoVsBenchmarkColsRecent, algoVsBenchmarkRowsRecent = \
        portfolioGeneration.convertTableToJSON(
            empyrical.cum_returns(algoVsBenchmark[-100:]))
    commissionCols, commissionRows = portfolioGeneration.convertTableToJSON(algoTransactionCost)

    algoVsBenchmarkScaledCols, algoVsBenchmarkScaledRows = \
        portfolioGeneration.convertTableToJSON(
            empyrical.cum_returns(algoVsBenchmarkScaled))
    commissionScaledCols, commissionScaledRows = portfolioGeneration.convertTableToJSON(
        algoTransactionCostScaled)
    scaledSharpe = empyrical.sharpe_ratio(algoPerformanceScaled)
    scaledReturn = empyrical.annual_return(algoPerformanceScaled)[0] * 100
    scaledVolatility = empyrical.annual_volatility(algoPerformanceScaled) * 100
    scaledAlpha, scaledBeta = empyrical.alpha_beta(algoPerformanceScaled, factorReturn)

    algoVsBenchmarkScaledColsRecent, algoVsBenchmarkScaledRowsRecent = \
        portfolioGeneration.convertTableToJSON(
            empyrical.cum_returns(algoVsBenchmarkScaled[-100:]))
    scaledSharpeRecent = empyrical.sharpe_ratio(algoPerformanceScaled[-100:])
    scaledReturnRecent = empyrical.annual_return(algoPerformanceScaled[-100:])[0] * 100
    scaledVolatilityRecent = empyrical.annual_volatility(algoPerformanceScaled[-100:]) * 100
    scaledAlphaRecent, scaledBetaRecent = empyrical.alpha_beta(
        algoPerformanceScaled[-100:], factorReturn[-100:])

    if len(algoPerformance[availableStartDate:]) > 0:
        ## NORMAL
        availableAlpha, availableBeta = empyrical.alpha_beta(
            algoPerformance[availableStartDate:], factorReturn[availableStartDate:])
        availableAlpha = availableAlpha * 100
        availableSharpe = empyrical.sharpe_ratio(algoPerformance[availableStartDate:])
        availableReturn = empyrical.cum_returns(
            algoPerformance[availableStartDate:]).values[-1][0] * 100
        algoVsBenchmarkColsAvailable, algoVsBenchmarkRowsAvailable = \
            portfolioGeneration.convertTableToJSON(
                empyrical.cum_returns(algoVsBenchmark[availableStartDate:]))

        ## SCALED
        availableAlphaScaled, availableBetaScaled = empyrical.alpha_beta(
            algoPerformanceScaled[availableStartDate:], factorReturn[availableStartDate:])
        availableAlphaScaled = availableAlphaScaled * 100
        availableSharpeScaled = empyrical.sharpe_ratio(algoPerformanceScaled[availableStartDate:])
        availableReturnScaled = empyrical.cum_returns(
            algoPerformanceScaled[availableStartDate:]).values[-1][0] * 100
        algoVsBenchmarkColsAvailableScaled, algoVsBenchmarkRowsAvailableScaled = \
            portfolioGeneration.convertTableToJSON(
                empyrical.cum_returns(algoVsBenchmarkScaled[availableStartDate:]))
    else:
        # NORMAL
        availableAlpha, availableBeta = ("NaN", "NaN")
        availableSharpe = "NaN"
        availableReturn = "NaN"
        algoVsBenchmarkColsAvailable, algoVsBenchmarkRowsAvailable = ([], [])
        # SCALED
        availableAlphaScaled, availableBetaScaled = ("NaN", "NaN")
        availableSharpeScaled = "NaN"
        availableReturnScaled = "NaN"
        algoVsBenchmarkColsAvailableScaled, algoVsBenchmarkRowsAvailableScaled = ([], [])

    return {
        "tickerCols": json.dumps(tickerCols),
        "tickerRows": json.dumps(tickerRows),
        "tickerAllocationsCols": json.dumps(tickerAllocationsCols),
        "tickerAllocationsRows": json.dumps(tickerAllocationsRows),
        "algoCols": json.dumps(algoCols),
        "algoRows": json.dumps(algoRows),
        "algoVsBenchmarkCols": json.dumps(algoVsBenchmarkCols),
        "algoVsBenchmarkRows": json.dumps(algoVsBenchmarkRows),
        "individualAlgoPerformanceCols": json.dumps(individualAlgoPerformanceCols),
        "individualAlgoPerformanceRows": json.dumps(individualAlgoPerformanceRows),
        "scaledAllocationCols": json.dumps(scaledAllocationCols),
        "scaledAllocationRows": json.dumps(scaledAllocationRows),
        "weightsCols": json.dumps(weightsCols),
        "weightsRows": json.dumps(weightsRows),
        "algoSharpe": empyrical.sharpe_ratio(algoPerformance),
        "alpha": alpha * 100,
        "beta": beta,
        "annualReturn": empyrical.annual_return(algoPerformance)[0] * 100,
        "annualVolatility": empyrical.annual_volatility(algoPerformance) * 100,
        "recentSharpe": recentSharpe,
        "recentReturn": recentReturn,
        "recentAlpha": recentAlpha * 100,
        "recentBeta": recentBeta,
        "algoVsBenchmarkColsRecent": json.dumps(algoVsBenchmarkColsRecent),
        "algoVsBenchmarkRowsRecent": json.dumps(algoVsBenchmarkRowsRecent),
        "commissionCols": json.dumps(commissionCols),
        "commissionRows": json.dumps(commissionRows),
        "tickerAlphaBetas": tickerAlphaBetas,
        "availableAlpha": availableAlpha,
        "availableBeta": availableBeta,
        "availableSharpe": availableSharpe,
        "availableReturn": availableReturn,
        "algoVsBenchmarkColsAvailable": json.dumps(algoVsBenchmarkColsAvailable),
        "algoVsBenchmarkRowsAvailable": json.dumps(algoVsBenchmarkRowsAvailable),
        "algoVsBenchmarkScaledCols": json.dumps(algoVsBenchmarkScaledCols),
        "algoVsBenchmarkScaledRows": json.dumps(algoVsBenchmarkScaledRows),
        "commissionScaledCols": json.dumps(commissionScaledCols),
        "commissionScaledRows": json.dumps(commissionScaledRows),
        "scaledReturn": scaledReturn,
        "scaledSharpe": scaledSharpe,
        "scaledVolatility": scaledVolatility,
        "scaledAlpha": scaledAlpha * 100,
        "scaledBeta": scaledBeta,
        "algoVsBenchmarkScaledColsRecent": json.dumps(algoVsBenchmarkScaledColsRecent),
        "algoVsBenchmarkScaledRowsRecent": json.dumps(algoVsBenchmarkScaledRowsRecent),
        "scaledReturnRecent": scaledReturnRecent,
        "scaledVolatilityRecent": scaledVolatilityRecent,
        "scaledAlphaRecent": scaledAlphaRecent * 100,
        "scaledBetaRecent": scaledBetaRecent,
        "scaledSharpeRecent": scaledSharpeRecent,
        "availableAlphaScaled": availableAlphaScaled,
        "availableBetaScaled": availableBetaScaled,
        "availableSharpeScaled": availableSharpeScaled,
        "availableReturnScaled": availableReturnScaled,
        "algoVsBenchmarkColsAvailableScaled": json.dumps(algoVsBenchmarkColsAvailableScaled),
        "algoVsBenchmarkRowsAvailableScaled": json.dumps(algoVsBenchmarkRowsAvailableScaled),
    }
def _vola(self):
    self.__vola = ey.annual_volatility(self.__returns, period='daily')
                    metavar='', required=True,
                    help='Specify the path of csv file')
parser.add_argument('-o', '--output', type=str, metavar='', required=True,
                    help='Specify the plot dir')
args = parser.parse_args()
csv_file = args.input
out_dir = args.output
if not os.path.exists(out_dir):
    os.makedirs(out_dir)

dataset = web.get_data_yahoo("SPY", "03/01/2003", "12/01/2015")
print(dataset.head())
dataset.index = dataset.index.tz_localize('UTC')
dataset['Value'] = dataset['Adj Close'].pct_change()
returns = pd.Series(dataset['Value'], index=dataset.index)
pf.tears.create_full_tear_sheet(returns)

import empyrical as emp

statistics_dict = {}
statistics_dict['annual returns'] = emp.annual_return(returns)
statistics_dict['mean returns'] = returns.mean()
statistics_dict['Standard dev p.a.'] = emp.annual_volatility(returns)
statistics_dict['Sharpe ratio'] = emp.sharpe_ratio(returns)
statistics_dict['Sortino Ratio'] = emp.sortino_ratio(returns)
statistics_dict['MaxDD'] = emp.max_drawdown(returns)
def risk_metric_period(
    cls,
    start_session,
    end_session,
    algorithm_returns,
    benchmark_returns,
    algorithm_leverages,
):
    """
    Creates a dictionary representing the state of the risk report.

    Parameters
    ----------
    start_session : pd.Timestamp
        Start of period (inclusive) to produce metrics on
    end_session : pd.Timestamp
        End of period (inclusive) to produce metrics on
    algorithm_returns : pd.Series(pd.Timestamp -> float)
        Series of algorithm returns as of the end of each session
    benchmark_returns : pd.Series(pd.Timestamp -> float)
        Series of benchmark returns as of the end of each session
    algorithm_leverages : pd.Series(pd.Timestamp -> float)
        Series of algorithm leverages as of the end of each session

    Returns
    -------
    risk_metric : dict[str, any]
        Dict of metrics with fields like:
            {
                'algorithm_period_return': 0.0,
                'benchmark_period_return': 0.0,
                'treasury_period_return': 0,
                'excess_return': 0.0,
                'alpha': 0.0,
                'beta': 0.0,
                'sharpe': 0.0,
                'sortino': 0.0,
                'period_label': '1970-01',
                'trading_days': 0,
                'algo_volatility': 0.0,
                'benchmark_volatility': 0.0,
                'max_drawdown': 0.0,
                'max_leverage': 0.0,
            }
    """
    algorithm_returns = algorithm_returns[
        (algorithm_returns.index >= start_session)
        & (algorithm_returns.index <= end_session)]

    # Benchmark needs to be masked to the same dates as the algo returns
    benchmark_returns = benchmark_returns[
        (benchmark_returns.index >= start_session)
        & (benchmark_returns.index <= algorithm_returns.index[-1])]

    benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
    algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]

    alpha, beta = ep.alpha_beta_aligned(
        algorithm_returns.values,
        benchmark_returns.values,
    )
    benchmark_volatility = ep.annual_volatility(benchmark_returns)

    sharpe = ep.sharpe_ratio(algorithm_returns)

    # The consumer currently expects a 0.0 value for sharpe in period,
    # this differs from cumulative which was np.nan.
    # When factoring out the sharpe_ratio, the different return types
    # were collapsed into `np.nan`.
    # TODO: Either fix consumer to accept `np.nan` or make the
    # `sharpe_ratio` return type configurable.
    # In the meantime, convert nan values to 0.0
    if pd.isnull(sharpe):
        sharpe = 0.0

    sortino = ep.sortino_ratio(
        algorithm_returns.values,
        _downside_risk=ep.downside_risk(algorithm_returns.values),
    )

    rval = {
        "algorithm_period_return": algorithm_period_returns,
        "benchmark_period_return": benchmark_period_returns,
        "treasury_period_return": 0,
        "excess_return": algorithm_period_returns,
        "alpha": alpha,
        "beta": beta,
        "sharpe": sharpe,
        "sortino": sortino,
        "period_label": end_session.strftime("%Y-%m"),
        "trading_days": len(benchmark_returns),
        "algo_volatility": ep.annual_volatility(algorithm_returns),
        "benchmark_volatility": benchmark_volatility,
        "max_drawdown": ep.max_drawdown(algorithm_returns.values),
        "max_leverage": algorithm_leverages.max(),
    }

    # Check if a field in rval is nan or inf, and replace it with None,
    # except period_label which is always a str.
    return {
        k: (None if k != "period_label" and not np.isfinite(v) else v)
        for k, v in rval.items()
    }
def risk_metric_period(cls,
                       start_session,
                       end_session,
                       algorithm_returns,
                       benchmark_returns,
                       algorithm_leverages):
    """
    Creates a dictionary representing the state of the risk report.

    Parameters
    ----------
    start_session : pd.Timestamp
        Start of period (inclusive) to produce metrics on
    end_session : pd.Timestamp
        End of period (inclusive) to produce metrics on
    algorithm_returns : pd.Series(pd.Timestamp -> float)
        Series of algorithm returns as of the end of each session
    benchmark_returns : pd.Series(pd.Timestamp -> float)
        Series of benchmark returns as of the end of each session
    algorithm_leverages : pd.Series(pd.Timestamp -> float)
        Series of algorithm leverages as of the end of each session

    Returns
    -------
    risk_metric : dict[str, any]
        Dict of metrics with fields like:
            {
                'algorithm_period_return': 0.0,
                'benchmark_period_return': 0.0,
                'treasury_period_return': 0,
                'excess_return': 0.0,
                'alpha': 0.0,
                'beta': 0.0,
                'sharpe': 0.0,
                'sortino': 0.0,
                'period_label': '1970-01',
                'trading_days': 0,
                'algo_volatility': 0.0,
                'benchmark_volatility': 0.0,
                'max_drawdown': 0.0,
                'max_leverage': 0.0,
            }
    """
    algorithm_returns = algorithm_returns[
        (algorithm_returns.index >= start_session) &
        (algorithm_returns.index <= end_session)
    ]

    # Benchmark needs to be masked to the same dates as the algo returns
    benchmark_returns = benchmark_returns[
        (benchmark_returns.index >= start_session) &
        (benchmark_returns.index <= algorithm_returns.index[-1])
    ]

    benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
    algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]

    alpha, beta = ep.alpha_beta_aligned(
        algorithm_returns.values,
        benchmark_returns.values,
    )

    sharpe = ep.sharpe_ratio(algorithm_returns)

    # The consumer currently expects a 0.0 value for sharpe in period,
    # this differs from cumulative which was np.nan.
    # When factoring out the sharpe_ratio, the different return types
    # were collapsed into `np.nan`.
    # TODO: Either fix consumer to accept `np.nan` or make the
    # `sharpe_ratio` return type configurable.
    # In the meantime, convert nan values to 0.0
    if pd.isnull(sharpe):
        sharpe = 0.0

    sortino = ep.sortino_ratio(
        algorithm_returns.values,
        _downside_risk=ep.downside_risk(algorithm_returns.values),
    )

    rval = {
        'algorithm_period_return': algorithm_period_returns,
        'benchmark_period_return': benchmark_period_returns,
        'treasury_period_return': 0,
        'excess_return': algorithm_period_returns,
        'alpha': alpha,
        'beta': beta,
        'sharpe': sharpe,
        'sortino': sortino,
        'period_label': end_session.strftime("%Y-%m"),
        'trading_days': len(benchmark_returns),
        'algo_volatility': ep.annual_volatility(algorithm_returns),
        'benchmark_volatility': ep.annual_volatility(benchmark_returns),
        'max_drawdown': ep.max_drawdown(algorithm_returns.values),
        'max_leverage': algorithm_leverages.max(),
    }

    # Check if a field in rval is nan or inf, and replace it with None,
    # except period_label which is always a str.
    return {
        k: (
            None if k != 'period_label' and not np.isfinite(v) else v
        )
        for k, v in iteritems(rval)
    }
def update(self, dt, algorithm_returns, benchmark_returns, leverage):
    # Keep track of latest dt for use in to_dict and other methods
    # that report current state.
    self.latest_dt = dt
    dt_loc = self.cont_index.get_loc(dt)
    self.latest_dt_loc = dt_loc

    self.algorithm_returns_cont[dt_loc] = algorithm_returns
    self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]
    self.num_trading_days = len(self.algorithm_returns)

    if self.create_first_day_stats:
        if len(self.algorithm_returns) == 1:
            self.algorithm_returns = np.append(0.0, self.algorithm_returns)

    self.algorithm_cumulative_returns[dt_loc] = cum_returns(
        self.algorithm_returns
    )[-1]

    algo_cumulative_returns_to_date = \
        self.algorithm_cumulative_returns[:dt_loc + 1]

    self.mean_returns_cont[dt_loc] = \
        algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days

    self.mean_returns = self.mean_returns_cont[:dt_loc + 1]

    self.annualized_mean_returns_cont[dt_loc] = \
        self.mean_returns_cont[dt_loc] * 252

    self.annualized_mean_returns = \
        self.annualized_mean_returns_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.mean_returns) == 1:
            self.mean_returns = np.append(0.0, self.mean_returns)
            self.annualized_mean_returns = np.append(
                0.0, self.annualized_mean_returns)

    self.benchmark_returns_cont[dt_loc] = benchmark_returns
    self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.benchmark_returns) == 1:
            self.benchmark_returns = np.append(0.0, self.benchmark_returns)

    self.benchmark_cumulative_returns[dt_loc] = cum_returns(
        self.benchmark_returns
    )[-1]

    benchmark_cumulative_returns_to_date = \
        self.benchmark_cumulative_returns[:dt_loc + 1]

    self.mean_benchmark_returns_cont[dt_loc] = \
        benchmark_cumulative_returns_to_date[dt_loc] / \
        self.num_trading_days

    self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt_loc]

    self.annualized_mean_benchmark_returns_cont[dt_loc] = \
        self.mean_benchmark_returns_cont[dt_loc] * 252

    self.annualized_mean_benchmark_returns = \
        self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]

    self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
    self.algorithm_cumulative_leverages = \
        self.algorithm_cumulative_leverages_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.algorithm_cumulative_leverages) == 1:
            self.algorithm_cumulative_leverages = np.append(
                0.0, self.algorithm_cumulative_leverages)

    if not len(self.algorithm_returns) and len(self.benchmark_returns):
        message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
        message = message.format(
            bm_count=len(self.benchmark_returns),
            algo_count=len(self.algorithm_returns),
            start=self.start_session,
            end=self.end_session,
            dt=dt
        )
        raise Exception(message)

    self.update_current_max()
    self.benchmark_volatility[dt_loc] = annual_volatility(
        self.benchmark_returns
    )
    self.algorithm_volatility[dt_loc] = annual_volatility(
        self.algorithm_returns
    )

    # Caching the treasury rates for the minutely case is a big speedup,
    # because it avoids searching the treasury curves on every minute.
    # In both minutely and daily, the daily curve is always used.
    treasury_end = dt.replace(hour=0, minute=0)
    if np.isnan(self.daily_treasury[treasury_end]):
        treasury_period_return = choose_treasury(
            self.treasury_curves,
            self.start_session,
            treasury_end,
            self.trading_calendar,
        )
        self.daily_treasury[treasury_end] = treasury_period_return
    self.treasury_period_return = self.daily_treasury[treasury_end]
    self.excess_returns[dt_loc] = (
        self.algorithm_cumulative_returns[dt_loc] -
        self.treasury_period_return)

    self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
        self.algorithm_returns,
        self.benchmark_returns,
    )
    self.sharpe[dt_loc] = sharpe_ratio(
        self.algorithm_returns,
    )
    self.downside_risk[dt_loc] = downside_risk(
        self.algorithm_returns
    )
    self.sortino[dt_loc] = sortino_ratio(
        self.algorithm_returns,
        _downside_risk=self.downside_risk[dt_loc]
    )
    self.information[dt_loc] = information_ratio(
        self.algorithm_returns,
        self.benchmark_returns,
    )
    self.max_drawdown = max_drawdown(
        self.algorithm_returns
    )
    self.max_drawdowns[dt_loc] = self.max_drawdown
    self.max_leverage = self.calculate_max_leverage()
    self.max_leverages[dt_loc] = self.max_leverage
def run_turtle():
    PROPERTY = START_MONEY
    CASH = START_MONEY

    show_df = stock_df_dict['NDX'].copy()
    order_df = pd.DataFrame(columns=[
        'buy_date', 'symbol', 'buy_count', 'buy_price', 'buy_reason',
        'sell_date', 'sell_price', 'sell_reason', 'profit', 'cash', 'property'
    ])

    count_day = 0
    yesterday = None
    for today in pd.period_range(start=start_date, end=end_date, freq='D'):
        count_day += 1
        if yesterday is None:
            yesterday = today
            continue
        if today not in stock_df_dict['NDX'].index:
            continue

        if IS_HAPPY_MONEY:
            if PROPERTY > START_MONEY * 2:
                global HAPPY_MONEY
                HAPPY_MONEY += int(START_MONEY / 2)
                PROPERTY -= int(START_MONEY / 2)
                CASH = PROPERTY

        # Trading loop
        sell_signal = []
        buy_signal = []
        for symbol in NASDAQ100[:]:
            # for symbol in ['TSLA']:
            if symbol in ['ALGN', 'ROST', 'ORLY', 'ESRX', 'ULTA', 'REGN', 'MNST']:
                # continue
                pass
            if symbol == 'NDX':
                continue
            if today not in stock_df_dict[symbol].index or yesterday not in stock_df_dict[symbol].index:
                continue

            # Breaks the downtrend channel: liquidate and exit
            order_arr = order_df.to_records(index=False)
            if len(order_arr[(order_arr.symbol == symbol) & (order_arr.sell_price == 0)]) != 0:
                is_sell = False
                for idx in order_df[(order_df['symbol'] == symbol) & (order_df['sell_price'] == 0)].index:
                    if order_df.loc[idx, 'buy_reason'] == 'SHORT':
                        is_sell = (stock_df_dict[symbol].loc[today, 'open'] <=
                                   stock_df_dict[symbol].loc[today, 'ROLLING_%d_MIN' % TURTLE_SHORT_SELL_N])
                    if order_df.loc[idx, 'buy_reason'] == 'LONG':
                        is_sell = (stock_df_dict[symbol].loc[today, 'open'] <=
                                   stock_df_dict[symbol].loc[today, 'ROLLING_%d_MIN' % TURTLE_LONG_SELL_N])
                    if is_sell:
                        CASH += order_df.loc[idx, 'buy_count'] * stock_df_dict[symbol].loc[today, 'open']
                        order_df.loc[idx, 'sell_date'] = today
                        order_df.loc[idx, 'sell_price'] = stock_df_dict[symbol].loc[today, 'open']
                        order_df.loc[idx, 'sell_reason'] = 'EXIT'
                        order_df.loc[idx, 'profit'] = \
                            (order_df.loc[idx, 'sell_price'] - order_df.loc[idx, 'buy_price']) * \
                            order_df.loc[idx, 'buy_count']
                        # print(today, 'exit', stock_df_dict[symbol].loc[today, 'open'], CASH)

            # Breaks the uptrend channel: buy one unit
            order_arr = order_df.to_records(index=False)
            if stock_df_dict[symbol].loc[today, 'MA30'] >= stock_df_dict[symbol].loc[today, 'MA180']:
                is_buy = False
                if stock_df_dict[symbol].loc[today, 'open'] >= \
                        stock_df_dict[symbol].loc[today, 'ROLLING_%d_MAX' % TURTLE_LONG_BUY_N]:
                    is_buy = True
                    buy_reason = 'LONG'
                elif stock_df_dict[symbol].loc[today, 'open'] >= \
                        stock_df_dict[symbol].loc[today, 'ROLLING_%d_MAX' % TURTLE_SHORT_BUY_N]:
                    is_buy = True
                    buy_reason = 'SHORT'
                if is_buy:
                    buy_count = 0
                    if CASH >= PROPERTY / TURTLE_POS:
                        buy_count = int((PROPERTY / TURTLE_POS) / stock_df_dict[symbol].loc[today, 'open'])
                    if buy_count > 0:
                        CASH -= buy_count * stock_df_dict[symbol].loc[today, 'open']
                        # print(today, 'buy', buy_count, stock_df_dict[symbol].loc[today, 'open'], CASH)
                        order_df = order_df.append(
                            {
                                'buy_date': today,
                                'symbol': symbol,
                                'buy_count': buy_count,
                                'buy_price': stock_df_dict[symbol].loc[today, 'open'],
                                'buy_reason': buy_reason,
                                'sell_date': np.nan,
                                'sell_price': 0,
                                'profit': 0,
                                'cash': CASH,
                                'property': PROPERTY,
                            },
                            ignore_index=True)

        # Mark portfolio value every day
        show_df.loc[today, 'CASH_TURTLE_%d_%d_%d' %
                    (TURTLE_POS, TURTLE_LONG_BUY_N, TURTLE_LONG_SELL_N)] = CASH
        PROPERTY = CASH + sum([
            stock_df_dict[order_df.loc[idx, 'symbol']].loc[today, 'open'] * order_df.loc[idx, 'buy_count']
            for idx in order_df.loc[order_df['sell_price'] == 0].index
        ])
        show_df.loc[today, 'PROPERTY_TURTLE_%d_%d_%d' %
                    (TURTLE_POS, TURTLE_LONG_BUY_N, TURTLE_LONG_SELL_N)] = PROPERTY

        yesterday = today

    # Final results
    print('CASH', CASH)
    print('HAPPY_MONEY', HAPPY_MONEY)
    print('PROPERTY', PROPERTY)

    benchmark_symbol = 'NDX'
    s_p = stock_df_dict[benchmark_symbol][start_date:].iloc[0].open
    e_p = stock_df_dict[benchmark_symbol].iloc[-1].open
    print(benchmark_symbol, s_p, e_p, e_p / s_p)

    show_df = show_df[start_date:].dropna(how='any', inplace=False)
    show_df['strategy_pct'] = show_df['PROPERTY_TURTLE_%d_%d_%d' %
                                      (TURTLE_POS, TURTLE_LONG_BUY_N, TURTLE_LONG_SELL_N)].pct_change()
    # show_df['benchmark_pct'] = show_df['open'].pct_change()
    show_df['benchmark_pct'] = stock_df_dict[benchmark_symbol].open.pct_change()
    # print('cum_returns', emp.cum_returns(show_df.strategy_pct))
    print('max_drawdown', emp.max_drawdown(show_df.strategy_pct))
    print('MDD', MDD(show_df['PROPERTY_TURTLE_%d_%d_%d' %
                             (TURTLE_POS, TURTLE_LONG_BUY_N, TURTLE_LONG_SELL_N)]))
    print('annual_return', emp.annual_return(show_df.strategy_pct))
    print('annual_volatility', emp.annual_volatility(show_df.strategy_pct, period='daily'))
    print('calmar_ratio', emp.calmar_ratio(show_df.strategy_pct))
    print('sharpe_ratio', emp.sharpe_ratio(returns=show_df.strategy_pct))
    print('alpha', emp.alpha(returns=show_df.strategy_pct,
                             factor_returns=show_df.benchmark_pct,
                             risk_free=0.00))
    print('beta', emp.beta(returns=show_df.strategy_pct,
                           factor_returns=show_df.benchmark_pct,
                           risk_free=0.00))
def calculate_statistics(self, df: DataFrame = None, output=True):
    """"""
    self.output("Start calculating strategy statistics")

    # Check DataFrame input exterior
    if df is None:
        df = self.daily_df

    # Check for init DataFrame
    if df is None:
        # Set all statistics to 0 if no trade.
        start_date = ""
        end_date = ""
        total_days = 0
        profit_days = 0
        loss_days = 0
        end_balance = 0
        max_drawdown = 0
        max_ddpercent = 0
        max_drawdown_duration = 0
        max_drawdown_end = 0
        total_net_pnl = 0
        daily_net_pnl = 0
        total_commission = 0
        daily_commission = 0
        total_slippage = 0
        daily_slippage = 0
        total_turnover = 0
        daily_turnover = 0
        total_trade_count = 0
        daily_trade_count = 0
        total_return = 0
        annual_return = 0
        daily_return = 0
        return_std = 0
        sharpe_ratio = 0
        sortino_info = 0
        win_ratio = 0
        return_drawdown_ratio = 0
        tail_ratio_info = 0
        stability_return = 0
        win_loss_pnl_ratio = 0
        pnl_medio = 0
        duration_medio = 0
        calmar_ratio = 0
    else:
        # Calculate balance related time series data
        df["balance"] = df["net_pnl"].cumsum() + self.capital
        df["return"] = np.log(df["balance"] / df["balance"].shift(1)).fillna(0)
        df["highlevel"] = (df["balance"].rolling(min_periods=1, window=len(df), center=False).max())
        df["drawdown"] = df["balance"] - df["highlevel"]
        df["ddpercent"] = df["drawdown"] / df["highlevel"] * 100

        # Calculate statistics value
        start_date = df.index[0]
        end_date = df.index[-1]

        total_days = len(df)
        profit_days = len(df[df["net_pnl"] > 0])
        loss_days = len(df[df["net_pnl"] < 0])

        end_balance = df["balance"].iloc[-1]
        max_drawdown = df["drawdown"].min()
        max_ddpercent = df["ddpercent"].min()
        max_drawdown_end = df["drawdown"].idxmin()
        if isinstance(max_drawdown_end, date):
            max_drawdown_start = df["balance"][:max_drawdown_end].idxmax()
            max_drawdown_duration = (max_drawdown_end - max_drawdown_start).days
        else:
            max_drawdown_duration = 0

        total_net_pnl = df["net_pnl"].sum()
        daily_net_pnl = total_net_pnl / total_days

        win = df[df["net_pnl"] > 0]
        win_amount = win["net_pnl"].sum()
        win_pnl_medio = win["net_pnl"].mean()
        # win_duration_medio = win["duration"].mean().total_seconds() / 3600
        win_count = win["trade_count"].sum()
        pnl_medio = df["net_pnl"].mean()
        # duration_medio = df["duration"].mean().total_seconds() / 3600
        loss = df[df["net_pnl"] < 0]
        loss_amount = loss["net_pnl"].sum()
        loss_pnl_medio = loss["net_pnl"].mean()
        # loss_duration_medio = loss["duration"].mean().total_seconds() / 3600

        total_commission = df["commission"].sum()
        daily_commission = total_commission / total_days

        total_slippage = df["slippage"].sum()
        daily_slippage = total_slippage / total_days

        total_turnover = df["turnover"].sum()
        daily_turnover = total_turnover / total_days

        total_trade_count = df["trade_count"].sum()
        win_ratio = (win_count / total_trade_count) * 100
        win_loss_pnl_ratio = -win_pnl_medio / loss_pnl_medio
        daily_trade_count = total_trade_count / total_days

        total_return = (end_balance / self.capital - 1) * 100
        annual_return = total_return / total_days * 240
        daily_return = df["return"].mean() * 100
        return_std = df["return"].std() * 100

        if return_std:
            sharpe_ratio = daily_return / return_std * np.sqrt(240)
        else:
            sharpe_ratio = 0

        return_drawdown_ratio = -total_return / max_ddpercent
        # calmar_ratio: annualized return divided by the historical max drawdown
        calmar_ratio = annual_return / abs(max_ddpercent)
        # sortino ratio
        sortino_info = sortino_ratio(df['return'])
        omega_info = omega_ratio(df['return'])
        # annualized volatility
        annual_volatility_info = annual_volatility(df['return'])
        # compound annual growth rate
        cagr_info = cagr(df['return'])
        # annualized downside risk
        annual_downside_risk = downside_risk(df['return'])
        # CVaR (conditional value at risk): the average loss of the portfolio
        # in those cases where the loss exceeds the VaR threshold.
        c_var = conditional_value_at_risk(df['return'])
        # VaR (value at risk): an estimate of how much the portfolio might
        # lose, with a given probability, over a set horizon (e.g. one day)
        # under normal market conditions; widely used by firms and regulators
        # to size the assets needed to cover potential losses.
        var_info = value_at_risk(df['return'])
        # stability of returns
        stability_return = stability_of_timeseries(df['return'])
        # tail ratio: 0.25 == 1/4, i.e. gain of 1 against a risk of 4
        tail_ratio_info = tail_ratio(df['return'])

    # Output
    if output:
        self.output("-" * 30)
        self.output(f"First trading day:\t{start_date}")
        self.output(f"Last trading day:\t{end_date}")
        self.output(f"Total trading days:\t{total_days}")
        self.output(f"Profitable days:\t{profit_days}")
        self.output(f"Losing days:\t{loss_days}")
        self.output(f"Starting capital:\t{self.capital:,.2f}")
        self.output(f"Ending balance:\t{end_balance:,.2f}")
        self.output(f"Total return:\t{total_return:,.2f}%")
        self.output(f"Annualized return:\t{annual_return:,.2f}%")
        self.output(f"Max drawdown: \t{max_drawdown:,.2f}")
        self.output(f"Max drawdown percent: {max_ddpercent:,.2f}%")
        self.output(f"Longest drawdown days: \t{max_drawdown_duration}")
        self.output(f"Total net PnL:\t{total_net_pnl:,.2f}")
        self.output(f"Total commission:\t{total_commission:,.2f}")
        self.output(f"Total slippage:\t{total_slippage:,.2f}")
        self.output(f"Total turnover:\t{total_turnover:,.2f}")
        self.output(f"Total trade count:\t{total_trade_count}")
        self.output(f"Daily net PnL:\t{daily_net_pnl:,.2f}")
        self.output(f"Daily commission:\t{daily_commission:,.2f}")
        self.output(f"Daily slippage:\t{daily_slippage:,.2f}")
        self.output(f"Daily turnover:\t{daily_turnover:,.2f}")
        self.output(f"Daily trade count:\t{daily_trade_count}")
        self.output(f"Daily return:\t{daily_return:,.2f}%")
        self.output(f"Return std:\t{return_std:,.2f}%")
        self.output(f"Win ratio:\t{win_ratio:,.2f}")
        self.output(f"Win/loss ratio:\t\t{win_loss_pnl_ratio:,.2f}")
        self.output(f"Average PnL per trade:\t{pnl_medio:,.2f}")
        self.output(f"calmar_ratio:\t{calmar_ratio:,.3f}")
        # self.output(f"Average holding hours:\t{duration_medio:,.2f}")
        self.output(f"Sharpe Ratio:\t{sharpe_ratio:,.2f}")
        self.output(f"Sortino Ratio:\t{sortino_info:,.3f}")
        self.output(f"Return/drawdown ratio:\t{return_drawdown_ratio:,.2f}")

    statistics = {
        "start_date": start_date,
        "end_date": end_date,
        "total_days": total_days,
        "profit_days": profit_days,
        "loss_days": loss_days,
        "capital": self.capital,
        "end_balance": end_balance,
        "max_drawdown": max_drawdown,
        "max_ddpercent": max_ddpercent,
        "max_drawdown_end": max_drawdown_end,
        "max_drawdown_duration": max_drawdown_duration,
        "total_net_pnl": total_net_pnl,
        "daily_net_pnl": daily_net_pnl,
        "total_commission": total_commission,
        "daily_commission": daily_commission,
        "total_slippage": total_slippage,
        "daily_slippage": daily_slippage,
        "total_turnover": total_turnover,
        "daily_turnover": daily_turnover,
        "total_trade_count": total_trade_count,
        "daily_trade_count": daily_trade_count,
        "total_return": total_return,
        "annual_return": annual_return,
        "daily_return": daily_return,
        "return_std": return_std,
        "sharpe_ratio": sharpe_ratio,
        "sortino_info": sortino_info,
        "win_ratio": win_ratio,
        "return_drawdown_ratio": return_drawdown_ratio,
        "tail_ratio_info": tail_ratio_info,
        "stability_return": stability_return,
        "win_loss_pnl_ratio": win_loss_pnl_ratio,
        "pnl_medio": pnl_medio,
        "calmar_ratio": calmar_ratio
    }

    # Filter potential error infinite value
    for key, value in statistics.items():
        if value in (np.inf, -np.inf):
            value = 0
        statistics[key] = np.nan_to_num(value)

    self.output("Strategy statistics calculation completed")
    return statistics
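The Sharpe computation above annualizes a daily mean/std pair with this snippet's 240-trading-day convention, i.e. sharpe = (daily mean return / daily std) * sqrt(240). A standalone sketch of the same arithmetic on synthetic log returns (240 is the convention used here, not a universal constant; the series is made up):

import numpy as np

daily_log_returns = np.random.default_rng(1).normal(0.0004, 0.01, 480)
daily_return = daily_log_returns.mean() * 100       # in percent, as above
return_std = daily_log_returns.std(ddof=1) * 100    # in percent, as above
sharpe_ratio = daily_return / return_std * np.sqrt(240)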
def runstrategy(ticker_list, bench_ticker):
    args = parse_args()
    print(args)

    # Create a cerebro
    cerebro = bt.Cerebro()

    # Get the dates from the args
    fromdate = datetime.datetime.strptime(args.fromdate, '%Y-%m-%d')
    todate = datetime.datetime.strptime(args.todate, '%Y-%m-%d')

    # bench = bt.feeds.YahooFinanceData(
    #     dataname=bench_ticker,
    #     fromdate=fromdate,
    #     todate=todate,
    #     buffered=True, plot=False
    # )
    bench = bt.feeds.GenericCSVData(
        dataname='/Users/joan/PycharmProjects/CSV_DB/IB/' + bench_ticker + '.csv',
        fromdate=fromdate,
        todate=todate,
        nullvalue=0.0,
        dtformat=('%Y%m%d'),
        datetime=1,
        open=2,
        high=3,
        low=4,
        close=5,
        volume=6,
        reverse=False,
        plot=False)
    cerebro.adddata(bench, name=bench_ticker)

    for i in ticker_list:
        print('Loading data: ' + i)
        # data = bt.feeds.YahooFinanceData(
        #     dataname=i,
        #     fromdate=fromdate,
        #     todate=todate,
        #     adjclose=True,
        #     buffered=True, plot=False
        # )
        data = bt.feeds.GenericCSVData(
            dataname='/Users/joan/PycharmProjects/CSV_DB/IB/' + i + '.csv',
            fromdate=fromdate,
            todate=todate,
            nullvalue=0.0,
            dtformat=('%Y%m%d'),
            datetime=1,
            open=2,
            high=3,
            low=4,
            close=5,
            volume=6,
            reverse=False,
            plot=False)
        cerebro.adddata(data, name=i)

    # Add the strategy
    cerebro.addstrategy(PairTradingStrategy,
                        period=args.period,
                        stake=args.stake)

    cerebro.broker.setcash(args.cash)

    # Add the commission - only stocks like a for each operation
    # cerebro.broker.setcommission(commission=args.commperc)
    comminfo = FixedCommisionScheme()
    cerebro.broker.addcommissioninfo(comminfo)

    cerebro.addanalyzer(bt.analyzers.SharpeRatio, _name='sharpe_ratio')
    cerebro.addanalyzer(bt.analyzers.TradeAnalyzer, _name="ta")
    cerebro.addanalyzer(bt.analyzers.SQN, _name="sqn")
    cerebro.addanalyzer(bt.analyzers.SharpeRatio_A, _name='myysharpe', riskfreerate=args.rf_rate)
    cerebro.addanalyzer(bt.analyzers.PyFolio, _name='mypyf')
    cerebro.addanalyzer(bt.analyzers.TimeReturn, timeframe=bt.TimeFrame.Days,
                        data=bench, _name='benchreturns')

    cerebro.addobserver(bt.observers.Value)
    cerebro.addobserver(bt.observers.Benchmark, plot=False)
    cerebro.addobserver(bt.observers.DrawDown)

    # And run it
    strat = cerebro.run(runonce=not args.runnext,
                        preload=not args.nopreload,
                        oldsync=args.oldsync)

    # Plot if requested
    if args.plot:
        cerebro.plot(style='candlestick', barup='green', bardown='red', figsize=(100, 100))

    bench_returns = strat[0].analyzers.benchreturns.get_analysis()
    bench_df = pd.DataFrame.from_dict(bench_returns, orient='index', columns=['return'])
    return_df = pd.DataFrame.from_dict(strat[0].analyzers.mypyf.get_analysis()['returns'],
                                       orient='index', columns=['return'])

    # print('Sharpe Ratio(bt):', firstStrat.analyzers.myysharpe.get_analysis()['sharperatio'])
    # print('Sharpe Ratio:', empyrical.sharpe_ratio(return_df, risk_free=args.rf_rate / 252, period='daily')[0])
    # print('Sharpe Ratio Benchmark:', empyrical.sharpe_ratio(bench_df, risk_free=args.rf_rate / 252, period='daily')[0])
    # print('Sortino Ratio:', empyrical.sortino_ratio(return_df, period='daily')[0])
    # print('Sortino Ratio Benchmark:', empyrical.sortino_ratio(bench_df, period='daily')[0])
    # print('VaR:', empyrical.value_at_risk(return_df) * 100, '%')
    # print('VaR Benchmark:', empyrical.value_at_risk(bench_df) * 100, '%')
    # print('Capture:', round(empyrical.capture(return_df, bench_df, period='daily')[0] * 100), '%')
    # print('Max drawdown: ', round(empyrical.max_drawdown(return_df)[0] * 100), '%')
    # print('Max drawdown Benchmark: ', round(empyrical.max_drawdown(bench_df)[0] * 100), '%')

    alpha, beta = empyrical.alpha_beta(return_df, bench_df, risk_free=args.rf_rate)
    # print('Beta: ', beta)
    # print('Annual return:', round(empyrical.annual_return(return_df)[0] * 100), '%')
    # print('Annual Vol:', round(empyrical.annual_volatility(return_df)[0] * 100), '%')
    # print('Annual return Benchmark:', round(empyrical.annual_return(bench_df)[0] * 100), '%')
    # print('Annual Vol Benchmark:', round(empyrical.annual_volatility(bench_df)[0] * 100), '%')

    dic = {'SQN': printSQN(strat[0].analyzers.sqn.get_analysis()),
           'sharpe': empyrical.sharpe_ratio(return_df, risk_free=args.rf_rate / 252, period='daily')[0],
           'sharpe_bm': empyrical.sharpe_ratio(bench_df, risk_free=args.rf_rate / 252, period='daily')[0],
           'sortino': empyrical.sortino_ratio(return_df, period='daily')[0],
           'sortino_bm': empyrical.sortino_ratio(bench_df, period='daily')[0],
           'VaR': empyrical.value_at_risk(return_df) * 100,
           'VaR_bm': empyrical.value_at_risk(bench_df) * 100,
           'capture': round(empyrical.capture(return_df, bench_df, period='daily')[0] * 100),
           'max_dd': round(empyrical.max_drawdown(return_df)[0] * 100),
           'max_dd_bm': round(empyrical.max_drawdown(bench_df)[0] * 100),
           'beta': beta,
           'return_annual': round(empyrical.annual_return(return_df)[0] * 100, 2),
           'return_annual_bm': round(empyrical.annual_return(bench_df)[0] * 100, 2),
           'vol_annual': round(empyrical.annual_volatility(return_df)[0] * 100, 2),
           'vol_annual_bm': round(empyrical.annual_volatility(bench_df)[0] * 100, 2)}
    df = pd.DataFrame(dic, index=[0])
    print(df)

    def calc_stats(df):
        df['perc_ret'] = (1 + df['return']).cumprod() - 1
        # print(df.tail())
        return df

    s = return_df.rolling(30).std()
    b = bench_df.rolling(30).std()

    # Get final portfolio value
    portvalue = cerebro.broker.getvalue()

    # Print out the final result
    print('Final Portfolio Value: ${}'.format(round(portvalue)),
          'PnL: ${}'.format(round(portvalue - args.cash)),
          'PnL: {}%'.format(((portvalue / args.cash) - 1) * 100))

    # Finally plot the end results
    if args.plot:
        fig, axs = plt.subplots(2, sharex=True)
        fig.autofmt_xdate()
        axs[1].plot(s)
        axs[1].plot(b)
        axs[1].set_title('Drawdown')
        axs[1].legend(['Fund', 'Benchmark'])
        axs[0].set_title('Returns')
        axs[0].plot(calc_stats(return_df)['perc_ret'])
        axs[0].plot(calc_stats(bench_df)['perc_ret'])
        axs[0].legend(['Fund', 'Benchmark'])
        plt.show()
print('Capture:', round(empyrical.capture(return_df, bench_df, period='daily')[0] * 100), '%')
print('')
print('Max drawdown: ', round(empyrical.max_drawdown(return_df)[0] * 100), '%')
print('Max drawdown Benchmark: ', round(empyrical.max_drawdown(bench_df)[0] * 100), '%')
print('')
alpha, beta = empyrical.alpha_beta(return_df, bench_df, risk_free=rf)
print('Beta: ', beta)
print('')
print('Annual return:', round(empyrical.annual_return(return_df)[0] * 100), '%')
print('Annual Vol:', round(empyrical.annual_volatility(return_df)[0] * 100), '%')
print('')
print('Annual return Benchmark:', round(empyrical.annual_return(bench_df)[0] * 100), '%')
print('Annual Vol Benchmark:', round(empyrical.annual_volatility(bench_df)[0] * 100), '%')
print('')


def calc_stats(df):
    # Cumulative percentage return compounded from simple daily returns.
    df['perc_ret'] = (1 + df['return']).cumprod() - 1
    # print(df.tail())
    return df
def update(self, dt, algorithm_returns, benchmark_returns, leverage):
    # Keep track of latest dt for use in to_dict and other methods
    # that report current state.
    self.latest_dt = dt
    dt_loc = self.cont_index.get_loc(dt)
    self.latest_dt_loc = dt_loc

    self.algorithm_returns_cont[dt_loc] = algorithm_returns
    self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]
    self.num_trading_days = len(self.algorithm_returns)

    if self.create_first_day_stats:
        if len(self.algorithm_returns) == 1:
            self.algorithm_returns = np.append(0.0, self.algorithm_returns)

    self.algorithm_cumulative_returns[dt_loc] = cum_returns(
        self.algorithm_returns)[-1]

    algo_cumulative_returns_to_date = \
        self.algorithm_cumulative_returns[:dt_loc + 1]

    self.mean_returns_cont[dt_loc] = \
        algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days
    self.mean_returns = self.mean_returns_cont[:dt_loc + 1]

    self.annualized_mean_returns_cont[dt_loc] = \
        self.mean_returns_cont[dt_loc] * 252
    self.annualized_mean_returns = \
        self.annualized_mean_returns_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.mean_returns) == 1:
            self.mean_returns = np.append(0.0, self.mean_returns)
            self.annualized_mean_returns = np.append(
                0.0, self.annualized_mean_returns)

    self.benchmark_returns_cont[dt_loc] = benchmark_returns
    self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.benchmark_returns) == 1:
            self.benchmark_returns = np.append(0.0, self.benchmark_returns)

    self.benchmark_cumulative_returns[dt_loc] = cum_returns(
        self.benchmark_returns)[-1]

    benchmark_cumulative_returns_to_date = \
        self.benchmark_cumulative_returns[:dt_loc + 1]

    self.mean_benchmark_returns_cont[dt_loc] = \
        benchmark_cumulative_returns_to_date[dt_loc] / \
        self.num_trading_days
    # Slice through dt_loc + 1 so the current day is included; the
    # original sliced to [:dt_loc] and dropped the newest value.
    self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt_loc + 1]

    self.annualized_mean_benchmark_returns_cont[dt_loc] = \
        self.mean_benchmark_returns_cont[dt_loc] * 252
    self.annualized_mean_benchmark_returns = \
        self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]

    self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
    self.algorithm_cumulative_leverages = \
        self.algorithm_cumulative_leverages_cont[:dt_loc + 1]

    if self.create_first_day_stats:
        if len(self.algorithm_cumulative_leverages) == 1:
            self.algorithm_cumulative_leverages = np.append(
                0.0, self.algorithm_cumulative_leverages)

    # Compare lengths directly; the original condition
    # (`not len(a) and len(b)`) only caught an empty algorithm series.
    if len(self.algorithm_returns) != len(self.benchmark_returns):
        message = "Mismatch between benchmark_returns ({bm_count}) and " \
                  "algorithm_returns ({algo_count}) in range " \
                  "{start} : {end} on {dt}"
        message = message.format(bm_count=len(self.benchmark_returns),
                                 algo_count=len(self.algorithm_returns),
                                 start=self.start_session,
                                 end=self.end_session,
                                 dt=dt)
        raise Exception(message)

    self.update_current_max()
    self.benchmark_volatility[dt_loc] = annual_volatility(
        self.benchmark_returns)
    self.algorithm_volatility[dt_loc] = annual_volatility(
        self.algorithm_returns)

    # Caching the treasury rates for the minutely case is a big speedup,
    # because it avoids searching the treasury curves on every minute.
    # In both minutely and daily, the daily curve is always used.
    treasury_end = dt.replace(hour=0, minute=0)
    if np.isnan(self.daily_treasury[treasury_end]):
        treasury_period_return = choose_treasury(
            self.treasury_curves,
            self.start_session,
            treasury_end,
            self.trading_calendar,
        )
        self.daily_treasury[treasury_end] = treasury_period_return
    self.treasury_period_return = self.daily_treasury[treasury_end]

    self.excess_returns[dt_loc] = (
        self.algorithm_cumulative_returns[dt_loc] -
        self.treasury_period_return)

    self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
        self.algorithm_returns,
        self.benchmark_returns,
    )
    self.sharpe[dt_loc] = sharpe_ratio(self.algorithm_returns)
    self.downside_risk[dt_loc] = downside_risk(self.algorithm_returns)
    self.sortino[dt_loc] = sortino_ratio(
        self.algorithm_returns,
        _downside_risk=self.downside_risk[dt_loc])
    self.max_drawdown = max_drawdown(self.algorithm_returns)
    self.max_drawdowns[dt_loc] = self.max_drawdown
    self.max_leverage = self.calculate_max_leverage()
    self.max_leverages[dt_loc] = self.max_leverage
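# A quick standalone sanity check (not part of the class above) of the
# prefix-slice pattern update() relies on: the last element of
# cum_returns over the first k days equals compounding those k simple
# returns. Assumes cum_returns comes from empyrical, as the snippet
# appears to use.
import numpy as np
from empyrical import cum_returns

rets = np.array([0.01, -0.02, 0.005, 0.03])
for k in range(1, len(rets) + 1):
    prefix = rets[:k]
    assert np.isclose(cum_returns(prefix)[-1], np.prod(1 + prefix) - 1)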
def getLimitedDataForPortfolio(historicalWeights, historicalPredictions,
                               modelsUsed, factorToTrade, joinedData):
    normalTickerAllocationsTable, scaledTickerAllocationsTable = \
        historicalWeightsToTickerAllocations(historicalWeights,
                                             historicalPredictions,
                                             modelsUsed)

    # capitalUsed = pd.DataFrame(normalTickerAllocationsTable.apply(
    #     lambda x: sum([abs(item) for item in x]), axis=1))
    # print(capitalUsed)

    tickerAllocationsTable = scaledTickerAllocationsTable
    tickerAllocationsTable = tickerAllocationsTable.fillna(0)

    tickerPerformance, algoPerformance, algoTransactionCost = \
        portfolioGeneration.calculatePerformanceForAllocations(
            tickerAllocationsTable, joinedData)

    benchmark = factorToTrade
    factorReturn = dataAck.getDailyFactorReturn(benchmark, joinedData)
    factorReturn.columns = ["Factor Return (" + benchmark + ")"]
    algoPerformance.columns = ["Algo Return"]

    # Rolling cumulative returns over weekly/monthly/yearly windows.
    algoPerformanceRollingWeekly = algoPerformance.rolling(5, min_periods=5) \
        .apply(lambda x: empyrical.cum_returns(x)[-1]).dropna()
    algoPerformanceRollingWeekly.columns = ["Weekly Rolling Performance"]

    algoPerformanceRollingMonthly = algoPerformance.rolling(22, min_periods=22) \
        .apply(lambda x: empyrical.cum_returns(x)[-1]).dropna()
    algoPerformanceRollingMonthly.columns = ["Monthly Rolling Performance"]

    algoPerformanceRollingYearly = algoPerformance.rolling(252, min_periods=252) \
        .apply(lambda x: empyrical.cum_returns(x)[-1]).dropna()
    algoPerformanceRollingYearly.columns = ["Yearly Rolling Performance"]

    tickersUsed = []
    for mod in modelsUsed:
        tickersUsed.append(mod.targetTicker)

    # for ticker in tickersUsed:
    #     thisFactorReturn = dataAck.getDailyFactorReturn(ticker, joinedData)
    #     thisFactorReturn.columns = ["Factor Return (" + ticker + ")"]
    #     alpha, beta = empyrical.alpha_beta(algoPerformance, thisFactorReturn)
    #     print(ticker, beta)

    alpha, beta = empyrical.alpha_beta(algoPerformance, factorReturn)
    sharpe_difference = empyrical.sharpe_ratio(algoPerformance) - \
        empyrical.sharpe_ratio(factorReturn)
    annualizedReturn = empyrical.annual_return(algoPerformance)[0]
    annualizedVolatility = empyrical.annual_volatility(algoPerformance)
    stability = empyrical.stability_of_timeseries(algoPerformance)
    # Fraction of days with a positive return.
    profitability = len(algoPerformance.values[algoPerformance.values > 0]) / \
        len(algoPerformance.values)

    rollingSharpe = algoPerformance.rolling(252, min_periods=252) \
        .apply(lambda x: empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe.columns = ["252 Day Rolling Sharpe"]
    rollingSharpeError = rollingSharpe["252 Day Rolling Sharpe"].std()
    rollingSharpeMinimum = np.percentile(
        rollingSharpe["252 Day Rolling Sharpe"].values, 1)

    # AUTOMATICALLY TAKES SLIPPAGE INTO ACCOUNT
    return {
        "benchmark": factorToTrade,
        "alpha": alpha,
        "beta": abs(beta),
        "sharpe difference": sharpe_difference,
        "annualizedReturn": annualizedReturn,
        "annualizedVolatility": annualizedVolatility,
        "sharpe": empyrical.sharpe_ratio(algoPerformance),
        "free return": annualizedReturn - annualizedVolatility,
        "stability": stability,
        "profitability": profitability,
        "rollingSharpeError": rollingSharpeError,
        "rollingSharpeMinimum": rollingSharpeMinimum,
        "weeklyMinimum": algoPerformanceRollingWeekly.min().values[0],
        "monthlyMinimum": algoPerformanceRollingMonthly.min().values[0],
        "yearlyMinimum": algoPerformanceRollingYearly.min().values[0]
    }, tickerAllocationsTable
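# The rolling .apply pattern above generalizes to any empyrical metric. A
# minimal self-contained sketch with synthetic data (the column name and
# seed are illustrative); raw=True hands ndarrays to the lambda so the
# positional [-1] index is safe across pandas versions.
import numpy as np
import pandas as pd
import empyrical

perf = pd.DataFrame(
    {"Algo Return": np.random.default_rng(0).normal(0.0005, 0.01, 500)},
    index=pd.bdate_range("2015-01-02", periods=500),
)
rolling_sharpe = perf.rolling(252, min_periods=252).apply(
    lambda x: empyrical.sharpe_ratio(x), raw=True).dropna()
rolling_month = perf.rolling(22, min_periods=22).apply(
    lambda x: empyrical.cum_returns(x)[-1], raw=True).dropna()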
df = pd.DataFrame()
# Exponentially weighted 60-day volatility of the daily returns.
day_vol = AD1.iloc[7479:, 0].ewm(ignore_na=False, adjust=True,
                                 span=60, min_periods=0).std(bias=False)
# Signal-weighted return, scaled by 0.15 / vol (inverse-volatility sizing).
df['return'] = AD1.iloc[7479:, 0] * y_pred_lstm * 0.15 / day_vol
#%%
import empyrical

print("Annualized Sharpe Ratio = ", empyrical.sharpe_ratio(df['return'], period='daily'))
print("Annualized Mean Returns = ", empyrical.annual_return(df['return'], period='daily'))
print("Annualized Standard Deviations = ", empyrical.annual_volatility(df['return'], period='daily'))
print("Max Drawdown (MDD) = ", empyrical.max_drawdown(df['return']))
print("Sortino ratio = ", empyrical.sortino_ratio(df['return'], period='daily'))
print("Calmar ratio = ", empyrical.calmar_ratio(df['return'], period='daily'))
#%%
a = pd.DataFrame()
a = pd.concat([a, TSMOM.ind_return['port_avg']], axis=1)
a = a.tail(831)
b = empyrical.cum_returns(a)
c = empyrical.cum_returns(df['return'])
# Distinct legend labels (the original labelled both series 'R').
plt.plot(b, color='red', label='TSMOM')
plt.plot(c, color='blue', label='Strategy')
plt.title('Cumulative return (daily)')
plt.xlabel('Time')
plt.legend()
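# The 0.15 / day_vol factor above is volatility targeting: positions are
# scaled so the ex-ante volatility of the scaled return hits a fixed target.
# A standalone sketch of the same idea under assumed names (vol_target,
# `returns`, `signal` are illustrative). Note one common variant annualizes
# the vol estimate with sqrt(252); the snippet above divides by the raw
# daily EWM std instead.
import numpy as np
import pandas as pd

def vol_target(returns, signal, target=0.15, span=60):
    # Ex-ante annualized volatility from an EWM std of daily returns.
    ann_vol = returns.ewm(span=span).std() * np.sqrt(252)
    # Lever the signal-weighted return up or down toward the target vol.
    return signal * returns * target / ann_vol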
def annual_volatility(portfolio_daily_returns):
    # Thin wrapper around empyrical's annual_volatility (imported as `ep`).
    return ep.annual_volatility(portfolio_daily_returns)
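# Usage sketch for the wrapper above, assuming `import empyrical as ep` at
# module top (the alias is inferred from the call, not shown in the source):
# import pandas as pd
# daily = pd.Series([0.001, -0.002, 0.0005, 0.003])
# print(annual_volatility(daily))  # annualized std of the daily returns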