def get_returns(self, benchmark=''):
    """Build a DataFrame of algorithm and benchmark simple/cumulative returns.

    Parameters
    ----------
    benchmark : str
        Symbol whose returns are fetched over the backtest window
        (self.configuration['index'][0] .. self.configuration['index'][-1]).
        Required: an empty value raises NotImplementedError.

    Returns
    -------
    pd.DataFrame
        Indexed by the benchmark dates, with columns 'benchmark_return',
        'benchmark_c_return', 'algo_return' and 'algo_c_return'.

    Raises
    ------
    KeyError
        If the benchmark data could not be fetched.
    NotImplementedError
        If no benchmark was provided.
    """
    returns = {}
    if benchmark:
        try:
            benchmark_data = get_benchmark_returns(
                benchmark,
                self.configuration['index'][0],
                self.configuration['index'][-1])
        except Exception as e:
            # FIX: chain the original error so its traceback is preserved
            # instead of being swallowed by the re-raise.
            raise KeyError(e) from e
    else:
        # TODO Automatic detection given exchange market (on command line) ?
        raise NotImplementedError()

    # NOTE Could be more efficient. But len(benchmark_data.date) !=
    # len(self.results.returns.index). Maybe because of different markets
    dates = pd.DatetimeIndex([d.date for d in benchmark_data])
    returns['benchmark_return'] = pd.Series(
        [d.returns for d in benchmark_data], index=dates)
    returns['benchmark_c_return'] = (
        (returns['benchmark_return'] + 1).cumprod()) - 1
    returns['algo_return'] = pd.Series(
        self.results.returns.values, index=dates)
    returns['algo_c_return'] = pd.Series(
        ((self.results.returns.values + 1).cumprod()) - 1, index=dates)

    df = pd.DataFrame(returns, index=dates)

    # NOTE(review): unreachable with the guard above — an empty or None
    # benchmark already raised NotImplementedError. Kept to match the
    # sibling implementations of this method.
    if benchmark is None:
        df = df.drop(['benchmark_return', 'benchmark_c_return'], axis=1)

    return df
def get_returns(self, benchmark='', timestamp='one_month', save=False,
                db_id=None):
    """Build a DataFrame of algorithm and benchmark returns.

    Parameters
    ----------
    benchmark : str
        Symbol fetched over self.configuration['start'..'end']. Required:
        an empty value raises NotImplementedError.
    timestamp : str
        Unused here — presumably a sampling frequency; TODO confirm.
    save : bool
        Persistence is not implemented; True raises NotImplementedError.
    db_id : str or None
        Target table name for the (unimplemented) save path.

    Returns
    -------
    pd.DataFrame with columns 'Benchmark.Returns', 'Benchmark.CReturns',
    'Returns' and 'CReturns', indexed by the benchmark dates.

    Raises
    ------
    KeyError
        If the benchmark data could not be fetched.
    NotImplementedError
        If no benchmark was given, or save=True.
    """
    returns = dict()
    if benchmark:
        try:
            benchmark_data = get_benchmark_returns(
                benchmark,
                self.configuration['start'],
                self.configuration['end'])
        except Exception as e:
            # FIX: was a bare `except:` raising an empty KeyError(), which
            # also trapped KeyboardInterrupt/SystemExit and hid the cause.
            # Keep the KeyError contract callers expect, but chain the cause.
            raise KeyError(e) from e
    else:
        # TODO Automatic detection given exchange market (on command line) ?
        raise NotImplementedError()

    # NOTE Could be more efficient. But len(benchmark_data.date) !=
    # len(self.results.returns.index). Maybe because of different markets
    dates = pd.DatetimeIndex([d.date for d in benchmark_data])
    returns['Benchmark.Returns'] = pd.Series(
        [d.returns for d in benchmark_data], index=dates)
    returns['Benchmark.CReturns'] = (
        (returns['Benchmark.Returns'] + 1).cumprod()) - 1
    returns['Returns'] = pd.Series(self.results.returns, index=dates)
    returns['CReturns'] = pd.Series(
        ((self.results.returns + 1).cumprod()) - 1, index=dates)

    df = pd.DataFrame(returns, index=dates)

    if save:
        # FIX: the original had
        # `self.datafeed.stock_db.saveDFToDB(df, table=db_id)` AFTER this
        # raise, which made it unreachable. Keep the raise (persistence is
        # not wired up yet) and drop the dead call.
        raise NotImplementedError()

    # NOTE(review): unreachable — an empty/None benchmark already raised
    # NotImplementedError above. Kept to match the sibling implementations.
    if benchmark is None:
        df = df.drop(['Benchmark.Returns', 'Benchmark.CReturns'], axis=1)

    return df
def get_returns(self, benchmark=None, timestamp='one_month', save=False,
                db_id=None):
    """Assemble algorithm vs. benchmark simple and cumulative returns.

    Note: ``benchmark`` only gates the computation — the actual symbol is
    currently hard-coded to '^GSPC' (see the TODO below).

    Raises
    ------
    NotImplementedError
        If no benchmark was given.
    """
    # Guard clause: nothing is implemented for the benchmark-less case.
    if not benchmark:
        raise NotImplementedError()

    #TODO Benchmark fields in database for guessing name like for stocks
    benchmark_symbol = '^GSPC'
    benchmark_data = get_benchmark_returns(benchmark_symbol,
                                           self.backtest_cfg['start'],
                                           self.backtest_cfg['end'])
    #benchmark_data = [d for d in benchmark_data if (d.date >= self.backtest_cfg['start']) and (d.date <= self.backtest_cfg['end'])]

    dates = pd.DatetimeIndex([item.date for item in benchmark_data])
    bench_rets = pd.Series([item.returns for item in benchmark_data],
                           index=dates)

    frame_data = dict()
    frame_data['Benchmark.Returns'] = bench_rets
    frame_data['Benchmark.CReturns'] = ((bench_rets + 1).cumprod()) - 1
    frame_data['Returns'] = pd.Series(self.results.returns, index=dates)
    frame_data['CReturns'] = pd.Series(
        ((self.results.returns + 1).cumprod()) - 1, index=dates)

    df = pd.DataFrame(frame_data, index=dates)

    if save:
        raise NotImplementedError()
        # NOTE(review): unreachable while the raise above stands.
        self.feeds.stock_db.saveDFToDB(df, table=db_id)

    # NOTE(review): unreachable in practice — a None benchmark already
    # raised NotImplementedError above.
    if benchmark is None:
        df = df.drop(['Benchmark.Returns', 'Benchmark.CReturns'], axis=1)

    return df
def get_returns(self, benchmark=None, timestamp='one_month', save=False,
                db_id=None):
    """Build a DataFrame of algorithm and benchmark returns over the backtest.

    Parameters
    ----------
    benchmark : str or None
        Free-form benchmark name, resolved to a symbol via
        self.feeds.guess_name(). Required: None/empty raises
        NotImplementedError.
    timestamp : str
        Unused here — presumably a sampling frequency; TODO confirm.
    save : bool
        Persistence is not implemented; True raises NotImplementedError.
    db_id : str or None
        Target table name for the (unimplemented) save path.

    Returns
    -------
    pd.DataFrame with columns 'Benchmark.Returns', 'Benchmark.CReturns',
    'Returns' and 'CReturns', indexed by the benchmark dates.

    Raises
    ------
    KeyError
        If the benchmark name cannot be resolved to a symbol.
    NotImplementedError
        If no benchmark was given, or save=True.
    """
    returns = dict()
    if benchmark:
        benchmark_symbol = self.feeds.guess_name(benchmark)
        if not benchmark_symbol:
            # FIX: was `raise KeyError()` with no context at all — name the
            # benchmark that failed to resolve.
            raise KeyError(
                'Could not resolve benchmark: {}'.format(benchmark))
        benchmark_data = get_benchmark_returns(benchmark_symbol,
                                               self.backtest_cfg['start'],
                                               self.backtest_cfg['end'])
    else:
        # TODO Automatic detection given exchange market (on command line) ?
        # Or s&p500 as only implemented in zipline currently (but not for
        # long !)
        raise NotImplementedError()

    # NOTE Could be more efficient. But len(benchmark_data.date) !=
    # len(self.results.returns.index). Maybe because of different markets
    dates = pd.DatetimeIndex([d.date for d in benchmark_data])
    returns['Benchmark.Returns'] = pd.Series(
        [d.returns for d in benchmark_data], index=dates)
    returns['Benchmark.CReturns'] = (
        (returns['Benchmark.Returns'] + 1).cumprod()) - 1
    returns['Returns'] = pd.Series(self.results.returns, index=dates)
    returns['CReturns'] = pd.Series(
        ((self.results.returns + 1).cumprod()) - 1, index=dates)

    df = pd.DataFrame(returns, index=dates)

    if save:
        # FIX: the original placed
        # `self.feeds.stock_db.saveDFToDB(df, table=db_id)` AFTER this
        # raise, making it unreachable. Keep the raise until persistence
        # is actually implemented; the dead call is removed.
        raise NotImplementedError()

    # NOTE(review): unreachable — a None benchmark already raised above.
    if benchmark is None:
        df = df.drop(['Benchmark.Returns', 'Benchmark.CReturns'], axis=1)

    return df
def test(self, sd, ed, live_start_date, benchmark='SPY'):
    """Run the zipline backtest over [sd, ed] and render a pyfolio tear sheet.

    Parameters
    ----------
    sd, ed : datetime
        Backtest start and end dates.
    live_start_date : datetime
        Boundary passed to pyfolio separating in-sample from live/paper data.
    benchmark : str
        Symbol whose returns are plotted against the algorithm (default SPY).
    """
    trading_days = date_range(sd, ed, freq=trading_day)
    # ~21 trading days per month: the progress bar ticks once per month.
    self.timestep_progress = tqdm(total=len(trading_days) / 21)

    results = run_algorithm(initialize=self.initialize_testing_algo,
                            capital_base=self.capital,
                            start=sd,
                            end=ed)
    returns, positions, transactions = (
        pf.utils.extract_rets_pos_txn_from_zipline(results))

    benchmark_returns = get_benchmark_returns(benchmark, sd, ed)
    # Anchor day one at 0% so the cumulative curves start together.
    # FIX: `.ix` was deprecated in pandas 0.20 and removed in 1.0 — use
    # label-based `.loc` instead (sd is a label here, not a position).
    benchmark_returns.loc[sd] = 0.0

    pf.create_full_tear_sheet(returns,
                              positions=positions,
                              transactions=transactions,
                              benchmark_rets=benchmark_returns,
                              live_start_date=live_start_date,
                              round_trips=True)
def get_non_trading_days(start, end):
    """Return a DatetimeIndex of non-trading days between start and end.

    Rewritten approach (translated from the original Chinese comments):
    fetch the real trading calendar from index '000001', enumerate every
    calendar day in the window, then combine rule-based holidays (weekends,
    Jan 1st, May 1st, Oct 1st-3rd) with any remaining day that is neither a
    trading day nor already captured by a rule.
    """
    # Actual trading dates, taken from the '000001' benchmark series.
    # FIX: build a set — the membership tests at the bottom were O(n) per
    # calendar day against a list (O(n^2) overall).
    trading_dates = {
        canonicalize_datetime(i)
        for i in (get_benchmark_returns('000001', start, end)).index
    }

    # Every calendar day within the requested window.
    all_dates = [canonicalize_datetime(t) for t in pd.date_range(start, end)]

    start = canonicalize_datetime(start)
    end = canonicalize_datetime(end)

    non_trading_rules = []

    # Saturdays and Sundays.
    weekends = rrule.rrule(rrule.YEARLY,
                           byweekday=(rrule.SA, rrule.SU),
                           cache=True,
                           dtstart=start,
                           until=end)
    non_trading_rules.append(weekends)

    # New Year's Day (first day of the year).
    new_years = rrule.rrule(rrule.MONTHLY,
                            byyearday=1,
                            cache=True,
                            dtstart=start,
                            until=end)
    non_trading_rules.append(new_years)

    # Labour Day (May 1st).
    may_1st = rrule.rrule(rrule.MONTHLY,
                          bymonth=5,
                          bymonthday=1,
                          cache=True,
                          dtstart=start,
                          until=end)
    non_trading_rules.append(may_1st)

    # National Day holidays (Oct 1st, 2nd and 3rd).
    for monthday in (1, 2, 3):
        non_trading_rules.append(
            rrule.rrule(rrule.MONTHLY,
                        bymonth=10,
                        bymonthday=monthday,
                        cache=True,
                        dtstart=start,
                        until=end))

    non_trading_ruleset = rrule.rruleset()
    for rule in non_trading_rules:
        non_trading_ruleset.rrule(rule)
    non_trading_days = non_trading_ruleset.between(start, end, inc=True)

    # Any remaining calendar day that is neither a trading day nor already
    # flagged by a rule (e.g. ad-hoc market closures) is also non-trading.
    rule_days = set(non_trading_days)
    non_trading_days = non_trading_days + [
        day for day in all_dates
        if day not in trading_dates and day not in rule_days
    ]

    non_trading_days.sort()
    return pd.DatetimeIndex(non_trading_days)
if __name__ == '__main__':
    from datetime import datetime
    import pytz
    from zipline.algorithm import TradingAlgorithm
    from zipline.utils.factory import load_from_yahoo

    # The 30 Dow Jones components traded by the algorithm. Hoisted into one
    # variable: the original duplicated this list for load_from_yahoo and
    # for TradingAlgorithm(identifiers=...).
    dow_components = ['AAPL', 'AXP', 'BA', 'CAT', 'CSCO', 'CVX', 'DD',
                      'DIS', 'GE', 'GS', 'HD', 'IBM', 'INTC', 'JNJ', 'JPM',
                      'KO', 'MCD', 'MMM', 'MRK', 'MSFT', 'NKE', 'PFE', 'PG',
                      'TRV', 'UNH', 'UTX', 'V', 'VZ', 'WMT', 'XOM']

    # Set the simulation start and end dates.
    start = datetime(2005, 11, 14, 0, 0, 0, 0, pytz.utc)
    end = datetime(2013, 10, 14, 0, 0, 0, 0, pytz.utc)

    # Load price data from yahoo.
    data = load_from_yahoo(stocks=dow_components, indexes={},
                           start=start, end=end)

    # Attach the Dow Jones index returns as the benchmark series.
    benchmarks = get_benchmark_returns("^DJI", start, end)
    data.bench = benchmarks

    # FIX: the original wrote `datetime(2007, 5, 07, ...)` — a leading-zero
    # integer literal, which is a SyntaxError in Python 3.
    start2 = datetime(2007, 5, 7, 0, 0, 0, 0, pytz.utc)
    end2 = datetime(2013, 10, 14, 0, 0, 0, 0, pytz.utc)
    dw = web.DataReader("^GSPC", "yahoo", start=start2, end=end2)
    data.dowJones = dw

    # Create and run the algorithm.
    algo = TradingAlgorithm(initialize=initialize,
                            handle_data=handle_data,
                            identifiers=dow_components)
    results = algo.run(data)
# Plot the algorithm's rolling returns against a benchmark for the finished
# backtest. (perfData, args, start, end and plt are defined earlier in this
# script; perfData is the zipline performance DataFrame.)
# Previously this section also dumped perfData.returns / alpha / beta /
# sharpe / max_drawdown to stdout.

# Use pyfolio (by Quantopian) to generate tearsheet
import pyfolio as pf

# Split the zipline perf frame into the pieces pyfolio expects.
returns, positions, transactions = \
    pf.utils.extract_rets_pos_txn_from_zipline(perfData)

# Get benchmark returns and compare it with our algorithm's returns
from zipline.data.benchmarks import get_benchmark_returns

benchmark_rets = get_benchmark_returns(args['benchmark'], start, end)  # requires network connection
benchmark_rets.name = 'Benchmark (%s)' % args['benchmark']
returns.name = 'Algorithm'

# Draw both curves on the current axes.
ax = plt.gca()
pf.plot_rolling_returns(returns, factor_returns=benchmark_rets,
                        logy=False, ax=ax)
# Alternative outputs, kept for reference:
#   pf.plot_rolling_returns(returns, logy=False, ax=ax)
#   pf.create_returns_tear_sheet(returns)
#   pf.create_full_tear_sheet(perfData, positions=positions,
#                             transactions=transactions,
#                             live_start_date='2018-08-1', round_trips=False)

# Format the y axis as percentages.
import matplotlib.ticker as mtick
ax.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))