def handle_data(context, data):
    print(context.portfolio.portfolio_value)

    # Skip first 300 days to get full windows
    context.i += 1
    if context.i < 300:
        return

    # Compute averages
    # history() has to be called with the same params
    # from above and returns a pandas dataframe.
    short_mavg = history(100, '1d', 'price').mean()
    long_mavg = history(300, '1d', 'price').mean()

    sym = symbol('AAPL')

    # Trading logic
    if short_mavg[sym] > long_mavg[sym]:
        # order_target orders as many shares as needed to
        # achieve the desired number of shares.
        order_target(sym, 100)
    elif short_mavg[sym] < long_mavg[sym]:
        order_target(sym, 0)

    # Save values for later inspection
    record(AAPL=data[sym].price,
           short_mavg=short_mavg[sym],
           long_mavg=long_mavg[sym])
def handle_data(context, data):
    # Skip first 300 days to get full windows
    context.i += 1
    if context.i < 300:
        return

    # Compute averages
    # history() has to be called with the same params
    # from above and returns a pandas dataframe.
    short_mavg = history(100, '1d', 'price').mean()
    long_mavg = history(300, '1d', 'price').mean()
    # price_history = data.history(assets=symbol('TEST'), fields="price", bar_count=5, frequency="1d")

    # Trading logic
    if short_mavg[0] > long_mavg[0]:
        # order_target orders as many shares as needed to
        # achieve the desired number of shares.
        order_target(symbol('AAPL'), 100)
    elif short_mavg[0] < long_mavg[0]:
        order_target(symbol('AAPL'), 0)

    # Save values for later inspection
    record(AAPL=data[symbol('AAPL')].price,
           short_mavg=short_mavg[0],
           long_mavg=long_mavg[0])
def handle_data(context, data):
    context.i += 1
    if context.i < 20:
        return

    ma5 = history(5, '1d', 'price').mean()
    ma20 = history(20, '1d', 'price').mean()

    buy = False
    sell = False

    sym = symbol('HMD')

    if ma5[sym] > ma20[sym] and context.investment == False:
        order_target(sym, 1)
        context.investment = True
        buy = True
    elif ma5[sym] < ma20[sym] and context.investment == True:
        order_target(sym, -1)
        context.investment = False
        sell = True

    record(HMD=data[sym].price, ma5=ma5[sym], ma20=ma20[sym],
           buy=buy, sell=sell)
def handle_data(context, data):
    # Skip first 40 days to get full windows
    context.i += 1

    # Dump the history container's internal buffers for inspection
    dp = context.history_container.digest_panels
    for k in dp.keys():
        df = dp[k].buffer['price']
        a = df.dropna()
        print('No.', context.i, ':Len.', len(a))
        print('Contents:')
        print(a)
    print(context.history_container.buffer_panel.buffer['price'])

    if context.i < 40:
        return

    # Compute averages
    # history() has to be called with the same params
    # from above and returns a pandas dataframe.
    short_mavg = history(20, '1d', 'price').mean()
    long_mavg = history(40, '1d', 'price').mean()

    # Trading logic
    if short_mavg[context.sym] > long_mavg[context.sym]:
        # order_target orders as many shares as needed to
        # achieve the desired number of shares.
        order_target(context.sym, 100)
    elif short_mavg[context.sym] < long_mavg[context.sym]:
        order_target(context.sym, 0)

    # Save values for later inspection
    record(AAPL=data[context.sym].price,
           short_mavg=short_mavg[context.sym],
           long_mavg=long_mavg[context.sym])
def handle_data(context, data):
    # Skip the first N days to get full windows
    context.i += 1
    if context.i < context.N:
        return

    # Fetch OHLC windows
    # history() has to be called with the same params
    # from above and returns a pandas dataframe.
    c = history(context.N, '1d', 'close')
    o = history(context.N, '1d', 'open')
    h = history(context.N, '1d', 'high')
    l = history(context.N, '1d', 'low')

    for sym in data:
        # Trading logic: volatility-breakout bands around today's open
        hh = h[sym].ix[:-1].max()
        hc = c[sym].ix[:-1].max()
        lc = c[sym].ix[:-1].min()
        ll = l[sym].ix[:-1].min()
        r = max(hh - lc, hc - ll)
        upper = data[sym].open + context.k * r
        lower = data[sym].open - context.k * r

        if data[sym].price > upper and not context.invested:
            # order places a fixed-size order; the limit price caps the fill
            order(sym, 10000, limit_price=data[sym].price)
            context.invested = True
        elif data[sym].price < lower and context.invested:
            order(sym, -10000, limit_price=data[sym].price)
            context.invested = False
def handle_data_magc(context, data):
    context.i += 1
    if context.i < 60:
        return

    ma20 = history(20, '1d', 'price').mean()
    ma60 = history(60, '1d', 'price').mean()

    buy = False
    sell = False

    sym = symbol(code)
    count = int(100000 / data[sym].price)

    if context.investment == False:
        if ma20[sym] > ma60[sym]:
            order_target(sym, count)
            context.investment = True
            context.buy_price = data[sym].price
            buy = True
    else:
        if data[sym].price > context.buy_price + (context.buy_price * sell_point):
            order_target(sym, -count)
            context.investment = False
            sell = True

    record(code=data[sym].price, ma20=ma20[sym], ma60=ma60[sym],
           buy=buy, sell=sell)
def handle_data(context, data):
    rebalance_period = 20

    context.tick += 1
    if context.tick < 120:
        return
    if context.tick % rebalance_period != 0:
        return

    # Get rolling window of past prices and compute returns
    prices_6m = history(120, '1d', 'price').dropna()
    returns_6m = prices_6m.pct_change().dropna()
    prices_60d = history(60, '1d', 'price').dropna()
    returns_60d = prices_60d.pct_change().dropna()

    try:
        # Keep the positive-momentum half of the universe, ranked by 6-month momentum
        mom = returns_6m.T.sum(axis=1)
        selected_indices = mom[mom > 0].sort_values().tail(len(mom) // 2).index
        # selected_indices = mom.index
        # selected_indices = mom[mom > 0].index
        selected_returns = returns_60d[selected_indices]

        weights = minimize_vol(selected_returns.T)
        # weights = minimize_vol(returns_60d.T)

        # Rebalance portfolio accordingly
        for stock, weight in zip(selected_returns.columns, weights):
            order_target_percent(stock, weight)
    except:
        # Sometimes this error is thrown
        # ValueError: Rank(A) < p or Rank([P; A; G]) < n
        pass
def handle_data(self, context, data):
    rebalance_period = 20

    context.tick += 1
    if context.tick < 120:
        return
    if context.tick % rebalance_period != 0:
        return

    # Get rolling window of past prices and compute returns
    prices_6m = history(120, '1d', 'price').dropna()
    returns_6m = prices_6m.pct_change().dropna()
    prices_60d = history(60, '1d', 'price').dropna()
    returns_60d = prices_60d.pct_change().dropna()

    try:
        # Rank by 6-month momentum (here the whole universe is kept)
        mom = returns_6m.T.sum(axis=1)
        # selected_indices = mom[mom > 0].sort_values().tail(len(mom) // 2).index
        selected_indices = mom.index
        # selected_indices = mom[mom > 0].index
        selected_returns = returns_60d[selected_indices]

        weights = self.minimize_vol(selected_returns.T)
        # weights = minimize_vol(returns_60d.T)

        # Rebalance portfolio accordingly
        for stock, weight in zip(selected_returns.columns, weights):
            order_target_percent(stock, weight)
    except:
        # Sometimes this error is thrown
        # ValueError: Rank(A) < p or Rank([P; A; G]) < n
        pass
def handle_data(context, data):
    # trading algorithm (executed on every event)

    # skip first 300 days to get full windows
    context.i += 1
    if context.i < 300:
        return

    # compute short and long moving averages:
    short_mavg = history(100, '1d', 'price').mean()
    long_mavg = history(300, '1d', 'price').mean()

    buy = False
    sell = False

    # trading logic
    if (short_mavg[0] > long_mavg[0]) and not context.invested:
        buy = True
        context.invested = True
        order_target(symbol('AAPL'), 100)
    elif (short_mavg[0] < long_mavg[0]) and context.invested:
        sell = True
        context.invested = False
        order_target(symbol('AAPL'), -100)

    # save values for plotting
    record(AAPL=data[symbol('AAPL')].price,
           short_mavg=short_mavg[0],
           long_mavg=long_mavg[0],
           buy=buy,
           sell=sell)
def handle_data(self, context, data):
    rebalance_period = 20

    context.tick += 1
    if context.tick % rebalance_period != 0:
        return

    # Get rolling window of past prices and compute returns
    prices_6m = history(120, '1d', 'price').dropna()
    returns_6m = prices_6m.pct_change().dropna()
    prices_60d = history(60, '1d', 'price').dropna()
    returns_60d = prices_60d.pct_change().dropna()

    try:
        # Select names with above-median 6-month momentum
        mom = returns_6m.T.sum(axis=1)
        selected = (mom > np.median(mom)) * 1

        # 60-day volatility targeting, capped at 20% per position
        vol = np.std(returns_60d.T, axis=1)
        vol_target = 0.01
        wt = vol_target / vol * 0.2
        wt[wt > 0.2] = 0.2
        weights = wt * selected

        # Rebalance portfolio accordingly
        for stock, weight in zip(prices_60d.columns, weights):
            order_target_percent(stock, weight)
    except ValueError:
        # Sometimes this error is thrown
        # ValueError: Rank(A) < p or Rank([P; A; G]) < n
        pass
def handle_data(context, data):
    # Skip first 10 days to get full windows
    context.i += 1
    if context.i < 10:
        return

    # Compute averages
    # history() has to be called with the same params
    # from above and returns a pandas dataframe.
    short_mavg = history(5, '1d', 'price').mean()
    long_mavg = history(10, '1d', 'price').mean()

    for sym in data:
        # sym = data.keys()[0]
        # Trading logic
        if short_mavg[sym] > long_mavg[sym] and not context.invested:
            # order_target orders as many shares as needed to
            # achieve the desired number of shares.
            order_target(sym, 5000)
            context.invested = True
        elif short_mavg[sym] < long_mavg[sym] and context.invested:
            order_target(sym, 0)
            context.invested = False
def handle_data(context, data):
    # Skip first 300 days to get full windows
    context.i += 1
    if context.i < 300:
        return

    # Compute averages
    # history() has to be called with the same params
    # from above and returns a pandas dataframe.
    short_mavg = history(100, '1d', 'price').mean()
    long_mavg = history(300, '1d', 'price').mean()

    sym = symbol('AAPL')

    # Trading logic
    if short_mavg[sym] > long_mavg[sym]:
        # order_target orders as many shares as needed to
        # achieve the desired number of shares.
        order_target(sym, 100)
    elif short_mavg[sym] < long_mavg[sym]:
        order_target(sym, 0)

    # Save values for later inspection
    record(AAPL=data[sym].price,
           short_mavg=short_mavg[sym],
           long_mavg=long_mavg[sym])
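# A minimal initialize() sketch to pair with the dual-moving-average handle_data above.
# It is not part of the original snippet; the only state it seeds is the bar counter
# context.i that handle_data increments, matching zipline's classic example.
def initialize(context):
    context.i = 0  # bar counter used to skip the warm-up window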
def handle_data(context, data):
    # context.i += 1
    # if context.i <= 5:
    #     return

    # Loop over every stock
    closeprice = history(5, '1d', 'close')
    for security in context.stocks:
        # Average of the three prior closes (a simple proxy for the recent average price)
        vwap = (closeprice[symbol(security)][-2] +
                closeprice[symbol(security)][-3] +
                closeprice[symbol(security)][-4]) / 3
        price = closeprice[symbol(security)][-2]
        print(get_datetime(), security, vwap, price)

        # If yesterday's price is below 99.5% of the three-day average
        # and we hold the stock, sell.
        if price < vwap * 0.995:
            # Place a sell order
            order(symbol(security), -300)
            print(get_datetime(), "Selling %s" % security)
            # log.info("Selling %s" % (security))
        # If yesterday's price is above 100.5% of the three-day average
        # and we have spare cash, buy.
        elif price > vwap * 1.005:
            # Place a buy order
            order(symbol(security), 300)
            print(get_datetime(), "Buying %s" % security)
def top_rets(self, tickers, win):
    hist = history(bar_count=241, frequency='1d', field='price')
    ret = ((hist / hist.shift(win)) - 1).tail(1)

    mean_ret = float(np.median(ret))
    max_ret = float(ret.max(axis=1))
    spy_ret = float(ret[symbol(self.ticker_spy)])

    lst = {}
    lst['mean'] = []
    lst['spy'] = []
    lst['zero'] = []
    lst['max'] = []

    for ticker in tickers:
        ticker_ret = float(ret[ticker])
        if ticker_ret > mean_ret:
            lst['mean'].append(ticker)
        if ticker_ret > spy_ret:
            lst['spy'].append(ticker)
        if ticker_ret > 0:
            lst['zero'].append(ticker)
        if ticker_ret >= max_ret:
            lst['max'].append(ticker)

    return lst
def handle_data_macd(context, data):
    context.i += 1
    if context.i < 60:
        return

    buy = False
    sell = False

    sym = symbol(code)
    count = int(100000 / data[sym].price)

    prices = history(40, '1d', 'price')
    macd = prices.apply(MACD, fastperiod=12, slowperiod=26, signalperiod=9)

    if context.investment == False:
        if macd[sym] > 0 and context.position == -1:
            order_target(sym, count)
            context.investment = True
            context.buy_price = data[sym].price
            buy = True
            context.position = 1
    else:
        if data[sym].price > context.buy_price + (context.buy_price * sell_point):
            order_target(sym, -count)
            context.investment = False
            sell = True

    if macd[sym] < 0:
        context.position = -1
    if macd[sym] > 0:
        context.position = 1

    record(code=data[sym].price, macd=macd[sym], buy=buy, sell=sell)
def handle_data(context, data):
    # Get a trailing window of data
    prices = history(15, '1d', 'price')

    # Use pandas dataframe.apply to get the last RSI value
    # for each stock in our basket
    rsi_data = prices.apply(talib.RSI, timeperiod=14).iloc[-1]

    intc_rsi = rsi_data[context.intc]

    # check how many shares of Intel we currently own
    current_intel_shares = context.portfolio.positions[context.intc].amount

    # until 14 time periods have gone by, the rsi value will be numpy.nan

    # RSI is above 70 and we own INTC, time to close the position.
    if intc_rsi > context.HIGH_RSI and current_intel_shares > 0:
        order_target(context.intc, 0)
        log.info('RSI is at ' + str(intc_rsi) + ', selling ' +
                 str(current_intel_shares) + ' shares')

    # RSI is below 30 and we don't have any Intel stock, time to buy.
    elif intc_rsi < context.LOW_RSI and current_intel_shares == 0:
        num_shares = math.floor(context.max_notional / data[context.intc].close_price)
        order(context.intc, num_shares)
        log.info('RSI is at ' + str(intc_rsi) + ', buying ' +
                 str(num_shares) + ' shares')

    # record the current RSI value and the current price of INTC.
    record(intcRSI=intc_rsi, intcPRICE=data[context.intc].close_price)
def handle_data(context, data):
    context.iwarmup = context.iwarmup + 1
    if context.iwarmup <= (context.history_depth + 1):
        return

    dfHistD = history(30, '1d', 'price')

    S = context.secs[0]
    CurP = data[S].price

    BolU, BolM, BolL = talib.BBANDS(dfHistD[S].values,
                                    timeperiod=context.BBANDS_timeperiod,
                                    nbdevup=context.BBANDS_nbdevup,
                                    nbdevdn=context.BBANDS_nbdevdn,
                                    matype=0)

    record(CurP=CurP, BolU=BolU[-1], BolM=BolM[-1], BolL=BolL[-1])

    if CurP < BolL[-1]:
        order_target_percent(S, +0.97)
    elif CurP > BolU[-1]:
        order_target_percent(S, -0.97)

    return
def handle_data(context, data):
    print(context)
    # raw_input()  # pause here to inspect the daily positions
    if not context.has_ordered:
        for stock in data:
            # openprice = history(3, '1d', 'open')
            closeprice = history(5, '1d', 'close')
            # index -2: yesterday, -3: the day before, -4: two days before that
            print(get_datetime(),
                  closeprice[sid(stock)][0], closeprice[sid(stock)][1],
                  closeprice[sid(stock)][2], closeprice[sid(stock)][3],
                  closeprice[sid(stock)][4])
            # print(closeprice, closeprice[sid(stock)][1])

            # three consecutive rising closes: buy
            if (closeprice[sid(stock)][-2] > closeprice[sid(stock)][-3] and
                    closeprice[sid(stock)][-3] > closeprice[sid(stock)][-4]):
                print("buy", get_datetime())
                order(stock, 300)
            # three consecutive falling closes: sell
            elif (closeprice[sid(stock)][-2] < closeprice[sid(stock)][-3] and
                    closeprice[sid(stock)][-3] < closeprice[sid(stock)][-4]):
                print("sell", get_datetime())
                order(stock, -300)
def handle_data(context, data):
    logging.debug('enter handle_data')
    context.i += 1
    if context.i < context.rsi_window:
        return

    # get the last RSI value
    prices = history(context.rsi_window, '1d', 'price')
    sec_rsi = talib.RSI(prices[context.security].values,
                        timeperiod=context.rsi_window - 1)

    # buy and sell flags
    buy = False
    sell = False

    if sec_rsi[-1] < context.LOW_RSI and not context.invested:
        # RSI under 30 indicates oversold, time to buy
        order_target(context.security, 1000)
        logging.debug('Buying {}'.format(context.security))
        context.invested = True
        buy = True
    elif sec_rsi[-1] > context.HIGH_RSI and context.invested:
        # RSI over 70 indicates overbought, sell everything
        order_target(context.security, 0)
        logging.debug('Selling {}'.format(context.security))
        context.invested = False
        sell = True

    # record data for each time increment
    record(secRSI=sec_rsi[-1],
           price=data[context.security].price,
           buy=buy,
           sell=sell)

    logging.info(context.portfolio.cash)
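# A possible initialize() for the RSI strategy above (not from the original source).
# The attribute names and the 30/70 thresholds come from the handle_data body and its
# comments; the 15-bar window and the traded security are illustrative assumptions.
def initialize(context):
    context.rsi_window = 15             # bars of history fed to talib.RSI (assumed value)
    context.LOW_RSI = 30                # oversold threshold, per the comments above
    context.HIGH_RSI = 70               # overbought threshold, per the comments above
    context.security = symbol('AAPL')   # hypothetical choice of traded asset
    context.invested = False            # flipped by handle_data on entry/exit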
def handle_data(context, data):
    # Skip the first 10 days to get full windows
    date = get_datetime()
    context.i += 1
    if context.i < 10:
        return

    prices = history(25, '1d', 'price')

    for sym in data:
        upper, middle, lower = talib.BBANDS(np.array(prices[sym]),
                                            timeperiod=20,
                                            nbdevup=2,
                                            nbdevdn=2,
                                            matype=0)

        potential_buy = []
        buy = False
        sell = False

        if data[sym].price > upper[-1] and context.portfolio.positions[sym].amount == 0:
            # log.info('buy')
            # log.info(get_datetime())
            # log.info(data[sym].price)
            # log.info(upper[-1])
            order_target_percent(sym, 1.0, limit_price=data[sym].price)
        elif data[sym].price < middle[-1] and context.portfolio.positions[sym].amount > 0:
            # log.info('sell')
            # log.info(get_datetime())
            # log.info(data[sym].price)
            # log.info(middle[-1])
            order_target(sym, 0, limit_price=data[sym].price)
def handle_data_bband(context, data):
    context.i += 1
    if context.i < 20:
        return

    buy = False
    sell = False

    sym = symbol(code)
    count = int(100000 / data[sym].price)

    prices = history(20, '1d', 'price')
    upper, middle, lower = ta.BBANDS(prices[sym].values,
                                     timeperiod=20,
                                     nbdevup=2,
                                     nbdevdn=2,
                                     matype=0)

    if context.investment == False:
        if lower[-1] > data[sym].price:
            order_target(sym, count)
            context.investment = True
            context.buy_price = data[sym].price
            buy = True
            context.position = 1
    else:
        if data[sym].price > context.buy_price + (context.buy_price * sell_point):
            order_target(sym, -count)
            context.investment = False
            sell = True

    record(code=data[sym].price,
           upper=upper[-1],
           lower=lower[-1],
           makeBacktestingDataFrame=middle[-1],
           buy=buy,
           sell=sell)
def handle_data(context, data):
    '''
    Called when a market event occurs for any of the algorithm's securities.

    Parameters
        data: A dictionary keyed by security id containing the current
              state of the securities in the algo's universe.
        context: The same context object from the initialize function.
                 Stores the up-to-date portfolio as well as any state
                 variables defined.

    Returns None
    '''
    # Allow history to accumulate 100 days of prices before trading
    # and rebalance every day thereafter.
    context.tick += 1
    if context.tick < 100:
        return

    # Get rolling window of past prices and compute returns
    prices = history(100, '1d', 'price').dropna()
    returns = prices.pct_change().dropna()

    try:
        # Perform Markowitz-style portfolio optimization
        weights, _, _ = optimal_portfolio(returns.T)

        # Rebalance portfolio accordingly
        for stock, weight in zip(prices.columns, weights):
            order_target_percent(stock, weight)
    except ValueError:
        # Sometimes this error is thrown
        # ValueError: Rank(A) < p or Rank([P; A; G]) < n
        pass
def handle_data(context, data):
    # order(symbol('035720'), 1)
    context.i += 1
    if context.i < 20:
        return

    ma5 = history(5, '1d', 'price').mean()
    ma20 = history(20, '1d', 'price').mean()

    sym = symbol(context.stockCd)
    record(kakao=data[sym].price, ma5=ma5[sym], ma20=ma20[sym])

    if ma5[sym] > ma20[sym]:
        order_target(sym, 1)
    else:
        order_target(sym, -1)
def handle_data(context, data):
    context.i += 1
    if context.i < 300:
        return

    short_mavg = history(100, '1d', 'price').mean()
    long_mavg = history(300, '1d', 'price').mean()

    sym = symbol('AAPL')

    if short_mavg[sym] > long_mavg[sym]:
        order_target(sym, 100)
    elif short_mavg[sym] < long_mavg[sym]:
        order_target(sym, 0)

    record(AAPL=data[sym].price,
           short_mavg=short_mavg[sym],
           long_mavg=long_mavg[sym])
def handle_data(self, context, data):
    rebalance_period = 20

    context.tick += 1
    if context.tick < 200:
        return
    if context.tick % rebalance_period != 0:
        return

    # condition 1: momentum condition, current monthly price > 10-month MA
    prices_200d = history(200, '1d', 'price').dropna()
    prices_m = self.filtering(prices_200d, rebalance_period)
    prices_ma_10m = pd.rolling_mean(prices_m, 10)
    con1 = prices_m.tail(1) > prices_ma_10m.tail(1)

    # condition 2: pick the ETFs that outperform SPY
    moving_win = 3
    symbol_spy = symbol(self.ticker_spy)
    rets_3m = (prices_m / prices_m.shift(moving_win) - 1)
    rets_spy_3m = (prices_m[symbol_spy] / prices_m[symbol_spy].shift(moving_win) - 1)
    con2 = rets_3m.tail(1) > np.asarray(rets_spy_3m.tail(1))

    # condition 3: pick the ETFs with positive returns
    rets = prices_m.pct_change()
    con3 = rets.tail(1) > 0

    # signals
    sig1 = con1
    sig2 = con1 & con2
    sig3 = con2 & con3
    sig4 = con1 & con3
    sig5 = con1 & con2 & con3

    sig = sig1

    # Trading
    count = np.asarray(sig.sum(axis=1))
    if count == 0:
        weights = sig * 0.0
    else:
        weights = sig * 1.0 / count
    weights = np.asarray(weights.fillna(0))
    stocks = np.asarray(sig.columns)

    try:
        # Rebalance portfolio accordingly
        for stock, weight in zip(stocks, weights.T):
            # print('stock={stock}:weight={weight}'.format(stock=stock, weight=weight))
            order_target_percent(stock, weight)
    except:
        # Sometimes this error is thrown
        # ValueError: Rank(A) < p or Rank([P; A; G]) < n
        pass
def handle_data(context, data):
    # check if the spot is outside the CI of the MPP
    day_option_df = context.options[context.options['date'] == get_datetime()]
    call_sums = call_otm(day_option_df, 'FB', get_datetime())
    put_sums = put_otm(day_option_df, 'FB', get_datetime())

    add_to_window(context, 10, max_pain_strike(call_sums, put_sums), 'FB')
    ci = CI(context.window, 1)

    price = history(1, '1d', 'price').iloc[0, 0]

    if price < ci[0]:
        order_target_percent(symbol('FB'), 1)
    elif price > ci[1]:
        order_target_percent(symbol('FB'), 0)
def handle_data(context, data):
    # On-Line Moving Average Reversal (OLMAR)
    context.days += 1
    if context.days < context.window_length:
        return

    if context.init:
        rebalance_portfolio(context, data, context.b_t)
        context.init = False
        return

    m = context.m            # num assets
    x_tilde = np.zeros(m)    # relative mean deviation
    b = np.zeros(m)          # weights

    # compute moving average price for each asset
    mavgs = history(context.window_length, '1d', 'price').mean()
    # mavgs = data.history(context.sids, 'price', context.window_length, '1d').mean()

    for i, stock in enumerate(context.stocks):
        price = data[stock]['price']
        x_tilde[i] = mavgs[i] / price

    x_bar = x_tilde.mean()
    market_rel_dev = x_tilde - x_bar    # relative deviation

    exp_return = np.dot(context.b_t, x_tilde)
    weight = context.eps - exp_return
    variability = (np.linalg.norm(market_rel_dev)) ** 2

    if variability == 0.0:
        step_size = 0
    else:
        step_size = np.max((0, weight / variability))

    b = context.b_t + step_size * market_rel_dev
    b_norm = simplex_projection(b)

    rebalance_portfolio(context, data, b_norm)
    context.b_t = b_norm

    # save values for plotting
    record(AAPL=data[symbol('AAPL')].price,
           MSFT=data[symbol('MSFT')].price,
           step_size=step_size,
           variability=variability)
def wait_for_data(context, data):
    if context.waiting_for_data:
        print('\n{} WAITING FOR DATA...'.format(get_datetime().date()))
        # wait for history to fill
        max_lookback = context.max_lookback
        highs = history(max_lookback, '1d', 'high')
        if not highs.ix[0].sum() > 0:
            return
        else:
            context.waiting_for_data = False
    return
def handle_data(self, context, data):
    # Implement your algorithm logic here.
    # data[sid(X)] holds the trade event data for that security.
    # context.portfolio holds the current portfolio state.
    # Place orders with the order(SID, amount) method.

    # TODO: implement your own logic here.
    context.trade_days += 1
    if context.trade_days != 5:
        return
    context.trade_days = 0

    # Check the market status:
    # if SPY is above its price one year ago, the market is in an uptrend,
    # otherwise the market is in a downtrend.
    hist = history(bar_count=241, frequency='1d', field='price')
    cash = context.portfolio.cash
    current_price_spy = data[symbol(self.ticker_spy)].price

    try:
        if current_price_spy > hist[symbol(self.ticker_spy)][200]:
            lst = self.top_rets(context.equities, 240)
            lst_mean = lst['zero']
            count = len(lst_mean)
            for ticker in sector_tickers:
                if ticker in lst_mean:
                    order_target_percent(symbol(ticker), 1.0 / count)
                else:
                    order_target_percent(symbol(ticker), 0)
            order_target_percent(symbol(self.ticker_gld), 0)
            order_target_percent(symbol(self.ticker_tlt), 0)
        else:
            for ticker in sector_tickers:
                order_target_percent(symbol(ticker), 0)
            order_target_percent(symbol(self.ticker_spy), 0)
            order_target_percent(symbol(self.ticker_gld), 0.5)
            order_target_percent(symbol(self.ticker_tlt), 0.5)
    except:
        pass
def handle_data(context, data):
    # Trailing window of the last `historical_bars` daily prices
    prices = history(bar_count=context.historical_bars, frequency='1d', field='price')
    print(prices)

    for stock in context.stocks:
        try:
            ma1 = data[stock].mavg(50)
            ma2 = data[stock].mavg(200)

            start_bar = context.feature_window
            price_list = prices[stock].tolist()

            X = []
            y = []

            bar = start_bar

            # feature creation
            while bar < len(price_list) - 1:
                try:
                    end_price = price_list[bar + 1]
                    begin_price = price_list[bar]

                    pricing_list = []
                    xx = 0
                    for _ in range(context.feature_window):
                        price = price_list[bar - (context.feature_window - xx)]
                        pricing_list.append(price)
                        xx += 1

                    features = np.around(np.diff(pricing_list) / pricing_list[:-1] * 100.0, 1)
                    # print(features)

                    if end_price > begin_price:
                        label = 1
                    else:
                        label = -1

                    bar += 1

                    X.append(features)
                    y.append(label)

                except Exception as e:
                    bar += 1
                    print(('feature creation', str(e)))

            clf = RandomForestClassifier()

            last_prices = price_list[-context.feature_window:]
            current_features = np.around(np.diff(last_prices) / last_prices[:-1] * 100.0, 1)

            X.append(current_features)
            X = preprocessing.scale(X)

            current_features = X[-1]
            X = X[:-1]

            clf.fit(X, y)
            p = clf.predict(current_features.reshape(1, -1))[0]

            print(('Prediction', p))

        except Exception as e:
            print(str(e))
def handle_data(self, context, data):
    rebalance_period = 20

    context.tick += 1
    if context.tick % rebalance_period != 0:
        return

    # Get rolling window of past prices and compute returns
    prices = history(120, '1d', 'price').dropna()
    returns = prices.pct_change().dropna()

    try:
        weights = self.MOM(returns.T)
        for stock, weight in zip(prices.columns, weights):
            order_target_percent(stock, weight)
    except ValueError:
        pass
def handle_data(context, data):
    context.day_count += 1
    if context.day_count < 100:
        return

    prices = history(950, '1d', 'price').dropna()

    # Build a matrix of daily returns for the covariance estimate
    security_index = 0
    daily_returns = np.zeros((len(context.stocks), 950))
    for security in context.stocks:
        if security in data:
            for day in range(0, 99):
                day_of = prices[security][day]
                day_before = prices[security][day - 1]
                daily_returns[security_index][day] = (day_of - day_before) / day_before
            security_index = security_index + 1

    covars = cov(daily_returns)
    covars = covars * 250

    ###########################################################

    returns = prices.pct_change().dropna()

    bnds = ((0, 1),) * 10  # one (0, 1) bound per asset
    cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1.0})

    res = scipy.optimize.minimize(compute_var, context.x0,
                                  args=(cov(daily_returns) * 255,),
                                  method='SLSQP',
                                  constraints=cons,
                                  bounds=bnds)

    allocation = res.x
    allocation[allocation < 0] = 0  # never sell, only buy
    denom = np.sum(allocation)
    if denom != 0:
        allocation = allocation / denom

    context.x0 = allocation

    record(stocks=np.sum(allocation[0:-1]))
    record(bonds=allocation[-1])

    for i, stock in enumerate(context.stocks):
        order_target_percent(stock, allocation[i])
def handle_data(context, data):
    day_option_df = context.options[context.options['date'] == get_datetime()]
    call_options = day_option_df[day_option_df['type'] == 'C']

    ##################################
    # classifier stuff happens somewhere here
    call_options_good = call_options
    # call_options_good is the classified call_options
    ##################################

    cash = context.portfolio.cash  # track remaining cash locally

    # purchase the options that we think will end up in the money
    # (could also modify this to give weight to it)
    for index, row in call_options_good.iterrows():
        context.bought_options = rbind(context.bought_options, row)
        cash -= row['price']

    # exercise expiring options that we've bought
    # (assuming strike price is lower than expiration price)
    expiring_calls = context.bought_options[context.bought_options['expiration'] == get_datetime()]
    for index, row in expiring_calls.iterrows():
        price = history(1, '1d', 'price')[symbol(row['ticker'])].iloc[0]
        cash += 100 * max(price - row['strike'], 0)  # assuming 100:1 ratio equity:option
def handle_data(context, data):
    # Get the stocks' closing prices
    close_data = history(12, '1d', 'close')
    # Average price over the past five days
    ma5 = close_data[-6:-2].mean()
    # Average price over the past ten days
    ma10 = close_data[-11:-2].mean()
    print(get_datetime(), ma5, ma10)
    # Current cash balance
    cash = context.portfolio.cash
    # print(ma5[sid(symbol(context.security))], ma10[sid(stock)], cash, symbol(context.security))

    # If the 5-day MA is above the 10-day MA and we have spare cash, buy
    if ma5[sid(symbol(context.security))] > ma10[sid(symbol(context.security))]:
        order_value(symbol(context.security), cash)
    # If the 5-day MA is below the 10-day MA and we hold a position, sell everything
    elif ma5[sid(symbol(context.security))] < ma10[sid(symbol(context.security))]:
        order_target(symbol(context.security), 0)
def handle_data(self, context, data):
    rebalance_period = 20

    context.tick += 1
    if context.tick % rebalance_period != 0:
        return

    # Get rolling window of past prices and compute returns
    prices = history(60, '1d', 'price').dropna()
    returns = prices.pct_change().dropna()

    try:
        # Compute volatility-based weights
        weights = self.vol_weighting(returns.T)

        # Rebalance portfolio accordingly
        for stock, weight in zip(prices.columns, weights):
            order_target_percent(stock, weight)
    except ValueError:
        # Sometimes this error is thrown
        # ValueError: Rank(A) < p or Rank([P; A; G]) < n
        pass
def generate_strat_data(context, data):
    print('\n{} GENERATING STRAT_DATA...'.format(get_datetime().date()))

    # if ENVIRONMENT != 'IDE':
    #     wait_for_data(context, data)
    #     if context.waiting_for_data:
    #         return

    for item in ['high', 'open_price', 'low', 'close_price', 'volume', 'price']:
        context.strat_data[item] = history(context.max_lookback, '1d', item)

    # need to do this to keep talib happy
    context.strat_data['open'] = context.strat_data['open_price']
    context.strat_data['close'] = context.strat_data['close_price']

    # return a dataframe for each transform
    for transform in context.algo_transforms:
        panel = apply_transform(context, transform)
        context.strat_data = {i: panel[i] for i in panel.items}
def handle_data(context, data):
    prices = history(bar_count=context.historical_bars, frequency='1d', field='open')
    print(prices)
def handle_data(context, data):
    # Skip the first 359 days to get full windows
    context.n += 1
    if context.n < 359:
        return
    context.day += 1
    if context.day < 7:
        return

    # Compute averages
    # history() has to be called with the same params
    # from above and returns a pandas dataframe.
    historical_data = history(365, '1d', 'price')
    pastReturns = (historical_data - historical_data.shift(-1)) / historical_data.shift(-1)

    short_mavg = history(42, '1d', 'price').mean()
    long_mavg = history(84, '1d', 'price').mean()

    diff = short_mavg / long_mavg - 1
    diff = diff.dropna()
    diff = diff.sort_values()

    buys = diff[diff > 0.03]
    sells = diff[diff < -0.03]

    buy_length = min(context.stocks_to_long, len(buys))
    short_length = min(context.stocks_to_short, len(sells))
    buy_weight = 1.0 / buy_length if buy_length != 0 else 0
    short_weight = -1.0 / short_length if short_length != 0 else 0

    buys = buys.sort_values(ascending=False)
    sells = sells.sort_values()
    buys = buys.iloc[:buy_length] if buy_weight != 0 else None
    sells = sells.iloc[:short_length] if short_weight != 0 else None

    stops = historical_data.iloc[-1] * 0.05

    for i in range(len(context.syms)):
        # If the security exists in our sells index, short it
        if sells is not None and context.syms[i] in sells.index:
            # print('SHORT: %s' % context.syms[i])
            order_target_percent(context.syms[i], short_weight)
        # If the security instead exists in our buys index, buy it
        elif buys is not None and context.syms[i] in buys.index:
            # print('BUYS: %s' % context.syms[i])
            order_target_percent(context.syms[i], buy_weight)
        # If the security is in neither list, exit any position we might have in it
        else:
            order_target(context.syms[i], 0)

    context.day = 0

    # Keep track of the number of long and short positions in the portfolio
    longs = shorts = 0
    for position in context.portfolio.positions.values():
        if position.amount > 0:
            longs += 1
        if position.amount < 0:
            shorts += 1

    record(short_mavg=short_mavg[context.syms[1]],
           long_mavg=long_mavg[context.syms[1]],
           portfoliovalue=context.portfolio.returns,
           long_count=longs,
           short_count=shorts)
def svr_trading(context, data):
    # Historical data: let's get the past days' close prices
    pastPrice = history(bar_count=context.history_len, frequency='1d', field='price')

    # Make predictions on universe
    for stock in data:
        # Make sure this stock has no existing orders or positions to simplify our portfolio handling.
        if check_if_no_conflicting_orders(stock) and context.portfolio.positions[stock].amount == 0:

            # This is a scoring system for our model; we only trade when confident our model is wicked awesome
            full_series = np.array(pastPrice[stock].values)
            l = context.out_of_sameple_bin_size
            power = 1  # N where X^n for weight function

            # Create bins of X len to hold as out-of-sample data; the average score (error) of these is a decent measure of fit.
            prediction_history = []
            for i in np.arange(context.history_len // context.out_of_sameple_bin_size):

                # Index of current in-sample and out-of-sample data.
                # 3 cases of this slicing
                if i == 0:
                    # First run, only two bins to work with (first OOSD bin, and the rest of the data)
                    ISD = full_series[l:]
                    OOSD = full_series[:l]
                    X = np.arange(l, len(full_series))

                    # use a variable weight (~0 - 1.0)
                    weight_training = np.power(np.arange(l, len(full_series), dtype=float), power)[::-1] / \
                        np.power(np.arange(l, len(full_series), dtype=float), power)[::-1].max()

                    # use a variable weight, focus on next day prediction (~0 - 1.0 - ~0)
                    weight_score = np.concatenate((
                        np.power(np.arange(1, l + 1, dtype=float), power) /
                        np.power(np.arange(1, l + 1, dtype=float), power).max(),
                        np.power(np.arange(l + 1, len(full_series) + 1, dtype=float), power)[::-1] /
                        np.power(np.arange(l + 1, len(full_series) + 2, dtype=float), power)[::-1].max()))

                    """print len(weight_training); print weight_training; print len(weight_score); print weight_score; print; exit()"""

                elif i == context.history_len // context.out_of_sameple_bin_size - 1:
                    # Last run, only two bins to work with (last OOSD bin, and the rest of the data)
                    ISD = full_series[:-l]
                    OOSD = full_series[-l:]
                    X = np.arange(0, len(full_series) - l)

                    # use a variable weight (~0 - 1.0)
                    weight_training = np.power(np.arange(l, len(full_series), dtype=float) + 1, power) / \
                        np.power(np.arange(l, len(full_series), dtype=float) + 1, power).max()

                    # use a variable weight, focus on next day prediction (~0 - 1.0 - ~0)
                    weight_score = np.concatenate((
                        np.power(np.arange(1, len(full_series) - l + 1, dtype=float), power) /
                        np.power(np.arange(1, len(full_series) - l + 2, dtype=float), power).max(),
                        np.power(np.arange(1, l + 1, dtype=float), power)[::-1] /
                        np.power(np.arange(1, l + 1, dtype=float), power)[::-1].max()))

                    """print len(weight_training); print weight_training; print len(weight_score); print weight_score; print; exit()"""

                else:
                    # Any other run: we have a sandwich of OOSD in the middle of two ISD sets, so we need to aggregate.
                    ISD = np.concatenate((full_series[:(l * i)], full_series[l * (i + 1):]))
                    OOSD = full_series[l * i:l * (i + 1)]
                    X = np.concatenate((np.arange(0, (l * i)),
                                        np.arange(l * (i + 1), len(full_series))))

                    # use a variable weight (~0 - 1.0)
                    weight_training = np.concatenate((
                        np.power(np.arange(1, l * i + 1, dtype=float), power) /
                        np.power(np.arange(1, l * i + 1, dtype=float), power).max(),
                        np.power(np.arange(l * (i + 1), len(full_series), dtype=float), power)[::-1] /
                        np.power(np.arange(l * (i + 1), len(full_series), dtype=float), power)[::-1].max()))

                    # use a variable weight, focus on next day prediction (~0 - 1.0 - ~0)
                    weight_score = np.concatenate((
                        np.power(np.arange(1, l * (i + 1) + 1, dtype=float), power) /
                        np.power(np.arange(1, l * (i + 1) + 1, dtype=float), power).max(),
                        np.power(np.arange(l * (i + 1), len(full_series), dtype=float), power)[::-1] /
                        np.power(np.arange(l * (i + 1), len(full_series) + 1, dtype=float), power)[::-1].max()))

                    """print len(weight_training); print weight_training; print len(weight_score); print weight_score; exit()"""

                # Domain and range of training data
                # X = np.arange(len(ISD))
                X = np.atleast_2d(X).T
                y = ISD

                # Domain of prediction set
                # x = np.atleast_2d(np.linspace(0, len(ISD)+len(OOSD)-1, len(ISD)+len(OOSD))).T
                # x = np.atleast_2d(np.linspace(len(ISD), len(ISD)+len(OOSD)-1, len(OOSD))).T
                x = np.atleast_2d(np.linspace(0, len(full_series) - 1, len(full_series))).T

                # epsilon-Support Vector Regression using scikit-learn
                # Read more here: http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html
                SVR_model = SVR(kernel='rbf', C=100, gamma=.01)
                SVR_model.fit(X, y, weight_training)
                y_predSVR = SVR_model.predict(x)

                if np.isnan(full_series).any() or np.isinf(full_series).any():
                    print(stock + " Failed due to data INF or NAN")
                    y_score = 0
                    break
                else:
                    y_score = SVR_model.score(x, full_series)  # , sample_weight=weight_score) y_predSVR[-len(OOSD):] np.atleast_2d(y_predSVR).T

                # log.debug(y_score)
                prediction_history.append(y_score)

            score = np.mean(y_score)

            # If we are studying one stock, let's plot its correlation regression results
            if len(data) == 1:
                record(Ideal=1.0, Score=score)  # Slope=slope, R_value=r

            # Store the prediction for comparison with the rest of the universe.
            # Measure accuracy as the mean of the distance to the ideal value of
            # the r2 and slope from past vs predicted price correlation regression.
            if score >= context.score_filter:
                # The model was accepted, make a forecast.
                # Form domain and range of test data (we leave no out-of-sample data out since we already scored the model).
                X = np.arange(context.history_len)
                X = np.atleast_2d(X).T
                y = np.array(pastPrice[stock].values)

                # Domain of prediction set. We only need to predict the next close price.
                x = np.atleast_2d(np.linspace(len(y), len(y), 1)).T

                """log.debug(X); log.debug(len(X)); log.debug(x); log.debug(len(x)); exit()"""

                # use a linearly peaking weight, focus on next day prediction (~0 - 1.0 - ~0)
                # weight_training = np.power(np.arange(1, context.history_len+1, dtype=float), power)/np.power(np.arange(1, context.history_len+1, dtype=float), power).max()
                # weight_training = np.exp(np.arange(1, context.history_len+1, dtype=float))/np.exp(np.arange(1, context.history_len+1, dtype=float)).max()

                # epsilon-Support Vector Regression using scikit-learn
                # Read more here: http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html
                SVR_model = SVR(kernel='rbf', C=100, gamma=.01)
                SVR_model.fit(X, y)  # , weight_training)
                y_predSVR = SVR_model.predict(x)

                context.next_pred_price[stock] = y_predSVR[-1]
            else:
                # Case where the stock is left in the dict and we don't want to use it, so remove it.
                if stock in context.next_pred_price:
                    del context.next_pred_price[stock]

    # Count number of trades so we can split the available cash properly
    number_of_trades_today = 0
    for stock in data:
        # Make sure this stock has no existing orders or positions to simplify our portfolio handling.
        # Also check that we have a prediction stored in the dict.
        if check_if_no_conflicting_orders(stock) and \
                context.portfolio.positions[stock].amount == 0 and \
                stock in context.next_pred_price:

            # If we plan to move on this stock, take count of it (explained more in the actual buy statement below).
            # (Make sure these match both buy statements.)
            if (percent_change(context.next_pred_price[stock], pastPrice[stock][-1]) >= context.action_to_move_percent and
                    percent_change(context.next_pred_price[stock], data[stock]['price']) >= context.action_to_move_percent) or \
               (percent_change(context.next_pred_price[stock], pastPrice[stock][-1]) <= -context.action_to_move_percent and
                    percent_change(context.next_pred_price[stock], data[stock]['price']) <= -context.action_to_move_percent):
                number_of_trades_today += 1

    # Let's use record to plot how many securities are traded on each day.
    if len(data) >= 2:
        record(number_of_stocks_traded=number_of_trades_today)

    # Make buys and shorts if the predicted close change is bigger than our tolerance,
    # same with the current price to avoid opening gaps.
    for stock in data:
        # Make sure this stock has no existing orders or positions to simplify our portfolio handling.
        # Also check that we have a prediction stored in the dict.
        if check_if_no_conflicting_orders(stock) and context.portfolio.positions[stock].amount == 0 and stock in context.next_pred_price:

            # Go long if we predict the close price will change more (upward) than our tolerance;
            # apply the same filter against current price vs predicted close in case of a gap up/down.
            if percent_change(context.next_pred_price[stock], pastPrice[stock][-1]) >= context.action_to_move_percent and \
                    percent_change(context.next_pred_price[stock], data[stock]['price']) >= context.action_to_move_percent:

                # Place an order, and store the ID to fetch order info
                orderId = order_target_percent(stock, 1.0 / number_of_trades_today)
                # How many shares did we just order, since we used a target percent of available cash, not a share count.
                shareCount = get_order(orderId).amount

                # We can add a timeout time on the order.
                # context.duration[orderId] = exchange_time + timedelta(minutes=5)

                # We need to calculate our own inter-cycle portfolio snapshot as it's not updated till the next cycle.
                value_of_open_orders(context, data)
                availibleCash = context.portfolio.cash - context.cashCommitedToBuy - context.cashCommitedToSell

                print("+ BUY {0:,d} of {1:s} at ${2:,.2f} for ${3:,.2f} / ${4:,.2f} @ {5:s}"
                      .format(shareCount, stock, data[stock]['price'],
                              data[stock]['price'] * shareCount, availibleCash, context.exchange_time))

            # Go short if we predict the close price will change more (downward) than our tolerance;
            # apply the same filter against current price vs predicted close in case of a gap up/down.
            elif percent_change(context.next_pred_price[stock], pastPrice[stock][-1]) <= -context.action_to_move_percent and \
                    percent_change(context.next_pred_price[stock], data[stock]['price']) <= -context.action_to_move_percent:

                # orderId = order_target_percent(stock, -1.0/len(data))
                orderId = order_target_percent(stock, -1.0 / number_of_trades_today)
                # How many shares did we just order, since we used a target percent of available cash, not a share count.
                shareCount = get_order(orderId).amount

                # We can add a timeout time on the order.
                # context.duration[orderId] = exchange_time + timedelta(minutes=5)

                # We need to calculate our own inter-cycle portfolio snapshot as it's not updated till the next cycle.
                value_of_open_orders(context, data)
                availibleCash = context.portfolio.cash - context.cashCommitedToBuy + context.cashCommitedToSell

                print("- SHORT {0:,d} of {1:s} at ${2:,.2f} for ${3:,.2f} / ${4:,.2f} @ {5:s}"
                      .format(shareCount, stock, data[stock]['price'],
                              data[stock]['price'] * shareCount, availibleCash, context.exchange_time))
def handle_data(self, context, data):
    context.tick += 1

    total_window = self.train_win + self.nn_win + 1
    if context.tick < total_window:
        return

    try:
        # print('tick = {t}'.format(t=context.tick))
        price = history(total_window - 1, '1d', 'price').dropna()
        df_price = pd.DataFrame(data=price.values, index=price.index, columns=['close'])

        features, target = self.create_features(df_price, self.nn_win)

        features_insample = features.iloc[(self.nn_win - 1):-1, :].values
        target_insample = target.iloc[(self.nn_win - 1):-1, :].values.ravel()

        features_oosample = features.iloc[-1, :]
        features_oosample = features_oosample.values.reshape([1, len(features_oosample)])

        ATR = self.atr.loc[price.index[-1], :][0]
        symbol = price.columns[0]

        if self.enable_stoploss:
            if data[symbol].price < context.longstop:
                print('Stop Loss')
                order_target_percent(symbol, 0.0)
                context.longstop = 0.0
                return

        if self.ml == 'SVM':
            # Train the SVM classifier
            from sklearn import svm
            model_svm = svm.SVC()
            model_svm.fit(features_insample, target_insample)

            preds_svm = model_svm.predict(features_oosample)[0]
            if preds_svm < 0.5:
                # print("Sell")
                order_target_percent(symbol, 0.0)
                context.longstop = 0.0
            else:
                # print("Buy")
                order_target_percent(symbol, 1.0)
                context.longstop = max(context.longstop,
                                       data[symbol].price * (1 - 0.7 * ATR))
                print("target sl = {n}".format(n=context.longstop))

        if self.ml == 'KNN':
            # Train the k-nearest-neighbors classifier
            from sklearn import neighbors
            k = 10
            model_knn = neighbors.KNeighborsClassifier(n_neighbors=k, weights='distance')
            model_knn.fit(features_insample, target_insample)

            preds_knn = model_knn.predict(features_oosample)[0]
            if preds_knn < 0.5:
                # print("Sell")
                order_target_percent(symbol, 0.0)
            else:
                # print("Buy")
                order_target_percent(symbol, 1.0)

        record('price', data[symbol]['price'])
    except:
        pass