def main(eval_stock, window_size, model_name, debug):
    """ Evaluates the stock trading bot.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python eval.py --help]
    """
    data = filter_data_by_feature_columns(get_stock_data(eval_stock))

    # Single Model Evaluation
    if model_name is not None:
        agent = Agent(window_size * data.shape[1], pretrained=True, model_name=model_name)
        profit, history = evaluate_model(agent, data, window_size, debug)
        first_buy = history[0][0]
        show_eval_result(model_name, profit, first_buy)
    # Multiple Model Evaluation
    else:
        for model in os.listdir("models"):
            if os.path.isfile(os.path.join("models", model)):
                agent = Agent(window_size * data.shape[1], pretrained=True, model_name=model)
                profit, history = evaluate_model(agent, data, window_size, debug)
                show_eval_result(model, profit, history[0][0])
                del agent
def main(args):
    count = 0
    total_profit = 0
    t = 0
    history = []
    reward = 0
    ticker = args.ticker + '.NS'
    price = []
    window_size = 10
    model_name = 'model_debug_50'

    # create the agent once, outside the polling loop
    agent = Agent(state_size=window_size, pretrained=True, model_name=model_name)
    agent.inventory = []

    time_now = datetime.datetime.now(tz).time()
    while datetime.time(9, 14, tzinfo=tz) < time_now < datetime.time(15, 31, tzinfo=tz):
        url = 'https://finance.yahoo.com/quote/{}?p={}&.tsrc=fin-srch'.format(ticker, ticker)
        print(count)
        live = Real(url, count)
        count += 1
        price.append(live)
        time_now = datetime.datetime.now(tz).time()  # refresh so the market-hours check can end the loop
        if count < window_size:
            continue
        print(live)
        initial_offset = price[1] - price[0]
        state = get_state(price, 0, window_size + 1)
        next_state = get_state(price, t + 1, window_size + 1)
        profit = evaluate_model(agent, state, next_state, price, t, total_profit,
                                history, reward, window_size=window_size)
        show_eval_result(model_name, profit, initial_offset)
        t += 1
        state = next_state
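# The get_state() helper called above is not shown in this snippet. Below is
# a minimal, hypothetical sketch of such a helper, modeled on the common
# pattern of encoding an n-day window as sigmoids of successive price
# differences; the signature and padding behavior are assumptions, not the
# project's authoritative implementation.
import math

import numpy as np


def sigmoid(x):
    # numerically stable logistic function
    try:
        return 1 / (1 + math.exp(-x))
    except OverflowError:
        return 0.0 if x < 0 else 1.0


def get_state(data, t, n_days):
    # Returns a (1, n_days - 1) state vector: the sigmoid of each
    # day-over-day price change, left-padded with the first price
    # when t is too close to the start of the series.
    d = t - n_days + 1
    block = data[d: t + 1] if d >= 0 else -d * [data[0]] + data[0: t + 1]
    res = [sigmoid(block[i + 1] - block[i]) for i in range(n_days - 1)]
    return np.array([res])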
def main(eval_stock, window_size, model_name, debug):
    """ Evaluates the stock trading bot.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python eval.py --help]
    """
    data = []
    time_get = []
    for _ in range(15):
        strings = time.strftime("%Y,%m,%d,%H,%M,%S")
        t = strings.split(',')
        time_get.append(str(t[4]) + '-' + str(t[5]))
        time.sleep(1)
        y = cryptocompare.get_price('BTC', currency='USD')
        data.append(y['BTC']['USD'])

    initial_offset = data[1] - data[0]

    # Single Model Evaluation
    if model_name is not None:
        agent = Agent(window_size, pretrained=True, model_name=model_name)
        profit, _ = evaluate_model(agent, data, window_size, debug, time_get)
        show_eval_result(model_name, profit, initial_offset)
    # Multiple Model Evaluation
    else:
        for model in os.listdir("models"):
            if os.path.isfile(os.path.join("models", model)):
                agent = Agent(window_size, pretrained=True, model_name=model)
                profit, _ = evaluate_model(agent, data, window_size, debug, time_get)
                show_eval_result(model, profit, initial_offset)
                del agent
def main(eval_stock, window_size, model_name, debug, manual_run):
    """ Evaluates the stock trading bot.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python eval.py --help]
    """
    if isinstance(eval_stock, str):
        data = get_stock_data(eval_stock)
    elif isinstance(eval_stock, list):
        data = eval_stock

    initial_offset = data[1] - data[0]

    # Single Model Evaluation
    if model_name is not None:
        agent = Agent(window_size, pretrained=True, model_name=model_name, manual=manual_run)
        profit, _ = evaluate_model(agent, data, window_size, debug)
        show_eval_result(model_name, profit, initial_offset)
    # Multiple Model Evaluation
    else:
        for model in os.listdir("models"):
            if os.path.isfile(os.path.join("models", model)):
                agent = Agent(window_size, pretrained=True, model_name=model)
                profit, _ = evaluate_model(agent, data, window_size, debug)
                show_eval_result(model, profit, initial_offset)
                del agent
def main(args):
    count = 0
    total_profit = 0
    t = 0
    history = []
    reward = 0
    price = []
    window_size = 10
    model_name = 'model_t-dqn_GOOG_10'

    # create the agent once, outside the polling loop
    agent = Agent(state_size=window_size, pretrained=True, model_name=model_name)
    agent.inventory = []

    url = 'https://api.binance.com/api/v1/ticker/price?symbol={}'.format(args.ticker)
    live = Real(url)
    print(live)
    while count < 100:
        live = Real(url)
        count += 1
        price.append(live)
        if count < window_size:
            continue
        print(live)
        initial_offset = price[1] - price[0]
        state = get_state(price, 0, window_size + 1)
        next_state = get_state(price, t + 1, window_size + 1)
        profit = evaluate_model(agent, state, next_state, price, t, total_profit,
                                history, reward, window_size=window_size)
        show_eval_result(model_name, profit, initial_offset)
        t += 1
        state = next_state
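# The Real() helper called above is likewise not shown. A minimal sketch of
# what it might do for the Binance ticker endpoint, assuming a plain JSON
# response of the form {"symbol": "...", "price": "..."}; note that the v1
# endpoint is legacy, and the original helper may parse or retry differently.
import requests


def Real(url):
    # fetch the latest traded price and return it as a float
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()
    return float(resp.json()['price'])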
def main(eval_stock, window_size, model1, model2, debug):
    """ Evaluates the stock trading bot.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python eval.py --help]
    """
    data = get_stock_data(eval_stock)
    initial_offset = data[1] - data[0]

    # Paired Model Evaluation
    if model1 is not None and model2 is not None:
        agent1 = Agent(window_size, pretrained=True, model_name=model1)
        agent2 = Agent(window_size, pretrained=True, model_name=model2)
        profit, _ = evaluate_models(agent1, agent2, data, window_size, debug)
        show_multi_eval_result(model1, model2, profit, initial_offset)
def main(train_stock, val_stock, window_size, batch_size, ep_count, strategy="t-dqn", model_name="model_debug", pretrained=False, debug=False): """ Trains the stock trading bot using Deep Q-Learning. Please see https://arxiv.org/abs/1312.5602 for more details. Args: [python train.py --help] """ agent = Agent(window_size, strategy=strategy, pretrained=pretrained, model_name=model_name) train_data = get_stock_data(train_stock) val_data = get_stock_data(val_stock) initial_offset = val_data[1] - val_data[0] for episode in range(1, ep_count + 1): train_result = train_model(agent, episode, train_data, ep_count=ep_count, batch_size=batch_size, window_size=window_size) val_result, _ = evaluate_model(agent, val_data, window_size, debug) show_train_result(train_result, val_result, initial_offset)
def main(train_stock, val_stock, window_size, batch_size, ep_count, strategy="t-dqn", model_name="model_debug", pretrained=False, debug=False): """Trains the stock trading bot using Deep Q-Learning. Please see https://arxiv.org/abs/1312.5602 for more details. Args: [python train.py --help] """ train_data = filter_data_by_feature_columns(get_stock_data(train_stock)) val_data = filter_data_by_feature_columns(get_stock_data(val_stock)) agent = Agent(window_size * train_data.shape[1], strategy=strategy, pretrained=pretrained, model_name=model_name) for episode in range(1, ep_count + 1): train_result = train_model(agent, episode, train_data, ep_count=ep_count, batch_size=batch_size, window_size=window_size) val_result, _ = evaluate_model(agent, val_data, window_size, debug) show_train_result(train_result, val_result)
def train(train_stock, val_stock, window_size, batch_size, ep_count,
          strategy="t-dqn", model_name="model_double-dqn_GOOG_50",
          pretrained=True, debug=False):
    """ Trains the stock trading bot using Deep Q-Learning.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python train.py --help]
    """
    print("Started the model training for {}".format(model_name))
    agent = Agent(window_size, strategy=strategy, pretrained=pretrained, model_name=model_name)

    train_data = train_stock
    val_data = val_stock

    initial_offset = np.array(val_data)[1] - np.array(val_data)[0]

    for episode in range(1, ep_count + 1):
        train_result = train_model(agent, episode, train_data, ep_count=ep_count,
                                   batch_size=batch_size, window_size=window_size)
        val_result, _ = evaluate_model(agent, val_data, window_size, debug)
        show_train_result(train_result, val_result, initial_offset)
    print("Training the model completed!")
def main(train_stock, val_stock, window_size, batch_size, ep_count, strategy="t-dqn", model_name="model_debug", pretrained=False, debug=False): """ Trains the stock trading bot using Deep Q-Learning. Please see https://arxiv.org/abs/1312.5602 for more details. Args: [python train.py --help] """ import tensorflow as tf print("#TF Version: ",tf.__version__) from tensorflow.python.client import device_lib print("Using device: ") print(device_lib.list_local_devices()) agent = Agent(window_size, strategy=strategy, pretrained=pretrained, model_name=model_name) train_data = get_stock_data(train_stock) val_data = get_stock_data(val_stock) initial_offset = val_data["Close"][1] - val_data["Close"][0] for episode in range(1, ep_count + 1): train_result = train_model(agent, episode, train_data, ep_count=ep_count, batch_size=batch_size, window_size=window_size) val_result, _ , actionCollection = evaluate_model(agent, val_data, window_size, debug) show_train_result(train_result, val_result, initial_offset)
def main(args):
    price = []
    cur_symbol = args.ticker
    quick_train(cur_symbol)
    window_size = 10
    model_name = 'model_double-dqn_GOOG_50_50'

    for _ in range(2):
        price.append(Real(cur_symbol))
    initial_offset = price[1] - price[0]

    agent = Agent(window_size, pretrained=True, model_name=model_name)
    profit, history = evaluate_model(agent, price, window_size, cur_symbol, debug=False)
    show_eval_result(model_name, profit, initial_offset)
    print("Profit:", profit)

    buys = sells = holds = 0
    for i in history:
        if i[1] == "BUY":
            buys += 1
        elif i[1] == "SELL":
            sells += 1
        elif i[1] == "HOLD":
            holds += 1
    print("BUYS Percentage:", (buys / len(history)) * 100)
    print("SELLS Percentage:", (sells / len(history)) * 100)
    print("HOLDS Percentage:", (holds / len(history)) * 100)

    rpath = 'training_data/' + cur_symbol + '.csv'
    os.remove(rpath)
def alpaca_trading_bot(stock_name, window_size=10, model_name='model_debug', debug=False):
    # Alpaca API (reads credentials from the APCA_* environment variables)
    api = tradeapi.REST()

    # Create Agent object
    agent = Agent(window_size, pretrained=True, model_name=model_name)

    # Get ticker bars from the last intraday times, retrying up to
    # 30 times on connection errors
    cnt = 0
    date = None
    while cnt <= 30:
        try:
            date = api.get_barset(timeframe='minute', symbols=stock_name,
                                  limit=1000, end=datetime.datetime.now())
            break
        except Exception:
            logging.warning("Error retrieving initial 1000 prices, retrying in 30s (" + str(cnt) + "/30)")
            time.sleep(30)
            cnt += 1

    # Write ticker csv
    with open('ticker.csv', 'w') as file:
        file.write('Adj Close\n')
        for minutes in date.get(stock_name):
            file.write(str(minutes.c))
            file.write('\n')

    data = get_stock_data('ticker.csv')

    # Call actual buy/sell/hold decisions and print result forever
    decisions(agent, data, window_size, debug, stock_name, api)
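# A minimal invocation sketch. tradeapi.REST() reads its credentials from
# the APCA_API_KEY_ID / APCA_API_SECRET_KEY (and optionally APCA_API_BASE_URL)
# environment variables, so those must be exported first; the ticker and
# model name below are placeholders.
if __name__ == "__main__":
    alpaca_trading_bot("AAPL", window_size=10, model_name="model_debug")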
def main(args):
    count = 0
    ticker = args.ticker + '.NS'
    price = []
    window_size = 10
    model_name = 'model_debug_50'

    # create the agent once, outside the polling loop
    agent = Agent(state_size=window_size, pretrained=True, model_name=model_name)

    time_now = datetime.datetime.now(tz).time()
    while datetime.time(9, 14, tzinfo=tz) < time_now < datetime.time(19, 31, tzinfo=tz):
        url = 'https://finance.yahoo.com/quote/{}?p={}&.tsrc=fin-srch'.format(ticker, ticker)
        print(count)
        live = Real(url, count)
        count += 1
        price.append(live)
        time_now = datetime.datetime.now(tz).time()  # refresh so the market-hours check can end the loop
        if count < window_size:
            continue
        print(live)
        initial_offset = price[1] - price[0]
        profit, _ = evaluate_model(agent, price, window_size=window_size, debug=False)
        show_eval_result(model_name, profit, initial_offset)
def alpaca_trading_bot(stock_name, window_size=10, model_name='model_debug', debug=False):
    # Alpaca API (reads credentials from the APCA_* environment variables)
    api = tradeapi.REST()

    # Create Agent object
    agent = Agent(window_size, pretrained=True, model_name=model_name)

    # Get the last intraday 15-minute bars for the ticker
    date = api.get_barset(timeframe='15Min', symbols=stock_name,
                          limit=1000, end=datetime.datetime.now())

    # Write ticker csv
    with open('ticker.csv', 'w') as file:
        file.write('Adj Close\n')
        for minutes in date.get(stock_name):
            file.write(str(minutes.c))
            file.write('\n')

    data = get_stock_data('ticker.csv')

    # Call actual buy/sell/hold decisions and print result forever
    decisions(agent, data, window_size, debug, stock_name, api)
def main(train_stock, val_stock, window_size, batch_size, ep_count, strategy="t-dqn", model_name="model_debug", pretrained=False, debug=False): agent = Agent(window_size, strategy=strategy, pretrained=pretrained, model_name=model_name) train_data = get_stock_data(train_stock) val_data = get_stock_data(val_stock) initial_offset = val_data[1] - val_data[0] for episode in range(1, ep_count + 1): train_result = train_model(agent, episode, train_data, ep_count=ep_count, batch_size=batch_size, window_size=window_size) val_result, _ = evaluate_model(agent, val_data, window_size, debug) show_train_result(train_result, val_result, initial_offset)
def main(symbol):
    price = []
    window_size = 10
    model_name = 'model_double-dqn_GOOG_50_10'

    agent = Agent(window_size, pretrained=True, model_name=model_name)
    profit, history = evaluate_model1(agent, symbol, price, window_size)
    print("Profit:", profit)
def main(eval_stock, window_size, model_name, debug):
    data = get_stock_data(eval_stock)
    initial_offset = data[1] - data[0]

    # Single Model Evaluation
    if model_name is not None:
        agent = Agent(window_size, pretrained=True, model_name=model_name)
        profit, _ = evaluate_model(agent, data, window_size, debug)
        show_eval_result(model_name, profit, initial_offset)
    # Multiple Model Evaluation
    else:
        for model in os.listdir("models"):
            if os.path.isfile(os.path.join("models", model)):
                agent = Agent(window_size, pretrained=True, model_name=model)
                profit, _ = evaluate_model(agent, data, window_size, debug)
                show_eval_result(model, profit, initial_offset)
                del agent
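# A matching CLI sketch for the evaluation entry point above (again an
# assumption; the original script may parse its arguments differently).
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Evaluate the stock trading bot.")
    parser.add_argument("eval_stock", help="path to the evaluation CSV")
    parser.add_argument("--window-size", type=int, default=10)
    parser.add_argument("--model-name", default=None,
                        help="evaluate one model; omit to evaluate every model in models/")
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()

    main(args.eval_stock, args.window_size, args.model_name, args.debug)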
def main(train_stock, window_size, batch_size, ep_count, strategy="t-dqn", model_name="model_debug", pretrained=False, debug=False): agent = Agent(window_size, strategy=strategy, pretrained=pretrained, model_name=model_name) train_data = get_stock_data(train_stock) for episode in range(1, ep_count + 1): train_result = train_model(agent, episode, train_data, ep_count=ep_count, batch_size=batch_size, window_size=window_size)
def main(train_stock, val_stock, economy, window_size, batch_size, ep_count,
         strategy="dqn", model_name="model_debug", pretrained=False,
         debug=False):
    """ Trains the stock trading bot using Deep Q-Learning.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python train.py --help]
    """
    print("initialize agent")
    agent = Agent(window_size, strategy=strategy, pretrained=pretrained, model_name=model_name)

    print('get stock data')
    train_data = get_stock_data(train_stock)
    print('get economy leading')
    economy_data = get_economy_data(economy)
    print('get val data')
    val_data = get_stock_data(val_stock)

    # difference between the closing prices of the first and second days
    initial_offset = val_data[0][1] - val_data[0][0]

    last_checkpoint = 0
    for episode in range(1, ep_count + 1):
        print('train episode : ', episode)
        train_result, is_earlystopping = train_model(
            agent, episode, train_data, economy_data, ep_count=ep_count,
            batch_size=batch_size, window_size=window_size,
            last_checkpoint=last_checkpoint)
        val_result, _ = evaluate_model(agent, val_data, economy_data, window_size, debug)
        show_train_result(train_result, val_result, initial_offset)
        if not is_earlystopping:
            last_checkpoint = episode
def main(window_size, batch_size, ep_count, model_name, pretrained, debug):
    """ Trains the stock trading bot using Deep Q-Learning.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python train.py --help]
    """
    agent = Agent(window_size, pretrained=pretrained, model_name=model_name)

    train_data = get_stock_data('data/training.csv')
    val_data = get_stock_data('data/test.csv')

    initial_offset = val_data[1] - val_data[0]

    for episode in range(1, ep_count + 1):
        train_result = train_model(agent, episode, train_data, ep_count=ep_count,
                                   batch_size=batch_size, window_size=window_size)
        val_result, _ = evaluate_model(agent, val_data, window_size, debug)
        show_train_result(train_result, val_result, initial_offset)
        agent.soft_save()
def main(symbol):
    price = []
    window_size = 10
    model_name = 'model_double-dqn_GOOG_50_10'

    agent = Agent(window_size, pretrained=True, model_name=model_name)
    print("[INFO] Model initialised successfully [INFO]")
    profit, history = evaluate_model1(agent, symbol, price, window_size)
    print("Profit:", profit)
    # Optionally persist the result to disk:
    # with open("profit.txt", "w") as text_file:
    #     print("Total Profit {}".format(profit), file=text_file)
def main(eval_stock, window_size, model_name, debug):
    """ Evaluates the stock trading bot.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python eval.py --help]
    """
    data = get_stock_data(eval_stock)
    initial_offset = data["Close"][1] - data["Close"][0]

    # Single Model Evaluation
    if model_name is not None:
        agent = Agent(window_size, pretrained=True, model_name=model_name)
        profit, _, actionCollection = evaluate_model(agent, data, window_size, debug)
        show_eval_result(model_name, profit, initial_offset)
        return model_name, profit, actionCollection
def main(eval_stock, window_size, model_name, debug):
    """ Evaluates the stock trading bot.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python eval.py --help]
    """
    df_google = pd.read_csv("data/GOOG_2019.csv")
    data = list(df_google['Open'])
    data_date = list(df_google['Date'])
    initial_offset = data[1] - data[0]

    agent = Agent(window_size, current_price=data[0], pretrained=True, model_name=model_name)
    total_profit, cash_in_hand, total_share, google_buy, google_sell = evaluate_model(
        agent, data, data_date, window_size, debug)
    show_eval_result(model_name, total_profit, initial_offset)

    google_price_buy = []
    google_buy_date = []
    google_price_sell = []
    google_sell_date = []

    with open("dqn.csv", "w") as f:
        w = csv.writer(f)
        for date, price in google_buy:
            google_price_buy.append(price)
            google_buy_date.append(date)
            w.writerow(['Buy', date, price])
        for date, price in google_sell:
            google_price_sell.append(price)
            google_sell_date.append(date)
            w.writerow(['Sell', date, price])

    fig = px.line(df_google, x='Date', y='Open')
    fig.add_trace(go.Scatter(x=google_buy_date, y=google_price_buy,
                             mode="markers", showlegend=True, name="Buy"))
    fig.add_trace(go.Scatter(x=google_sell_date, y=google_price_sell,
                             mode="markers", showlegend=True, name="Sell"))
    fig.update_layout(
        title="DQN - Test results on GOOG_2019 stock data with profit of " + str(total_profit),
        font=dict(size=9, color="#7f7f7f"))
    fig.show()
def main(train_stock, window_size, batch_size, ep_count, strategy="dqn", model_name="dqn", pretrained=False, debug=False): """ Trains the stock trading bot using Deep Q-Learning. Please see https://arxiv.org/abs/1312.5602 for more details. Args: [python train.py --help] """ train_data = get_stock_data(train_stock) #val_data = get_stock_data(val_stock) agent = Agent(window_size, current_price=train_data[0], strategy=strategy, pretrained=pretrained, model_name=model_name) #initial_offset = val_data[1] - val_data[0] cash_in_hand = [] total_profit = [] for episode in range(1, ep_count + 1): train_result = train_model(agent, episode, train_data, ep_count=ep_count, batch_size=batch_size, window_size=window_size) #val_result, _ = evaluate_model(agent, val_data, window_size, debug) if episode % 100 == 0: agent.save(episode) #show_train_result(train_result) total_profit.append(train_result[2]) cash_in_hand.append(agent.cash_in_hand) agent.reset() plt.plot(np.array(cash_in_hand)) plt.xlabel('cash in hand for each episode') plt.ylabel('Amount') plt.show() plt.plot(np.array(total_profit)) plt.xlabel('total profit for each episode') plt.ylabel('Amount') plt.show()
def main(symbol):
    price = []
    window_size = 10
    model_name = 'model_double-dqn_GOOG_50_10'

    agent = Agent(window_size, pretrained=True, model_name=model_name)
    profit, history = evaluate_model1(agent, symbol, price, window_size, debug=True)
    print("Profit:", profit)

    buys = sells = holds = 0
    for i in history:
        if i[1] == "BUY":
            buys += 1
        elif i[1] == "SELL":
            sells += 1
        elif i[1] == "HOLD":
            holds += 1
    print("BUYS Percentage:", (buys / len(history)) * 100)
    print("SELLS Percentage:", (sells / len(history)) * 100)
    print("HOLDS Percentage:", (holds / len(history)) * 100)
import argparse
import time

import backtrader as bt  # needed for bt.Strategy below
import numpy as np
import pandas as pd

from trading_bot.agent import Agent  # assumed location of Agent in this project
from trading_bot.methods import predict_next

parser = argparse.ArgumentParser(description='LiveTrading using Backtrader.')
parser.add_argument('--model-name', dest="modelname",
                    help='the name of the model as in models/', required=True)
args = parser.parse_args()
print("using model: ", args.modelname)

window_size = 10
agent = Agent(window_size, pretrained=True, model_name=args.modelname)
# act = predict_next(agent, data, window_size)


class rebot(bt.Strategy):
    params = (('allowshorts', 1), ('printLog', False))

    def __init__(self):
        self.pos = 0
        self.data_live = False
        self.isShort = False

    def next(self):
        # for d in self.datas:
        d = self.datas[1]
def fn(ticker, strategy, ep_count, year):
    model_name = '{}_{}_{}'.format(ticker, strategy, ep_count)
    test_stock = 'data/Nifty50/Split/{}_{}.csv'.format(ticker, year)
    window_size = 10
    debug = True

    tf.keras.backend.clear_session()
    K.clear_session()

    agent = Agent(window_size, pretrained=True, model_name=model_name)

    # read csv into dataframe
    df = pd.read_csv(test_stock)
    # df = df.iloc[:55]
    # filter out the desired features
    df = df[['Date', 'Adj Close']]
    # rename feature column names
    df = df.rename(columns={'Adj Close': 'actual', 'Date': 'date'})
    # convert dates from object to DateTime type
    df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)

    coloredlogs.install(level='DEBUG')
    switch_k_backend_device()

    test_data = get_stock_data(test_stock)
    initial_offset = test_data[1] - test_data[0]

    test_result, history = evaluate_model(agent, test_data, window_size, debug)
    show_eval_result(model_name, test_result, initial_offset)

    def visualize(df, history, title="trading session"):
        # add history to dataframe
        position = [history[0][0]] + [x[0] for x in history]
        actions = ['HOLD'] + [x[1] for x in history]
        df['position'] = position
        df['action'] = actions

        # specify y-axis scale for stock prices
        scale = alt.Scale(domain=(min(min(df['actual']), min(df['position'])) - 50,
                                  max(max(df['actual']), max(df['position'])) + 50),
                          clamp=True)

        # plot a line chart for stock positions
        actual = alt.Chart(df).mark_line(
            color='green',
            opacity=0.5
        ).encode(
            x='date:T',
            y=alt.Y('position', axis=alt.Axis(format='$.2f', title='Price'), scale=scale)
        ).interactive(
            bind_y=False
        )

        # plot the BUY and SELL actions as points
        points = alt.Chart(df).transform_filter(
            alt.datum.action != 'HOLD'
        ).mark_point(
            filled=True
        ).encode(
            x=alt.X('date:T', axis=alt.Axis(title='Date')),
            y=alt.Y('position', axis=alt.Axis(format='$.2f', title='Price'), scale=scale),
            color='action'
        ).interactive(bind_y=False)

        # merge the two charts
        chart = alt.layer(actual, points, title=title).properties(height=300, width=1000)
        return chart

    chart = visualize(df, history, title=test_stock)

    # replay the action sequence to compute invested capital, returns and profit
    cap = [0]
    inv = 0
    ret = 0
    b = 0
    for i in range(len(df)):
        if df.iloc[i]['action'] == 'BUY':
            cap.append(cap[-1] + df.iloc[i]['actual'])
            inv += df.iloc[i]['actual']
            b += 1
        if df.iloc[i]['action'] == 'SELL' and b > 0:
            cap.append(cap[-1] - df.iloc[i]['actual'])
            ret += df.iloc[i]['actual']
            b -= 1
    req_cap = max(cap)
    # value any still-open positions at the final price
    prof = (ret
            + (df['action'].value_counts().get('BUY', 0)
               - df['action'].value_counts().get('SELL', 0)) * df.iloc[-1]['actual']
            - inv)
    return pd.DataFrame(
        [[ticker, strategy, ep_count, year, inv, ret, req_cap, prof,
          (prof / req_cap) * 100]],
        columns=['ticker', 'strategy', 'ep_count', 'year', 'investment',
                 'returns', 'required capital', 'profit', 'profit percentage'])
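# Usage sketch for fn(): sweep a few tickers and strategies and stack the
# one-row result frames into a single comparison table. The ticker, strategy,
# episode-count and year values are placeholders and must match the trained
# models and the CSV files under data/Nifty50/Split/.
results = pd.concat(
    [fn(ticker, strategy, 50, 2019)
     for ticker in ['RELIANCE', 'TCS']
     for strategy in ['dqn', 't-dqn']],
    ignore_index=True)
print(results.sort_values('profit percentage', ascending=False))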
def main(eval_stock, model_name, period, money, waitTime):
    """ Evaluates the stock trading bot.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python eval.py --help]
    """
    LEVERAGE = 20
    pos = getPositions()
    print("current positions", pos)  # todo get current portfolio
    run = True
    boughtAT = 0
    prof = 0
    isShort = False
    while run:
        try:
            resp = None
            pos = getPositions()
            print("current positions: ", pos)
            prof = 0
            app.reqHistoricalData(1, eurusd_contract, '', '60 S', '1 min', 'BID', 0, 2, False, [])
            time.sleep(5)
            data = pd.DataFrame(app.data, columns=['Datetime', 'Open', 'High', 'Low', 'Close'])
            data['Datetime'] = pd.to_datetime(data['Datetime'], unit='s')
            if len(data) == 0:
                print(data)
                raise Exception("NO DATA")

            # reshape the data for model input
            print(data)
            data = data[data.columns[1:]]  # leave out datetime
            window_size = 10

            # Single Model Evaluation
            print("Modelname: ", model_name)
            agent = Agent(window_size, pretrained=True, model_name=model_name)
            act = predict_next(agent, data, window_size)

            # act on decision
            crntPrice = data["Close"].iloc[-1]  # most recent close
            print("current price is: ", crntPrice)

            def createOrder(money, crntPrice):
                order = Order()
                order.action = None
                order.orderType = "MKT"
                order.totalQuantity = int(money / crntPrice)
                return order

            allowShort = False
            if act == 1:  # buy signal
                if eval_stock.lower() not in pos.columns:
                    order = createOrder(money, crntPrice)
                    # NB: this version executes the model's buy signal as a
                    # SELL order; the profit bookkeeping below follows that
                    # inverted direction
                    order.action = "SELL"
                    print("Long buy because no position: ", pos, eval_stock.lower())
                    if orderIds.get(eval_stock.lower()) is None:
                        # should be the case in the first episode
                        orderIds[eval_stock.lower()] = 1
                    else:
                        orderIds[eval_stock.lower()] += 1
                    saveOrderIds(orderIds)
                    resp = app.placeOrder(orderIds[eval_stock.lower()], eurusd_contract, order)
                    print(resp)
                    pos[eval_stock.lower()] = order.totalQuantity
                    print(order.totalQuantity, money, crntPrice)
                    writePositions(pos)
                    boughtAT = crntPrice
                    isShort = False
                elif eval_stock.lower() in pos.columns and isShort and allowShort:
                    prof = -(boughtAT - crntPrice)
                    print("Closing short ", eval_stock, " profit: ", prof)
                    boughtAT = 0
                    # resp = eh.close(eval_stock.lower())
                else:
                    resp = "buy, but have position, so stay"
            elif act == 2:  # sell signal
                if eval_stock.lower() in pos and not isShort:
                    prof = boughtAT - crntPrice
                    print("Long selling ", eval_stock, " profit: ", prof)
                    boughtAT = 0
                    order = createOrder(money, crntPrice)
                    order.action = "BUY"
                    print("Long sell", pos, eval_stock.lower())
                    order_id = orderIds[eval_stock.lower()]
                    resp = app.placeOrder(order_id, eurusd_contract, order)
                    print(resp)
                    pos[eval_stock.lower()] = 0
                    writePositions(pos)
                elif eval_stock.lower() not in pos.columns and allowShort:
                    print("Short buy because no position: ", pos, eval_stock.lower())
                    # resp = eh.sell(eval_stock.lower(), money, LEVERAGE, crntPrice * .8, crntPrice * 1.1)
                    print(resp)
                    boughtAT = crntPrice
                    isShort = True
                else:
                    # we have a short position
                    print("not selling because position not there ", eval_stock)
                    resp = "not selling because I have no position in ", eval_stock
            else:
                print("HOLD")

            with open('logs/%s_log.txt' % eval_stock, 'a') as f:
                txt = "stock:%s, action:%s, profit:%.2f , api-response:%s, time:%s\n" % (
                    eval_stock, act, prof, resp, datetime.datetime.now())
                f.write(txt)
            print(waitTime, "s waiting... ")
            time.sleep(waitTime - 3)
        except KeyboardInterrupt:
            print('Interrupted')
            run = False
            try:
                sys.exit(0)
            except SystemExit:
                os._exit(0)
def main(eval_stock, model_name, period, money, waitTime):
    """ Evaluates the stock trading bot.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python eval.py --help]
    """
    eh = etoroHandler()
    LEVERAGE = 20
    pos = eh.getPositionDict()
    print(pos)  # todo get current portfolio
    run = True
    boughtAT = 0
    prof = 0
    isShort = False
    while run:
        try:
            pos = eh.getPositionDict()
            prof = 0
            yfname = eval_stock
            if "EUR" in eval_stock:  # if forex
                yfname = eval_stock  # + "=X"
            data = get_live_stock_data(yfname, period)
            window_size = 10

            # Single Model Evaluation
            if model_name is not None:
                print("Modelname: ", model_name)
                agent = Agent(window_size, pretrained=True, model_name=model_name)
                act = predict_next(agent, data, window_size)
                # show_eval_result(model_name, profit, initial_offset)
            # Multiple Model Evaluation
            else:
                raise Exception("MODEL NAME MISSING")

            # act on decision
            crntPrice = data["realprice"].values[-1]
            resp = None
            eh.updateHandler()
            tmpstock = eval_stock
            if "btc" in eval_stock.lower():  # dirty fix for btc
                tmpstock = "btc"
            if "eur" in eval_stock.lower():  # strip the yfinance forex suffix
                tmpstock = eval_stock.split("=X")[0].lower()
            allowShort = False
            if act == 1:  # buy
                if tmpstock.lower() not in pos:
                    print("Long buy because no position: ", pos, tmpstock.lower())
                    resp = eh.buy(tmpstock.lower(), money, LEVERAGE,
                                  crntPrice * 1.2, crntPrice * 0.9)
                    print(resp)
                    boughtAT = crntPrice
                    isShort = False
                elif tmpstock.lower() in pos and isShort and allowShort:
                    prof = boughtAT - crntPrice  # profit on closing a short
                    print("Closing short ", tmpstock, " profit: ", prof)
                    boughtAT = 0
                    resp = eh.close(tmpstock.lower())
                else:
                    resp = "buy, but have position, so stay"
            elif act == 2:  # sell
                if tmpstock.lower() in pos and not isShort:
                    prof = crntPrice - boughtAT  # profit on closing a long
                    print("Long selling ", tmpstock, " profit: ", prof)
                    boughtAT = 0
                    resp = eh.close(tmpstock.lower())
                elif tmpstock.lower() not in pos and allowShort:
                    print("Short buy because no position: ", pos, tmpstock.lower())
                    resp = eh.sell(tmpstock.lower(), money, LEVERAGE,
                                   crntPrice * .8, crntPrice * 1.1)
                    print(resp)
                    boughtAT = crntPrice
                    isShort = True
                else:
                    # we have a short position
                    print("not selling because position not there ", tmpstock)
                    resp = "not selling because I have no position in ", tmpstock
            else:
                print("HOLD")

            with open('logs/%s_log.txt' % tmpstock, 'a') as f:
                txt = "stock:%s, action:%s, profit:%.2f , api-response:%s, time:%s\n" % (
                    eval_stock, act, prof, resp, datetime.datetime.now())
                f.write(txt)
            print(waitTime, "s waiting... ")
            time.sleep(waitTime - 3)
        except KeyboardInterrupt:
            print('Interrupted')
            run = False
            try:
                sys.exit(0)
            except SystemExit:
                os._exit(0)
        except Exception:
            # resp = eh.close(tmpstock.lower())
            print("ERROR: trying to close positions before exit")
            raise
def main(train_stock, val_stock, window_size, batch_size, ep_count, strategy="t-dqn", model_name="model_debug", pretrained=False, debug=False): """ Trains the stock trading bot using Deep Q-Learning. Please see https://arxiv.org/abs/1312.5602 for more details. Args: [python train.py --help] """ agent = Agent(window_size, strategy=strategy, pretrained=pretrained, model_name=model_name) train_data = get_stock_data(train_stock) val_data = get_stock_data(val_stock) final_rewards = [] train_roi = [] valid_roi = [] train_loss = [] rewards = [] initial_offset = val_data[1] - val_data[0] for episode in range(1, ep_count + 1): train_result, rewards = train_model(agent, episode, train_data, ep_count=ep_count, batch_size=batch_size, window_size=window_size) final_rewards.extend(rewards) train_roi.append(train_result[2]) train_loss.append(train_result[3]) val_result, _ = evaluate_model(agent, val_data, window_size, debug) valid_roi.append(val_result) show_train_result(train_result, val_result, initial_offset) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(20, 9)) # To be shifted to Axis 1 ax1 = fig.add_subplot(gs[0, 0]) ax1.plot(range(len(train_loss)), train_loss, color='purple', label='loss') ax1.legend(loc=0, ncol=2, prop={'size': 20}, fancybox=True, borderaxespad=0.) ax1.set_xlabel('Epochs', size=20) ax1.set_ylabel('Train Loss', size=20) ax1.set_title('Loss w.r.t. Epochs', size=20) # To be shifted to Axis 2 ax2 = fig.add_subplot(gs[0, 1]) ax2.plot(range(len(train_roi)), train_roi, color='crimson', label='train') ax2.plot(range(len(valid_roi)), valid_roi, color='olive', label='val') ax2.legend(loc=0, ncol=2, prop={'size': 20}, fancybox=True, borderaxespad=0.) ax2.set_ylabel('Return of Investment($)', size=20) ax2.set_xlabel('Epochs', size=20) ax2.set_title('Train and Valid ROI w.r.t. Epochs', size=20) # To be shifted to Axis 3 ax3 = fig.add_subplot(gs[1, :]) ax3.plot(range(len(final_rewards)), final_rewards, color='red', label='Reward of Rainbow DQN') ax3.set_xlabel('Episodes', size=20) ax3.set_ylabel('Rewards', size=20) ax3.set_title('Reward w.r.t. episodes', size=20) ax3.legend(loc=0, ncol=2, prop={'size': 20}, fancybox=True, borderaxespad=0.) plt.show()