Example #1
def main(eval_stock, window_size, model_name, debug):
    """ Evaluates the stock trading bot.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python eval.py --help]
    """
    data = []
    time_get = []
    # Sample the live BTC/USD price once per second for 15 seconds,
    # recording a minute-second timestamp for each sample.
    for _ in range(15):
        minute, second = time.strftime("%M,%S").split(',')
        time_get.append(minute + '-' + second)
        time.sleep(1)
        quote = cryptocompare.get_price('BTC', currency='USD')
        data.append(quote['BTC']['USD'])

    initial_offset = data[1] - data[0]

    # Single Model Evaluation
    if model_name is not None:
        agent = Agent(window_size, pretrained=True, model_name=model_name)
        profit, _ = evaluate_model(agent, data, window_size, debug, time_get)
        show_eval_result(model_name, profit, initial_offset)

    # Multiple Model Evaluation
    else:
        for model in os.listdir("models"):
            if os.path.isfile(os.path.join("models", model)):
                agent = Agent(window_size, pretrained=True, model_name=model)
                profit, _ = evaluate_model(agent, data, window_size, debug, time_get)
                show_eval_result(model, profit, initial_offset)
                del agent
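
All of these examples lean on a show_eval_result helper. A minimal sketch of what it might look like, assuming it only logs the profit against the buy-and-hold baseline (initial_offset); the exact formatting in the original utils module may differ:

import logging

def show_eval_result(model_name, profit, initial_offset):
    # Flag a model as useless when it earns nothing, or merely
    # matches the first-day price drift used as a baseline.
    if profit == initial_offset or profit == 0.0:
        logging.info('{}: USELESS'.format(model_name))
    else:
        logging.info('{}: ${:.2f}'.format(model_name, profit))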
Example #2
def main(eval_stock, window_size, model_name, debug, manual_run):
    """ Evaluates the stock trading bot.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python eval.py --help]
    """  
    if isinstance(eval_stock, str):
        data = get_stock_data(eval_stock)
    elif isinstance(eval_stock, list):
        data = eval_stock
    else:
        raise TypeError("eval_stock must be a CSV path or a list of prices")
    initial_offset = data[1] - data[0]

    # Single Model Evaluation
    if model_name is not None:
        agent = Agent(window_size, pretrained=True, model_name=model_name, manual=manual_run)
        profit, _ = evaluate_model(agent, data, window_size, debug)
        show_eval_result(model_name, profit, initial_offset)
        
    # Multiple Model Evaluation
    else:
        for model in os.listdir("models"):
            if os.path.isfile(os.path.join("models", model)):
                agent = Agent(window_size, pretrained=True, model_name=model)
                profit, _ = evaluate_model(agent, data, window_size, debug)
                show_eval_result(model, profit, initial_offset)
                del agent
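
Example #2 accepts either a CSV path or a preloaded price list. A sketch of the get_stock_data helper it assumes, under the assumption that the CSV follows the usual Yahoo Finance layout with an 'Adj Close' column:

import pandas as pd

def get_stock_data(stock_file):
    # Return the adjusted closing prices as a plain list of floats.
    df = pd.read_csv(stock_file)
    return list(df['Adj Close'])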
Example #3
def main(eval_stock, window_size, model_name, debug):
    """ Evaluates the stock trading bot.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python eval.py --help]
    """
    data = filter_data_by_feature_columns(get_stock_data(eval_stock))

    # Single Model Evaluation
    if model_name is not None:
        agent = Agent(window_size * data.shape[1],
                      pretrained=True,
                      model_name=model_name)
        profit, history = evaluate_model(agent, data, window_size, debug)
        first_buy = history[0][0]
        show_eval_result(model_name, profit, first_buy)

    # Multiple Model Evaluation
    else:
        for model in os.listdir("models"):
            if os.path.isfile(os.path.join("models", model)):
                agent = Agent(window_size * data.shape[1],
                              pretrained=True,
                              model_name=model)
                # Unpack the history and pass the first recorded position,
                # matching the single-model branch above.
                profit, history = evaluate_model(agent, data, window_size, debug)
                show_eval_result(model, profit, history[0][0])
                del agent
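
Example #3 sizes the agent's state as window_size * data.shape[1], so filter_data_by_feature_columns must return a 2-D array of per-day features rather than a 1-D price list. A hypothetical sketch; the actual feature set is not shown in the source, so FEATURE_COLUMNS below is an assumption:

import pandas as pd

FEATURE_COLUMNS = ['Open', 'High', 'Low', 'Close', 'Volume']  # assumed feature set

def filter_data_by_feature_columns(df: pd.DataFrame):
    # Keep only the chosen numeric columns and return an array of
    # shape (n_days, n_features) for the windowed state encoding.
    return df[FEATURE_COLUMNS].to_numpy()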
Example #4
def main(train_stock,
         val_stock,
         window_size,
         batch_size,
         ep_count,
         strategy="t-dqn",
         model_name="model_debug",
         pretrained=False,
         debug=False):
    """Trains the stock trading bot using Deep Q-Learning.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python train.py --help]

    """
    train_data = filter_data_by_feature_columns(get_stock_data(train_stock))
    val_data = filter_data_by_feature_columns(get_stock_data(val_stock))

    agent = Agent(window_size * train_data.shape[1],
                  strategy=strategy,
                  pretrained=pretrained,
                  model_name=model_name)

    for episode in range(1, ep_count + 1):
        train_result = train_model(agent,
                                   episode,
                                   train_data,
                                   ep_count=ep_count,
                                   batch_size=batch_size,
                                   window_size=window_size)
        val_result, _ = evaluate_model(agent, val_data, window_size, debug)
        show_train_result(train_result, val_result)
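
The docstrings all point at python train.py --help, so these mains sit behind a CLI parser. A minimal argparse sketch of such an entry point (the original project may use docopt; the flag names here are illustrative):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train the stock trading bot.')
    parser.add_argument('train_stock', help='path to the training CSV')
    parser.add_argument('val_stock', help='path to the validation CSV')
    parser.add_argument('--window-size', type=int, default=10)
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--episode-count', type=int, default=50)
    parser.add_argument('--strategy', default='t-dqn',
                        choices=['dqn', 't-dqn', 'double-dqn'])
    parser.add_argument('--model-name', default='model_debug')
    parser.add_argument('--pretrained', action='store_true')
    parser.add_argument('--debug', action='store_true')
    args = parser.parse_args()

    main(args.train_stock, args.val_stock, args.window_size, args.batch_size,
         args.episode_count, strategy=args.strategy, model_name=args.model_name,
         pretrained=args.pretrained, debug=args.debug)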
Example #5
def main(args):
    count = 0
    total_profit = 0
    t = 0
    history = []
    reward = 0
    ticker = args.ticker + '.NS'
    price = []
    window_size = 10
    model_name = 'model_debug_50'
    url = 'https://finance.yahoo.com/quote/{}?p={}&.tsrc=fin-srch'.format(ticker, ticker)
    # Load the pretrained agent once instead of rebuilding it on every tick.
    agent = Agent(state_size=window_size, pretrained=True, model_name=model_name)
    agent.inventory = []

    # Poll during NSE market hours; re-read the clock each iteration so the
    # loop terminates at the close.
    while (datetime.time(9, 14, tzinfo=tz)
           < datetime.datetime.now(tz).time()
           < datetime.time(15, 31, tzinfo=tz)):
        print(count)
        live = Real(url, count)
        count += 1
        price.append(live)
        if count < window_size:
            continue

        print(live)
        initial_offset = price[1] - price[0]
        state = get_state(price, 0, window_size + 1)
        next_state = get_state(price, t + 1, window_size + 1)
        profit = evaluate_model(agent, state, next_state, price, t, total_profit,
                                history, reward, window_size=window_size)
        show_eval_result(model_name, profit, initial_offset)
        t += 1
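
Examples #5 and #8 build states with get_state(price, t, window_size + 1). A sketch of the usual encoding in this family of bots, assuming the state is the sigmoid of consecutive price differences over an n-day window, left-padded when there is not yet a full window of history:

import math
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + math.exp(-x))

def get_state(data, t, n_days):
    # n-day price block ending at t, padded with the first price when
    # t is too early to supply a full window.
    d = t - n_days + 1
    block = data[d:t + 1] if d >= 0 else [data[0]] * -d + data[0:t + 1]
    # Squash each one-step price change into (0, 1).
    return np.array([[sigmoid(block[i + 1] - block[i]) for i in range(n_days - 1)]])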
Example #6
def main(train_stock,
         val_stock,
         window_size,
         batch_size,
         ep_count,
         strategy="t-dqn",
         model_name="model_debug",
         pretrained=False,
         debug=False):

    agent = Agent(window_size,
                  strategy=strategy,
                  pretrained=pretrained,
                  model_name=model_name)

    train_data = get_stock_data(train_stock)
    val_data = get_stock_data(val_stock)

    initial_offset = val_data[1] - val_data[0]

    for episode in range(1, ep_count + 1):
        train_result = train_model(agent,
                                   episode,
                                   train_data,
                                   ep_count=ep_count,
                                   batch_size=batch_size,
                                   window_size=window_size)
        val_result, _ = evaluate_model(agent, val_data, window_size, debug)
        show_train_result(train_result, val_result, initial_offset)
Example #7
def main(train_stock,
         val_stock,
         window_size,
         batch_size,
         ep_count,
         strategy="t-dqn",
         model_name="model_debug",
         pretrained=False,
         debug=False):
    """ Trains the stock trading bot using Deep Q-Learning.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python train.py --help]
    """
    agent = Agent(window_size,
                  strategy=strategy,
                  pretrained=pretrained,
                  model_name=model_name)

    train_data = get_stock_data(train_stock)
    val_data = get_stock_data(val_stock)

    initial_offset = val_data[1] - val_data[0]

    for episode in range(1, ep_count + 1):
        train_result = train_model(agent,
                                   episode,
                                   train_data,
                                   ep_count=ep_count,
                                   batch_size=batch_size,
                                   window_size=window_size)
        val_result, _ = evaluate_model(agent, val_data, window_size, debug)
        show_train_result(train_result, val_result, initial_offset)
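
Example #7 is the canonical training driver. A condensed sketch of the train_model loop it calls: one pass over the series, an epsilon-greedy action per step, the transition pushed into replay memory, and a replay update once enough samples accumulate. The agent methods (act, remember, train_experience_replay) and the 4-tuple return are assumptions inferred from how train_result is consumed in these examples; it reuses the get_state sketch above:

def train_model(agent, episode, data, ep_count=100, batch_size=32, window_size=10):
    total_profit = 0
    data_length = len(data) - 1
    agent.inventory = []
    avg_loss = []

    state = get_state(data, 0, window_size + 1)
    for t in range(data_length):
        reward = 0
        next_state = get_state(data, t + 1, window_size + 1)
        action = agent.act(state)  # epsilon-greedy during training

        if action == 1:  # BUY: open a position at today's price
            agent.inventory.append(data[t])
        elif action == 2 and agent.inventory:  # SELL: close the oldest position
            bought_price = agent.inventory.pop(0)
            reward = data[t] - bought_price
            total_profit += reward

        done = (t == data_length - 1)
        agent.remember(state, action, reward, next_state, done)
        if len(agent.memory) > batch_size:
            avg_loss.append(agent.train_experience_replay(batch_size))
        state = next_state

    mean_loss = sum(avg_loss) / len(avg_loss) if avg_loss else 0.0
    return (episode, ep_count, total_profit, mean_loss)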
Example #8
def main(args):
    count = 0
    total_profit = 0
    t = 0
    history = []
    reward = 0
    price = []
    window_size = 10
    model_name = 'model_t-dqn_GOOG_10'
    url = 'https://api.binance.com/api/v1/ticker/price?symbol={}'.format(args.ticker)
    live = Real(url)
    print(live)
    # Load the pretrained agent once instead of rebuilding it on every tick.
    agent = Agent(state_size=window_size, pretrained=True, model_name=model_name)
    agent.inventory = []

    # Take 100 live samples from the Binance ticker endpoint.
    while count < 100:
        live = Real(url)
        count += 1
        price.append(live)
        if count < window_size:
            continue

        print(live)
        initial_offset = price[1] - price[0]
        state = get_state(price, 0, window_size + 1)
        next_state = get_state(price, t + 1, window_size + 1)
        profit = evaluate_model(agent, state, next_state, price, t, total_profit,
                                history, reward, window_size=window_size)
        show_eval_result(model_name, profit, initial_offset)
        t += 1
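
Real is the live-price fetcher. For the Binance endpoint in Example #8 a minimal one-argument sketch suffices, since /api/v1/ticker/price returns a documented JSON body; the two-argument Yahoo variant in Examples #5 and #10 would need HTML scraping instead:

import requests

def Real(url):
    # e.g. {"symbol": "BTCUSDT", "price": "43250.10000000"}
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()
    return float(resp.json()['price'])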
Example #9
def train(train_stock,
          val_stock,
          window_size,
          batch_size,
          ep_count,
          strategy="t-dqn",
          model_name="model_double-dqn_GOOG_50",
          pretrained=True,
          debug=False):
    """ 
    Trains the stock trading bot using Deep Q-Learning.
    Please see https://arxiv.org/abs/1312.5602 for more details.
    Args: [python train.py --help]
    """
    print("Started the model training for the {}".format(symbol))
    agent = Agent(window_size,
                  strategy=strategy,
                  pretrained=pretrained,
                  model_name=model_name)

    train_data = train_stock
    val_data = val_stock
    val_arr = np.asarray(val_data)
    initial_offset = val_arr[1] - val_arr[0]

    for episode in range(1, ep_count + 1):
        train_result = train_model(agent,
                                   episode,
                                   train_data,
                                   ep_count=ep_count,
                                   batch_size=batch_size,
                                   window_size=window_size)
        val_result, _ = evaluate_model(agent, val_data, window_size, debug)
        show_train_result(train_result, val_result, initial_offset)
    print("Training the model completed!!")
Example #10
def main(args):
    count = 0
    ticker = args.ticker + '.NS'
    price = []
    model_name = 'model_debug_50'
    url = 'https://finance.yahoo.com/quote/{}?p={}&.tsrc=fin-srch'.format(
        ticker, ticker)
    # Load the pretrained agent once, outside the polling loop.
    agent = Agent(state_size=10, pretrained=True, model_name=model_name)

    # Re-read the clock each iteration so the loop ends when the window closes.
    while (datetime.time(9, 14, tzinfo=tz)
           < datetime.datetime.now(tz).time()
           < datetime.time(19, 31, tzinfo=tz)):
        print(count)
        live = Real(url, count)
        count += 1
        price.append(live)
        if count < 10:
            continue

        print(live)
        initial_offset = price[1] - price[0]
        profit, _ = evaluate_model(agent, price, window_size=10, debug=False)
        show_eval_result(model_name, profit, initial_offset)
Example #11
def main(train_stock, val_stock, window_size, batch_size, ep_count,
         strategy="t-dqn", model_name="model_debug", pretrained=False,
         debug=False):
    """ Trains the stock trading bot using Deep Q-Learning.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python train.py --help]
    """
    import tensorflow as tf
    print("#TF Version: ",tf.__version__)

    from tensorflow.python.client import device_lib
    print("Using device: ")
    print(device_lib.list_local_devices())
    agent = Agent(window_size, strategy=strategy, pretrained=pretrained, model_name=model_name)
    
    train_data = get_stock_data(train_stock)
    val_data = get_stock_data(val_stock)

    initial_offset = val_data["Close"][1] - val_data["Close"][0]

    for episode in range(1, ep_count + 1):
        train_result = train_model(agent, episode, train_data, ep_count=ep_count,
                                   batch_size=batch_size, window_size=window_size)
        val_result, _, actionCollection = evaluate_model(agent, val_data, window_size, debug)
        show_train_result(train_result, val_result, initial_offset)
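
Note that device_lib.list_local_devices() initializes every device (and grabs GPU memory) as a side effect. On TF 2.x the public configuration API is a lighter way to run the same check; a sketch:

import tensorflow as tf

print('#TF Version:', tf.__version__)
print('GPUs:', tf.config.list_physical_devices('GPU'))
print('CPUs:', tf.config.list_physical_devices('CPU'))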
Example #12
def main(args):
    price = []

    cur_symbol = args.ticker

    quick_train(cur_symbol)
    window_size = 10

    # Need two prices to compute the baseline offset.
    for c in range(2):
        price.append(Real(cur_symbol))

    model_name = 'model_double-dqn_GOOG_50_50'
    initial_offset = price[1] - price[0]

    agent = Agent(window_size, pretrained=True, model_name=model_name)
    profit, history = evaluate_model(agent, price, window_size, cur_symbol, debug=False)
    show_eval_result(model_name, profit, initial_offset)
    print("Profit:", profit)
    buys = sells = holds = 0
    for i in history:
        if i[1] == "BUY":
            buys += 1
        elif i[1] == "SELL":
            sells += 1
        elif i[1] == "HOLD":
            holds += 1
    print("BUYS Percentage:", (buys/len(history)) * 100)
    print("SELLS Percentage:", (sells/len(history)) * 100)
    print("HOLDS Percentage:", (holds/len(history)) * 100)
    rpath = 'training_data/' + cur_symbol + '.csv'
    os.remove(rpath)
Example #13
def main(eval_stock, window_size, model_name, debug):

    data = get_stock_data(eval_stock)
    initial_offset = data[1] - data[0]

    # Single Model Evaluation
    if model_name is not None:
        agent = Agent(window_size, pretrained=True, model_name=model_name)
        profit, _ = evaluate_model(agent, data, window_size, debug)
        show_eval_result(model_name, profit, initial_offset)

    # Multiple Model Evaluation
    else:
        for model in os.listdir("models"):
            if os.path.isfile(os.path.join("models", model)):
                agent = Agent(window_size, pretrained=True, model_name=model)
                profit, _ = evaluate_model(agent, data, window_size, debug)
                show_eval_result(model, profit, initial_offset)
                del agent
Example #14
def main(eval_stock, window_size, model_name, debug):
    """ Evaluates the stock trading bot.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python eval.py --help]
    """
    data = get_stock_data(eval_stock)
    initial_offset = data[1] - data[0]

    # Single Model Evaluation
    if model_name is not None:
        agent = Agent(window_size, pretrained=True, model_name=model_name)
        profit, _ = evaluate_model(agent, data, window_size, debug)
        show_eval_result(model_name, profit, initial_offset)
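
For completeness, a sketch of the evaluate_model counterpart assumed by the plain eval examples: greedy actions, a FIFO inventory, and a (price, action) history for plotting. The two-value return matches the profit, _ = ... unpacking used throughout; the variants above that pass extra arguments clearly use a modified signature:

def evaluate_model(agent, data, window_size, debug):
    total_profit = 0
    data_length = len(data) - 1
    history = []
    agent.inventory = []

    state = get_state(data, 0, window_size + 1)
    for t in range(data_length):
        next_state = get_state(data, t + 1, window_size + 1)
        action = agent.act(state, is_eval=True)  # greedy at evaluation time

        if action == 1:  # BUY
            agent.inventory.append(data[t])
            history.append((data[t], 'BUY'))
        elif action == 2 and agent.inventory:  # SELL
            bought_price = agent.inventory.pop(0)
            total_profit += data[t] - bought_price
            history.append((data[t], 'SELL'))
        else:  # HOLD
            history.append((data[t], 'HOLD'))

        state = next_state
    return total_profit, history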
Example #15
def main(train_stock,
         val_stock,
         economy,
         window_size,
         batch_size,
         ep_count,
         strategy="dqn",
         model_name="model_debug",
         pretrained=False,
         debug=False):
    """ Trains the stock trading bot using Deep Q-Learning.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python train.py --help]
    """
    print("initialize agent")
    agent = Agent(window_size,
                  strategy=strategy,
                  pretrained=pretrained,
                  model_name=model_name)

    print('get stock data')
    train_data = get_stock_data(train_stock)
    print('get economy leading')
    economy_data = get_economy_data(economy)
    print('get val data')
    val_data = get_stock_data(val_stock)

    # Difference between the closing prices of the first and second days
    initial_offset = val_data[0][1] - val_data[0][0]
    last_checkpoint = 0

    for episode in range(1, ep_count + 1):
        print('train episode : ', episode)
        train_result, is_earlystopping = train_model(
            agent,
            episode,
            train_data,
            economy_data,
            ep_count=ep_count,
            batch_size=batch_size,
            window_size=window_size,
            last_checkpoint=last_checkpoint)
        val_result, _ = evaluate_model(agent, val_data, economy_data,
                                       window_size, debug)
        show_train_result(train_result, val_result, initial_offset)

        if not is_earlystopping:
            last_checkpoint = episode
Example #16
def main(eval_stock, window_size, model_name, debug):
    """ Evaluates the stock trading bot.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python eval.py --help]
    """    
    df_google = pd.read_csv("data/GOOG_2019.csv")
    data = list(df_google['Open'])
    data_date = list(df_google['Date'])
    initial_offset = data[1] - data[0]


    agent = Agent(window_size, current_price=data[0], pretrained=True, model_name=model_name)
    total_profit, cash_in_hand, total_share, google_buy, google_sell = evaluate_model(agent, data, data_date, window_size, debug)

    show_eval_result(model_name, total_profit, initial_offset)

    google_price_buy = []
    google_buy_date = []
    google_price_sell = []
    google_sell_date = []

    # Log every trade to CSV; the context manager ensures the file is closed.
    with open("dqn.csv", "w", newline="") as f:
        w = csv.writer(f)
        for date, price in google_buy:
            google_price_buy.append(price)
            google_buy_date.append(date)
            w.writerow(['Buy', date, price])
        for date, price in google_sell:
            google_price_sell.append(price)
            google_sell_date.append(date)
            w.writerow(['Sell', date, price])

    fig = px.line(df_google, x='Date', y='Open')
    fig.add_trace(go.Scatter(x=google_buy_date, y=google_price_buy, mode="markers", showlegend=True, name="Buy"))
    fig.add_trace(go.Scatter(x=google_sell_date, y=google_price_sell, mode="markers", showlegend=True, name="Sell"))
    fig.update_layout(
        title="DQN - Test results on GOOG_2019 stock data with profit of " + str(total_profit),
        font=dict(size=9, color="#7f7f7f"))
    fig.show()
Example #17
def main(window_size, batch_size, ep_count, model_name, pretrained, debug):
    """ Trains the stock trading bot using Deep Q-Learning.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python train.py --help]
    """
    agent = Agent(window_size, pretrained=pretrained, model_name=model_name)

    train_data = get_stock_data('data/training.csv')
    val_data = get_stock_data('data/test.csv')

    initial_offset = val_data[1] - val_data[0]

    for episode in range(1, ep_count + 1):
        train_result = train_model(agent,
                                   episode,
                                   train_data,
                                   ep_count=ep_count,
                                   batch_size=batch_size,
                                   window_size=window_size)
        val_result, _ = evaluate_model(agent, val_data, window_size, debug)
        show_train_result(train_result, val_result, initial_offset)

    agent.soft_save()
Example #18
def fn(ticker, strategy, ep_count, year):
    model_name = '{}_{}_{}'.format(ticker, strategy, ep_count)
    test_stock = 'data/Nifty50/Split/{}_{}.csv'.format(ticker, year)
    window_size = 10
    debug = True
    tf.keras.backend.clear_session()
    K.clear_session()
    agent = Agent(window_size, pretrained=True, model_name=model_name)

    # read csv into dataframe
    df = pd.read_csv(test_stock)
    # df = df.iloc[:55]
    # filter out the desired features
    df = df[['Date', 'Adj Close']]
    # rename feature column names
    df = df.rename(columns={'Adj Close': 'actual', 'Date': 'date'})
    # convert dates from object to DateTime type
    dates = df['date']
    dates = pd.to_datetime(dates, infer_datetime_format=True)
    df['date'] = dates

    coloredlogs.install(level='DEBUG')
    switch_k_backend_device()

    test_data = get_stock_data(test_stock)
    initial_offset = test_data[1] - test_data[0]

    test_result, history = evaluate_model(agent, test_data, window_size, debug)
    show_eval_result(model_name, test_result, initial_offset)

    def visualize(df, history, title="trading session"):
        # add history to dataframe
        position = [history[0][0]] + [x[0] for x in history]
        actions = ['HOLD'] + [x[1] for x in history]
        df['position'] = position
        df['action'] = actions

        # specify y-axis scale for stock prices
        scale = alt.Scale(
            domain=(min(min(df['actual']), min(df['position'])) - 50,
                    max(max(df['actual']), max(df['position'])) + 50),
            clamp=True)

        # plot a line chart for stock positions
        actual = alt.Chart(df).mark_line(
            color='green',
            opacity=0.5
        ).encode(
            x='date:T',
            y=alt.Y('position', axis=alt.Axis(format='$.2f', title='Price'), scale=scale)
        ).interactive(
            bind_y=False
        )

        # plot the BUY and SELL actions as points
        points = alt.Chart(df).transform_filter(
            alt.datum.action != 'HOLD'
        ).mark_point(
            filled=True
        ).encode(
            x=alt.X('date:T', axis=alt.Axis(title='Date')),
            y=alt.Y('position', axis=alt.Axis(format='$.2f', title='Price'), scale=scale),
            color='action'
        ).interactive(bind_y=False)

        # merge the two charts
        chart = alt.layer(actual, points, title=title).properties(height=300, width=1000)

        return chart

    chart = visualize(df, history, title=test_stock)

    cap = [0]
    inv = 0
    ret = 0
    b = 0
    for i in range(len(df)):
        if df.iloc[i]['action'] == 'BUY':
            cap.append(cap[-1] + df.iloc[i]['actual'])
            inv += df.iloc[i]['actual']
            b += 1
        if df.iloc[i]['action'] == 'SELL' and b > 0:
            cap.append(cap[-1] - df.iloc[i]['actual'])
            ret += df.iloc[i]['actual']
            b -= 1

    req_cap = max(cap)

    # Mark any still-open positions to the last price, then net out the investment.
    open_positions = (df['action'].value_counts().get('BUY', 0)
                      - df['action'].value_counts().get('SELL', 0))
    prof = ret + open_positions * df.iloc[-1]['actual'] - inv

    return pd.DataFrame(
        [[ticker, strategy, ep_count, year, inv, ret, req_cap, prof,
          (prof / req_cap) * 100]],
        columns=['ticker', 'strategy', 'ep_count', 'year', 'investment',
                 'returns', 'required capital', 'profit', 'profit percentage'])
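
A usage sketch for fn: sweep a few tickers for one strategy and year and stack the per-run metrics into a single table (the ticker list is a placeholder):

import pandas as pd

tickers = ['RELIANCE', 'TCS', 'INFY']  # placeholder Nifty50 symbols
results = pd.concat([fn(t, 't-dqn', 50, 2019) for t in tickers],
                    ignore_index=True)
print(results.sort_values('profit percentage', ascending=False))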
Example #19
def main(train_stock,
         val_stock,
         window_size,
         batch_size,
         ep_count,
         strategy="t-dqn",
         model_name="model_debug",
         pretrained=False,
         debug=False):
    """ Trains the stock trading bot using Deep Q-Learning.
		Please see https://arxiv.org/abs/1312.5602 for more details.

		Args: [python train.py --help]
		"""

    agent = Agent(window_size,
                  strategy=strategy,
                  pretrained=pretrained,
                  model_name=model_name)

    train_data = get_stock_data(train_stock)
    val_data = get_stock_data(val_stock)
    final_rewards = []
    train_roi = []
    valid_roi = []
    train_loss = []
    initial_offset = val_data[1] - val_data[0]

    for episode in range(1, ep_count + 1):
        train_result, rewards = train_model(agent,
                                            episode,
                                            train_data,
                                            ep_count=ep_count,
                                            batch_size=batch_size,
                                            window_size=window_size)
        final_rewards.extend(rewards)
        train_roi.append(train_result[2])
        train_loss.append(train_result[3])
        val_result, _ = evaluate_model(agent, val_data, window_size, debug)
        valid_roi.append(val_result)
        show_train_result(train_result, val_result, initial_offset)

    gs = gridspec.GridSpec(2, 2)
    fig = plt.figure(figsize=(20, 9))

    # Axis 1: training loss per epoch
    ax1 = fig.add_subplot(gs[0, 0])
    ax1.plot(range(len(train_loss)), train_loss, color='purple', label='loss')
    ax1.legend(loc=0,
               ncol=2,
               prop={'size': 20},
               fancybox=True,
               borderaxespad=0.)
    ax1.set_xlabel('Epochs', size=20)
    ax1.set_ylabel('Train Loss', size=20)
    ax1.set_title('Loss w.r.t. Epochs', size=20)

    # Axis 2: train and validation ROI per epoch
    ax2 = fig.add_subplot(gs[0, 1])
    ax2.plot(range(len(train_roi)), train_roi, color='crimson', label='train')
    ax2.plot(range(len(valid_roi)), valid_roi, color='olive', label='val')
    ax2.legend(loc=0,
               ncol=2,
               prop={'size': 20},
               fancybox=True,
               borderaxespad=0.)
    ax2.set_ylabel('Return of Investment($)', size=20)
    ax2.set_xlabel('Epochs', size=20)
    ax2.set_title('Train and Valid ROI w.r.t. Epochs', size=20)

    # Axis 3: rewards across episodes
    ax3 = fig.add_subplot(gs[1, :])
    ax3.plot(range(len(final_rewards)),
             final_rewards,
             color='red',
             label='Reward of Rainbow DQN')
    ax3.set_xlabel('Episodes', size=20)
    ax3.set_ylabel('Rewards', size=20)
    ax3.set_title('Reward w.r.t. episodes', size=20)
    ax3.legend(loc=0,
               ncol=2,
               prop={'size': 20},
               fancybox=True,
               borderaxespad=0.)

    plt.show()