def evaluate_model(agent, state, next_state, data, t, total_profit, history, reward, window_size, debug=False):
    """Run one evaluation step of the trading agent at time index *t*.

    Args:
        agent: trading agent exposing ``act``, ``inventory`` and ``memory``.
        state: current state fed to ``agent.act``.
        next_state: state for step ``t + 1``, stored in the replay memory.
        data: sequence of prices; ``data[t]`` is the price acted upon.
        t: current time index into *data*.
        total_profit: running profit accumulated so far.
        history: list of ``(price, action_name)`` tuples, mutated in place.
        reward: default reward recorded when the step is not a sell.
        window_size: unused here; kept for signature compatibility with callers.
        debug: when True, log each trade via ``logging.debug``.

    Returns:
        The updated total profit after this step.
    """
    # select an action (removed a stray debug `print(t)` from the original)
    action = agent.act(state, is_eval=True)

    # BUY: remember the purchase price
    if action == 1:
        agent.inventory.append(data[t])
        history.append((data[t], "BUY"))
        if debug:
            logging.debug("Buy at: {}".format(format_currency(data[t])))

    # SELL: realize profit/loss on the oldest held position (FIFO)
    elif action == 2 and len(agent.inventory) > 0:
        bought_price = agent.inventory.pop(0)
        delta = data[t] - bought_price
        reward = delta  # max(delta, 0)
        total_profit += delta
        history.append((data[t], "SELL"))
        if debug:
            logging.debug("Sell at: {} | Position: {}".format(
                format_currency(data[t]), format_position(data[t] - bought_price)))

    # HOLD
    else:
        history.append((data[t], "HOLD"))

    # no terminal flag in this variant: 4-tuple transition on purpose
    agent.memory.append((state, action, reward, next_state))

    return total_profit
def evaluate_model1(agent, symbol, data, window_size, debug):
    """Live-evaluate the agent against streaming exchange prices for *symbol*.

    Prefills *data* with ``window_size`` live quotes, then loops: fetch a
    quote, let the agent pick an action, place real market buy orders via
    ``client``, and log every buy to ``5result.csv``.

    NOTE(review): the original text of this function was truncated and
    contained an ``if/else`` mangled onto a single line; the loop tail
    (HOLD branch, replay-memory append and termination) was reconstructed
    from the near-identical sibling implementation in this file — confirm
    against the upstream original.

    Returns:
        Tuple ``(total_profit, history)`` once all buys are done and the
        inventory has been emptied.
    """
    # prefill the price window with live quotes
    count = 0
    while count < window_size:
        live = Real(client, symbol)
        data.append(live)
        count += 1
    total_profit = 0
    quantity = {}          # buy price -> quantity bought at that price
    quantity_1 = 5         # number of parts max_amount is split into
    max_amount = 1000      # maximum amount the bot may trade with
    history = []
    max_transaction = 40   # maximum number of buys
    inventory_limit = 5    # maximum simultaneously open positions
    agent.inventory = []
    state = get_state(data, 0, window_size + 1)
    number_of_buys = 0
    t = 0
    # exchange lot-size step used to round order quantities
    step_size = float(next(filter(lambda f: f['filterType'] == 'LOT_SIZE', client.get_symbol_info(symbol)['filters']))['stepSize'])
    while True:
        live = Real(client, symbol)
        data.append(live)
        reward = 0
        next_state = get_state(data, t + 1, window_size + 1)

        action = agent.act(state, is_eval=True)

        print("Live Price: ", live)
        # BUY: capped by transaction count and open-inventory limit
        if action == 1 and number_of_buys < max_transaction and len(agent.inventory) <= inventory_limit:
            quantity[live] = floatPrecision((max_amount / (quantity_1 * live)), step_size)
            client.order_market_buy(
                symbol=symbol,
                quantity=quantity[live])
            agent.inventory.append(data[t + window_size - 1])
            history.append((data[t + window_size - 1], "BUY"))
            number_of_buys += 1
            # Fixed: the original referenced undefined names `quantity1` and
            # `close1`; record the quantity and price actually used.
            df1 = pd.DataFrame({'Datetime': [datetime.now(tz)], 'Symbol': [symbol], 'Buy/Sell': ['Buy'],
                                'Quantity': [quantity[live]], 'Price': [live], 'Profit/loss': [0]})
            df1['Datetime'] = df1['Datetime'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
            # Fixed: this if/else was mangled onto a single line in the original.
            if not os.path.isfile('5result.csv'):
                df1.to_csv('5result.csv', index=False)
            else:
                df1.to_csv('5result.csv', index=False, mode='a', header=False)
            if debug:
                logging.debug("Buy at: {}".format(format_currency(data[t + window_size - 1])))

        # SELL: close the first currently-profitable open position
        elif action == 2 and len(agent.inventory) > 0:
            for i in agent.inventory:
                temp = data[t + window_size - 1] - i
                if temp > 0:
                    agent.inventory.remove(i)
                    delta = temp
                    reward = delta  # max(delta, 0)
                    total_profit += delta

                    history.append((data[t + window_size - 1], "SELL"))
                    if debug:
                        logging.debug("Sell at: {} | Position: {}".format(
                            format_currency(data[t + window_size - 1]), format_position(delta)))
                    break

        # HOLD (reconstructed tail — see docstring)
        else:
            history.append((data[t], "HOLD"))

        done = False
        agent.memory.append((state, action, reward, next_state, done))

        state = next_state
        t += 1
        if agent.inventory == [] and number_of_buys >= max_transaction:
            return total_profit, history
# ----- Example 3 -----
def evaluate_model1(agent, symbol, data, window_size, debug):
    """Live-evaluate the agent on streaming prices for *symbol*.

    Prefills *data* with ``window_size`` live quotes, then loops forever:
    fetch a quote, pick an action, and buy/sell/hold accordingly. Buying
    is capped at 10 buys; the loop returns once the inventory has been
    fully sold off after that cap is reached.

    Returns:
        Tuple ``(total_profit, history)``.
    """
    # prefill the price window with live quotes
    count = 0
    while count < window_size:
        live = Real(client, symbol)
        data.append(live)
        count += 1
    total_profit = 0
    history = []
    agent.inventory = []
    state = get_state(data, 0, window_size + 1)
    number_of_buys = 0
    t = 0
    while True:
        live = Real(client, symbol)
        data.append(live)
        reward = 0
        next_state = get_state(data, t + 1, window_size + 1)

        action = agent.act(state, is_eval=True)

        print("Live Price: ", live)
        # BUY (capped at 10 buys)
        if action == 1 and number_of_buys < 10:
            agent.inventory.append(data[t + window_size - 1])
            history.append((data[t + window_size - 1], "BUY"))
            number_of_buys += 1
            if debug:
                logging.debug("Buy at: {}".format(format_currency(data[t + window_size - 1])))

        # SELL: close the first currently-profitable open position.
        # (Removed the redundant `if agent.inventory != []:` — the elif
        # condition already guarantees a non-empty inventory.)
        elif action == 2 and len(agent.inventory) > 0:
            for i in agent.inventory:
                temp = data[t + window_size - 1] - i
                if temp > 0:
                    agent.inventory.remove(i)
                    delta = temp
                    reward = delta  # max(delta, 0)
                    total_profit += delta

                    history.append((data[t + window_size - 1], "SELL"))
                    if debug:
                        logging.debug("Sell at: {} | Position: {}".format(
                            format_currency(data[t + window_size - 1]), format_position(delta)))
                    break

        # HOLD (removed a dead `if False:` logging branch from the original)
        else:
            history.append((data[t], "HOLD"))

        done = False
        agent.memory.append((state, action, reward, next_state, done))

        state = next_state
        t += 1
        if agent.inventory == [] and number_of_buys >= 10:
            return total_profit, history
# ----- Example 4 -----
def evaluate_model(agent, data, window_size, debug):
    """Evaluate the agent over the whole *data* price series.

    Fixes the original unbounded ``while t >= 0`` loop, which never
    terminated, never returned a result, and eventually indexed past the
    end of *data*. The walk is now bounded by the data length and the
    function returns its results like the other evaluators in this file.

    Args:
        agent: trading agent exposing ``act``, ``inventory`` and ``memory``.
        data: sequence of prices to replay.
        window_size: size of the state window passed to ``get_state``.
        debug: when True, log each trade via ``logging.debug``.

    Returns:
        Tuple ``(total_profit, history)``.
    """
    total_profit = 0
    data_length = len(data) - 1

    history = []
    agent.inventory = []

    state = get_state(data, 0, window_size + 1)

    for t in range(data_length):
        reward = 0
        next_state = get_state(data, t + 1, window_size + 1)

        # select an action (removed per-step debug prints from the original)
        action = agent.act(state, is_eval=True)

        # BUY
        if action == 1:
            agent.inventory.append(data[t])
            history.append((data[t], "BUY"))
            if debug:
                logging.debug("Buy at: {}".format(format_currency(data[t])))

        # SELL: realize profit/loss on the oldest held position (FIFO)
        elif action == 2 and len(agent.inventory) > 0:
            bought_price = agent.inventory.pop(0)
            delta = data[t] - bought_price
            reward = delta  # max(delta, 0)
            total_profit += delta
            history.append((data[t], "SELL"))
            if debug:
                logging.debug("Sell at: {} | Position: {}".format(
                    format_currency(data[t]),
                    format_position(data[t] - bought_price)))
        # HOLD
        else:
            history.append((data[t], "HOLD"))

        done = (t == data_length - 1)
        agent.memory.append((state, action, reward, next_state, done))

        state = next_state
        if done:
            return total_profit, history
# ----- Example 5 -----
def evaluate_model(agent, data, window_size, debug):
    """Walk the price series once and report the agent's profit.

    At each step the agent chooses buy/sell/hold; a sell closes the
    oldest open position (FIFO). Every transition is pushed into
    ``agent.memory`` with a terminal flag on the last step.

    Returns:
        Tuple ``(total_profit, history)`` where *history* holds
        ``(price, "BUY"|"SELL"|"HOLD")`` entries.
    """
    last_index = len(data) - 1
    profit = 0
    trade_log = []
    agent.inventory = []
    current_state = get_state(data, 0, window_size + 1)

    for step in range(last_index):
        step_reward = 0
        upcoming_state = get_state(data, step + 1, window_size + 1)
        price = data[step]

        # select an action
        chosen = agent.act(current_state, is_eval=True)

        if chosen == 1:
            # open a new position at the current price
            agent.inventory.append(price)
            trade_log.append((price, "BUY"))
            if debug:
                logging.debug("Buy at: {}".format(format_currency(price)))
        elif chosen == 2 and agent.inventory:
            # close the oldest open position
            entry_price = agent.inventory.pop(0)
            step_reward = max(price - entry_price, 0)
            profit += price - entry_price
            trade_log.append((price, "SELL"))
            if debug:
                logging.debug("Sell at: {} | Position: {}".format(
                    format_currency(price),
                    format_position(price - entry_price)))
        else:
            trade_log.append((price, "HOLD"))

        finished = step == last_index - 1
        agent.memory.append(
            (current_state, chosen, step_reward, upcoming_state, finished))

        current_state = upcoming_state
        if finished:
            return profit, trade_log
# ----- Example 6 -----
def main(train_stock,
         val_stock,
         window_size,
         batch_size,
         ep_count,
         strategy="t-dqn",
         model_name="model_debug",
         pretrained=False,
         debug=False):
    """ Trains the stock trading bot using Deep Q-Learning.
    Please see https://arxiv.org/abs/1312.5602 for more details.

    Args: [python train.py --help]
    """
    agent = Agent(window_size,
                  strategy=strategy,
                  pretrained=pretrained,
                  model_name=model_name)

    # load both datasets up front
    train_data = get_stock_data(train_stock)
    val_data = get_stock_data(val_stock)

    # baseline offset between the first two validation prices
    initial_offset = val_data[1] - val_data[0]

    best_profit = 0
    if pretrained:
        # seed the best-profit bar from the pretrained model's performance
        best_profit = evaluate_model(agent, val_data, window_size, 0)[0]
        logging.debug("Best Profit: {}".format(format_currency(best_profit)))

    for episode in range(1, ep_count + 1):
        train_result = train_model(agent,
                                   episode,
                                   train_data,
                                   ep_count=ep_count,
                                   batch_size=batch_size,
                                   window_size=window_size)
        val_result, _ = evaluate_model(agent, val_data, window_size, debug)
        show_train_result(train_result, val_result, initial_offset)

        # persist the model whenever validation profit improves
        if val_result > best_profit:
            best_profit = val_result
            agent.save_best(best_profit)
            logging.debug("Best Profit: {}".format(
                format_currency(best_profit)))
# ----- Example 7 -----
def evaluate_model(agent, data, window_size, debug):
    """Evaluate a single-position strategy over a price DataFrame.

    The agent holds at most one open position (``agent.last_buy``); a
    sell is only taken when the close price exceeds the entry price.

    Returns:
        Tuple ``(total_profit, history)``.
    """
    steps = data.shape[0] - 1
    profit = 0
    log = []
    agent.last_buy = 0
    current = get_state(data, 0, window_size + 1)

    for idx in range(steps):
        gain = 0
        upcoming = get_state(data, idx + 1, window_size + 1)

        # select an action
        decision = agent.act(current, is_eval=True)
        close = data.iloc[idx].loc['close']

        if decision == 1 and agent.last_buy == 0:
            # open a position only when currently flat
            agent.last_buy = close
            log.append((close, "BUY"))
            if debug:
                logging.debug("Buy at: {}".format(format_currency(close)))
        elif decision == 2 and 0 < agent.last_buy < close:
            # close the position only when profitable
            gain = close - agent.last_buy
            if debug:
                logging.debug("Sell at: {} | Position: {}".format(
                    format_currency(close), format_position(gain)))
            profit += gain
            agent.last_buy = 0
            log.append((close, "SELL"))
        else:
            log.append((close, "HOLD"))

        finished = idx == steps - 1
        agent.memory.append((current, decision, gain, upcoming, finished))

        current = upcoming
        if finished:
            return profit, log
# ----- Example 8 -----
def decisions(agent, data, window_size, debug, stock, api):
    """Live trading loop: act on streaming prices for *stock* via the Alpaca *api*.

    Combines the agent's Q-policy with a news-sentiment signal, submits
    real orders once the historical portion of *data* has been replayed,
    and keeps training from replay memory as it goes. Runs until killed.

    Args:
        agent: trading agent exposing ``act``, ``inventory``, ``memory``,
            ``soft_save`` and ``train_experience_replay``.
        data: list of prices; extended in place with live bars.
        window_size: size of the state window passed to ``get_state``.
        debug: when True, log every decision via ``logging.debug``.
        stock: ticker symbol being traded.
        api: Alpaca API client.
    """
    # Initialize Variables
    total_profit = 0
    global orders
    orders = []
    history = []
    agent.inventory = []
    action = None
    sentiments = runNewsAnalysis(stock, api)
    state = get_state(data, 0, window_size + 1)

    t = 0

    # Main While Loop
    while True:

        data_length = len(data) - 1
        is_open = True

        # Only consult the market clock once we reach the live edge of the data
        if t == data_length - 1:
            # Wait for market to open.
            is_open = api.get_clock().is_open

        # Checks for if Market is open
        while not is_open:
            logging.info("Waiting for market to open...")
            clock = api.get_clock()
            opening_time = clock.next_open.replace(
                tzinfo=datetime.timezone.utc).timestamp()
            curr_time = clock.timestamp.replace(
                tzinfo=datetime.timezone.utc).timestamp()
            time_to_open = int((opening_time - curr_time) / 60)
            logging.info(str(time_to_open) + " minutes til market open.")
            logging.info("Last days profit: " + str(total_profit))
            time.sleep(300)
            is_open = api.get_clock().is_open
            if is_open:
                logging.info("Market opened.")

                # Runs Analysis on all new sources
                # (narrowed from bare `except:` so Ctrl-C still works)
                try:
                    sentiments = runNewsAnalysis(stock, api)
                except Exception:
                    logging.info("Error Collecting Sentiment")

                # Save last days data as a terminal transition
                if action is not None:
                    agent.memory.append(
                        (state, action, reward, next_state, True))
                    agent.soft_save()

                # Reinitialize for new day
                total_profit = 0
                orders = []
                history = []
                agent.inventory = []

        # At the live edge: wait a minute, then append the newest close price
        if t == data_length - 1:
            time.sleep(60)
            # NOTE(review): `stock_name` is not defined in this function —
            # presumably a module-level global; confirm it matches `stock`.
            date = api.get_barset(timeframe='minute',
                                  symbols=stock_name,
                                  limit=1,
                                  end=datetime.datetime.now())
            data.append(date.get(stock)[0].c)

        reward = 0
        next_state = get_state(data, t + 1, window_size + 1)

        # select an action
        action = agent.act(state, is_eval=True)

        # BUY (only when sentiment is non-negative)
        if action == 1 and sentiments >= 0:
            agent.inventory.append(data[t])

            # Buy using Alpaca API, only when acting on live data
            if t == data_length - 1:
                orders.append(submit_order_helper(1, stock, 'buy', api))

            history.append((data[t], "BUY"))
            if debug:
                logging.debug("Buy at: {} | Sentiment: {}".format(
                    format_currency(data[t]), format_sentiment(sentiments)))

        # SELL on a sell action or negative sentiment.
        # Fixed: the inventory check now guards BOTH triggers — previously a
        # negative sentiment with an empty inventory crashed on pop(0).
        # (This matches the revised version of this function in the repo.)
        elif (action == 2 or sentiments < 0) and len(agent.inventory) > 0:
            bought_price = agent.inventory.pop(0)
            reward = max(data[t] - bought_price, 0)
            total_profit += data[t] - bought_price

            # Sell all stock using Alpaca API.
            # Fixed: `is not 0` compared identity, not value; use `!=`.
            if t == data_length - 1 and len(orders) != 0:
                try:
                    qty = api.get_position(stock).qty
                    submit_order_helper(qty, stock, 'sell', api)
                    orders.pop()
                except Exception:
                    logging.info("No position!")

            history.append((data[t], "SELL"))
            if debug:
                logging.debug(
                    "Sell at: {} | Sentiment: {} | Position: {}".format(
                        format_currency(data[t]), format_sentiment(sentiments),
                        format_position(data[t] - bought_price)))

        # HOLD
        else:
            history.append((data[t], "HOLD"))
            if debug:
                logging.debug("Hold at: {} | Sentiment: {}".format(
                    format_currency(data[t]), format_sentiment(sentiments)))

        # store the transition and keep learning online
        agent.memory.append((state, action, reward, next_state, False))
        if len(agent.memory) > 32:
            agent.train_experience_replay(32)

        state = next_state
        t += 1
def decisions(agent, data, window_size, debug, stock, api):
    """Live trading loop (revised): trade *stock* via the Alpaca *api*.

    Combines the agent's Q-policy with a news-sentiment signal, retries
    transient API failures up to 30 times, sells down the position at the
    start of each new day, appends every live decision to
    ``data/<stock>_trading_data.csv``, and keeps training from replay
    memory. Runs until externally killed.

    NOTE(review): several latent issues are flagged inline below rather
    than changed, since the exact statement order drives live orders.
    """
    # Initialize Variables
    total_profit = 0
    global orders
    orders = []
    history = []
    agent.inventory = []
    action = None
    sentiments = runNewsAnalysis(stock, api, natural_lang)
    state = get_state(data, 0, window_size + 1)

    # decide_stock()
    t = 0

    # Main While Loop
    while True:

        data_length = len(data) - 1
        is_open = True

        # Checks for if the original 1000 data points were tested
        if t == data_length - 1:

            # Check for connection errors and retry 30 times
            cnt = 0
            while cnt <= 30:

                try:
                    # Wait for market to open.
                    is_open = api.get_clock().is_open
                    break

                # NOTE(review): bare `except:` also swallows KeyboardInterrupt.
                except:
                    logging.warning(
                        "Error in checking market status, retrying in 30s (" +
                        str(cnt) + "/30)")
                    time.sleep(30)
                    cnt += 1
                    continue

        # Checks for if Market is open
        while not is_open:
            logging.info("Waiting for market to open...")

            # Check for connection errors and retry 30 times
            cnt = 0
            while cnt <= 30:
                try:
                    clock = api.get_clock()
                    opening_time = clock.next_open.replace(
                        tzinfo=datetime.timezone.utc).timestamp()
                    curr_time = clock.timestamp.replace(
                        tzinfo=datetime.timezone.utc).timestamp()
                    time_to_open = int((opening_time - curr_time) / 60)
                    logging.info("Last days profit: {}".format(
                        format_currency(str(total_profit))))

                    # Countdown timer until market opens
                    while time_to_open > -1:
                        print(str(time_to_open) + " minutes til market open.",
                              end='\r')
                        time.sleep(60)
                        time_to_open -= 1

                    # Alternative timer here
                    # time.sleep(time_to_open *    60)
                    is_open = api.get_clock().is_open
                    break

                except:
                    logging.warning(
                        "Error in checking market status, retrying in 30s (" +
                        str(cnt) + "/30)")
                    time.sleep(30)
                    cnt += 1
                    continue

            # Initialization of new day, we only want this to happen once at the beginning of each day
            if is_open:
                logging.info("Market opened.")

                # Runs Analysis on all new sources
                try:
                    sentiments = runNewsAnalysis(stock, api, natural_lang)
                except:
                    logging.info("Error Collecting Sentiment")

                # Save last days data
                if action is not None:
                    agent.memory.append(
                        (state, action, reward, next_state, True))
                    agent.soft_save()

                # Reinitialize for new day
                total_profit = 0
                orders = []
                history = []
                # NOTE(review): `q` is assigned here but never read again.
                q = 0
                agent.inventory = []

                # ****COMMENT THIS OUT IF YOU DON'T WANT TO SELL ALL OF THE STOCKS AT THE BEGINNING OF NEW DAY****
                # Sell all stock using Alpaca API at the beginning of the new day
                if t == data_length - 1:

                    try:
                        qty = api.get_position(stock).qty

                    except:
                        logging.warning(
                            "Error fetching stock position, may not exist.")

                    # Just checks to see if I'm trying to sell zero or a negative number of stocks
                    # NOTE(review): if get_position raised above, `qty` is
                    # unbound here and this line raises NameError — confirm.
                    if int(qty) > 2:
                        submit_order_helper(int(qty) - 2, stock, 'sell', api)

        # Checks for if the original 1000 data points were tested
        if t == data_length - 1:
            time.sleep(60)

            # Check for connection errors and retry 30 times
            cnt = 0
            while cnt <= 30:
                try:
                    # NOTE(review): `stock_name` is not defined in this
                    # function; presumably a module-level global — confirm
                    # it matches `stock`.
                    date = api.get_barset(timeframe='minute',
                                          symbols=stock_name,
                                          limit=1,
                                          end=datetime.datetime.now())
                    break

                except:
                    logging.warning(
                        "Unable to retrieve barset, retrying in 30s (" +
                        str(cnt) + "/30)")
                    time.sleep(30)
                    cnt += 1
                    continue

            data.append(date.get(stock)[0].c)

        reward = 0
        next_state = get_state(data, t + 1, window_size + 1)

        # select an action
        action = agent.act(state, is_eval=True)

        # BUY
        if action == 1 and sentiments >= 0:
            # if action == 1:
            agent.inventory.append(data[t])

            # Buy using Alpaca API, only if it is realtime data
            if t == data_length - 1:
                file = open('data/' + stock + "_trading_data.csv", 'a')
                file.write(
                    str(datetime.datetime.now().strftime("%m/%d/%Y,%H:%M:%S"))
                    + ',BUY,$' + str(date.get(stock)[0].c) + '\n')
                file.close()
                global now
                global current_time
                global bbbpower
                # NOTE(review): every other call here uses
                # `datetime.datetime.now()`; if `datetime` is the module,
                # `datetime.now()` raises AttributeError — confirm imports.
                now = datetime.now()
                current_time = now.strftime("%H")
                account = api.get_account()
                # spread buying power across 4 stocks, sized by live price
                perstockpower = int(float(account.buying_power) / 4)
                bbbpower = int(perstockpower / float(si.get_live_price(stock)))
                orders.append(submit_order_helper(bbbpower, stock, 'buy', api))

            # Appends and logs
            history.append((data[t], "BUY"))
            if debug:
                logging.debug(
                    "Buy at: {}  | Sentiment: {} | Total Profit: {}".format(
                        format_currency(data[t]), format_sentiment(sentiments),
                        format_currency(total_profit)))
                # "Buy at: {}".format(format_currency(data[t])))

        # SELL
        elif (action == 2 or sentiments < 0) and len(agent.inventory) > 0:
            # elif action == 2 and len(agent.inventory) > 0:
            bought_price = agent.inventory.pop(0)
            reward = max(data[t] - bought_price, 0)
            total_profit += data[t] - bought_price

            # Sell all stock using Alpaca API
            # if t == data_length - 1 and len(orders) != 0:
            #    try:
            #        qty = api.get_position(stock).qty
            #        submit_order_helper(qty, stock, 'sell', api)
            #        orders.pop()
            #    except:
            #        logging.info("No position!")

            # Sell's one stock using Alpaca's API if it is in realtime
            if t == data_length - 1:
                file = open('data/' + stock + "_trading_data.csv", 'a')
                file.write(
                    str(datetime.datetime.now().strftime("%m/%d/%Y,%H:%M:%S"))
                    + ',SELL,$' + str(date.get(stock)[0].c) + '\n')
                file.close()
                position = api.get_position(stock)
                submit_order_helper(int(position.qty), stock, 'sell', api)
            history.append((data[t], "SELL"))
            if debug:
                logging.debug(
                    "Sell at: {} | Sentiment: {} | Position: {}".format(
                        format_currency(data[t]), format_sentiment(sentiments),
                        format_position(data[t] - bought_price)))
                # format_currency(data[t]), format_position(data[t] - bought_price)))

        # HOLD
        else:
            history.append((data[t], "HOLD"))
            if debug:
                logging.debug(
                    "Hold at: {} | Sentiment: {} | Total Profit: {}".format(
                        format_currency(data[t]), format_sentiment(sentiments),
                        format_currency(total_profit)))
                # format_currency(data[t])))

            if t == data_length - 1:
                file = open('data/' + stock + "_trading_data.csv", 'a')
                file.write(
                    str(datetime.datetime.now().strftime("%m/%d/%Y,%H:%M:%S"))
                    + ',HOLD,$' + str(date.get(stock)[0].c) + '\n')
                file.close()

        # store the transition and keep learning online
        agent.memory.append((state, action, reward, next_state, False))
        if len(agent.memory) > 32:
            agent.train_experience_replay(32)

        state = next_state
        t += 1
def evaluate_model(agent,
                   state,
                   next_state,
                   data,
                   t,
                   total_profit,
                   history,
                   reward,
                   window_size,
                   debug=False):
    """Run one evaluation step at index *t*, then plot a CSV of expenses.

    Args:
        agent: trading agent exposing ``act``, ``inventory`` and ``memory``.
        state: current state fed to ``agent.act``.
        next_state: state for step ``t + 1``, stored in the replay memory.
        data: sequence indexed by *t*.
        t: current time index into *data*.
        total_profit: running profit accumulated so far.
        history: list of ``(price, action_name)`` tuples, mutated in place.
        reward: default reward recorded when the step is not a sell.
        window_size: unused here; kept for signature compatibility.
        debug: when True, log each trade via ``logging.debug``.

    Returns:
        The updated total profit after this step.
    """
    # select an action (removed a stray debug `print(t)` from the original)
    action = agent.act(state, is_eval=True)

    # BUY
    if action == 1:
        agent.inventory.append(data[t])

        history.append((data[t], "BUY"))
        if debug:
            logging.debug("Buy at: {}".format(format_currency(data[t])))

    # SELL: realize profit/loss on the oldest held position (FIFO)
    elif action == 2 and len(agent.inventory) > 0:
        bought_price = agent.inventory.pop(0)
        delta = data[t] - bought_price
        reward = delta  # max(delta, 0)
        total_profit += delta

        history.append((data[t], "SELL"))
        if debug:
            logging.debug("Sell at: {} | Position: {}".format(
                format_currency(data[t]),
                format_position(data[t] - bought_price)))
    # HOLD
    else:
        history.append((data[t], "HOLD"))

    # code to plot a graph
    x = []
    y = []
    file = data[t]  # doing for particular csv files
    # Fixed: the original opened the literal path 'file' instead of the
    # `file` variable. NOTE(review): `file` is taken from data[t]; confirm
    # that entries of `data` really are CSV paths at this call site.
    with open(file, 'r') as csvfile:
        plots = csv.reader(csvfile, delimiter=',')
        for row in plots:
            x.append(int(row[0]))
            y.append(int(row[1]))

    plt.plot(x, y, marker='o')
    plt.title("Graph: Yahoo_url_test.py")

    plt.xlabel('Data')
    plt.ylabel('Expenses')

    plt.show()

    agent.memory.append((state, action, reward, next_state))

    return total_profit
# ----- Example 11 -----
def evaluate_model1(agent, symbol, data, window_size, debug=False):
    """Live-evaluate the agent on Yahoo Finance quotes for *symbol*.

    Prefills *data* with ``window_size`` scraped quotes, then trades
    inside the 09:14–21:49 (*tz*) window: buys split ``max_amount`` into
    ``quantity_1`` parts, sells close every currently profitable
    position, and each fill is appended to ``<symbol>.csv``. Trading
    stops when cumulative recorded profit drops to ``max_loss`` or when
    the transaction cap is exceeded with an empty inventory.

    Returns:
        Tuple ``(str(total_profit), history)`` when the transaction-cap
        exit fires; ``None`` when the time window or stop-loss ends the loop.
    """
    count = 0
    url = 'https://finance.yahoo.com/quote/{}?p={}&.tsrc=fin-srch'.format(
        symbol, symbol)

    # prefill the price window with live quotes
    while count < window_size:
        live = Real(url, count)
        print(live)
        data.append(live)
        count += 1
    total_profit = 0
    history = []
    agent.inventory = []
    state = get_state(data, 0, window_size + 1)
    number_of_buys = 0
    max_transaction = 100  #  maximum buy/sell limit
    quantity_1 = 5  #  divide max amount in the number of parts

    max_amount = 1000  #  maximum  amount bot is allowed to trade with
    max_loss = -5  #  stop loss amount in dollar
    t = 0
    step_size = 10
    datetime_list = []
    p = []
    quantity = {}  # buy price -> quantity bought at that price
    status = []
    profit = []
    fq = []
    time_now = datetime.datetime.now(tz).time()
    # trade only while inside the allowed time-of-day window
    while (datetime.time(9, 14, tzinfo=tz) < time_now < datetime.time(
            21, 49, tzinfo=tz)):
        live = Real(url, count)
        count += 1
        time_now = datetime.datetime.now(tz).time()
        data.append(live)
        reward = 0
        next_state = get_state(data, t + 1, window_size + 1)

        action = agent.act(state, is_eval=True)

        print("Live Price: ", live)
        # BUY: capped by transaction count and open-position limit
        if action == 1 and number_of_buys <= max_transaction and len(
                agent.inventory) <= quantity_1:
            datetime_list.append(datetime.datetime.now(tz))
            p.append(live)
            status.append("BOUGHT")
            profit.append(0)
            fp = floatPrecision((max_amount / (quantity_1 * live)), 2)
            quantity[live] = fp
            fq.append(fp)

            agent.inventory.append(data[t + window_size])
            history.append((data[t + window_size], "BUY"))
            number_of_buys += 1
            df1 = pd.DataFrame({
                'Datetime': [datetime.datetime.now()],
                'Symbol': [symbol],
                'Buy/Sell': ['Buy'],
                'Quantity': [fp],
                'Price': [live],
                'Profit/loss': [0]
            })
            df1['Datetime'] = df1['Datetime'].apply(
                lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
            if not os.path.isfile('{}.csv'.format(symbol)):
                df1.to_csv('{}.csv'.format(symbol), index=False)
            else:
                df1.to_csv('{}.csv'.format(symbol),
                           index=False,
                           mode='a',
                           header=False)
            print("Buy at: {}".format(format_currency(data[t + window_size])))

        # SELL: close EVERY currently-profitable open position
        elif action == 2 and len(agent.inventory) > 0:
            # Fixed: iterate over a snapshot — the original removed items
            # from agent.inventory while iterating it directly, which
            # skips the element following each removal.
            for i in list(agent.inventory):
                temp = data[t + window_size] - i
                if temp > 0:
                    # shave ~0.11% off the recorded quantity (fees)
                    q = float(
                        floatPrecision(float(quantity[i]) * 0.9989, 2))
                    pft = temp * q
                    agent.inventory.remove(i)
                    delta = pft
                    reward = delta  # max(delta, 0)
                    total_profit += delta
                    del quantity[i]
                    datetime_list.append(datetime.datetime.now(tz))
                    p.append(live)
                    status.append("SOLD")
                    profit.append(delta)
                    fq.append(q)

                    history.append((data[t + window_size], "SELL"))
                    df2 = pd.DataFrame({
                        'Datetime': [datetime.datetime.now()],
                        'Symbol': [symbol],
                        'Buy/Sell': ['Sell'],
                        'Quantity': [q],
                        'Price': [live],
                        'Profit/loss': [pft]
                    })
                    df2['Datetime'] = df2['Datetime'].apply(
                        lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
                    df2.to_csv('{}.csv'.format(symbol),
                               index=False,
                               mode='a',
                               header=False)
                    print("Sell at: {} | Position: {}".format(
                        format_currency(data[t + window_size]),
                        format_position(delta)))

        # HOLD (removed a dead `if False:` logging branch from the original)
        else:
            history.append((data[t], "HOLD"))
        time.sleep(10)
        done = False
        agent.memory.append((state, action, reward, next_state, done))

        state = next_state
        t += 1
        # stop-loss: abort when cumulative recorded profit drops too far
        if sum(profit) <= max_loss:
            break
        if agent.inventory == [] and number_of_buys > max_transaction:

            return str(total_profit), history
# ----- Example 12 -----
def evaluate_model(agent, price, window_size, cur_symbol, debug):
    """Trade *cur_symbol* live with a pre-trained agent until the buy limit is used up.

    Repeatedly polls the current price (``Real``), builds a sliding-window
    state (``get_state``) and lets the agent choose BUY / SELL / HOLD.
    Every (simulated) transaction is appended to '5result.csv'.

    Returns:
        (total_profit, history): cumulative profit and the list of
        (price, action) tuples recorded on every step.

    NOTE(review): depends on module-level names ``Real``, ``get_state``,
    ``floatPrecision``, ``client`` and ``tz`` defined elsewhere in this file.
    """
    quantity_1 = 1  # any value between 1-4 : 1 =100%, 2=50%, 3 = 33%, 4 = 25%, 5 = 20% and so on...
    max_amount = 19  # Maximum authorized amount
    loss_limit = -25  # Maximum loss limit to terminate the trading in dollar
    buy_percent = 0.0005  # percent at which it should buy, currently 0.1% = 0.1/100 = 0.001
    sell_percent = 0.0029  # percent at which it should sell, currently 0.1%
    loss_percent = -0.0032  # stop loss if price falls, currently -0.3%
    transaction = 50  # number of maximum transactions
    buy_range = 0.0008  # allowed buy upto, currently 0.4%
    # NOTE(review): loss_limit, buy_percent, sell_percent, loss_percent,
    # transaction and buy_range are assigned but never used below — the
    # advertised stop-loss / percent triggers are not actually enforced.
    total_profit = 0
    spent_amount = 0  # running (negative) cost basis of closed positions
    loss = []         # per-transaction snapshot of total_profit (never checked)
    quantity = {}     # maps live price at buy time -> rounded order quantity

    buy_limit = 40    # hard cap on the number of BUY actions
    num_buys = 0

    
    history = []      # (price, "BUY"/"SELL"/"HOLD") per step
    agent.inventory = []
    
    state = get_state(price, 0, window_size + 1)
    # LOT_SIZE step size for the symbol: order quantities must be rounded to it.
    step_size = float(next(filter(lambda f: f['filterType'] == 'LOT_SIZE', client.get_symbol_info(cur_symbol)['filters']))['stepSize'])
    t = 2
    while True:
        mdata =  Real(cur_symbol)   # latest live price
        #print(mdata)
        price.append(mdata)
        reward = 0
        # NOTE(review): the window index is offset by -2 to compensate for t
        # starting at 2 — confirm this lines up with get_state's expectations.
        next_state = get_state(price, t + 1 - 2, window_size + 1)


        # select an action
        action = agent.act(state, is_eval=True)

        # Stop once the buy limit is reached and every position is closed.
        if num_buys==buy_limit and len(agent.inventory) == 0:
            break

        # BUY: at most 5 open positions and at most buy_limit buys overall.
        if action == 1  and len(agent.inventory)<5 and num_buys<buy_limit:

            quantity[mdata] = floatPrecision((max_amount / (quantity_1 * mdata)),step_size)

            #client.order_market_buy(symbol=cur_symbol,quantity=quantity[mdata])
            agent.inventory.append(price[t])
            # NOTE(review): inventory stores price[t] while quantity is keyed
            # by mdata (the just-appended live price); if these ever differ,
            # the SELL lookup quantity[bought_price] below raises KeyError —
            # verify the t indexing against the appends above.

            history.append((price[t], "BUY"))
            if debug:
                logging.debug("Buy at: {}".format(format_currency(price[t])))

            df2 = pd.DataFrame({'Datetime': [datetime.datetime.now(tz)], 'Symbol': [cur_symbol], 'Buy/Sell': ['Buy'],
                                            'Quantity': [quantity_1], 'Price': [mdata], 'Profit/loss': [total_profit]})
            df2['Datetime'] = df2['Datetime'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
            # Write header only on first creation of the log file.
            if not os.path.isfile('5result.csv'):
                df2.to_csv('5result.csv', index=False)
            else:
                df2.to_csv('5result.csv', index=False, mode='a', header=False)

            max_amount += total_profit
            loss.append(total_profit)
            num_buys += 1


        
        # SELL
        #The fix
        elif (action == 2 and len(agent.inventory) > 0 ):
            bought_price = agent.inventory.pop(0)  # close oldest position (FIFO)
            delta = price[t] - bought_price
            reward = delta #max(delta, 0)
            total_profit += delta
            quantity1 = quantity[bought_price]
            #client.order_market_sell(symbol=cur_symbol,quantity=quantity[bought_price])

            spent_amount -= float(quantity1) * bought_price;

            max_amount += total_profit
            loss.append(total_profit)

            df2 = pd.DataFrame({'Datetime': [datetime.datetime.now(tz)], 'Symbol': [cur_symbol], 'Buy/Sell': ['Sell'],
                                            'Quantity': [quantity_1], 'Price': [mdata], 'Profit/loss': [total_profit]})
            df2['Datetime'] = df2['Datetime'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
            if not os.path.isfile('5result.csv'):
                df2.to_csv('5result.csv', index=False)
            else:
                df2.to_csv('5result.csv', index=False, mode='a', header=False)

            history.append((price[t], "SELL"))
            if debug:
                logging.debug("Sell at: {} | Position: {}".format(
                    format_currency(price[t]), format_position(price[t] - bought_price)))


        # HOLD
        else:
            history.append((price[t], "HOLD"))

        t += 1;
        # NOTE(review): done is True on every step, so every transition is
        # stored in agent.memory as terminal — confirm this is intended.
        done = True

        agent.memory.append((state, action, reward, next_state, done))

        state = next_state
        #time.sleep(1)
   
 
    return total_profit, history
def evaluate_model1(agent, symbol, data, window_size, debug):
    """Live-trade *symbol* with a trained agent, placing real Binance market orders.

    Warm-up: appends ``window_size`` live prices to ``data``.  Then loops
    forever: polls a new live price, builds the sliding-window state and
    lets the agent act.  BUY places a market buy (at most
    ``max_transaction`` buys, budget ``max_amount`` split ``quantity_1``
    ways); SELL closes the first open position that is currently
    profitable, with a 0.1% fee haircut.  Once all positions are closed
    and the buy cap is reached, the trade log is written to
    '<symbol>_BOUGHT_SOLD_DATA.csv' and (total_profit, history) is
    returned.

    NOTE(review): relies on module-level names ``Real``, ``get_state``,
    ``floatPrecision``, ``client`` and ``tz`` defined elsewhere in this
    file, and on ``datetime`` being the class (``from datetime import
    datetime``) — other functions in this file call
    ``datetime.datetime.now`` instead; confirm the import style.
    """
    # Warm-up: collect enough live prices to form the first state window.
    count = 0
    while count < window_size:
        live = Real(client, symbol)
        data.append(live)
        count += 1
    total_profit = 0
    history = []           # (price, "BUY"/"SELL"/"HOLD") per step
    agent.inventory = []   # open buy prices
    state = get_state(data, 0, window_size + 1)
    number_of_buys = 0
    max_transaction = 10   # hard cap on BUY orders
    quantity_1 = 5         # budget is split into this many parts per buy
    max_amount = 1000      # total budget in quote currency
    t = 0
    # LOT_SIZE step size: order quantities must be rounded to this precision.
    step_size = float(next(filter(lambda f: f['filterType'] == 'LOT_SIZE', client.get_symbol_info(symbol)['filters']))['stepSize'])
    #print(step_size)
    # Parallel trade-log columns, dumped to CSV on exit.
    datetime_list = []
    p = []
    quantity = {}          # buy price -> order quantity (as returned by floatPrecision)
    status = []
    profit = []
    fq = []

    while True:
        live = Real(client, symbol)
        data.append(live)
        reward = 0
        next_state = get_state(data, t + 1, window_size + 1)

        action = agent.act(state, is_eval=True)

        #print("Live Price: ",live)
        # BUY
        if action == 1 and number_of_buys < max_transaction:
            datetime_list.append(datetime.now(tz))
            p.append(live)
            status.append("BOUGHT")
            profit.append(0)
            fp = floatPrecision((max_amount / (quantity_1 * live)), step_size)
            quantity[live] = fp
            fq.append(fp)
            client.order_market_buy(
                symbol=symbol,
                quantity=fp)

            agent.inventory.append(data[t+window_size])
            history.append((data[t+window_size], "BUY"))
            number_of_buys += 1
            if debug:
                print("Buy at: {}".format(format_currency(data[t+window_size])))

        # SELL: close the first open position that is currently profitable.
        elif action == 2 and len(agent.inventory) > 0:
            for i in agent.inventory:
                temp = data[t+window_size] - i
                if temp > 0:
                    q = quantity[i]
                    pft = temp * q * 0.999  # 0.1% exchange-fee haircut
                    agent.inventory.remove(i)
                    delta = pft
                    reward = delta  # max(delta, 0)
                    total_profit += delta
                    del quantity[i]
                    datetime_list.append(datetime.now(tz))
                    p.append(live)
                    status.append("SOLD")
                    profit.append(delta)
                    fq.append(q * 0.999)
                    # FIX: was a bare `order_market_sell(...)` (NameError at
                    # runtime); every other order in this file goes through
                    # the `client` object.
                    client.order_market_sell(
                        symbol=symbol,
                        quantity=q*0.999)

                    history.append((data[t+window_size], "SELL"))
                    if debug:
                        print("Sell at: {} | Position: {}".format(
                            format_currency(data[t+window_size]), format_position(delta)))
                    # Sell at most one position per step (also makes the
                    # remove-while-iterating above safe).
                    break

        # HOLD
        else:
            history.append((data[t], "HOLD"))

        done = False
        agent.memory.append((state, action, reward, next_state, done))

        state = next_state
        t += 1
        # Exit once the buy budget is exhausted and every position is closed.
        if agent.inventory == [] and number_of_buys >= max_transaction:
            df = pd.DataFrame({'Datetime': datetime_list, 'Price': p, 'Quantity Bought/Sold': fq, 'Status': status, 'Profit made': profit})
            df['Datetime'] = df['Datetime'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
            df.to_csv(symbol + '_BOUGHT_SOLD_DATA.csv', index=False)
            return total_profit, history
                        # NOTE(review): orphaned fragment — this appears to
                        # be the tail of *another* evaluate_model1 variant
                        # fused in by the source extraction (it sits at deep
                        # indentation immediately after a `return` and is
                        # syntactically unreachable/invalid here).
                        pft = temp
                        agent.inventory.remove(i)
                        delta = pft
                        reward = delta #max(delta, 0)
                        total_profit += delta

                        history.append((data[t+window_size-1], "SELL"))
                        if debug:
                            logging.debug("Sell at: {} | Position: {}".format(
                                format_currency(data[t+window_size-1]), format_position(delta)))
                        break
        
        else:
            history.append((data[t], "HOLD"))
            if False:
                logging.debug("Hold at: {}".format(format_currency(data[t+window_size-1])))
        
        done=False
        agent.memory.append((state, action, reward, next_state, done))

        state = next_state
        t += 1
        if agent.inventory == [] and number_of_buys >= max_transaction:
            return total_profit, history
#---------------------------------------------------------------------------------------------------------------------------
def main(symbol):
    """Set up and launch a live evaluation run for *symbol*.

    NOTE(review): this definition appears truncated by the source
    extraction — it ends abruptly after assigning model_name, so the
    code that loads the model and calls evaluate_model is missing here.
    """
    price = []       # live price history, filled by the evaluation loop
    window_size = 10
    time_now = datetime.now(tz).time()  # current wall-clock time in tz
    model_name='model_double-dqn_GOOG_50_10'
# ---- Example #15 (scrape-artifact separator between code samples; vote count: 0) ----
def evaluate_model(agent, data, window_size, debug):
    """Back-test a trained agent on a historical price series.

    The agent starts with a cash balance of 1e7 (``agent.asset``).  On
    BUY it spends as much cash as possible in whole shares at ``data[t]``;
    on SELL it liquidates the entire inventory; otherwise it holds.  The
    reward is the percentage return of the position.

    Args:
        agent: trained agent exposing ``act``, ``memory``, ``asset``,
            ``inventory`` (types are project-defined).
        data: sequence of prices, one per time step.
        window_size: length of the sliding state window fed to the agent.
        debug: when True, log every decision via ``logging.debug``.

    Returns:
        (total_profit, history) where history is a list of
        (amount, action) tuples.  Previously returned None when
        ``len(data) <= 1``; now returns (0, []) in that case.
    """
    total_profit = 0
    data_length = len(data) - 1

    history = []
    agent.asset = 1e7      # starting cash balance
    agent.inventory = []   # list of [buy_price, n_shares] lots

    state = get_state(data, 0, window_size + 1)

    for t in range(data_length):
        reward = 0
        delta = 0
        next_state = get_state(data, t + 1, window_size + 1)

        # select an action
        action = agent.act(state, is_eval=True)

        # BUY: spend as much cash as possible at today's price.
        if action == 1:
            if agent.asset < data[t]:
                # Not enough cash for a single share — forced hold.
                history.append((data[t], "HOLD"))
                if debug:
                    logging.debug(
                        "Cannot Buy, Hold at: {} | Day_Index: {}".format(
                            format_currency(data[t]), t))

            else:
                # Guaranteed >= 1 here since agent.asset >= data[t].
                # (The original re-checked nStocks == 0 and recomputed the
                # same expression — an unreachable no-op, removed.)
                nStocks = agent.asset // data[t]

                agent.asset -= nStocks * data[t]
                agent.inventory.append([data[t], nStocks])

                history.append((data[t] * nStocks, "BUY"))
                if debug:
                    logging.debug("Buy at: {}, {} | Day_Index: {}".format(
                        format_currency(data[t]), nStocks, t))

        # SELL: liquidate every open lot at today's price.
        elif action == 2 and len(agent.inventory) > 0:
            bought_sum = sum(price * n for price, n in agent.inventory)
            nStocks = sum(n for _, n in agent.inventory)
            agent.inventory = []

            delta = data[t] * nStocks - bought_sum
            agent.asset += data[t] * nStocks

            # Percentage return; guarded like the HOLD branch against a
            # zero cost basis (all lots bought at price 0).
            reward = delta / bought_sum * 100 if bought_sum > 0 else 0
            total_profit += delta

            history.append((data[t] * nStocks, "SELL"))
            if debug:
                logging.debug(
                    "Sell at: {} {} | Position: {} | Total: {} | Reward: {} | Day_Index: {}"
                    .format(format_currency(data[t]), nStocks,
                            format_position(delta),
                            format_position(total_profit), reward, t))

        # HOLD: reward is the unrealized fractional return of the open position.
        else:
            bought_sum = sum(price * n for price, n in agent.inventory)
            nStocks = sum(n for _, n in agent.inventory)
            delta = data[t] * nStocks - bought_sum

            reward = delta / bought_sum if bought_sum > 0 else 0
            history.append((data[t], "HOLD"))
            if debug:
                logging.debug(
                    "Hold at: {} | Reward: {} | Day_Index: {}".format(
                        format_currency(data[t]), reward, t))

        done = (t == data_length - 1)
        agent.memory.append((state, action, reward, next_state, done))

        state = next_state
        if done:
            return total_profit, history

    # Reached only when len(data) <= 1 and the loop body never ran.
    return total_profit, history