Example #1
def process_placed_orders(args):
    """
            Check for new orders placed by ANY of trading bots


    :param args:
    :return:
    """
    pg_conn, settings = process_args(args)

    msg_queue = get_message_queue(host=settings.cache_host,
                                  port=settings.cache_port)

    cnt = 0

    while True:
        order = msg_queue.get_next_order(ORDERS_MSG)
        if order is not None:
            save_order_into_pg(order, pg_conn)
            print_to_console("Saving {} in db".format(order), LOG_ALL_ERRORS)
        sleep_for(1)
        cnt += 1

        if cnt >= HEARTBEAT_TIMEOUT:
            cnt = 0
            print_to_console("Order storing heartbeat", LOG_ALL_ERRORS)
Example #2
def load_all_public_data(args):
    """
                06.08.2019 As far as I remember it is NOT main data retrieval routine

                Retrieve ticker, trade history, candles and order book
                from ALL supported exchanges
                and store it within DB
                every TIMEOUT seconds through REST api.

                Majority of exchanges tend to throttle clients who send too many requests
                from the same ip - so be mindful about timeout.

    :param args:
    :return:
    """

    pg_conn, settings = process_args(args)

    processor = ConnectionPool()

    def split_on_errors(raw_response):
        # The speedup helpers return a mix of parsed objects and error strings;
        # keep them apart so error messages are never inserted into the DB.
        valid_objects = [x for x in raw_response if not isinstance(x, str)]
        error_strings = [x for x in raw_response if isinstance(x, str)]

        return valid_objects, error_strings

    while True:
        end_time = get_now_seconds_utc()
        start_time = end_time - POLL_PERIOD_SECONDS

        candles, errs = split_on_errors(get_ohlc_speedup(start_time, end_time, processor))
        bulk_insert_to_postgres(pg_conn, Candle.table_name, Candle.columns, candles)

        trade_history, errs = split_on_errors(get_history_speedup(start_time, end_time, processor))
        bulk_insert_to_postgres(pg_conn, TradeHistory.table_name, TradeHistory.columns, trade_history)

        tickers, errs = split_on_errors(get_ticker_speedup(end_time, processor))
        bulk_insert_to_postgres(pg_conn, Ticker.table_name, Ticker.columns, tickers)

        if should_print_debug():
            msg = """History retrieval at {ts}:
                Candle size - {num}
                Ticker size - {num3}
                Trade history size - {num2}
                """.format(ts=end_time, num=len(candles), num3=len(tickers), num2=len(trade_history))
            print_to_console(msg, LOG_ALL_ERRORS)
            log_to_file(msg, "candles_trade_history.log")

        print_to_console("Before sleep...", LOG_ALL_ERRORS)
        sleep_for(POLL_PERIOD_SECONDS)
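
bulk_insert_to_postgres is used for every entity above but is not shown. A minimal sketch of one possible implementation, assuming pg_conn is (or wraps) a psycopg2 connection, that columns is a comma-separated string of column names, and that each entry has a hypothetical to_tuple() method matching those columns:

from psycopg2.extras import execute_values

def bulk_insert_to_postgres(pg_conn, table_name, columns, entries):
    # Nothing retrieved in this poll cycle - skip the DB round-trip
    if not entries:
        return
    query = "INSERT INTO {table} ({columns}) VALUES %s".format(table=table_name, columns=columns)
    with pg_conn.cursor() as cursor:
        # execute_values expands the single %s placeholder into one row per entry
        execute_values(cursor, query, [entry.to_tuple() for entry in entries])
    pg_conn.commit()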
Example #3
def process_failed_orders(args):
    """
                We try to address following issue

            Due to network issue or just bugs we may end up in situation that we are failed to place order
            Or we think so. Such orders registered in dedicated queue. We want to re-process them
            to minimise loss.

            Option 1: We managed to place order, just didn't get proper response from exchange
            - i.e. didn't wait enough for exchange to response
            Option 2: We managed to place order, just exchange were overloaded and decided to
            return to us some errors ()
            Option 3: We didn't managed to place order
                nonce issue - particular poloniex
                exchange issue - kraken
                ill fate - :(
            Option 4: ??? TODO

            First we try to find order in open or executed.
            In case we find it - update order_id in db.
            If it still open add it to watch list for expired orders processing.

            If not we can replace it by market with idea that there is high probability that other arbitrage deal were
            successfully placed

    :param args:
    :return:
    """

    pg_conn, settings = process_args(args)
    load_keys(settings.key_path)

    priority_queue, msg_queue, local_cache = init_queues(settings)

    cnt = 0

    while True:
        order = msg_queue.get_next_order(FAILED_ORDERS_MSG)
        if order is not None:
            process_failed_order(order, msg_queue, priority_queue, local_cache,
                                 pg_conn)

        sleep_for(1)
        cnt += 1

        if cnt >= HEARTBEAT_TIMEOUT:
            cnt = 0
            print_to_console("Failed orders processing heartbeat",
                             LOG_ALL_ERRORS)
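
process_failed_order is where the options above are actually handled, but it is not shown in this example. The sketch below only illustrates the flow the docstring describes; every helper in it (get_open_orders, get_executed_orders, find_by_arbitrage_id, update_order_id_in_db, add_to_watch_list, place_market_order) is a hypothetical name, not the repository's real API:

def process_failed_order(order, msg_queue, priority_queue, local_cache, pg_conn):
    # Options 1 and 2: the order may have reached the exchange despite the error
    open_orders = get_open_orders(order.exchange_id, order.pair_id)
    executed_orders = get_executed_orders(order.exchange_id, order.pair_id)

    match = find_by_arbitrage_id(order, open_orders + executed_orders)
    if match is not None:
        # The order exists on the exchange side - sync its real order_id into the DB
        update_order_id_in_db(order, match.order_id, pg_conn)
        if match in open_orders:
            # Still open: hand it over to expired-order processing
            add_to_watch_list(match, local_cache)
        return

    # Option 3: the order never made it. Replace it with a market order, assuming
    # the opposite leg of the arbitrage deal was most likely placed successfully.
    place_market_order(order, priority_queue, msg_queue)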
Example #4
def load_order_books(args):
    """
        Periodically retrieve FULL order books
        from ALL supported exchanges via REST api
        and save it for further analysis in DB.

        Under the hood requests are sent in async fashion via gevent library

    :param args: config file
    :return:
    """

    pg_conn, _ = process_args(args)

    processor = ConnectionPool()

    while True:
        ts = get_now_seconds_utc()

        results = get_order_book_speedup(ts, processor)

        order_book = [x for x in results if not isinstance(x, str)]

        load_to_postgres(order_book, ORDER_BOOK_TYPE_NAME, pg_conn)

        order_book_size = len(order_book)
        order_book_ask_size = 0
        order_book_bid_size = 0

        for entry in order_book:
            if entry is not None:
                order_book_ask_size += len(entry.ask)
                order_book_bid_size += len(entry.bid)

        if should_print_debug():
            msg = """Orderbook retrieval at {tt}:
            Order book size - {num1} Order book asks - {num10} Order book bids - {num20}""".format(
                tt=ts,
                num1=order_book_size,
                num10=order_book_ask_size,
                num20=order_book_bid_size)
            print_to_console(msg, LOG_ALL_ERRORS)
            log_to_file(msg, "order_book.log")

        print_to_console("Before sleep...", LOG_ALL_ERRORS)
        sleep_for(ORDER_BOOK_POLL_TIMEOUT)
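
get_order_book_speedup and ConnectionPool are not shown here. A minimal sketch of the kind of gevent fan-out the docstring alludes to could look like the following, where get_order_book_for_exchange and the list of exchange ids are placeholders, not the repository's actual names:

import gevent
from gevent.pool import Pool

def fetch_order_books_concurrently(exchange_ids, ts, pool_size=10):
    # One REST request per exchange, all running concurrently on gevent greenlets
    pool = Pool(pool_size)
    greenlets = [pool.spawn(get_order_book_for_exchange, exchange_id, ts)
                 for exchange_id in exchange_ids]
    gevent.joinall(greenlets)
    # greenlet.value is None if the call failed; real code would also inspect .exception
    return [g.value for g in greenlets if g.value is not None]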
Example #5
def load_trade_history(args):
    """
        Retrieve executed trades from ALL exchanges via REST api
        and save into db

        Those data later will be used for analysis
        of profitability of trading and bot's performance

    :param args: period, exchanges, connection details
    :return:
    """

    pg_conn, settings = process_args(args)

    log_initial_settings(
        "Starting trade history retrieval for bots using following exchanges: \n",
        settings.exchanges)

    if args.start_time is None or args.end_time is None:
        end_time = get_now_seconds_utc()
        start_time = end_time - 24 * 3600
    else:
        end_time = parse_time(args.end_time, '%Y-%m-%d %H:%M:%S')
        start_time = parse_time(args.start_time, '%Y-%m-%d %H:%M:%S')

    if end_time <= start_time:
        die_hard("Wrong time interval provided! {ts0} - {ts1}".format(
            ts0=start_time, ts1=end_time))

    load_keys(settings.key_path)

    while True:
        for exchange_id in settings.exchanges:
            method = get_trade_retrieval_method_by_exchange(exchange_id)
            method(pg_conn, start_time, end_time)
            sleep_for(1)

        print_to_console("Trade retrieval heartbeat", LOG_ALL_DEBUG)

        sleep_for(TRADE_POLL_TIMEOUT)

        end_time = get_now_seconds_utc()
        start_time = end_time - 24 * 3600
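
parse_time is not shown in this example. Since its result is compared against get_now_seconds_utc(), it presumably returns UTC epoch seconds; a minimal sketch under that assumption:

import calendar
from datetime import datetime

def parse_time(date_string, date_format):
    # Interpret the timestamp as UTC and convert it to epoch seconds,
    # so it is directly comparable with get_now_seconds_utc()
    return calendar.timegm(datetime.strptime(date_string, date_format).timetuple())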
Example #6
            save_alarm_into_pg(entry[2], entry[3], pg_connection)

        print_to_console(
            "Total amount of tickers = {num}".format(num=len(tickers)),
            LOG_ALL_DEBUG)
        load_to_postgres(tickers, TICKER_TYPE_NAME, pg_connection)

        print_to_console("Before sleep...", LOG_ALL_DEBUG)
        sleep_for(POLL_PERIOD_SECONDS)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""
    Arbitrage monitoring service. Every {POLL_TIMEOUT} seconds:
        - retrieve tickers
        - compare high / low per pair
        - if the difference is bigger than {THRES}%
        - trigger a notification message
    """.format(POLL_TIMEOUT=POLL_PERIOD_SECONDS, THRES=TRIGGER_THRESHOLD))

    parser.add_argument('--cfg', action='store', required=True)

    arguments = parser.parse_args()

    pg_conn, settings = process_args(arguments)

    msg_queue = get_message_queue(host=settings.cache_host,
                                  port=settings.cache_port)

    analyse_tickers(pg_conn, msg_queue)
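
analyse_tickers itself is not shown in this fragment. A minimal sketch of the per-pair comparison the parser description refers to; the Ticker fields (ask, bid), the msg_queue.add_message call and the ARBITRAGE_MSG constant are assumptions for illustration, only TRIGGER_THRESHOLD appears in the original code:

def check_arbitrage_opportunity(pair_name, tickers_for_pair, msg_queue):
    # Lowest ask and highest bid for one currency pair across all exchanges
    best_ask = min(tickers_for_pair, key=lambda t: t.ask)
    best_bid = max(tickers_for_pair, key=lambda t: t.bid)

    # Relative spread in percent; notify only above the configured threshold
    difference = 100.0 * (best_bid.bid - best_ask.ask) / best_ask.ask
    if difference > TRIGGER_THRESHOLD:
        msg = "Arbitrage opportunity for {pair}: buy at {buy}, sell at {sell}, spread {diff:.2f}%".format(
            pair=pair_name, buy=best_ask.ask, sell=best_bid.bid, diff=difference)
        msg_queue.add_message(ARBITRAGE_MSG, msg)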