Example #1
def check_deal_placements():
    if not YES_I_KNOW_WHAT_AM_I_DOING:
        die_hard("check_deal_placements may issue a real trade!")

    create_time = get_now_seconds_utc()
    fake_order_book_time1 = -10
    fake_order_book_time2 = -20
    deal_volume = 5
    pair_id = CURRENCY_PAIR.BTC_TO_ARDR

    sell_exchange_id = EXCHANGE.POLONIEX
    buy_exchange_id = EXCHANGE.BITTREX

    difference = "difference is HUGE"
    file_name = "test.log"

    msg_queue = get_message_queue()

    processor = ConnectionPool(pool_size=2)

    trade_at_first_exchange = Trade(DEAL_TYPE.SELL, sell_exchange_id, pair_id,
                                    0.00000001, deal_volume,
                                    fake_order_book_time1, create_time)

    trade_at_second_exchange = Trade(DEAL_TYPE.BUY, buy_exchange_id, pair_id,
                                     0.00004, deal_volume,
                                     fake_order_book_time2, create_time)

    trade_pairs = TradePair(trade_at_first_exchange, trade_at_second_exchange,
                            fake_order_book_time1, fake_order_book_time2,
                            DEAL_TYPE.DEBUG)

    init_deals_with_logging_speedy(trade_pairs, difference, file_name,
                                   processor, msg_queue)
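
The guard at the top of this example is a safety interlock: unless the module-level flag is armed, die_hard() aborts before any order can reach a live exchange. The helper's body is not shown in these examples; a minimal sketch of the assumed behaviour (report and abort) looks like this:

import sys

# Kill switch: scripts that can place real trades must be explicitly
# armed by flipping this module-level flag first.
YES_I_KNOW_WHAT_AM_I_DOING = False

def die_hard(msg):
    # Assumed behaviour: report the reason and abort the process so that
    # no further code can touch a live exchange.
    sys.stderr.write(msg + "\n")
    sys.exit(1)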
Example #2
def forward_new_messages(args):
    settings = CommonSettings.from_cfg(args.cfg)

    set_log_folder(settings.log_folder)
    set_logging_level(settings.logging_level_id)
    msg_queue = get_message_queue(host=settings.cache_host,
                                  port=settings.cache_port)

    do_we_have_data = False

    while True:
        for topic_id in QUEUE_TOPICS:
            msg = msg_queue.get_message_nowait(topic_id)
            if msg is not None:
                do_we_have_data = True
                notification_id = get_notification_id_by_topic_name(topic_id)
                err_code = send_single_message(msg, notification_id)
                if err_code == STATUS.FAILURE:
                    err_msg = """telegram_notifier can't send message to telegram. Message will be re-processed on next iteration.
                        {msg}""".format(msg=msg)
                    log_to_file(err_msg, "telegram_notifier.log")
                    print_to_console(err_msg, LOG_ALL_ERRORS)
                    msg_queue.add_message_to_start(topic_id, msg)
                    sleep_for(1)

        #
        #   NOTE: this can still lead to throttling by Telegram
        #

        if not do_we_have_data:
            sleep_for(1)

        do_we_have_data = False
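
The failure branch above puts the undelivered message back at the head of its topic queue, so it is retried before anything newer is sent. A toy, self-contained illustration of that requeue-on-failure semantics (the sender below is hypothetical and fails every message exactly once):

from collections import deque

queue = deque(["msg-1", "msg-2", "msg-3"])
failed_once = set()

def send_single_message(msg):
    # Hypothetical sender: every message fails on its first attempt.
    if msg not in failed_once:
        failed_once.add(msg)
        return False
    return True

while queue:
    msg = queue.popleft()          # get_message_nowait analogue
    if not send_single_message(msg):
        queue.appendleft(msg)      # add_message_to_start analogue

print("all messages delivered:", sorted(failed_once))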
Example #3
def process_placed_orders(args):
    """
            Check for new orders placed by ANY of trading bots


    :param args:
    :return:
    """
    pg_conn, settings = process_args(args)

    msg_queue = get_message_queue(host=settings.cache_host,
                                  port=settings.cache_port)

    cnt = 0

    while True:
        order = msg_queue.get_next_order(ORDERS_MSG)
        if order is not None:
            save_order_into_pg(order, pg_conn)
            print_to_console("Saving {} in db".format(order), LOG_ALL_ERRORS)
        sleep_for(1)
        cnt += 1

        if cnt >= HEARTBEAT_TIMEOUT:
            cnt = 0
            print_to_console("Order storing heartbeat", LOG_ALL_ERRORS)
Example #4
def init_queues(app_settings):
    priority_queue = get_priority_queue(host=app_settings.cache_host,
                                        port=app_settings.cache_port)
    msg_queue = get_message_queue(host=app_settings.cache_host,
                                  port=app_settings.cache_port)
    local_cache = get_cache(host=app_settings.cache_host,
                            port=app_settings.cache_port)

    return priority_queue, msg_queue, local_cache
Example #5
def test_message_queue():
    msg_queue = get_message_queue()
    msg_queue.add_message(DEAL_INFO_MSG, "DEAL_INFO_MSG: Test DEAL_INFO_MSG")
    msg_queue.add_message(ARBITRAGE_MSG, "ARBITRAGE_MSG: Test ARBITRAGE_MSG")
    msg_queue.add_message(DEBUG_INFO_MSG,
                          "DEBUG_INFO_MSG: Test DEBUG_INFO_MSG")
    msg_queue.add_message(ORDERS_MSG, "ORDERS_MSG: Test ORDERS_MSG")
    msg_queue.add_message(FAILED_ORDERS_MSG,
                          "FAILED_ORDERS_MSG: Test FAILED_ORDERS_MSG")
Example #6
def test_failed_order_placement_bittrex():
    load_keys(API_KEY_PATH)

    ts = get_now_seconds_utc()
    order = Trade(DEAL_TYPE.SELL, EXCHANGE.BITTREX, CURRENCY_PAIR.BTC_TO_ETH,
                  price=0.075, volume=0.1, order_book_time=ts, create_time=ts)

    msg = "Testing huobi - {tt}".format(tt=order)
    err_code, json_document = init_deal(order, msg)
    print(json_document)

    msg_queue = get_message_queue()
    msg_queue.add_order(FAILED_ORDERS_MSG, order)
Example #7
def test_failed_deal_placement():
    load_keys(API_KEY_PATH)
    msg_queue = get_message_queue()
    ts = 1517938516
    order = Trade(DEAL_TYPE.SELL, EXCHANGE.BITTREX, CURRENCY_PAIR.BTC_TO_STRAT, price=0.000844, volume=5.0,
                  order_book_time=ts, create_time=ts, execute_time=ts, order_id=None)

    #   from dao.order_utils import get_open_orders_by_exchange
    #   r = get_open_orders_by_exchange(EXCHANGE.BITTREX, CURRENCY_PAIR.BTC_TO_STRAT)

    #   for rr in r:
    #       print(rr)

    #   raise
    #
    # msg = "Replace existing order with new one - {tt}".format(tt=order)
    # err_code, json_document = init_deal(order, msg)
    # print(json_document)
    # order.deal_id = parse_deal_id(order.exchange_id, json_document)

    # msg_queue.add_order(ORDERS_MSG, order)
    sleep_for(3)
    msg_queue.add_order(FAILED_ORDERS_MSG, order)
    print(order)
Example #8
def watch_balance_for_exchange(args):
    """
            Those routine update balance at redis CACHE
            for ALL coins at ONE exchange for active key set.

            NOTE:   It still rely on REST api - i.e. not proactive
                    For some exchanges - balance not immediately updated

                    Initially all exchanges were polled sequentially
                    But it lead to delays in the past
                    due to exchange errors or throttling

    :param args: config file and exchange_id
    :return:
    """
    settings = CommonSettings.from_cfg(args.cfg)

    exchange_id = get_exchange_id_by_name(args.exchange)
    if exchange_id not in EXCHANGE.values():
        log_wrong_exchange_id(exchange_id)
        die_hard("Exchange id {} seems to be unknown? 0_o".format(exchange_id))

    log_initial_settings(
        "Starting balance monitoring for following exchange: \n",
        [exchange_id])

    cache = connect_to_cache(host=settings.cache_host,
                             port=settings.cache_port)
    msg_queue = get_message_queue(host=settings.cache_host,
                                  port=settings.cache_port)

    load_keys(settings.key_path)
    set_log_folder(settings.log_folder)
    set_logging_level(settings.logging_level_id)

    init_balances(settings.exchanges, cache)

    cnt = 0

    while True:
        # The initial balance was already loaded above via init_balances
        sleep_for(BALANCE_POLL_TIMEOUT)

        cnt += BALANCE_POLL_TIMEOUT

        log_balance_update_heartbeat(exchange_id)

        balance_for_exchange = update_balance_by_exchange(exchange_id, cache)
        while balance_for_exchange is None:
            log_cant_update_balance(exchange_id)
            sleep_for(1)
            balance_for_exchange = update_balance_by_exchange(
                exchange_id, cache)

        if cnt >= BALANCE_HEALTH_CHECK_TIMEOUT:
            cnt = 0
            log_last_balances(settings.exchanges, cache, msg_queue)

            for base_currency_id in BASE_CURRENCY:
                threshold = BASE_CURRENCIES_BALANCE_THRESHOLD[base_currency_id]
                if not balance_for_exchange.do_we_have_enough(
                        base_currency_id, threshold):
                    log_not_enough_base_currency(exchange_id, base_currency_id,
                                                 threshold,
                                                 balance_for_exchange,
                                                 msg_queue)
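
The final check compares each base-currency balance against a configured floor. The real do_we_have_enough is not shown in these examples; its assumed semantics reduce to a simple comparison, illustrated below with hypothetical threshold values:

BASE_CURRENCIES_BALANCE_THRESHOLD = {"BTC": 0.1, "ETH": 1.0}  # hypothetical

def do_we_have_enough(balance, base_currency_id, threshold):
    # Assumed semantics: the available amount for the base currency
    # must be at least the configured threshold.
    return balance.get(base_currency_id, 0.0) >= threshold

balance_for_exchange = {"BTC": 0.05, "ETH": 2.0}  # hypothetical snapshot
for currency_id, floor in BASE_CURRENCIES_BALANCE_THRESHOLD.items():
    if not do_we_have_enough(balance_for_exchange, currency_id, floor):
        print("not enough {}: below {}".format(currency_id, floor))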
Example #9
            save_alarm_into_pg(entry[2], entry[3], pg_connection)

        print_to_console(
            "Total amount of tickers = {num}".format(num=len(tickers)),
            LOG_ALL_DEBUG)
        load_to_postgres(tickers, TICKER_TYPE_NAME, pg_connection)

        print_to_console("Before sleep...", LOG_ALL_DEBUG)
        sleep_for(POLL_PERIOD_SECONDS)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""
    Arbitrage monitoring service, every {POLL_TIMEOUT}:
        - retrieve tickers
        - compare high / low per pair
        - if the difference is bigger than {THRES}%
        - trigger a notification message
    """.format(POLL_TIMEOUT=POLL_PERIOD_SECONDS, THRES=TRIGGER_THRESHOLD))

    parser.add_argument('--cfg', action='store', required=True)

    arguments = parser.parse_args()

    pg_conn, settings = process_args(arguments)

    msg_queue = get_message_queue(host=settings.cache_host,
                                  port=settings.cache_port)

    analyse_tickers(pg_conn, msg_queue)
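
For reference, the required --cfg flag can be exercised without a shell by handing an argument list straight to parse_args; the config path below is hypothetical:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--cfg', action='store', required=True)

args = parser.parse_args(['--cfg', 'common.cfg'])  # hypothetical path
print(args.cfg)                                    # -> common.cfg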