Example #1
def test_trade_history_huobi_methods():

    load_keys(API_KEY_PATH)
    key = get_key_by_exchange(EXCHANGE.HUOBI)

    time_end = get_now_seconds_utc()
    time_start = 0  # time_end - POLL_TIMEOUT

    pair_name = get_currency_pair_to_huobi(CURRENCY_PAIR.BTC_TO_LSK)

    huobi_orders_by_pair = get_order_history_huobi(key, pair_name, time_start,
                                                   time_end)

    for pair_id in huobi_orders_by_pair:
        pair_name = get_currency_pair_to_huobi(pair_id)
        print "PAIR NAME: ", pair_name
        for b in huobi_orders_by_pair[pair_id]:
            print b

    res, order_history = get_order_history_huobi(key, pair_name, time_start,
                                                 time_end)
    if len(order_history) > 0:
        for b in order_history:
            print b
    pg_conn = init_pg_connection(_db_host=DB_HOST,
                                 _db_port=DB_PORT,
                                 _db_name=DB_NAME)

    load_recent_huobi_trades_to_db(pg_conn,
                                   time_start,
                                   time_end,
                                   unique_only=True)
Example #2
def test_expired_deal_placement():
    load_keys(API_KEY_PATH)
    priority_queue = get_priority_queue()
    ts = get_now_seconds_utc()
    order = Trade(DEAL_TYPE.SELL, EXCHANGE.BINANCE, CURRENCY_PAIR.BTC_TO_STRAT, price=0.001, volume=5.0,
                  order_book_time=ts, create_time=ts, execute_time=ts, order_id='whatever')

    msg = "Replace existing order with new one - {tt}".format(tt=order)
    err_code, json_document = init_deal(order, msg)
    print json_document
    order.order_id = parse_order_id(order.exchange_id, json_document)
    priority_queue.add_order_to_watch_queue(ORDERS_EXPIRE_MSG, order)
Example #3
def test_failed_order_placement_bittrex():
    load_keys(API_KEY_PATH)

    ts = get_now_seconds_utc()
    order = Trade(DEAL_TYPE.SELL, EXCHANGE.BITTREX, CURRENCY_PAIR.BTC_TO_ETH,
                  price=0.075, volume=0.1, order_book_time=ts, create_time=ts)

    msg = "Testing huobi - {tt}".format(tt=order)
    err_code, json_document = init_deal(order, msg)
    print json_document

    msg_queue = get_message_queue()
    msg_queue.add_order(FAILED_ORDERS_MSG, order)
Example #4
def test_binance_xlm():
    if not YES_I_KNOW_WHAT_AM_I_DOING:
        die_hard("test_binance_xlm may issue a real trade!")

    load_keys(API_KEY_PATH)
    key = get_key_by_exchange(EXCHANGE.BINANCE)
    pair_id = CURRENCY_PAIR.BTC_TO_XLM
    pair_name = get_currency_pair_name_by_exchange_id(pair_id,
                                                      EXCHANGE.BINANCE)
    err, json_repr = add_buy_order_binance(key,
                                           pair_name,
                                           price=0.00003000,
                                           amount=100)
    print json_repr
Example #5
def test_poloniex_doge():
    if not YES_I_KNOW_WHAT_AM_I_DOING:
        die_hard("test_poloniex_doge may issue a real trade!")

    load_keys(API_KEY_PATH)
    key = get_key_by_exchange(EXCHANGE.POLONIEX)
    pair_id = CURRENCY_PAIR.BTC_TO_DGB
    pair_name = get_currency_pair_name_by_exchange_id(pair_id,
                                                      EXCHANGE.POLONIEX)
    err, json_repr = add_buy_order_poloniex(key,
                                            pair_name,
                                            price=0.00000300,
                                            amount=100)
    print json_repr
Example #6
def process_failed_orders(args):
    """
                We try to address following issue

            Due to network issue or just bugs we may end up in situation that we are failed to place order
            Or we think so. Such orders registered in dedicated queue. We want to re-process them
            to minimise loss.

            Option 1: We managed to place order, just didn't get proper response from exchange
            - i.e. didn't wait enough for exchange to response
            Option 2: We managed to place order, just exchange were overloaded and decided to
            return to us some errors ()
            Option 3: We didn't managed to place order
                nonce issue - particular poloniex
                exchange issue - kraken
                ill fate - :(
            Option 4: ??? TODO

            First we try to find order in open or executed.
            In case we find it - update order_id in db.
            If it still open add it to watch list for expired orders processing.

            If not we can replace it by market with idea that there is high probability that other arbitrage deal were
            successfully placed

    :param args:
    :return:
    """

    pg_conn, settings = process_args(args)
    load_keys(settings.key_path)

    priority_queue, msg_queue, local_cache = init_queues(settings)

    cnt = 0

    while True:
        order = msg_queue.get_next_order(FAILED_ORDERS_MSG)
        if order is not None:
            process_failed_order(order, msg_queue, priority_queue, local_cache,
                                 pg_conn)

        sleep_for(1)
        cnt += 1

        if cnt >= HEARTBEAT_TIMEOUT:
            cnt = 0
            print_to_console("Failed orders processing heartbeat",
                             LOG_ALL_ERRORS)
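
The decision tree in the docstring above can be condensed into a few lines. The sketch below is a hypothetical restatement, not the project's process_failed_order: the exchange lookup is simulated with a plain dict and every name is a stand-in.

def recover_failed_order_sketch(order_id, orders_on_exchange):
    # orders_on_exchange: {order_id: "open" | "executed"}, stand-in data
    state = orders_on_exchange.get(order_id)
    if state == "open":
        # found and still open: persist order_id, then watch for expiration
        return "update db, add to expired-orders watch list"
    if state == "executed":
        # found and already filled: only the db record needs fixing
        return "update db"
    # never reached the exchange: the opposite arbitrage leg most likely
    # executed, so replace this leg by a market order to cap the loss
    return "replace by market order"

print(recover_failed_order_sketch("abc", {"abc": "open"}))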
Example #7
def load_trade_history(args):
    """
        Retrieve executed trades from ALL exchanges via the REST API
        and save them into the db.

        This data is later used to analyse the profitability of trading
        and the bot's performance.

    :param args: period, exchanges, connection details
    :return:
    """

    pg_conn, settings = process_args(args)

    log_initial_settings(
        "Starting trade history retrieval for bots using following exchanges: \n",
        settings.exchanges)

    if args.start_time is None or args.end_time is None:
        end_time = get_now_seconds_utc()
        start_time = end_time - 24 * 3600
    else:
        end_time = parse_time(args.end_time, '%Y-%m-%d %H:%M:%S')
        start_time = parse_time(args.start_time, '%Y-%m-%d %H:%M:%S')

    if end_time <= start_time:
        die_hard("Wrong time interval provided! {ts0} - {ts1}".format(
            ts0=start_time, ts1=end_time))

    load_keys(settings.key_path)

    while True:
        for exchange_id in settings.exchanges:
            method = get_trade_retrieval_method_by_exchange(exchange_id)
            method(pg_conn, start_time, end_time)
            sleep_for(1)

        print_to_console("Trade retrieval heartbeat", LOG_ALL_DEBUG)

        sleep_for(TRADE_POLL_TIMEOUT)

        end_time = get_now_seconds_utc()
        start_time = end_time - 24 * 3600
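
One plausible shape for get_trade_retrieval_method_by_exchange is a plain dispatch table keyed by exchange id. This is an assumption about the implementation; the id and the loader below are placeholders.

def load_recent_huobi_trades_to_db_stub(pg_conn, start_time, end_time):
    pass  # stand-in for a real per-exchange loader

TRADE_RETRIEVAL_METHODS = {
    0: load_recent_huobi_trades_to_db_stub,  # e.g. EXCHANGE.HUOBI
}

def get_trade_retrieval_method_by_exchange_sketch(exchange_id):
    return TRADE_RETRIEVAL_METHODS[exchange_id]

method = get_trade_retrieval_method_by_exchange_sketch(0)
method(None, 0, 1)  # same call shape as in the loop above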
Example #8
def process_expired_orders(args):
    """

    :param args: file name
    :return:
    """
    settings = CommonSettings.from_cfg(args.cfg)
    set_log_folder(settings.log_folder)
    set_logging_level(settings.logging_level_id)

    load_keys(settings.key_path)

    priority_queue, msg_queue, local_cache = init_queues(settings)

    cnt = 0

    while True:
        curr_ts = get_now_seconds_utc()

        order = priority_queue.get_oldest_order(ORDERS_EXPIRE_MSG)

        if order:
            msg = "Current expired order - {}".format(order)
            log_to_file(msg, EXPIRED_ORDER_PROCESSING_FILE_NAME)
            order_age = curr_ts - order.create_time
            if order_age < ORDER_EXPIRATION_TIMEOUT:
                msg = "A bit early - {t1} {t2} WILLL SLEEP".format(
                    t1=order_age, t2=ORDER_EXPIRATION_TIMEOUT)
                log_to_file(msg, EXPIRED_ORDER_PROCESSING_FILE_NAME)
                sleep_for(ORDER_EXPIRATION_TIMEOUT - order_age)

            process_expired_order(order, msg_queue, priority_queue,
                                  local_cache)

        sleep_for(1)
        cnt += 1

        if cnt >= HEARTBEAT_TIMEOUT:
            cnt = 0
            print_to_console("Watch list is empty sleeping", LOG_ALL_ERRORS)
            log_to_file("Watch list is empty sleeping",
                        EXPIRED_ORDER_PROCESSING_FILE_NAME)
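
The early-arrival branch sleeps exactly long enough for the order to reach ORDER_EXPIRATION_TIMEOUT. A worked example with invented timestamps and an assumed 60-second timeout:

ORDER_EXPIRATION_TIMEOUT = 60       # illustrative value, seconds

curr_ts = 1517938561                # "now", seconds since epoch
create_time = 1517938516            # when the order was created
order_age = curr_ts - create_time   # 45 seconds old
if order_age < ORDER_EXPIRATION_TIMEOUT:
    wait = ORDER_EXPIRATION_TIMEOUT - order_age
    print(wait)                     # 15: sleep 15 more seconds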
Example #9
def test_open_orders_retrieval_arbitrage():

    sell_exchange_id = EXCHANGE.BINANCE
    buy_exchange_id = EXCHANGE.POLONIEX
    pair_id = CURRENCY_PAIR.BTC_TO_OMG
    threshold = 2.0
    reverse_threshold = 2.0
    balance_threshold = 5.0
    deal_expire_timeout = 60
    logging_level = LOG_ALL_DEBUG

    cfg = ArbitrageConfig(sell_exchange_id, buy_exchange_id, pair_id,
                          threshold, reverse_threshold, balance_threshold,
                          deal_expire_timeout, logging_level)
    load_keys(API_KEY_PATH)
    processor = ConnectionPool(pool_size=2)
    print cfg
    res = get_open_orders_for_arbitrage_pair(cfg, processor)
    print "Length:", len(res)
    for r in res:
        print r
Example #10
def test_failed_deal_placement():
    load_keys(API_KEY_PATH)
    msg_queue = get_message_queue()
    ts = 1517938516
    order = Trade(DEAL_TYPE.SELL, EXCHANGE.BITTREX, CURRENCY_PAIR.BTC_TO_STRAT, price=0.000844, volume=5.0,
                  order_book_time=ts, create_time=ts, execute_time=ts, order_id=None)

    #   from dao.order_utils import get_open_orders_by_exchange
    #   r = get_open_orders_by_exchange(EXCHANGE.BITTREX, CURRENCY_PAIR.BTC_TO_STRAT)

    #   for rr in r:
    #       print rr

    #   raise
    #
    # msg = "Replace existing order with new one - {tt}".format(tt=order)
    # err_code, json_document = init_deal(order, msg)
    # print json_document
    # order.deal_id = parse_deal_id(order.exchange_id, json_document)

    # msg_queue.add_order(ORDERS_MSG, order)
    sleep_for(3)
    msg_queue.add_order(FAILED_ORDERS_MSG, order)
    print order
Example #11
    parser = argparse.ArgumentParser(description="Constantly poll two exchanges for the order book of a particular "
                                                 "pair and initiate sell/buy deals for arbitrage opportunities")

    parser.add_argument('--threshold', action="store", type=float, required=True)
    parser.add_argument('--balance_threshold', action="store", type=float, required=True)
    parser.add_argument('--reverse_threshold', action="store", type=float, required=True)
    parser.add_argument('--sell_exchange_id', action="store", type=int, required=True)
    parser.add_argument('--buy_exchange_id', action="store", type=int, required=True)
    parser.add_argument('--pair_id', action="store", type=int, required=True)
    parser.add_argument('--deal_expire_timeout', action="store", type=int, required=True)
    parser.add_argument('--cfg', action="store", required=True)

    arguments = parser.parse_args()

    cfg = ArbitrageConfig.from_args(arguments)

    app_settings = CommonSettings.from_cfg(cfg)

    set_logging_level(app_settings.logging_level_id)
    set_log_folder(app_settings.log_folder)
    load_keys(app_settings.key_path)

    # to avoid time-consuming checks later - validate arguments here
    for exchange_id in [cfg.sell_exchange_id, cfg.buy_exchange_id]:
        pair_name = get_currency_pair_name_by_exchange_id(cfg.pair_id, exchange_id)
        if pair_name is None:
            log_dont_supported_currency(cfg, exchange_id, cfg.pair_id)
            exit()

    ArbitrageListener(cfg, app_settings).start()
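
The parser above can be exercised without a shell by handing parse_args an explicit argv list. A minimal runnable sketch that reproduces a subset of the flags with invented values:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--threshold', action="store", type=float, required=True)
parser.add_argument('--pair_id', action="store", type=int, required=True)
parser.add_argument('--cfg', action="store", required=True)

arguments = parser.parse_args(['--threshold', '2.0',
                               '--pair_id', '3',
                               '--cfg', 'common.cfg'])
print(arguments)  # Namespace(cfg='common.cfg', pair_id=3, threshold=2.0)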
Example #12
def watch_balance_for_exchange(args):
    """
            This routine updates the balance in the redis CACHE
            for ALL coins at ONE exchange for the active key set.

            NOTE:   It still relies on the REST api - i.e. it is not proactive.
                    For some exchanges the balance is not updated immediately.

                    Initially all exchanges were polled sequentially,
                    but that led to delays in the past
                    due to exchange errors or throttling.

    :param args: config file and exchange_id
    :return:
    """
    settings = CommonSettings.from_cfg(args.cfg)

    exchange_id = get_exchange_id_by_name(args.exchange)
    if exchange_id not in EXCHANGE.values():
        log_wrong_exchange_id(exchange_id)
        die_hard("Exchange id {} seems to be unknown? 0_o".format(exchange_id))

    log_initial_settings(
        "Starting balance monitoring for following exchange: \n",
        [exchange_id])

    cache = connect_to_cache(host=settings.cache_host,
                             port=settings.cache_port)
    msg_queue = get_message_queue(host=settings.cache_host,
                                  port=settings.cache_port)

    load_keys(settings.key_path)
    set_log_folder(settings.log_folder)
    set_logging_level(settings.logging_level_id)

    init_balances(settings.exchanges, cache)

    cnt = 0

    while True:
        # initial balances were already loaded above via init_balances
        sleep_for(BALANCE_POLL_TIMEOUT)

        cnt += BALANCE_POLL_TIMEOUT

        log_balance_update_heartbeat(exchange_id)

        balance_for_exchange = update_balance_by_exchange(exchange_id, cache)
        while balance_for_exchange is None:
            log_cant_update_balance(exchange_id)
            sleep_for(1)
            balance_for_exchange = update_balance_by_exchange(
                exchange_id, cache)

        if cnt >= BALANCE_HEALTH_CHECK_TIMEOUT:
            cnt = 0
            log_last_balances(settings.exchanges, cache, msg_queue)

            for base_currency_id in BASE_CURRENCY:
                threshold = BASE_CURRENCIES_BALANCE_THRESHOLD[base_currency_id]
                if not balance_for_exchange.do_we_have_enough(
                        base_currency_id, threshold):
                    log_not_enough_base_currency(exchange_id, base_currency_id,
                                                 threshold,
                                                 balance_for_exchange,
                                                 msg_queue)
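
do_we_have_enough is assumed here to compare the available amount of a base currency against a fixed threshold. A minimal self-contained sketch of that check (class and data are invented):

class BalanceSketch(object):
    def __init__(self, available):
        self.available = available  # {currency_id: amount}

    def do_we_have_enough(self, currency_id, threshold):
        return self.available.get(currency_id, 0.0) >= threshold

balance = BalanceSketch({"BTC": 0.05})
print(balance.do_we_have_enough("BTC", 0.1))  # False -> would trigger an alert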
Example #13
def arbitrage_between_pair(args):
    cfg = ArbitrageConfig.from_args(args)

    app_settings = CommonSettings.from_cfg(args.cfg)

    set_logging_level(app_settings.logging_level_id)
    set_log_folder(app_settings.log_folder)
    load_keys(app_settings.key_path)

    priority_queue, msg_queue, local_cache = init_queues(app_settings)

    processor = ConnectionPool(pool_size=2)

    # to avoid time-consuming checks later - validate arguments here
    for exchange_id in [args.sell_exchange_id, args.buy_exchange_id]:
        pair_name = get_currency_pair_name_by_exchange_id(
            cfg.pair_id, exchange_id)
        if pair_name is None:
            log_dont_supported_currency(cfg, exchange_id, cfg.pair_id)
            exit()

    deal_cap = MarketCap(cfg.pair_id, get_now_seconds_utc())
    deal_cap.update_max_volume_cap(NO_MAX_CAP_LIMIT)
    update_min_cap(cfg, deal_cap, processor)

    balance_state = dummy_balance_init(timest=0,
                                       default_volume=Decimal("0"),
                                       default_available_volume=Decimal("0"))

    if not YES_I_KNOW_WHAT_AM_I_DOING:
        die_hard("LIVE TRADING!")

    while True:

        if get_now_seconds_utc() - deal_cap.last_updated > MIN_CAP_UPDATE_TIMEOUT:
            update_min_cap(cfg, deal_cap, processor)

        for mode_id in [DEAL_TYPE.ARBITRAGE, DEAL_TYPE.REVERSE]:
            cur_timest_sec = get_now_seconds_utc()

            method = search_for_arbitrage if mode_id == DEAL_TYPE.ARBITRAGE else adjust_currency_balance
            active_threshold = cfg.threshold if mode_id == DEAL_TYPE.ARBITRAGE else cfg.reverse_threshold

            balance_state = get_updated_balance_arbitrage(
                cfg, balance_state, local_cache)

            if balance_state.expired(cur_timest_sec, cfg.buy_exchange_id,
                                     cfg.sell_exchange_id,
                                     BALANCE_EXPIRED_THRESHOLD):
                log_balance_expired_errors(cfg, msg_queue, balance_state)
                die_hard("Balance expired")

            order_book_src, order_book_dst = get_order_books_for_arbitrage_pair(
                cfg, cur_timest_sec, processor)

            if order_book_dst is None or order_book_src is None:
                log_failed_to_retrieve_order_book(cfg)
                sleep_for(3)
                continue

            if is_order_books_expired(order_book_src, order_book_dst,
                                      local_cache, msg_queue,
                                      cfg.log_file_name):
                sleep_for(3)
                continue

            local_cache.cache_order_book(order_book_src)
            local_cache.cache_order_book(order_book_dst)

            # init_deals_with_logging_speedy
            status_code, deal_pair = method(order_book_src,
                                            order_book_dst,
                                            active_threshold,
                                            cfg.balance_threshold,
                                            init_deals_with_logging_speedy,
                                            balance_state,
                                            deal_cap,
                                            type_of_deal=mode_id,
                                            worker_pool=processor,
                                            msg_queue=msg_queue)

            add_orders_to_watch_list(deal_pair, priority_queue)

            print_to_console("I am still alive! ", LOG_ALL_DEBUG)
            sleep_for(2)

        sleep_for(3)

        deal_cap.update_max_volume_cap(NO_MAX_CAP_LIMIT)
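
The two conditional expressions at the top of the mode loop select a handler and a threshold per deal type. Restated as a tiny helper with stand-in ids and handler names (the real functions take many more arguments):

DEAL_TYPE_ARBITRAGE, DEAL_TYPE_REVERSE = 0, 1  # stand-in ids

def pick_mode(mode_id, threshold, reverse_threshold):
    if mode_id == DEAL_TYPE_ARBITRAGE:
        return "search_for_arbitrage", threshold
    return "adjust_currency_balance", reverse_threshold

print(pick_mode(DEAL_TYPE_REVERSE, 2.0, 3.0))  # ('adjust_currency_balance', 3.0)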
Example #14
    start_time = parse_time(config.get("profit_report", "start_time"),
                            '%Y-%m-%d %H:%M:%S')
    end_time = parse_time(config.get("profit_report", "end_time"),
                          '%Y-%m-%d %H:%M:%S')

    if end_time <= start_time:
        print "Wrong time interval provided! {ts0} - {ts1}".format(
            ts0=start_time, ts1=end_time)
        assert False

    pg_conn = init_pg_connection(_db_host=db_host,
                                 _db_port=db_port,
                                 _db_name=db_name)

    key_path = config.get("keys", "path_to_api_keys")
    log_folder = config.get("logging", "logs_folder")
    load_keys(key_path)
    set_log_folder(log_folder)

    if should_fetch_history_to_db:
        fetch_trades_history_to_db(pg_conn, start_time, end_time,
                                   fetch_from_start)

    orders, history_trades = prepare_data(pg_conn, start_time, end_time)

    missing_orders, failed_orders, orders_with_trades = group_trades_by_orders(
        orders, history_trades)

    # stage 2 - bucket everything by pair_id
    trades_to_order = defaultdict(list)
    for order, trade_list in orders_with_trades:
        trades_to_order[order.pair_id].append((order, trade_list))