Example #1
def get_history_time_fast():
    end_time = get_now_seconds_utc()
    start_time = end_time - POLL_PERIOD_SECONDS
    processor = ConnectionPool()

    trade_history = get_history_speedup(start_time, end_time, processor)
    return trade_history
Example #2
def test_pool():
    def heavy_load():
        sleep_for(3)
        print "heavy_load"

    def more_heavy_load():
        sleep_for(30)
        print "more_heavy_load"

    def batch(iterable, n=1):
        l = len(iterable)
        for ndx in range(0, l, n):
            yield iterable[ndx:min(ndx + n, l)]

    processor = ConnectionPool(10)

    iters = []
    for x in xrange(100):
        iters.append(x)

    for work_batch in batch(iters, 10):
        futures = []
        for x in work_batch:
            futures.append(processor.network_pool.spawn(more_heavy_load))
        gevent.joinall(futures)

    for work_batch in batch(iters, 10):
        futures = []
        for x in work_batch:
            futures.append(processor.network_pool.spawn(heavy_load))
        gevent.joinall(futures)
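
The batch() helper above just yields consecutive slices of at most n items, so each gevent.joinall call waits on one chunk of work at a time. A quick standalone illustration of that slicing behaviour (plain standard-library Python, nothing project-specific assumed):

def batch(iterable, n=1):
    l = len(iterable)
    for ndx in range(0, l, n):
        yield iterable[ndx:min(ndx + n, l)]

# Prints [0, 1, 2], [3, 4, 5], [6] - the last chunk may be shorter than n.
for chunk in batch(list(range(7)), 3):
    print(chunk)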
Example #3
def check_deal_placements():
    if not YES_I_KNOW_WHAT_AM_I_DOING:
        die_hard("check_deal_placements may issue a real trade!")

    create_time = get_now_seconds_utc()
    fake_order_book_time1 = -10
    fake_order_book_time2 = -20
    deal_volume = 5
    pair_id = CURRENCY_PAIR.BTC_TO_ARDR

    sell_exchange_id = EXCHANGE.POLONIEX
    buy_exchange_id = EXCHANGE.BITTREX

    difference = "difference is HUGE"
    file_name = "test.log"

    msg_queue = get_message_queue()

    processor = ConnectionPool(pool_size=2)

    trade_at_first_exchange = Trade(DEAL_TYPE.SELL, sell_exchange_id, pair_id,
                                    0.00000001, deal_volume,
                                    fake_order_book_time1, create_time)

    trade_at_second_exchange = Trade(DEAL_TYPE.BUY, buy_exchange_id, pair_id,
                                     0.00004, deal_volume,
                                     fake_order_book_time2, create_time)

    trade_pairs = TradePair(trade_at_first_exchange, trade_at_second_exchange,
                            fake_order_book_time1, fake_order_book_time2,
                            DEAL_TYPE.DEBUG)

    init_deals_with_logging_speedy(trade_pairs, difference, file_name,
                                   processor, msg_queue)
Example #4
    def _init_infrastructure(self, app_settings):

        self.priority_queue, self.msg_queue, self.local_cache = init_queues(app_settings)

        self.processor = ConnectionPool(pool_size=2)

        self.sell_exchange_updates = Queue()
        self.buy_exchange_updates = Queue()

        buy_subscription_constructor = get_subcribtion_by_exchange(self.buy_exchange_id)
        sell_subscription_constructor = get_subcribtion_by_exchange(self.sell_exchange_id)

        self.buy_subscription = buy_subscription_constructor(self.pair_id, on_update=self.on_order_book_update)
        self.sell_subscription = sell_subscription_constructor(self.pair_id, on_update=self.on_order_book_update)
Example #5
def load_all_public_data(args):
    """
                06.08.2019 As far as I remember it is NOT main data retrieval routine

                Retrieve ticker, trade history, candles and order book
                from ALL supported exchanges
                and store it within DB
                every TIMEOUT seconds through REST api.

                Majority of exchanges tend to throttle clients who send too many requests
                from the same ip - so be mindful about timeout.

    :param args:
    :return:
    """

    pg_conn, settings = process_args(args)

    processor = ConnectionPool()

    def split_on_errors(raw_response):
        valid_objects = filter(lambda x: type(x) != str, raw_response)
        error_strings = filter(lambda x: type(x) == str, raw_response)

        return valid_objects, error_strings

    while True:
        end_time = get_now_seconds_utc()
        start_time = end_time - POLL_PERIOD_SECONDS

        candles, errs = split_on_errors(get_ohlc_speedup(start_time, end_time, processor))
        bulk_insert_to_postgres(pg_conn, Candle.table_name, Candle.columns, candles)

        trade_history, errs = split_on_errors(get_history_speedup(start_time, end_time, processor))
        bulk_insert_to_postgres(pg_conn, TradeHistory.table_name, TradeHistory.columns, trade_history)

        tickers, errs = split_on_errors(get_ticker_speedup(end_time, processor))
        bulk_insert_to_postgres(pg_conn, Ticker.table_name, Ticker.columns, tickers)

        if should_print_debug():
            msg = """History retrieval at {ts}:
                Candle size - {num}
                Ticker size - {num3}
                Trade history size - {num2}
                """.format(ts=end_time, num=len(candles), num3=len(tickers), num2=len(trade_history))
            print_to_console(msg, LOG_ALL_ERRORS)
            log_to_file(msg, "candles_trade_history.log")

        print_to_console("Before sleep...", LOG_ALL_ERRORS)
        sleep_for(POLL_PERIOD_SECONDS)
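
The *_speedup helpers here appear to return a single flat list in which successful responses are parsed objects and failures are plain error strings, which is why split_on_errors partitions by type. A minimal sketch of that convention with made-up stand-in data (not the project's real Candle objects):

# Hypothetical mixed response: one parsed entry (a stand-in dict) and one error string.
raw_response = [{"pair": "BTC_USD", "close": 9500.0},
                "GET /public/candles timed out"]

valid_objects = filter(lambda x: type(x) != str, raw_response)
error_strings = filter(lambda x: type(x) == str, raw_response)

print(valid_objects)   # [{'pair': 'BTC_USD', 'close': 9500.0}]  (Python 2 filter returns a list)
print(error_strings)   # ['GET /public/candles timed out']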
Example #6
def analyse_tickers(pg_connection, notify_queue):
    """
            Retrieve tickers from ALL exchanges via the REST API and save them into the DB.

            NOTE: The very first routine to analyse the gap between rates at different exchanges.

    :param pg_connection:
    :param notify_queue:
    :return:
    """

    processor = ConnectionPool()

    while True:

        timest = get_now_seconds_utc()
        results = get_ticker_speedup(timest, processor)

        tickers = filter(lambda x: type(x) != str, results)

        res = compare_price(tickers, TRIGGER_THRESHOLD,
                            check_highest_bid_bigger_than_lowest_ask)

        for entry in res:
            msg = """Condition: {msg} at {ts}
            Date: {dt}
            Pair: {pair_name}, {ask_exchange}: {ask_price:.7f} {sell_exchange}: {sell_price:.7f}
            TAG: {ask_exchange}-{sell_exchange}
            """.format(msg=entry[0],
                       ts=timest,
                       dt=ts_to_string_local(timest),
                       pair_name=get_pair_name_by_id(entry[1]),
                       ask_exchange=entry[2].exchange,
                       ask_price=entry[2].bid,
                       sell_exchange=entry[3].exchange,
                       sell_price=entry[3].ask)
            print_to_console(msg, LOG_ALL_ERRORS)

            notify_queue.add_message(ARBITRAGE_MSG, msg)
            save_alarm_into_pg(entry[2], entry[3], pg_connection)

        print_to_console(
            "Total amount of tickers = {num}".format(num=len(tickers)),
            LOG_ALL_DEBUG)
        load_to_postgres(tickers, TICKER_TYPE_NAME, pg_connection)

        print_to_console("Before sleep...", LOG_ALL_DEBUG)
        sleep_for(POLL_PERIOD_SECONDS)
Example #7
def load_order_books(args):
    """
        Periodically retrieve FULL order books
        from ALL supported exchanges via the REST API
        and save them in the DB for further analysis.

        Under the hood, requests are sent asynchronously via the gevent library.

    :param args: config file
    :return:
    """

    pg_conn, _ = process_args(args)

    processor = ConnectionPool()

    while True:
        ts = get_now_seconds_utc()

        results = get_order_book_speedup(ts, processor)

        order_book = filter(lambda x: type(x) != str, results)

        load_to_postgres(order_book, ORDER_BOOK_TYPE_NAME, pg_conn)

        order_book_size = len(order_book)
        order_book_ask_size = 0
        order_book_bid_size = 0

        for entry in order_book:
            if entry is not None:
                order_book_ask_size += len(entry.ask)
                order_book_bid_size += len(entry.bid)

        if should_print_debug():
            msg = """Orderbook retrieval at {tt}:
            Order book size - {num1} Order book asks - {num10} Order book bids - {num20}""".format(
                tt=ts,
                num1=order_book_size,
                num10=order_book_ask_size,
                num20=order_book_bid_size)
            print_to_console(msg, LOG_ALL_ERRORS)
            log_to_file(msg, "order_book.log")

        print_to_console("Before sleep...", LOG_ALL_ERRORS)
        sleep_for(ORDER_BOOK_POLL_TIMEOUT)
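
The docstring above notes that requests go out asynchronously via gevent. ConnectionPool's internals are not part of this listing, so the sketch below only shows the general gevent fan-out pattern it presumably wraps; every name in it is made up for illustration and is not the project's API:

import gevent
from gevent.pool import Pool

def fetch_order_book(exchange_name):
    # Stand-in for a real REST call; a real worker would return a parsed
    # order book on success or an error string on failure.
    return "order book for {name}".format(name=exchange_name)

pool = Pool(10)  # cap concurrency, similar in spirit to ConnectionPool(10)
greenlets = [pool.spawn(fetch_order_book, name)
             for name in ["binance", "poloniex", "bittrex"]]
gevent.joinall(greenlets)
results = [g.value for g in greenlets]
print(results)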
Example #8
def test_open_orders_retrieval_arbitrage():

    sell_exchange_id = EXCHANGE.BINANCE
    buy_exchange_id = EXCHANGE.POLONIEX
    pair_id = CURRENCY_PAIR.BTC_TO_OMG
    threshold = 2.0
    reverse_threshold = 2.0
    balance_threshold = 5.0
    deal_expire_timeout = 60
    logging_level = LOG_ALL_DEBUG

    cfg = ArbitrageConfig(sell_exchange_id, buy_exchange_id, pair_id,
                          threshold, reverse_threshold, balance_threshold,
                          deal_expire_timeout, logging_level)
    load_keys(API_KEY_PATH)
    processor = ConnectionPool(pool_size=2)
    print cfg
    res = get_open_orders_for_arbitrage_pair(cfg, processor)
    print "Length:", len(res)
    for r in res:
        print r
Example #9
def get_order_book_time_fast():
    end_time = get_now_seconds_utc()
    processor = ConnectionPool()
    order_book = get_order_book_speedup(end_time, processor)
    return order_book
Example #10
def get_ticker_time_fast():
    timest = get_now_seconds_utc()
    processor = ConnectionPool()
    return get_ticker_speedup(timest, processor)
Example #11
def get_ohlc_time_fast_test():
    processor = ConnectionPool()
    end_time = get_now_seconds_utc()
    start_time = end_time - 900
    return get_ohlc_speedup(start_time, end_time, processor)
Example #12
def arbitrage_between_pair(args):
    cfg = ArbitrageConfig.from_args(args)

    app_settings = CommonSettings.from_cfg(args.cfg)

    set_logging_level(app_settings.logging_level_id)
    set_log_folder(app_settings.log_folder)
    load_keys(app_settings.key_path)

    priority_queue, msg_queue, local_cache = init_queues(app_settings)

    processor = ConnectionPool(pool_size=2)

    # validate arguments here to avoid a time-consuming check in the future
    for exchange_id in [args.sell_exchange_id, args.buy_exchange_id]:
        pair_name = get_currency_pair_name_by_exchange_id(
            cfg.pair_id, exchange_id)
        if pair_name is None:
            log_dont_supported_currency(cfg, exchange_id, cfg.pair_id)
            exit()

    deal_cap = MarketCap(cfg.pair_id, get_now_seconds_utc())
    deal_cap.update_max_volume_cap(NO_MAX_CAP_LIMIT)
    update_min_cap(cfg, deal_cap, processor)

    balance_state = dummy_balance_init(timest=0,
                                       default_volume=Decimal("0"),
                                       default_available_volume=Decimal("0"))

    if not YES_I_KNOW_WHAT_AM_I_DOING:
        die_hard("LIVE TRADING!")

    while True:

        if get_now_seconds_utc() - deal_cap.last_updated > MIN_CAP_UPDATE_TIMEOUT:
            update_min_cap(cfg, deal_cap, processor)

        for mode_id in [DEAL_TYPE.ARBITRAGE, DEAL_TYPE.REVERSE]:
            cur_timest_sec = get_now_seconds_utc()

            method = search_for_arbitrage if mode_id == DEAL_TYPE.ARBITRAGE else adjust_currency_balance
            active_threshold = cfg.threshold if mode_id == DEAL_TYPE.ARBITRAGE else cfg.reverse_threshold

            balance_state = get_updated_balance_arbitrage(
                cfg, balance_state, local_cache)

            if balance_state.expired(cur_timest_sec, cfg.buy_exchange_id,
                                     cfg.sell_exchange_id,
                                     BALANCE_EXPIRED_THRESHOLD):
                log_balance_expired_errors(cfg, msg_queue, balance_state)
                die_hard("Balance expired")

            order_book_src, order_book_dst = get_order_books_for_arbitrage_pair(
                cfg, cur_timest_sec, processor)

            if order_book_dst is None or order_book_src is None:
                log_failed_to_retrieve_order_book(cfg)
                sleep_for(3)
                continue

            if is_order_books_expired(order_book_src, order_book_dst,
                                      local_cache, msg_queue,
                                      cfg.log_file_name):
                sleep_for(3)
                continue

            local_cache.cache_order_book(order_book_src)
            local_cache.cache_order_book(order_book_dst)

            # init_deals_with_logging_speedy
            status_code, deal_pair = method(order_book_src,
                                            order_book_dst,
                                            active_threshold,
                                            cfg.balance_threshold,
                                            init_deals_with_logging_speedy,
                                            balance_state,
                                            deal_cap,
                                            type_of_deal=mode_id,
                                            worker_pool=processor,
                                            msg_queue=msg_queue)

            add_orders_to_watch_list(deal_pair, priority_queue)

            print_to_console("I am still alive! ", LOG_ALL_DEBUG)
            sleep_for(2)

        sleep_for(3)

        deal_cap.update_max_volume_cap(NO_MAX_CAP_LIMIT)