Example #1
def get_ohlc(date_start, date_end):

    all_ohlc = []

    for pair_name in BITTREX_CURRENCY_PAIRS:
        period = "thirtyMin"
        all_ohlc += get_ohlc_bittrex(pair_name, date_start, date_end, period)
        sleep_for(1)

    for pair_name in KRAKEN_CURRENCY_PAIRS:
        period = 15
        all_ohlc += get_ohlc_kraken(pair_name, date_start, date_end, period)

    for pair_name in POLONIEX_CURRENCY_PAIRS:
        period = 14400
        all_ohlc += get_ohlc_poloniex(pair_name, date_start, date_end, period)

    for pair_name in BINANCE_CURRENCY_PAIRS:
        period = "15m"
        all_ohlc += get_ohlc_binance(pair_name, date_start, date_end, period)

    for pair_name in HUOBI_CURRENCY_PAIRS:
        period = "15min"
        all_ohlc += get_ohlc_huobi(pair_name, date_start, date_end, period)

    return all_ohlc
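
The five loops above differ only in the pair list, the fetcher and the exchange-specific period token. As a sketch (assuming the fetchers keep the signatures above; EXCHANGE_OHLC_SOURCES is a name introduced here for illustration), the mapping can be table-driven:

# A sketch, not the original code: table-driven equivalent of get_ohlc.
# Assumes the same pair lists and fetchers as in Example #1.
EXCHANGE_OHLC_SOURCES = [
    (BITTREX_CURRENCY_PAIRS, get_ohlc_bittrex, "thirtyMin"),
    (KRAKEN_CURRENCY_PAIRS, get_ohlc_kraken, 15),
    (POLONIEX_CURRENCY_PAIRS, get_ohlc_poloniex, 14400),
    (BINANCE_CURRENCY_PAIRS, get_ohlc_binance, "15m"),
    (HUOBI_CURRENCY_PAIRS, get_ohlc_huobi, "15min"),
]

def get_ohlc_table_driven(date_start, date_end):
    all_ohlc = []
    for pairs, fetcher, period in EXCHANGE_OHLC_SOURCES:
        for pair_name in pairs:
            all_ohlc += fetcher(pair_name, date_start, date_end, period)
            sleep_for(1)  # unlike Example #1, pauses after every exchange, not only Bittrex
    return all_ohlc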
Example #2
def forward_new_messages(args):
    settings = CommonSettings.from_cfg(args.cfg)

    set_log_folder(settings.log_folder)
    set_logging_level(settings.logging_level_id)
    msg_queue = get_message_queue(host=settings.cache_host,
                                  port=settings.cache_port)

    do_we_have_data = False

    while True:
        for topic_id in QUEUE_TOPICS:
            msg = msg_queue.get_message_nowait(topic_id)
            if msg is not None:
                do_we_have_data = True
                notification_id = get_notification_id_by_topic_name(topic_id)
                err_code = send_single_message(msg, notification_id)
                if err_code == STATUS.FAILURE:
                    err_msg = """telegram_notifier can't send message to telegram. Message will be re-processed on next iteration.
                        {msg}""".format(msg=msg)
                    log_to_file(err_msg, "telegram_notifier.log")
                    print_to_console(err_msg, LOG_ALL_ERRORS)
                    msg_queue.add_message_to_start(topic_id, msg)
                    sleep_for(1)

        #
        #   NOTE: this can still lead to throttling by Telegram
        #

        if not do_we_have_data:
            sleep_for(1)

        do_we_have_data = False
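
The NOTE above is right that a fixed one-second pause does not rule out Telegram-side throttling. A hedged alternative (a sketch, not from the source, reusing send_single_message, STATUS and sleep_for from the example) is exponential backoff between resend attempts:

# A sketch: resend with exponential backoff instead of a fixed pause.
def send_with_backoff(msg, notification_id, max_attempts=5):
    delay = 1
    for _ in range(max_attempts):
        if send_single_message(msg, notification_id) != STATUS.FAILURE:
            return True
        sleep_for(delay)
        delay = min(2 * delay, 60)  # cap the pause at one minute
    return False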
Example #3
def process_placed_orders(args):
    """
            Check for new orders placed by ANY of trading bots


    :param args:
    :return:
    """
    pg_conn, settings = process_args(args)

    msg_queue = get_message_queue(host=settings.cache_host,
                                  port=settings.cache_port)

    cnt = 0

    while True:
        order = msg_queue.get_next_order(ORDERS_MSG)
        if order is not None:
            save_order_into_pg(order, pg_conn)
            print_to_console("Saving {} in db".format(order), LOG_ALL_ERRORS)
        sleep_for(1)
        cnt += 1

        if cnt >= HEARTBEAT_TIMEOUT:
            cnt = 0
            print_to_console("Order storing heartbeat", LOG_ALL_ERRORS)
Example #4
def test_kraken_placing_deals(krak_key):
    order_state = dummy_order_state_init()
    # order_state = get_updated_order_state(order_state)

    for x in order_state[EXCHANGE.KRAKEN].open_orders:
        if x.pair_id == CURRENCY.BCC and x.volume == 0.1 and x.price == 0.5:
            cancel_order_kraken(krak_key, x.deal_id)

    # order_state = get_updated_order_state(order_state)
    cnt = 0
    for x in order_state[EXCHANGE.KRAKEN].open_orders:
        if x.pair_id == CURRENCY.BCC and x.volume == 0.1 and x.price == 0.5:
            cnt += 1
            print x

    print cnt

    print order_state[EXCHANGE.KRAKEN]
    ts1 = get_now_seconds_local()
    for x in range(10000):
        # add_sell_order_kraken_till_the_end(krak_key, "BCHXBT", price=0.5, amount=0.1, order_state=order_state[EXCHANGE.KRAKEN])
        sleep_for(30)

    ts2 = get_now_seconds_local()
    # order_state = get_updated_order_state(order_state)
    print "Goal was to set 10000 deals: "
    print "Total number of open orders: ", len(
        order_state[EXCHANGE.KRAKEN].open_orders)
    print "It take ", ts2 - ts1, " seconds"
Example #5
def test_binance():
    def on_message(message):
        print(message)

    def on_error(ws, error):
        print(error)

    def on_close(ws):
        print("### closed ###")

    def on_open(ws):
        print("ONOPEN")
        # def run(*args):
        #     ws.send(json.dumps({'command':'subscribe','channel':'BTC-ETH@depth'}))
        #     while True:
        #         time.sleep(1)
        #     ws.close()
        #     print("thread terminating...")
        # thread.start_new_thread(run, ())

    # websocket.enableTrace(True)
    # # ws = websocket.WebSocketApp(sslopt={"cert_reqs": ssl.CERT_NONE})
    # ws = websocket.WebSocketApp("wss://stream.binance.com:9443/ws/ethbtc@depth")
    # # ws = websocket.WebSocket(sslopt={"cert_reqs": ssl.CERT_NONE})
    # ws.on_message = on_message
    # ws.on_error = on_error
    # ws.on_close = on_close
    # ws.on_open = on_open
    # # ws.connect("wss://stream.binance.com:9443/ws/ethbtc@depth")
    # # ws.run_forever()
    # ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})


    # Create connection
    while True:
        try:
            ws = create_connection("wss://stream.binance.com:9443/ws/ethbtc@depth", sslopt={"cert_reqs": ssl.CERT_NONE})
            ws.settimeout(15)
            break
        except Exception:
            print('connect ws error, retry...')
            sleep_for(5)

    # actual subscription
    # ws.send()

    # event loop
    while True:
        try:
            compress_data = ws.recv()
            on_message(compress_data)
        except Exception as e:      # Timeout is presumably large enough not to trigger spurious re-syncing
            msg = "Binance - triggered exception during reading from socket = {}".format(str(e))
            print msg
            break

    msg = "Binance - triggered on_close. We have to re-init the whole state from the scratch. " \
          "Current thread will be finished."
    log_to_file(msg, SOCKET_ERRORS_LOG_FILE_NAME)
Example #6
    def start(self):
        self.reset_arbitrage_state()
        while True:
            if self.buy_subscription.is_running() and self.sell_subscription.is_running():
                sleep_for(1)
            else:
                # We will NOT issue a reset while any pending process is still running
                while self.buy_subscription.is_running() or self.sell_subscription.is_running():
                    sleep_for(1)
                self.reset_arbitrage_state()
Example #7
    def update_min_cap(self):
        log_to_file("Subscribing for updating cap updates", SOCKET_ERRORS_LOG_FILE_NAME)
        while self.update_min_cap_run_flag:
            update_min_cap(self.cfg, self.deal_cap, self.processor)

            for _ in xrange(self.cap_update_timeout):
                if self.update_min_cap_run_flag:
                    sleep_for(1)

        log_to_file("Exit from updating cap updates", SOCKET_ERRORS_LOG_FILE_NAME)
Example #8
    def test_socket_subscription(self):
        t1 = SubscriptionHuobi(CURRENCY_PAIR.BTC_TO_ETC)
        buy_subscription_thread = threading.Thread(target=t1.subscribe, args=())
        buy_subscription_thread.daemon = True
        buy_subscription_thread.start()

        sleep_for(5)
        self.assertTrue(t1.should_run)

        t1.disconnect()
        self.assertFalse(t1.should_run)
Example #9
        def run():
            log_subscribe_to_exchange_heartbeat("Poloniex")
            self.ws.send(self.subscribe_string)
            try:
                while self.should_run:
                    self.ws.send(self.subscribe_heartbeat)
                    sleep_for(1)
            except Exception as e:
                log_send_heart_beat_failed("Poloniex", e)

            log_unsubscribe_to_exchange_heartbeat("Poloniex")
Example #10
        def run():
            log_subscribe_to_exchange_heartbeat("Huobi")
            self.ws.send(self.subscription_url)
            try:
                while self.should_run:
                    self.ws.send(json.dumps({'ping': 18212558000}))  # NOTE: hardcoded ping payload
                    sleep_for(1)
                self.ws.close()
            except Exception as e:
                log_send_heart_beat_failed("Huobi", e)

            log_unsubscribe_to_exchange_heartbeat("Huobi")
Example #11
    def update_balance(self):

        while self.update_balance_run_flag:
            cur_timest_sec = get_now_seconds_utc()
            self.balance_state = get_updated_balance_arbitrage(cfg, self.balance_state, self.local_cache)

            if self.balance_state.expired(cur_timest_sec, self.buy_exchange_id, self.sell_exchange_id,
                                          BALANCE_EXPIRED_THRESHOLD):
                log_balance_expired_errors(cfg, self.msg_queue, self.balance_state)

                assert False  # deliberate crash - presumably a supervisor restarts the process

            sleep_for(self.balance_update_timeout)
Example #12
def load_all_public_data(args):
    """
                06.08.2019 As far as I remember, this is NOT the main data retrieval routine.

                Retrieve ticker, trade history, candles and order book
                from ALL supported exchanges
                and store them in the DB
                every TIMEOUT seconds through the REST api.

                The majority of exchanges tend to throttle clients that send too many requests
                from the same ip - so be mindful about the timeout.

    :param args:
    :return:
    """

    pg_conn, settings = process_args(args)

    processor = ConnectionPool()

    def split_on_errors(raw_response):
        valid_objects = filter(lambda x: type(x) != str, raw_response)
        error_strings = filter(lambda x: type(x) == str, raw_response)

        return valid_objects, error_strings

    while True:
        end_time = get_now_seconds_utc()
        start_time = end_time - POLL_PERIOD_SECONDS

        candles, errs = split_on_errors(get_ohlc_speedup(start_time, end_time, processor))
        bulk_insert_to_postgres(pg_conn, Candle.table_name, Candle.columns, candles)

        trade_history, errs = split_on_errors(get_history_speedup(start_time, end_time, processor))
        bulk_insert_to_postgres(pg_conn, TradeHistory.table_name, TradeHistory.columns, trade_history)

        tickers, errs = split_on_errors(get_ticker_speedup(end_time, processor))
        bulk_insert_to_postgres(pg_conn, Ticker.table_name, Ticker.columns, tickers)

        if should_print_debug():
            msg = """History retrieval at {ts}:
                Candle size - {num}
                Ticker size - {num3}
                Trade history size - {num2}
                """.format(ts=end_time, num=len(candles), num3=len(tickers), num2=len(trade_history))
            print_to_console(msg, LOG_ALL_ERRORS)
            log_to_file(msg, "candles_trade_history.log")

        print_to_console("Before sleep...", LOG_ALL_ERRORS)
        sleep_for(POLL_PERIOD_SECONDS)
Example #13
def process_failed_orders(args):
    """
                We try to address the following issue:

            Due to network issues or just bugs we may end up in a situation where we failed to place an order -
            or we think so. Such orders are registered in a dedicated queue, and we want to re-process them
            to minimise loss.

            Option 1: We managed to place the order, just didn't get a proper response from the exchange
            - i.e. didn't wait long enough for the exchange to respond
            Option 2: We managed to place the order, but the exchange was overloaded and decided to
            return some errors to us ()
            Option 3: We didn't manage to place the order:
                nonce issue - in particular poloniex
                exchange issue - kraken
                ill fate - :(
            Option 4: ??? TODO

            First we try to find the order among open or executed orders.
            In case we find it - update order_id in the db.
            If it is still open, add it to the watch list for expired orders processing.

            If not, we can replace it with a market order, on the assumption that there is a high
            probability that the other leg of the arbitrage deal was successfully placed.
    :param args:
    :return:
    """

    pg_conn, settings = process_args(args)
    load_keys(settings.key_path)

    priority_queue, msg_queue, local_cache = init_queues(settings)

    cnt = 0

    while True:
        order = msg_queue.get_next_order(FAILED_ORDERS_MSG)
        if order is not None:
            process_failed_order(order, msg_queue, priority_queue, local_cache,
                                 pg_conn)

        sleep_for(1)
        cnt += 1

        if cnt >= HEARTBEAT_TIMEOUT:
            cnt = 0
            print_to_console("Failed orders processing heartbeat",
                             LOG_ALL_ERRORS)
Example #14
def analyse_tickers(pg_connection, notify_queue):
    """
            Retrieve tickers from ALL exchanges via the REST api and save them into the DB.

            NOTE: The very first routine to analyse the gap between rates at different exchanges.

    :param pg_connection:
    :param notify_queue:
    :return:
    """

    processor = ConnectionPool()

    while True:

        timest = get_now_seconds_utc()
        results = get_ticker_speedup(timest, processor)

        tickers = filter(lambda x: type(x) != str, results)

        res = compare_price(tickers, TRIGGER_THRESHOLD,
                            check_highest_bid_bigger_than_lowest_ask)

        for entry in res:
            msg = """Condition: {msg} at {ts}
            Date: {dt}
            Pair: {pair_name}, {ask_exchange}: {ask_price:.7f} {sell_exchange}: {sell_price:.7f}
            TAG: {ask_exchange}-{sell_exchange}
            """.format(msg=entry[0],
                       ts=timest,
                       dt=ts_to_string_local(timest),
                       pair_name=get_pair_name_by_id(entry[1]),
                       ask_exchange=entry[2].exchange,
                       ask_price=entry[2].bid,
                       sell_exchange=entry[3].exchange,
                       sell_price=entry[3].ask)
            print_to_console(msg, LOG_ALL_ERRORS)

            notify_queue.add_message(ARBITRAGE_MSG, msg)
            save_alarm_into_pg(entry[2], entry[3], pg_connection)

        print_to_console(
            "Total amount of tickers = {num}".format(num=len(tickers)),
            LOG_ALL_DEBUG)
        load_to_postgres(tickers, TICKER_TYPE_NAME, pg_connection)

        print_to_console("Before sleep...", LOG_ALL_DEBUG)
        sleep_for(POLL_PERIOD_SECONDS)
Example #15
def receive_binance_trade_batch(key, pair_name, limit, last_order_id):
    trades_by_pair = []

    error_code, json_document = get_trades_history_binance(
        key, pair_name, limit, last_order_id)

    while error_code == STATUS.FAILURE:
        print "receive_trade_batch: got error responce - Reprocessing"
        sleep_for(2)
        error_code, json_document = get_trades_history_binance(
            key, pair_name, limit, last_order_id)

    for entry in json_document:
        trades_by_pair.append(Trade.from_binance_history(entry, pair_name))

    return trades_by_pair
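
This retry-until-success shape recurs in Examples #19, #21, #22 and #24. A helper (a sketch; retry_until_success is a hypothetical name, STATUS and sleep_for come from the examples) removes the duplicated call:

# A sketch: repeat a (status, result) call until it stops failing.
def retry_until_success(call, *args, **kwargs):
    error_code, result = call(*args, **kwargs)
    while error_code == STATUS.FAILURE:
        sleep_for(2)
        error_code, result = call(*args, **kwargs)
    return result

With it, the loop above reduces to a single retry_until_success(get_trades_history_binance, key, pair_name, limit, last_order_id) call followed by the parsing loop.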
Example #16
    def subscribe(self):

        #
        #       FIXME DBG PART - REMOVE AFTER TESTS
        #

        if self.should_run:
            die_hard("Bittrex another running?")

        msg = "Bittrex - call subscribe!"
        log_to_file(msg, SOCKET_ERRORS_LOG_FILE_NAME)
        print msg

        self.should_run = True

        try:
            with Session() as session:

                self.connection = Connection(self.url, session)
                self.hub = self.connection.register_hub(self.hub_name)

                self.hub.client.on(BittrexParameters.MARKET_DELTA,
                                   self.on_public)

                self.connection.start()

                log_conect_to_websocket("Bittrex")

                while self.connection.started and self.should_run:
                    try:
                        self.hub.server.invoke(
                            BittrexParameters.SUBSCRIBE_EXCHANGE_DELTA,
                            self.pair_name)
                    except Exception as e:
                        log_send_heart_beat_failed("Bittrex", e)

                        # FIXME NOTE - still not sure - connection.wait(1)
                        self.should_run = False

                        break
                    sleep_for(1)
        except Exception as e:
            log_error_on_receive_from_socket("Bittrex", e)

        log_subscription_cancelled("Bittrex")

        self.disconnect()
Example #17
def load_order_books(args):
    """
        Periodically retrieve FULL order books
        from ALL supported exchanges via the REST api
        and save them for further analysis in the DB.

        Under the hood, requests are sent in an async fashion via the gevent library.

    :param args: config file
    :return:
    """

    pg_conn, _ = process_args(args)

    processor = ConnectionPool()

    while True:
        ts = get_now_seconds_utc()

        results = get_order_book_speedup(ts, processor)

        order_book = filter(lambda x: type(x) != str, results)

        load_to_postgres(order_book, ORDER_BOOK_TYPE_NAME, pg_conn)

        order_book_size = len(order_book)
        order_book_ask_size = 0
        order_book_bid_size = 0

        for entry in order_book:
            if entry is not None:
                order_book_ask_size += len(entry.ask)
                order_book_bid_size += len(entry.bid)

        if should_print_debug():
            msg = """Orderbook retrieval at {tt}:
            Order book size - {num1} Order book asks - {num10} Order book bids - {num20}""".format(
                tt=ts,
                num1=order_book_size,
                num10=order_book_ask_size,
                num20=order_book_bid_size)
            print_to_console(msg, LOG_ALL_ERRORS)
            log_to_file(msg, "order_book.log")

        print_to_console("Before sleep...", LOG_ALL_ERRORS)
        sleep_for(ORDER_BOOK_POLL_TIMEOUT)
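
The gevent fan-out mentioned in the docstring boils down to spawning one greenlet per REST call and joining them with a timeout. A minimal sketch (an assumption about the approach, not the repo's actual ConnectionPool API):

# A sketch: concurrent REST fan-out with gevent. Failed calls come back
# as error strings, matching the filter(...) idiom used above.
import gevent

def fan_out(calls, timeout=30):
    jobs = [gevent.spawn(call) for call in calls]
    gevent.joinall(jobs, timeout=timeout)
    return [job.value if job.successful() else str(job.exception)
            for job in jobs]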
Example #18
def load_trade_history(args):
    """
        Retrieve executed trades from ALL exchanges via the REST api
        and save them into the db.

        These data will later be used to analyse trading profitability
        and the bots' performance.

    :param args: period, exchanges, connection details
    :return:
    """

    pg_conn, settings = process_args(args)

    log_initial_settings(
        "Starting trade history retrieval for bots using following exchanges: \n",
        settings.exchanges)

    if args.start_time is None or args.end_time is None:
        end_time = get_now_seconds_utc()
        start_time = end_time - 24 * 3600
    else:
        end_time = parse_time(args.end_time, '%Y-%m-%d %H:%M:%S')
        start_time = parse_time(args.start_time, '%Y-%m-%d %H:%M:%S')

    if end_time <= start_time:
        die_hard("Wrong time interval provided! {ts0} - {ts1}".format(
            ts0=start_time, ts1=end_time))

    load_keys(settings.key_path)

    while True:
        for exchange_id in settings.exchanges:
            method = get_trade_retrieval_method_by_exchange(exchange_id)
            method(pg_conn, start_time, end_time)
            sleep_for(1)

        print_to_console("Trade retrieval heartbeat", LOG_ALL_DEBUG)

        sleep_for(TRADE_POLL_TIMEOUT)

        end_time = get_now_seconds_utc()
        start_time = end_time - 24 * 3600
Example #19
def get_recent_bittrex_trades(start_time=START_OF_TIME, end_time=None):
    # Resolve the end time at call time, not once at import time
    if end_time is None:
        end_time = get_now_seconds_utc()

    key = get_key_by_exchange(EXCHANGE.BITTREX)
    bittrex_order_by_pair = defaultdict(list)

    for pair_name in BITTREX_CURRENCY_PAIRS:
        err_code, trades = get_order_history_bittrex(key, pair_name=pair_name)

        while err_code == STATUS.FAILURE:
            print "get_recent_bittrex_trades: got error responce - Reprocessing"
            sleep_for(2)
            err_code, trades = get_order_history_bittrex(key,
                                                         pair_name=pair_name)

        for new_trade in trades:
            if start_time <= new_trade.create_time <= end_time:
                bittrex_order_by_pair[new_trade.pair_id].append(new_trade)

    return bittrex_order_by_pair
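
The end_time=None idiom used above matters because Python evaluates default arguments once, at definition time, not on each call. A minimal standalone demonstration, with time.time() standing in for get_now_seconds_utc():

import time

def stamped(ts=time.time()):
    # the default was frozen when the module was imported...
    return ts

assert stamped() == stamped()  # ...so every no-argument call sees the same value

def stamped_fixed(ts=None):
    # resolving the default at call time avoids the trap
    if ts is None:
        ts = time.time()
    return ts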
Example #20
def process_expired_orders(args):
    """

    :param args: file name
    :return:
    """
    settings = CommonSettings.from_cfg(args.cfg)
    set_log_folder(settings.log_folder)
    set_logging_level(settings.logging_level_id)

    load_keys(settings.key_path)

    priority_queue, msg_queue, local_cache = init_queues(settings)

    cnt = 0

    while True:
        curr_ts = get_now_seconds_utc()

        order = priority_queue.get_oldest_order(ORDERS_EXPIRE_MSG)

        if order:
            msg = "Current expired order - {}".format(order)
            log_to_file(msg, EXPIRED_ORDER_PROCESSING_FILE_NAME)
            order_age = curr_ts - order.create_time
            if order_age < ORDER_EXPIRATION_TIMEOUT:
                msg = "A bit early - {t1} {t2} WILLL SLEEP".format(
                    t1=order_age, t2=ORDER_EXPIRATION_TIMEOUT)
                log_to_file(msg, EXPIRED_ORDER_PROCESSING_FILE_NAME)
                sleep_for(ORDER_EXPIRATION_TIMEOUT - order_age)

            process_expired_order(order, msg_queue, priority_queue,
                                  local_cache)

        sleep_for(1)
        cnt += 1

        if cnt >= HEARTBEAT_TIMEOUT:
            cnt = 0
            print_to_console("Watch list is empty sleeping", LOG_ALL_ERRORS)
            log_to_file("Watch list is empty sleeping",
                        EXPIRED_ORDER_PROCESSING_FILE_NAME)
Example #21
def get_recent_binance_orders():
    key = get_key_by_exchange(EXCHANGE.BINANCE)

    binance_order = []
    limit = 500
    for pair_name in BINANCE_CURRENCY_PAIRS:

        err_code, orders_by_pair = get_order_history_binance(
            key, pair_name, limit)

        print "Get data for ", pair_name
        while err_code == STATUS.FAILURE:
            sleep_for(2)
            err_code, orders_by_pair = get_order_history_binance(
                key, pair_name, limit)

        binance_order += orders_by_pair
        sleep_for(1)

    return binance_order
Example #22
def get_recent_poloniex_trades(start_time=START_OF_TIME, end_time=None):
    # Resolve the end time at call time, not once at import time
    if end_time is None:
        end_time = get_now_seconds_utc()

    key = get_key_by_exchange(EXCHANGE.POLONIEX)
    poloniex_orders_by_pair = defaultdict(list)

    for pair_name in POLONIEX_CURRENCY_PAIRS:
        error_code, trades = get_order_history_poloniex(key,
                                                        pair_name=pair_name,
                                                        time_start=start_time)
        while error_code != STATUS.SUCCESS:
            print "get_recent_poloniex_trades: got error responce - Reprocessing"
            sleep_for(2)
            error_code, trades = get_order_history_poloniex(
                key, pair_name=pair_name, time_start=start_time)

        for trade in trades:
            if start_time <= trade.create_time <= end_time:
                poloniex_orders_by_pair[trade.pair_id].append(trade)

    return poloniex_orders_by_pair
Example #23
    def reset_arbitrage_state(self):

        local_timeout = 1

        while True:
            sleep_for(local_timeout)

            log_init_reset()

            set_stage(ORDER_BOOK_SYNC_STAGES.RESETTING)

            self.update_balance_run_flag = False
            self.update_min_cap_run_flag = False

            clear_queue(self.sell_exchange_updates)
            clear_queue(self.buy_exchange_updates)

            self._init_arbitrage_state()            # Spawn balance & cap threads, non-blocking
            self.subscribe_to_order_book_update()   # Spawn order book subscription threads, non-blocking
            self.sync_order_books()                 # Spawn order book sync threads, BLOCKING until they finish

            log_reset_final_stage()

            if get_stage() != ORDER_BOOK_SYNC_STAGES.AFTER_SYNC:

                self.shutdown_subscriptions()

                log_to_file("reset_arbitrage_state - cant sync order book, lets try one more time!", SOCKET_ERRORS_LOG_FILE_NAME)

                while self.buy_subscription.is_running() or self.sell_subscription.is_running():
                    sleep_for(1)

                local_timeout += 1

            else:
                break

        log_reset_stage_successfully()
Example #24
def get_recent_kraken_trades(start_time=START_OF_TIME, end_time=None):
    # Resolve the end time at call time, not once at import time
    if end_time is None:
        end_time = get_now_seconds_utc()

    key = get_key_by_exchange(EXCHANGE.KRAKEN)
    kraken_orders_by_pair = defaultdict(list)

    error_code, trades = get_order_history_kraken(key,
                                                  pair_name=None,
                                                  time_start=start_time,
                                                  time_end=end_time)
    while error_code != STATUS.SUCCESS:
        print "get_recent_huobi_trades: got error responce - Reprocessing"
        sleep_for(2)
        error_code, trades = get_order_history_kraken(key,
                                                      pair_name=None,
                                                      time_start=start_time,
                                                      time_end=end_time)

    for trade in trades:
        if start_time <= trade.create_time <= end_time:
            kraken_orders_by_pair[trade.pair_id].append(trade)

    return kraken_orders_by_pair
Example #25
def register_and_wait_for_commands(args):
    settings = CommonSettings.from_cfg(args.cfg)

    command_queue = CommandQueue(settings.cache_host, settings.cache_port)

    #
    # while true read settings
    #   1) what kind of services to deploy
    #   2) service specific settings
    #

    server_name = socket.gethostname()
    command_queue.register_node(server_name)

    while True:
        # get command
        # execute

        cmd = command_queue.get_command()
        if cmd:
            print_to_console("Subscriber: {} - {}".format(server_name, cmd),
                             LOG_ALL_DEBUG)
        sleep_for(1)
Example #26
def test_failed_deal_placement():
    load_keys(API_KEY_PATH)
    msg_queue = get_message_queue()
    ts = 1517938516
    order = Trade(DEAL_TYPE.SELL, EXCHANGE.BITTREX, CURRENCY_PAIR.BTC_TO_STRAT, price=0.000844, volume=5.0,
                  order_book_time=ts, create_time=ts, execute_time=ts, order_id=None)

    #   from dao.order_utils import get_open_orders_by_exchange
    #   r = get_open_orders_by_exchange(EXCHANGE.BITTREX, CURRENCY_PAIR.BTC_TO_STRAT)

    #   for rr in r:
    #       print r

    #   raise
    #
    # msg = "Replace existing order with new one - {tt}".format(tt=order)
    # err_code, json_document = init_deal(order, msg)
    # print json_document
    # order.deal_id = parse_deal_id(order.exchange_id, json_document)

    # msg_queue.add_order(ORDERS_MSG, order)
    sleep_for(3)
    msg_queue.add_order(FAILED_ORDERS_MSG, order)
    print order
Example #27
    def more_heavy_load():
        sleep_for(30)
        print "more_heavy_load"
Example #28
import os

from deploy.constants import DATA_RETRIEVAL_SERVICES, TELEGRAM_NOTIFIER_DEPLOY_UNIT

from utils.debug_utils import print_to_console, LOG_ALL_ERRORS
from utils.time_utils import sleep_for

from deploy.service_utils import deploy_telegram_notifier, deploy_process_in_screen


if __name__ == "__main__":
    if not os.path.isfile('common.cfg'):
        print_to_console("Copy `common_sample.cfg` to `common.cfg` and set appropriate settings there!", LOG_ALL_ERRORS)
        exit(0)

    screen_name = TELEGRAM_NOTIFIER_DEPLOY_UNIT.screen_name

    deploy_telegram_notifier(screen_name, should_create_screen=True)
    sleep_for(3)

    print_to_console("2 stage - init data retrieval services...", LOG_ALL_ERRORS)
    for deploy_unit in DATA_RETRIEVAL_SERVICES.values():
        deploy_process_in_screen(screen_name, deploy_unit)
Example #29
    def heavy_load():
        sleep_for(3)
        print "heavy_load"
Example #30
def process_expired_order(expired_order, msg_queue, priority_queue,
                          local_cache):
    """
            In order to speedup and simplify expired deal processing following approach implemented.

            Every successfully placed order go into priority queue sorted by time. Earliest - first.
            When time come - it will appear in this method.
            We retrieve open orders and try to find that order there.
            If it still there:
                adjust executed volume
                cancel active order
                retrieve order book and adjust price
                place new order with new volume and price

            FIXME NOTE: poloniex(? other ?) executed volume = 0 and volume != original ?

    :param expired_order:  order retrieved from redis cache
    :param msg_queue: saving to postgres and re-process failed orders
    :param priority_queue: watch queue for expired orders
    :param local_cache: to retrieve balance
    :return:
    """

    err_code, open_orders = get_open_orders_by_exchange(
        expired_order.exchange_id, expired_order.pair_id)

    if err_code == STATUS.FAILURE:
        log_open_orders_by_exchange_bad_result(expired_order)

        priority_queue.add_order_to_watch_queue(ORDERS_EXPIRE_MSG,
                                                expired_order)

        return

    if not open_orders:
        log_open_orders_is_empty(expired_order)
        return

    log_trace_all_open_orders(open_orders)

    if not executed_volume_updated(open_orders, expired_order):
        log_to_file("Can't update volume for ",
                    EXPIRED_ORDER_PROCESSING_FILE_NAME)

    err_code, response = cancel_by_exchange(expired_order)

    log_trace_cancel_request_result(expired_order, err_code, response)

    if err_code == STATUS.FAILURE:
        log_cant_cancel_deal(expired_order, msg_queue)

        priority_queue.add_order_to_watch_queue(ORDERS_EXPIRE_MSG,
                                                expired_order)

        return

    # FIXME NOTE
    # We want the exchange to update our available balance;
    # we observe situations where this does not happen immediately,
    # so we mitigate the delay with this dirty workaround
    sleep_for(2)

    ticker = get_ticker(expired_order.exchange_id, expired_order.pair_id)
    if ticker is None:

        priority_queue.add_order_to_watch_queue(ORDERS_EXPIRE_MSG,
                                                expired_order)

        log_cant_retrieve_ticker(expired_order, msg_queue)
        return

    min_volume = compute_min_cap_from_ticker(expired_order.pair_id, ticker)
    order_book = get_order_book(expired_order.exchange_id,
                                expired_order.pair_id)

    if order_book is None:

        priority_queue.add_order_to_watch_queue(ORDERS_EXPIRE_MSG,
                                                expired_order)

        log_cant_retrieve_order_book(expired_order, msg_queue)
        return

    if is_order_book_expired(EXPIRED_ORDER_PROCESSING_FILE_NAME, order_book,
                             local_cache, msg_queue):

        priority_queue.add_order_to_watch_queue(ORDERS_EXPIRE_MSG,
                                                expired_order)

        return

    orders = order_book.bid if expired_order.trade_type == DEAL_TYPE.SELL else order_book.ask

    expired_order.price = adjust_price_by_order_book(orders,
                                                     expired_order.volume)

    # Forcefully update the balance for this exchange - other processes may have consumed those coins
    update_balance_by_exchange(expired_order.exchange_id)
    # Do we have enough coins in our balance?
    balance = local_cache.get_balance(expired_order.exchange_id)

    if balance.expired(BALANCE_EXPIRED_THRESHOLD):

        priority_queue.add_order_to_watch_queue(ORDERS_EXPIRE_MSG,
                                                expired_order)

        log_balance_expired(expired_order.exchange_id,
                            BALANCE_EXPIRED_THRESHOLD, balance, msg_queue)

        assert False

    place_order_by_market_rate(expired_order, msg_queue, priority_queue,
                               min_volume, balance, order_book,
                               EXPIRED_ORDER_PROCESSING_FILE_NAME)