Example #1
def forward_new_messages(args):
    settings = CommonSettings.from_cfg(args.cfg)

    set_log_folder(settings.log_folder)
    set_logging_level(settings.logging_level_id)
    msg_queue = get_message_queue(host=settings.cache_host,
                                  port=settings.cache_port)

    do_we_have_data = False

    while True:
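        # Drain every topic queue and forward each message to Telegram;
        # on failure the message is pushed back to the front of its queue and retried.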
        for topic_id in QUEUE_TOPICS:
            msg = msg_queue.get_message_nowait(topic_id)
            if msg is not None:
                do_we_have_data = True
                notification_id = get_notification_id_by_topic_name(topic_id)
                err_code = send_single_message(msg, notification_id)
                if err_code == STATUS.FAILURE:
                    err_msg = """telegram_notifier can't send message to telegram. Message will be re-processed on next iteration.
                        {msg}""".format(msg=msg)
                    log_to_file(err_msg, "telegram_notifier.log")
                    print_to_console(err_msg, LOG_ALL_ERRORS)
                    msg_queue.add_message_to_start(topic_id, msg)
                    sleep_for(1)

        #
        #   NOTE: this can still lead to throttling by Telegram
        #

        if not do_we_have_data:
            sleep_for(1)

        do_we_have_data = False
Example #2
def process_args(args):
    settings = CommonSettings.from_cfg(args.cfg)
    pg_conn = init_pg_connection(_db_host=settings.db_host,
                                 _db_port=settings.db_port,
                                 _db_name=settings.db_name)

    set_log_folder(settings.log_folder)
    set_logging_level(settings.logging_level_id)

    return pg_conn, settings
Example #3
def process_expired_orders(args):
    """

    :param args: file name
    :return:
    """
    settings = CommonSettings.from_cfg(args.cfg)
    set_log_folder(settings.log_folder)
    set_logging_level(settings.logging_level_id)

    load_keys(settings.key_path)

    priority_queue, msg_queue, local_cache = init_queues(settings)

    cnt = 0

    while True:
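        # Take the oldest expired-order message; if it has not yet reached
        # ORDER_EXPIRATION_TIMEOUT, sleep out the remaining time before processing it.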
        curr_ts = get_now_seconds_utc()

        order = priority_queue.get_oldest_order(ORDERS_EXPIRE_MSG)

        if order:
            msg = "Current expired order - {}".format(order)
            log_to_file(msg, EXPIRED_ORDER_PROCESSING_FILE_NAME)
            order_age = curr_ts - order.create_time
            if order_age < ORDER_EXPIRATION_TIMEOUT:
                msg = "A bit early - {t1} {t2} WILLL SLEEP".format(
                    t1=order_age, t2=ORDER_EXPIRATION_TIMEOUT)
                log_to_file(msg, EXPIRED_ORDER_PROCESSING_FILE_NAME)
                sleep_for(ORDER_EXPIRATION_TIMEOUT - order_age)

            process_expired_order(order, msg_queue, priority_queue,
                                  local_cache)

        sleep_for(1)
        cnt += 1

        if cnt >= HEARTBEAT_TIMEOUT:
            cnt = 0
            print_to_console("Watch list is empty sleeping", LOG_ALL_ERRORS)
            log_to_file("Watch list is empty sleeping",
                        EXPIRED_ORDER_PROCESSING_FILE_NAME)
Example #4
def register_and_wait_for_commands(args):
    settings = CommonSettings.from_cfg(args.cfg)

    command_queue = CommandQueue(settings.cache_host, settings.cache_port)

    #
    # In a loop, read settings describing:
    #   1) which services to deploy
    #   2) service-specific settings
    #

    server_name = socket.gethostname()
    command_queue.register_node(server_name)

    while True:
        # fetch the next command addressed to this node and execute it
        # (currently it is only logged to console)

        cmd = command_queue.get_command()
        if cmd:
            print_to_console("Subscriber: {} - {}".format(server_name, cmd),
                             LOG_ALL_DEBUG)
        sleep_for(1)
Example #5
    parser = argparse.ArgumentParser(description="Constantly poll two exchanges for the order book of a particular "
                                                 "pair and initiate sell/buy deals for arbitrage opportunities")

    parser.add_argument('--threshold', action="store", type=float, required=True)
    parser.add_argument('--balance_threshold', action="store", type=float, required=True)
    parser.add_argument('--reverse_threshold', action="store", type=float, required=True)
    parser.add_argument('--sell_exchange_id', action="store", type=int, required=True)
    parser.add_argument('--buy_exchange_id', action="store", type=int, required=True)
    parser.add_argument('--pair_id', action="store", type=int, required=True)
    parser.add_argument('--deal_expire_timeout', action="store", type=int, required=True)
    parser.add_argument('--cfg', action="store", required=True)

    arguments = parser.parse_args()

    cfg = ArbitrageConfig.from_args(arguments)

    app_settings = CommonSettings.from_cfg(cfg)

    set_logging_level(app_settings.logging_level_id)
    set_log_folder(app_settings.log_folder)
    load_keys(app_settings.key_path)

    # to avoid a time-consuming check in the future - validate arguments here
    for exchange_id in [cfg.sell_exchange_id, cfg.buy_exchange_id]:
        pair_name = get_currency_pair_name_by_exchange_id(cfg.pair_id, exchange_id)
        if pair_name is None:
            log_dont_supported_currency(cfg, exchange_id, cfg.pair_id)
            exit()

    ArbitrageListener(cfg, app_settings).start()
Example #6
def watch_balance_for_exchange(args):
    """
            Those routine update balance at redis CACHE
            for ALL coins at ONE exchange for active key set.

            NOTE:   It still rely on REST api - i.e. not proactive
                    For some exchanges - balance not immediately updated

                    Initially all exchanges were polled sequentially
                    But it lead to delays in the past
                    due to exchange errors or throttling

    :param args: config file and exchange_id
    :return:
    """
    settings = CommonSettings.from_cfg(args.cfg)

    exchange_id = get_exchange_id_by_name(args.exchange)
    if exchange_id not in EXCHANGE.values():
        log_wrong_exchange_id(exchange_id)
        die_hard("Exchange id {} seems to be unknown? 0_o".format(exchange_id))

    log_initial_settings(
        "Starting balance monitoring for following exchange: \n",
        [exchange_id])

    cache = connect_to_cache(host=settings.cache_host,
                             port=settings.cache_port)
    msg_queue = get_message_queue(host=settings.cache_host,
                                  port=settings.cache_port)

    load_keys(settings.key_path)
    set_log_folder(settings.log_folder)
    set_logging_level(settings.logging_level_id)

    init_balances(settings.exchanges, cache)

    cnt = 0

    while True:
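        # Poll the exchange balance every BALANCE_POLL_TIMEOUT seconds, retrying on
        # failure, and run a threshold health check every BALANCE_HEALTH_CHECK_TIMEOUT.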
        # Initial balances were already loaded via init_balances, so sleep first
        sleep_for(BALANCE_POLL_TIMEOUT)

        cnt += BALANCE_POLL_TIMEOUT

        log_balance_update_heartbeat(exchange_id)

        balance_for_exchange = update_balance_by_exchange(exchange_id, cache)
        while balance_for_exchange is None:
            log_cant_update_balance(exchange_id)
            sleep_for(1)
            balance_for_exchange = update_balance_by_exchange(
                exchange_id, cache)

        if cnt >= BALANCE_HEALTH_CHECK_TIMEOUT:
            cnt = 0
            log_last_balances(settings.exchanges, cache, msg_queue)

            for base_currency_id in BASE_CURRENCY:
                threshold = BASE_CURRENCIES_BALANCE_THRESHOLD[base_currency_id]
                if not balance_for_exchange.do_we_have_enough(
                        base_currency_id, threshold):
                    log_not_enough_base_currency(exchange_id, base_currency_id,
                                                 threshold,
                                                 balance_for_exchange,
                                                 msg_queue)
Example #7
import argparse

from deploy.classes.common_settings import CommonSettings
from deploy.classes.deploy_unit import DeployUnit
from deploy.service_utils import deploy_process_in_screen
from deploy.constants import BALANCE_UPDATE_DEPLOY_UNIT
from deploy.screen_utils import create_screen

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Balance monitoring service, polls every {POLL_TIMEOUT}; "
                    "configured via a --cfg file containing core settings")
    parser.add_argument('--cfg', action='store', required=True)

    arguments = parser.parse_args()

    settings = CommonSettings.from_cfg(arguments.cfg)

    print_to_console("1 stage - create named screen for balance retrieval...",
                     LOG_ALL_ERRORS)
    create_screen(BALANCE_UPDATE_DEPLOY_UNIT.screen_name)

    print_to_console("2 stage - init balance retrieval per exchange...",
                     LOG_ALL_ERRORS)
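    # launch one balance-retrieval process per configured exchange inside the shared screen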
    for exchange_id in settings.exchanges:
        deploy_unit = DeployUnit(
            BALANCE_UPDATE_DEPLOY_UNIT.screen_name,
            get_exchange_name_by_id(exchange_id),
            BALANCE_UPDATE_DEPLOY_UNIT.command +
            get_exchange_name_by_id(exchange_id))

        # NOTE: the original example is truncated here; passing deploy_unit is an assumption
        deploy_process_in_screen(BALANCE_UPDATE_DEPLOY_UNIT.screen_name,
                                 deploy_unit)
Example #8
def arbitrage_between_pair(args):
    cfg = ArbitrageConfig.from_args(args)

    app_settings = CommonSettings.from_cfg(args.cfg)

    set_logging_level(app_settings.logging_level_id)
    set_log_folder(app_settings.log_folder)
    load_keys(app_settings.key_path)

    priority_queue, msg_queue, local_cache = init_queues(app_settings)

    processor = ConnectionPool(pool_size=2)

    # to avoid a time-consuming check in the future - validate arguments here
    for exchange_id in [args.sell_exchange_id, args.buy_exchange_id]:
        pair_name = get_currency_pair_name_by_exchange_id(
            cfg.pair_id, exchange_id)
        if pair_name is None:
            log_dont_supported_currency(cfg, exchange_id, cfg.pair_id)
            exit()

    deal_cap = MarketCap(cfg.pair_id, get_now_seconds_utc())
    deal_cap.update_max_volume_cap(NO_MAX_CAP_LIMIT)
    update_min_cap(cfg, deal_cap, processor)

    balance_state = dummy_balance_init(timest=0,
                                       default_volume=Decimal("0"),
                                       default_available_volume=Decimal("0"))

    if not YES_I_KNOW_WHAT_AM_I_DOING:
        die_hard("LIVE TRADING!")

    while True:
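        # Refresh the minimum-cap limits when they become stale, then run one ARBITRAGE
        # pass and one REVERSE pass over fresh order books, adding any resulting deals
        # to the watch list.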

        if get_now_seconds_utc() - deal_cap.last_updated > MIN_CAP_UPDATE_TIMEOUT:
            update_min_cap(cfg, deal_cap, processor)

        for mode_id in [DEAL_TYPE.ARBITRAGE, DEAL_TYPE.REVERSE]:
            cur_timest_sec = get_now_seconds_utc()

            method = search_for_arbitrage if mode_id == DEAL_TYPE.ARBITRAGE else adjust_currency_balance
            active_threshold = cfg.threshold if mode_id == DEAL_TYPE.ARBITRAGE else cfg.reverse_threshold

            balance_state = get_updated_balance_arbitrage(
                cfg, balance_state, local_cache)

            if balance_state.expired(cur_timest_sec, cfg.buy_exchange_id,
                                     cfg.sell_exchange_id,
                                     BALANCE_EXPIRED_THRESHOLD):
                log_balance_expired_errors(cfg, msg_queue, balance_state)
                die_hard("Balance expired")

            order_book_src, order_book_dst = get_order_books_for_arbitrage_pair(
                cfg, cur_timest_sec, processor)

            if order_book_dst is None or order_book_src is None:
                log_failed_to_retrieve_order_book(cfg)
                sleep_for(3)
                continue

            if is_order_books_expired(order_book_src, order_book_dst,
                                      local_cache, msg_queue,
                                      cfg.log_file_name):
                sleep_for(3)
                continue

            local_cache.cache_order_book(order_book_src)
            local_cache.cache_order_book(order_book_dst)

            # init_deals_with_logging_speedy
            status_code, deal_pair = method(order_book_src,
                                            order_book_dst,
                                            active_threshold,
                                            cfg.balance_threshold,
                                            init_deals_with_logging_speedy,
                                            balance_state,
                                            deal_cap,
                                            type_of_deal=mode_id,
                                            worker_pool=processor,
                                            msg_queue=msg_queue)

            add_orders_to_watch_list(deal_pair, priority_queue)

            print_to_console("I am still alive! ", LOG_ALL_DEBUG)
            sleep_for(2)

        sleep_for(3)

        deal_cap.update_max_volume_cap(NO_MAX_CAP_LIMIT)