Example #1
def parse_trade_book(msg, msg_data):
    #book trades
    if (msg['category'] == 'order_matches'
        and ((msg['command'] == 'update' and msg_data['status'] == 'completed') #for a trade with BTC involved, but that is settled (completed)
             or ('forward_asset' in msg_data and msg_data['forward_asset'] != config.BTC and msg_data['backward_asset'] != config.BTC))): #or for a trade without BTC on either end

        if msg['command'] == 'update' and msg_data['status'] == 'completed':
            #an order is being updated to a completed status (i.e. a BTCpay has completed)
            tx0_hash, tx1_hash = msg_data['order_match_id'][:64], msg_data['order_match_id'][65:]
            #get the order_match this btcpay settles
            order_match = util.jsonrpc_api("get_order_matches",
                {'filters': [
                 {'field': 'tx0_hash', 'op': '==', 'value': tx0_hash},
                 {'field': 'tx1_hash', 'op': '==', 'value': tx1_hash}]
                }, abort_on_error=False)['result'][0]
        else:
            assert msg_data['status'] == 'completed' #should not enter a pending state for non-BTC matches
            order_match = msg_data

        forward_asset_info = config.mongo_db.tracked_assets.find_one({'asset': order_match['forward_asset']})
        backward_asset_info = config.mongo_db.tracked_assets.find_one({'asset': order_match['backward_asset']})
        assert forward_asset_info and backward_asset_info
        base_asset, quote_asset = util.assets_to_asset_pair(order_match['forward_asset'], order_match['backward_asset'])
        
        #don't create trade records from order matches with BTC that are under the dust limit
        if    (order_match['forward_asset'] == config.BTC and order_match['forward_quantity'] <= config.ORDER_BTC_DUST_LIMIT_CUTOFF) \
           or (order_match['backward_asset'] == config.BTC and order_match['backward_quantity'] <= config.ORDER_BTC_DUST_LIMIT_CUTOFF):
            logger.debug("Order match %s ignored due to %s under dust limit." % (order_match['tx0_hash'] + order_match['tx1_hash'], config.BTC))
            return 'ABORT_THIS_MESSAGE_PROCESSING'

        #take divisible trade quantities to floating point
        forward_quantity = blockchain.normalize_quantity(order_match['forward_quantity'], forward_asset_info['divisible'])
        backward_quantity = blockchain.normalize_quantity(order_match['backward_quantity'], backward_asset_info['divisible'])
        
        #compose trade
        trade = {
            'block_index': config.state['cur_block']['block_index'],
            'block_time': config.state['cur_block']['block_time_obj'],
            'message_index': msg['message_index'], #secondary temporal ordering
            'order_match_id': order_match['tx0_hash'] + '_' + order_match['tx1_hash'],
            'order_match_tx0_index': order_match['tx0_index'],
            'order_match_tx1_index': order_match['tx1_index'],
            'order_match_tx0_address': order_match['tx0_address'],
            'order_match_tx1_address': order_match['tx1_address'],
            'base_asset': base_asset,
            'quote_asset': quote_asset,
            'base_quantity': order_match['forward_quantity'] if order_match['forward_asset'] == base_asset else order_match['backward_quantity'],
            'quote_quantity': order_match['backward_quantity'] if order_match['forward_asset'] == base_asset else order_match['forward_quantity'],
            'base_quantity_normalized': forward_quantity if order_match['forward_asset'] == base_asset else backward_quantity,
            'quote_quantity_normalized': backward_quantity if order_match['forward_asset'] == base_asset else forward_quantity,
        }
        trade['unit_price'] = float(
            ( D(trade['quote_quantity_normalized']) / D(trade['base_quantity_normalized']) ).quantize(
                D('.00000000'), rounding=decimal.ROUND_HALF_EVEN))
        trade['unit_price_inverse'] = float(
            ( D(trade['base_quantity_normalized']) / D(trade['quote_quantity_normalized']) ).quantize(
                D('.00000000'), rounding=decimal.ROUND_HALF_EVEN))

        config.mongo_db.trades.insert(trade)
        logger.info("Procesed Trade from tx %s :: %s" % (msg['message_index'], trade))
Example #2
def handle_reorg(msg, msg_data):
    if msg['command'] == 'reorg':
        logger.warn("Blockchain reorginization at block %s" % msg_data['block_index'])

        #prune our state back to the block just before the reorg point
        database.rollback(msg_data['block_index'] - 1)
        assert config.state['my_latest_block']['block_index'] == msg_data['block_index'] - 1

        #for the current last_message_index (which could have gone down after the reorg), query counterpartyd
        running_info = util.jsonrpc_api("get_running_info", abort_on_error=True)['result']
        config.state['last_message_index'] = running_info['last_message_index']
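
The invariant handle_reorg relies on, namely that rolling back to block_index - 1 leaves exactly that block as the tip, can be illustrated with a toy in-memory stand-in (everything below is hypothetical; the real database.rollback is not shown in this excerpt):

# toy stand-in: blocks 100..110 are "processed"
processed_blocks = [{'block_index': i} for i in range(100, 111)]

def toy_rollback(max_block_index):
    """Drop state for all blocks above max_block_index and return the new tip."""
    global processed_blocks
    processed_blocks = [b for b in processed_blocks if b['block_index'] <= max_block_index]
    return processed_blocks[-1]

reorg_block_index = 108  # as if msg_data['block_index'] == 108
tip = toy_rollback(reorg_block_index - 1)
assert tip['block_index'] == reorg_block_index - 1  # the same assert handle_reorg makes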
Example #3
    def publish_mempool_tx():
        """fetch new tx from mempool"""
        mempool_txs = config.mongo_db.mempool.find(projection={'tx_hash': True})
        tx_hashes = {t['tx_hash'] for t in mempool_txs}

        params = { # get latest 1000 entries from mempool
            'order_by': 'timestamp',
            'order_dir': 'DESC'
        }
        new_txs = util.jsonrpc_api("get_mempool", params, abort_on_error=True)
        num_skipped_tx = 0
        if new_txs:
            for new_tx in new_txs['result']:
                # skip if it's already in our mempool table
                if new_tx['tx_hash'] in tx_hashes:
                    num_skipped_tx += 1
                    continue

                tx = {
                    'tx_hash': new_tx['tx_hash'],
                    'command': new_tx['command'],
                    'category': new_tx['category'],
                    'bindings': new_tx['bindings'],
                    'timestamp': new_tx['timestamp'],
                    'viewed_in_block': config.state['my_latest_block']['block_index']
                }
                config.mongo_db.mempool.insert(tx)
                del(tx['_id'])
                tx['_category'] = tx['category']
                tx['_message_index'] = 'mempool'
                logger.debug("Spotted mempool tx: %s" % tx)
                for function in MempoolMessageProcessor.active_functions():
                    logger.debug('starting {} (mempool)'.format(function['function']))
                    # TODO: Better handling of double parsing
                    try:
                        result = function['function'](tx, json.loads(tx['bindings'])) or None
                    except pymongo.errors.DuplicateKeyError as e:
                        logging.exception(e)
                        result = None  # ensure result is defined; treat duplicate insert as a no-op
                    if result == 'ABORT_THIS_MESSAGE_PROCESSING' or result == 'continue':
                        break
                    elif result:
                        raise Exception(
                            "Message processor returned unknown code -- processor: '%s', result: '%s'" %
                            (function, result))
        logger.debug("Mempool refresh: {} entries retrieved from counterparty-server, {} new".format(len(new_txs['result']) if new_txs else '??', (len(new_txs['result']) - num_skipped_tx) if new_txs else '??'))
Example #4
    def publish_mempool_tx():
        """fetch new tx from mempool"""
        mempool_txs = config.mongo_db.mempool.find(projection={'tx_hash': True})
        tx_hashes = {t['tx_hash'] for t in mempool_txs}

        params = { # get latest 1000 entries from mempool
            'order_by': 'timestamp',
            'order_dir': 'DESC'
        }
        new_txs = util.jsonrpc_api("get_mempool", params, abort_on_error=True)
        num_skipped_tx = 0
        if new_txs:
            for new_tx in new_txs['result']:
                # skip if it's already in our mempool table
                if new_tx['tx_hash'] in tx_hashes:
                    num_skipped_tx += 1
                    continue

                tx = {
                    'tx_hash': new_tx['tx_hash'],
                    'command': new_tx['command'],
                    'category': new_tx['category'],
                    'bindings': new_tx['bindings'],
                    'timestamp': new_tx['timestamp'],
                    'viewed_in_block': config.state['my_latest_block']['block_index']
                }
                config.mongo_db.mempool.insert(tx)
                del(tx['_id'])
                tx['_category'] = tx['category']
                tx['_message_index'] = 'mempool'
                logger.debug("Spotted mempool tx: %s" % tx)
                for function in MempoolMessageProcessor.active_functions():
                    logger.debug('starting {} (mempool)'.format(function['function']))
                    # TODO: Better handling of double parsing
                    try:
                        result = function['function'](tx, json.loads(tx['bindings'])) or None
                    except pymongo.errors.DuplicateKeyError as e:
                        logging.exception(e)
                        result = None  # ensure result is defined; treat duplicate insert as a no-op
                    if result == 'ABORT_THIS_MESSAGE_PROCESSING' or result == 'continue':
                        break
                    elif result:
                        raise Exception(
                            "Message processor returned unknown code -- processor: '%s', result: '%s'" %
                            (function, result))
        logger.debug("Mempool refresh: {} entries retrieved from counterparty-server, {} new".format(len(new_txs['result']), len(new_txs['result']) - num_skipped_tx))
Example #5
 def publish_mempool_tx():
     """fetch new tx from mempool"""
     tx_hashes = []
     mempool_txs = config.mongo_db.mempool.find(projection={'tx_hash': True})
     for mempool_tx in mempool_txs:
         tx_hashes.append(str(mempool_tx['tx_hash']))
 
     params = None
     if len(tx_hashes) > 0:
         params = {
             'filters': [
                 {'field':'tx_hash', 'op': 'NOT IN', 'value': tx_hashes},
                 {'field':'category', 'op': 'IN', 'value': ['sends', 'btcpays', 'issuances', 'dividends']}
             ],
             'filterop': 'AND'
         }
     new_txs = util.jsonrpc_api("get_mempool", params, abort_on_error=True)
     if new_txs:
         for new_tx in new_txs['result']:
             tx = {
                 'tx_hash': new_tx['tx_hash'],
                 'command': new_tx['command'],
                 'category': new_tx['category'],
                 'bindings': new_tx['bindings'],
                 'timestamp': new_tx['timestamp'],
                 'viewed_in_block': config.state['my_latest_block']['block_index']
             }
             
             config.mongo_db.mempool.insert(tx)
             del(tx['_id'])
             tx['_category'] = tx['category']
             tx['_message_index'] = 'mempool'
             logger.debug("Spotted mempool tx: %s" % tx)
             for function in MempoolMessageProcessor.active_functions():
                 logger.debug('starting {} (mempool)'.format(function['function']))
                 # TODO: Better handling of double parsing
                 try:
                     result = function['function'](tx, json.loads(tx['bindings'])) or None
                 except pymongo.errors.DuplicateKeyError as e:
                     logging.exception(e)
                     result = None  # ensure result is defined; treat duplicate insert as a no-op
                 if result == 'ABORT_THIS_MESSAGE_PROCESSING' or result == 'continue':
                     break
                 elif result:
                     raise Exception("Message processor returned unknown code -- processor: '%s', result: '%s'" %
                         (function, result))
Example #6
    def publish_mempool_tx():
        """fetch new tx from mempool"""
        tx_hashes = []
        mempool_txs = config.mongo_db.mempool.find(fields={'tx_hash': True})
        for mempool_tx in mempool_txs:
            tx_hashes.append(str(mempool_tx['tx_hash']))

        params = None
        if len(tx_hashes) > 0:
            params = {
                'filters': [
                    {'field': 'tx_hash', 'op': 'NOT IN', 'value': tx_hashes},
                    {'field': 'category', 'op': 'IN', 'value': ['sends', 'btcpays', 'issuances', 'dividends']}
                ],
                'filterop': 'AND'
            }
        new_txs = util.jsonrpc_api("get_mempool", params, abort_on_error=True)

        for new_tx in new_txs['result']:
            tx = {
                'tx_hash': new_tx['tx_hash'],
                'command': new_tx['command'],
                'category': new_tx['category'],
                'bindings': new_tx['bindings'],
                'timestamp': new_tx['timestamp'],
                'viewed_in_block': config.state['my_latest_block']['block_index']
            }

            config.mongo_db.mempool.insert(tx)
            del tx['_id']
            tx['_category'] = tx['category']
            tx['_message_index'] = 'mempool'
            logger.debug("Spotted mempool tx: %s" % tx)
            zmq_publisher_eventfeed.send_json(tx)
Example #7
def handle_reorg(msg, msg_data):
    if msg['command'] == 'reorg':
        logger.warn("Blockchain reorginization at block %s" %
                    msg_data['block_index'])
        #prune our state back to the block just before the reorg point
        my_latest_block = database.rollback(msg_data['block_index'] - 1)
        config.state['my_latest_block'] = my_latest_block
        assert config.state['my_latest_block']['block_index'] == msg_data['block_index'] - 1

        #for the current last_message_index (which could have gone down after the reorg), query counterpartyd
        running_info = util.jsonrpc_api("get_running_info", abort_on_error=True)['result']
        config.state['last_message_index'] = running_info['last_message_index']

        #send out the message to listening clients (but don't forward along while we're catching up)
        if config.state['cp_latest_block_index'] - config.state['my_latest_block']['block_index'] < config.MAX_REORG_NUM_BLOCKS:
            msg_data['_last_message_index'] = config.state['last_message_index']
            event = messages.decorate_message_for_feed(msg, msg_data=msg_data)
            config.ZMQ_PUBLISHER_EVENTFEED.send_json(event)
        return 'break'  #break out of inner loop
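
The gate on the final send_json above can be read in isolation: reorg events are only forwarded to feed clients while counterblockd is within MAX_REORG_NUM_BLOCKS of the tip, so clients are not flooded during catch-up. A minimal sketch, with an assumed value for the constant:

MAX_REORG_NUM_BLOCKS = 10  # assumed stand-in for config.MAX_REORG_NUM_BLOCKS

def should_forward_to_feed(cp_latest_block_index, my_block_index):
    return cp_latest_block_index - my_block_index < MAX_REORG_NUM_BLOCKS

assert should_forward_to_feed(700005, 700000)      # near the tip: publish
assert not should_forward_to_feed(700000, 650000)  # still catching up: stay quiet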
Example #8
def process_cp_blockfeed():
    # initialize state
    config.state['cur_block'] = {'block_index': 0}  # block being currently processed
    config.state['my_latest_block'] = {'block_index': 0}  # last block that was successfully processed by counterblockd
    config.state['last_message_index'] = -1  # initialize (last processed message index)
    config.state['cp_latest_block_index'] = 0  # last block that was successfully processed by counterparty
    config.state['cp_backend_block_index'] = 0  # the latest block height as reported by the cpd blockchain backend
    config.state['cp_caught_up'] = False  # whether counterparty-server is caught up to the backend (e.g. bitcoind)
    config.state['caught_up_started_events'] = False
    #^ set after we are caught up and start up the recurring events that depend on us being caught up with the blockchain

    # enabled processor functions
    logger.debug("Enabled Message Processor Functions {0}".format(
        MessageProcessor.active_functions()))
    logger.debug("Enabled Block Processor Functions {0}".format(
        BlockProcessor.active_functions()))

    def publish_mempool_tx():
        """fetch new tx from mempool"""
        mempool_txs = config.mongo_db.mempool.find(
            projection={'tx_hash': True})
        tx_hashes = {t['tx_hash'] for t in mempool_txs}

        params = {  # get latest 1000 entries from mempool
            'order_by': 'timestamp',
            'order_dir': 'DESC'
        }
        new_txs = util.jsonrpc_api("get_mempool",
                                   params,
                                   abort_on_error=True,
                                   use_cache=False)
        num_skipped_tx = 0
        if new_txs:
            for new_tx in new_txs['result']:
                # skip if it's already in our mempool table
                if new_tx['tx_hash'] in tx_hashes:
                    num_skipped_tx += 1
                    continue

                tx = {
                    'tx_hash': new_tx['tx_hash'],
                    'command': new_tx['command'],
                    'category': new_tx['category'],
                    'bindings': new_tx['bindings'],
                    'timestamp': new_tx['timestamp'],
                    'viewed_in_block': config.state['my_latest_block']['block_index']
                }
                config.mongo_db.mempool.insert(tx)
                del tx['_id']
                tx['_category'] = tx['category']
                tx['_message_index'] = 'mempool'
                logger.debug("Spotted mempool tx: %s" % tx)
                for function in MempoolMessageProcessor.active_functions():
                    logger.debug('starting {} (mempool)'.format(
                        function['function']))
                    # TODO: Better handling of double parsing
                    try:
                        result = function['function'](
                            tx, json.loads(tx['bindings'])) or None
                    except pymongo.errors.DuplicateKeyError as e:
                        logging.exception(e)
                        result = None  # ensure result is defined; treat duplicate insert as a no-op
                    if result == 'ABORT_THIS_MESSAGE_PROCESSING' or result == 'continue':
                        break
                    elif result:
                        raise Exception(
                            "Message processor returned unknown code -- processor: '%s', result: '%s'"
                            % (function, result))
        logger.debug(
            "Mempool refresh: {} entries retrieved from counterparty-server, {} new".format(
                len(new_txs['result']) if new_txs else '??',
                (len(new_txs['result']) - num_skipped_tx) if new_txs else '??'))

    def clean_mempool_tx():
        """clean mempool transactions older than MAX_REORG_NUM_BLOCKS blocks"""
        config.mongo_db.mempool.remove(
            {"viewed_in_block": {"$lt": config.state['my_latest_block']['block_index'] - config.MAX_REORG_NUM_BLOCKS}})

    def parse_message(msg):
        msg_data = json.loads(msg['bindings'])
        logger.debug("Received message %s: %s ..." %
                     (msg['message_index'], msg))

        # out of order messages should not happen (anymore), but just to be sure
        if msg['message_index'] != config.state['last_message_index'] + 1 and config.state['last_message_index'] != -1:
            raise Exception(
                "Message index mismatch. Next message's message_index: %s, last_message_index: %s"
                % (msg['message_index'], config.state['last_message_index']))

        for function in MessageProcessor.active_functions():
            logger.debug('MessageProcessor: starting {}'.format(
                function['function']))
            # TODO: Better handling of double parsing
            try:
                result = function['function'](msg, msg_data) or None
            except pymongo.errors.DuplicateKeyError as e:
                logging.exception(e)
                result = None  # ensure result is defined; treat duplicate insert as a no-op

            if result in (
                    'ABORT_THIS_MESSAGE_PROCESSING',
                    'continue',  # just abort further MessageProcessors for THIS message
                    'ABORT_BLOCK_PROCESSING'
            ):  # abort all further block processing, including that of all messages in the block
                break
            elif result not in (True, False, None):
                raise Exception(
                    "Message processor returned unknown code -- processor: '%s', result: '%s'"
                    % (function, result))

        config.state['last_message_index'] = msg['message_index']
        return 'ABORT_BLOCK_PROCESSING' if result == 'ABORT_BLOCK_PROCESSING' else None

    def parse_block(block_data):
        config.state['cur_block'] = block_data
        config.state['cur_block']['block_time_obj'] \
            = datetime.datetime.utcfromtimestamp(config.state['cur_block']['block_time'])
        config.state['cur_block']['block_time_str'] = config.state['cur_block']['block_time_obj'].isoformat()

        for msg in config.state['cur_block']['_messages']:
            result = parse_message(msg)
            if result == 'ABORT_BLOCK_PROCESSING':  # reorg
                return False

        # run block processor Functions
        BlockProcessor.run_active_functions()
        # block successfully processed, track this in our DB
        new_block = {
            'block_index': config.state['cur_block']['block_index'],
            'block_time': config.state['cur_block']['block_time_obj'],
            'block_hash': config.state['cur_block']['block_hash'],
        }
        config.mongo_db.processed_blocks.insert(new_block)

        config.state['my_latest_block'] = new_block

        if config.state['my_latest_block']['block_index'] % 10 == 0:  # every 10 blocks print status
            root_logger = logging.getLogger()
            root_logger.setLevel(logging.INFO)
        logger.info("Block: %s of %s [message height=%s]" % (
            config.state['my_latest_block']['block_index'],
            config.state['cp_backend_block_index'] if config.state['cp_backend_block_index'] else '???',
            config.state['last_message_index'] if config.state['last_message_index'] != -1 else '???'))
        if config.state['my_latest_block']['block_index'] % 10 == 0:
            root_logger.setLevel(logging.WARNING)  # drop back to WARNING between status lines

        return True

    # grab our stored preferences, and rebuild the database if necessary
    app_config = config.mongo_db.app_config.find()
    assert app_config.count() in [0, 1]
    if (app_config.count() == 0
            or app_config[0]['db_version'] != config.DB_VERSION
            or app_config[0]['running_testnet'] != config.TESTNET):
        if app_config.count():
            logger.warn(
                "counterblockd database version UPDATED (from %i to %i) or testnet setting changed (from %s to %s). REBUILDING FROM SCRATCH ..."
                % (app_config[0]['db_version'], config.DB_VERSION,
                   app_config[0]['running_testnet'], config.TESTNET))
        else:
            logger.warn(
                "counterblockd database app_config collection doesn't exist. BUILDING FROM SCRATCH..."
            )
        app_config = database.init_reparse()
    else:
        app_config = app_config[0]
        # get the last processed block out of mongo
        my_latest_block = config.mongo_db.processed_blocks.find_one(
            sort=[("block_index", pymongo.DESCENDING)])
        if my_latest_block:
            # remove any data we have for blocks higher than this (would happen if counterblockd or mongo died
            # or errored out while processing a block)
            database.rollback(my_latest_block['block_index'])
        else:
            # no block state in the database yet
            config.state['my_latest_block'] = config.LATEST_BLOCK_INIT

    # avoid contacting counterparty-server (on reparse, to speed up)
    autopilot = False
    autopilot_runner = 0
    iteration = 0

    if config.IS_REPARSING:
        reparse_start = time.time()
        root_logger = logging.getLogger()
        root_logger_level = root_logger.getEffectiveLevel()
        root_logger.setLevel(logging.WARNING)

    # start polling counterparty-server for new blocks
    cp_running_info = None
    while True:
        iteration += 1
        if iteration % 10 == 0:
            logger.info(
                "Heartbeat (%s, block: %s, caught up: %s)" %
                (iteration, config.state['my_latest_block']['block_index'],
                 fuzzy_is_caught_up()))
        logger.debug(
            "iteration: ap %s/%s, cp_latest_block_index: %s, my_latest_block: %s"
            % (autopilot, autopilot_runner,
               config.state['cp_latest_block_index'],
               config.state['my_latest_block']['block_index']))

        if not autopilot or autopilot_runner == 0:
            try:
                cp_running_info = util.jsonrpc_api("get_running_info",
                                                   abort_on_error=True,
                                                   use_cache=False)['result']
            except Exception as e:
                logger.warn(
                    "Cannot contact counterparty-server (via get_running_info): {}"
                    .format(e))
                time.sleep(3)
                continue

        # wipe our state data if necessary, if counterparty-server has moved on to a new DB version
        wipeState = False
        updatePrefs = False

        # Checking appconfig against old running info (when batch-fetching) is redundant
        if    app_config['counterpartyd_db_version_major'] is None \
           or app_config['counterpartyd_db_version_minor'] is None \
           or app_config['counterpartyd_running_testnet'] is None:
            logger.info("Updating version info from counterparty-server")
            updatePrefs = True
        elif cp_running_info['version_major'] != app_config['counterpartyd_db_version_major']:
            logger.warn(
                "counterparty-server MAJOR DB version change (we built from %s, counterparty-server is at %s). Wiping our state data."
                % (app_config['counterpartyd_db_version_major'],
                   cp_running_info['version_major']))
            wipeState = True
            updatePrefs = True
        elif cp_running_info['version_minor'] != app_config['counterpartyd_db_version_minor']:
            logger.warn(
                "counterparty-server MINOR DB version change (we built from %s.%s, counterparty-server is at %s.%s). Wiping our state data."
                % (app_config['counterpartyd_db_version_major'],
                   app_config['counterpartyd_db_version_minor'],
                   cp_running_info['version_major'],
                   cp_running_info['version_minor']))
            wipeState = True
            updatePrefs = True
        elif cp_running_info.get('running_testnet', False) != app_config['counterpartyd_running_testnet']:
            logger.warn(
                "counterparty-server testnet setting change (from %s to %s). Wiping our state data."
                % (app_config['counterpartyd_running_testnet'],
                   cp_running_info['running_testnet']))
            wipeState = True
            updatePrefs = True
        if wipeState:
            app_config = database.reset_db_state()
        if updatePrefs:
            app_config['counterpartyd_db_version_major'] = cp_running_info['version_major']
            app_config['counterpartyd_db_version_minor'] = cp_running_info['version_minor']
            app_config['counterpartyd_running_testnet'] = cp_running_info['running_testnet']
            config.mongo_db.app_config.update({}, app_config)
            # reset my latest block record
            config.state['my_latest_block'] = config.LATEST_BLOCK_INIT
            config.state['caught_up'] = False

        # work up to what block counterpartyd is at
        try:
            if cp_running_info['last_block']:  # should normally exist, unless counterparty-server had an error getting it
                assert cp_running_info['last_block']['block_index']
                config.state['cp_latest_block_index'] = cp_running_info['last_block']['block_index']
            elif cp_running_info['db_caught_up']:
                config.state['cp_latest_block_index'] = cp_running_info['bitcoin_block_count']
            else:
                assert False
        except Exception:
            logger.warn(
                "counterparty-server not returning a valid last processed block (probably is reparsing or was just restarted)."
                +
                " Waiting 3 seconds before trying again... (Data returned: %s, we have: %s)"
                % (cp_running_info, config.state['cp_latest_block_index']))
            time.sleep(3)
            continue

        config.state['cp_backend_block_index'] = cp_running_info['bitcoin_block_count']
        config.state['cp_caught_up'] = cp_running_info['db_caught_up']

        if config.state['my_latest_block']['block_index'] < config.state['cp_latest_block_index']:
            # need to catch up
            config.state['caught_up'] = False

            # TODO: Autopilot and autopilot runner are redundant
            if config.state['cp_latest_block_index'] - config.state['my_latest_block']['block_index'] > 500:
                # we are safely far from the tip, switch to bulk-everything
                autopilot = True
                if autopilot_runner == 0:
                    autopilot_runner = 500
                autopilot_runner -= 1
            else:
                autopilot = False

            cur_block_index = config.state['my_latest_block']['block_index'] + 1
            try:
                block_data = cache.get_block_info(
                    cur_block_index,
                    prefetch=min(100, config.state['cp_latest_block_index'] - config.state['my_latest_block']['block_index']),
                    min_message_index=config.state['last_message_index'] + 1 if config.state['last_message_index'] != -1 else None)
            except Exception as e:
                logger.warn(
                    str(e) + " Waiting 3 seconds before trying again...")
                time.sleep(3)
                continue

            try:
                result = parse_block(block_data)
            except Exception as e:  # if anything bubbles up
                logger.exception(
                    "Unhandled exception while processing block. Rolling back, waiting 3 seconds and retrying. Error was: %s"
                    % e)

                # counterparty-server might have gone away...
                my_latest_block = config.mongo_db.processed_blocks.find_one(
                    sort=[("block_index", pymongo.DESCENDING)])
                if my_latest_block:
                    database.rollback(my_latest_block['block_index'])

                # disable autopilot this next iteration to force us to check up against counterparty-server
                # (it will be re-enabled later on in that same iteration if we are far enough from the tip)
                autopilot = False

                time.sleep(3)
                continue
            if result is False:  # reorg, or block processing otherwise not completed
                autopilot = False

            if config.state['cp_latest_block_index'] - cur_block_index < config.MAX_REORG_NUM_BLOCKS:  # only when we are near the tip
                clean_mempool_tx()
        elif config.state['my_latest_block']['block_index'] > config.state['cp_latest_block_index']:
            # should get a reorg message. Just to be on the safe side, prune back MAX_REORG_NUM_BLOCKS blocks
            # before what counterpartyd is saying if we see this
            logger.error(
                "Very odd: Ahead of counterparty-server with block indexes! Pruning back %s blocks to be safe."
                % config.MAX_REORG_NUM_BLOCKS)
            database.rollback(config.state['cp_latest_block_index'] -
                              config.MAX_REORG_NUM_BLOCKS)
        else:
            if config.IS_REPARSING:
                # restore logging state
                root_logger.setLevel(root_logger_level)
                # print out how long the reparse took
                reparse_end = time.time()
                logger.info("Reparse took {:.3f} minutes.".format(
                    (reparse_end - reparse_start) / 60.0))
                config.IS_REPARSING = False

            if config.QUIT_AFTER_CAUGHT_UP:
                sys.exit(0)

            # ...we may be caught up (to counterpartyd), but counterpartyd may not be (to the blockchain). And if it isn't, we aren't
            config.state['caught_up'] = config.state['cp_caught_up']

            # this logic here will cover a case where we shut down counterblockd, then start it up again quickly...
            # in that case, there are no new blocks for it to parse, so config.state['last_message_index'] would otherwise remain 0.
            # With this logic, we will correctly initialize config.state['last_message_index'] to the last message ID of the last processed block
            if config.state['last_message_index'] == -1 or config.state['my_latest_block']['block_index'] == 0:
                if config.state['last_message_index'] == -1:
                    config.state['last_message_index'] = cp_running_info['last_message_index']
                if config.state['my_latest_block']['block_index'] == 0:
                    config.state['my_latest_block']['block_index'] = cp_running_info['last_block']['block_index']
                logger.info(
                    "Detected blocks caught up on startup. Setting last message idx to %s, current block index to %s ..."
                    % (config.state['last_message_index'],
                       config.state['my_latest_block']['block_index']))

            if config.state['caught_up'] and not config.state['caught_up_started_events']:
                # start up recurring events that depend on us being fully caught up with the blockchain to run
                CaughtUpProcessor.run_active_functions()

                config.state['caught_up_started_events'] = True

            publish_mempool_tx()
            time.sleep(2)  # counterblockd itself is at least caught up, wait a bit to query again for the latest block from cpd
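
parse_message above enforces a small return-code contract on MessageProcessor functions; the sketch below restates that contract with hypothetical processor functions (the dispatch loop is a simplification of the real one):

def run_processors(processors, msg, msg_data):
    result = None
    for function in processors:
        result = function(msg, msg_data) or None
        if result in ('ABORT_THIS_MESSAGE_PROCESSING', 'continue'):
            break  # skip remaining processors for this message only
        if result == 'ABORT_BLOCK_PROCESSING':
            break  # the caller must also abort the rest of the block
        if result not in (True, False, None):
            raise Exception("unknown processor result: %r" % (result,))
    return result

def noop(msg, msg_data):
    return None  # falls through to the next processor

def reorg_detector(msg, msg_data):
    return 'ABORT_BLOCK_PROCESSING' if msg.get('command') == 'reorg' else None

assert run_processors([noop, reorg_detector], {'command': 'reorg'}, {}) == 'ABORT_BLOCK_PROCESSING'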
Example #9
            #remove any data we have for blocks higher than this (would happen if counterblockd or mongo died
            # or errored out while processing a block)
            database.rollback(my_latest_block['block_index'])
        else:
            #no block state in the database yet
            config.state['my_latest_block'] = config.LATEST_BLOCK_INIT
    
    #avoid contacting counterpartyd (on reparse, to speed up)
    autopilot = False
    autopilot_runner = 0

    #start polling counterpartyd for new blocks
    while True:
        if not autopilot or autopilot_runner == 0:
            try:
                cp_running_info = util.jsonrpc_api("get_running_info", abort_on_error=True)['result']
            except Exception as e:
                logger.warn("Cannot contact counterpartyd get_running_info: %s" % e)
                time.sleep(3)
                continue
                
        #wipe our state data if necessary, if counterpartyd has moved on to a new DB version
        wipeState = False
        updatePrefs = False
        
        #Checking appconfig against old running info (when batch-fetching) is redundant 
        if    app_config['counterpartyd_db_version_major'] is None \
           or app_config['counterpartyd_db_version_minor'] is None \
           or app_config['counterpartyd_running_testnet'] is None:
            updatePrefs = True
        elif cp_running_info['version_major'] != app_config['counterpartyd_db_version_major']:
Example #10
def process_cp_blockfeed():
    # initialize state
    config.state['cur_block'] = {'block_index': 0, }  # block being currently processed
    config.state['my_latest_block'] = {'block_index': 0}  # last block that was successfully processed by counterblockd
    config.state['last_message_index'] = -1  # initialize (last processed message index)
    config.state['cp_latest_block_index'] = 0  # last block that was successfully processed by counterparty
    config.state['cp_backend_block_index'] = 0  # the latest block height as reported by the cpd blockchain backend
    config.state['cp_caught_up'] = False  # whether counterparty-server is caught up to the backend (e.g. bitcoind)
    config.state['caught_up_started_events'] = False
    #^ set after we are caught up and start up the recurring events that depend on us being caught up with the blockchain

    # enabled processor functions
    logger.debug("Enabled Message Processor Functions {0}".format(MessageProcessor.active_functions()))
    logger.debug("Enabled Block Processor Functions {0}".format(BlockProcessor.active_functions()))

    def publish_mempool_tx():
        """fetch new tx from mempool"""
        mempool_txs = config.mongo_db.mempool.find(projection={'tx_hash': True})
        tx_hashes = {t['tx_hash'] for t in mempool_txs}

        params = { # get latest 1000 entries from mempool
            'order_by': 'timestamp',
            'order_dir': 'DESC'
        }
        new_txs = util.jsonrpc_api("get_mempool", params, abort_on_error=True)
        num_skipped_tx = 0
        if new_txs:
            for new_tx in new_txs['result']:
                # skip if it's already in our mempool table
                if new_tx['tx_hash'] in tx_hashes:
                    num_skipped_tx += 1
                    continue

                tx = {
                    'tx_hash': new_tx['tx_hash'],
                    'command': new_tx['command'],
                    'category': new_tx['category'],
                    'bindings': new_tx['bindings'],
                    'timestamp': new_tx['timestamp'],
                    'viewed_in_block': config.state['my_latest_block']['block_index']
                }
                config.mongo_db.mempool.insert(tx)
                del(tx['_id'])
                tx['_category'] = tx['category']
                tx['_message_index'] = 'mempool'
                logger.debug("Spotted mempool tx: %s" % tx)
                for function in MempoolMessageProcessor.active_functions():
                    logger.debug('starting {} (mempool)'.format(function['function']))
                    # TODO: Better handling of double parsing
                    try:
                        result = function['function'](tx, json.loads(tx['bindings'])) or None
                    except pymongo.errors.DuplicateKeyError as e:
                        logging.exception(e)
                        result = None  # ensure result is defined; treat duplicate insert as a no-op
                    if result == 'ABORT_THIS_MESSAGE_PROCESSING' or result == 'continue':
                        break
                    elif result:
                        raise Exception(
                            "Message processor returned unknown code -- processor: '%s', result: '%s'" %
                            (function, result))
        logger.debug("Mempool refresh: {} entries retrieved from counterparty-server, {} new".format(len(new_txs['result']), len(new_txs['result']) - num_skipped_tx))

    def clean_mempool_tx():
        """clean mempool transactions older than MAX_REORG_NUM_BLOCKS blocks"""
        config.mongo_db.mempool.remove(
            {"viewed_in_block": {"$lt": config.state['my_latest_block']['block_index'] - config.MAX_REORG_NUM_BLOCKS}})

    def parse_message(msg):
        msg_data = json.loads(msg['bindings'])
        logger.debug("Received message %s: %s ..." % (msg['message_index'], msg))

        # out of order messages should not happen (anymore), but just to be sure
        if msg['message_index'] != config.state['last_message_index'] + 1 and config.state['last_message_index'] != -1:
            raise Exception("Message index mismatch. Next message's message_index: %s, last_message_index: %s" % (
                msg['message_index'], config.state['last_message_index']))

        for function in MessageProcessor.active_functions():
            logger.debug('MessageProcessor: starting {}'.format(function['function']))
            # TODO: Better handling of double parsing
            try:
                result = function['function'](msg, msg_data) or None
            except pymongo.errors.DuplicateKeyError as e:
                logging.exception(e)
                result = None  # ensure result is defined; treat duplicate insert as a no-op

            if result in (
                    'ABORT_THIS_MESSAGE_PROCESSING', 'continue',  # just abort further MessageProcessors for THIS message
                    'ABORT_BLOCK_PROCESSING'):  # abort all further block processing, including that of all messages in the block
                break
            elif result not in (True, False, None):
                raise Exception(
                    "Message processor returned unknown code -- processor: '%s', result: '%s'" %
                    (function, result))

        config.state['last_message_index'] = msg['message_index']
        return 'ABORT_BLOCK_PROCESSING' if result == 'ABORT_BLOCK_PROCESSING' else None

    def parse_block(block_data):
        config.state['cur_block'] = block_data
        config.state['cur_block']['block_time_obj'] \
            = datetime.datetime.utcfromtimestamp(config.state['cur_block']['block_time'])
        config.state['cur_block']['block_time_str'] = config.state['cur_block']['block_time_obj'].isoformat()

        for msg in config.state['cur_block']['_messages']:
            result = parse_message(msg)
            if result == 'ABORT_BLOCK_PROCESSING':  # reorg
                return False

        # run block processor Functions
        BlockProcessor.run_active_functions()
        # block successfully processed, track this in our DB
        new_block = {
            'block_index': config.state['cur_block']['block_index'],
            'block_time': config.state['cur_block']['block_time_obj'],
            'block_hash': config.state['cur_block']['block_hash'],
        }
        config.mongo_db.processed_blocks.insert(new_block)

        config.state['my_latest_block'] = new_block

        logger.info("Block: %i of %i [message height=%s]" % (
            config.state['my_latest_block']['block_index'],
            config.state['cp_backend_block_index']
            if config.state['cp_backend_block_index'] else '???',
            config.state['last_message_index'] if config.state['last_message_index'] != -1 else '???'))
        return True

    # grab our stored preferences, and rebuild the database if necessary
    app_config = config.mongo_db.app_config.find()
    assert app_config.count() in [0, 1]
    if(app_config.count() == 0 or
       app_config[0]['db_version'] != config.DB_VERSION or
       app_config[0]['running_testnet'] != config.TESTNET):
        if app_config.count():
            logger.warn("counterblockd database version UPDATED (from %i to %i) or testnet setting changed (from %s to %s). REBUILDING FROM SCRATCH ..." % (
                app_config[0]['db_version'], config.DB_VERSION, app_config[0]['running_testnet'],
                config.TESTNET))
        else:
            logger.warn("counterblockd database app_config collection doesn't exist. BUILDING FROM SCRATCH...")
        app_config = database.reparse()
    else:
        app_config = app_config[0]
        # get the last processed block out of mongo
        my_latest_block = config.mongo_db.processed_blocks.find_one(sort=[("block_index", pymongo.DESCENDING)])
        if my_latest_block:
            # remove any data we have for blocks higher than this (would happen if counterblockd or mongo died
            # or errored out while processing a block)
            database.rollback(my_latest_block['block_index'])
        else:
            # no block state in the database yet
            config.state['my_latest_block'] = config.LATEST_BLOCK_INIT

    # avoid contacting counterparty-server (on reparse, to speed up)
    autopilot = False
    autopilot_runner = 0
    iteration = 0

    # start polling counterparty-server for new blocks
    cp_running_info = None
    while True:
        iteration += 1
        if iteration % 10 == 0:
            logger.info(
                "Heartbeat (%s, block: %s, caught up: %s)"
                % (iteration, config.state['my_latest_block']['block_index'], fuzzy_is_caught_up()))
        logger.debug(
            "iteration: ap %s/%s, cp_latest_block_index: %s, my_latest_block: %s" % (autopilot, autopilot_runner,
                                                                                     config.state['cp_latest_block_index'], config.state['my_latest_block']['block_index']))

        if not autopilot or autopilot_runner == 0:
            try:
                cp_running_info = util.jsonrpc_api("get_running_info", abort_on_error=True)['result']
            except Exception as e:
                logger.warn("Cannot contact counterparty-server (via get_running_info)")
                time.sleep(3)
                continue

        # wipe our state data if necessary, if counterparty-server has moved on to a new DB version
        wipeState = False
        updatePrefs = False

        # Checking appconfig against old running info (when batch-fetching) is redundant
        if    app_config['counterpartyd_db_version_major'] is None \
           or app_config['counterpartyd_db_version_minor'] is None \
           or app_config['counterpartyd_running_testnet'] is None:
            logger.info("Updating version info from counterparty-server")
            updatePrefs = True
        elif cp_running_info['version_major'] != app_config['counterpartyd_db_version_major']:
            logger.warn(
                "counterparty-server MAJOR DB version change (we built from %s, counterparty-server is at %s). Wiping our state data." % (
                    app_config['counterpartyd_db_version_major'], cp_running_info['version_major']))
            wipeState = True
            updatePrefs = True
        elif cp_running_info['version_minor'] != app_config['counterpartyd_db_version_minor']:
            logger.warn(
                "counterparty-server MINOR DB version change (we built from %s.%s, counterparty-server is at %s.%s). Wiping our state data." % (
                    app_config['counterpartyd_db_version_major'], app_config['counterpartyd_db_version_minor'],
                    cp_running_info['version_major'], cp_running_info['version_minor']))
            wipeState = True
            updatePrefs = True
        elif cp_running_info.get('running_testnet', False) != app_config['counterpartyd_running_testnet']:
            logger.warn("counterparty-server testnet setting change (from %s to %s). Wiping our state data." % (
                app_config['counterpartyd_running_testnet'], cp_running_info['running_testnet']))
            wipeState = True
            updatePrefs = True
        if wipeState:
            app_config = database.reset_db_state()
        if updatePrefs:
            app_config['counterpartyd_db_version_major'] = cp_running_info['version_major']
            app_config['counterpartyd_db_version_minor'] = cp_running_info['version_minor']
            app_config['counterpartyd_running_testnet'] = cp_running_info['running_testnet']
            config.mongo_db.app_config.update({}, app_config)
            # reset my latest block record
            config.state['my_latest_block'] = config.LATEST_BLOCK_INIT
            config.state['caught_up'] = False

        # work up to what block counterpartyd is at
        try:
            if cp_running_info['last_block']:  # should normally exist, unless counterparty-server had an error getting it
                assert cp_running_info['last_block']['block_index']
                config.state['cp_latest_block_index'] = cp_running_info['last_block']['block_index']
            elif cp_running_info['db_caught_up']:
                config.state['cp_latest_block_index'] = cp_running_info['bitcoin_block_count']
            else:
                assert False
        except Exception:
            logger.warn(
                "counterparty-server not returning a valid last processed block (probably is reparsing or was just restarted)."
                + " Waiting 3 seconds before trying again... (Data returned: %s, we have: %s)" % (
                    cp_running_info, config.state['cp_latest_block_index']))
            time.sleep(3)
            continue

        config.state['cp_backend_block_index'] = cp_running_info['bitcoin_block_count']
        config.state['cp_caught_up'] = cp_running_info['db_caught_up']

        if config.state['my_latest_block']['block_index'] < config.state['cp_latest_block_index']:
            # need to catch up
            config.state['caught_up'] = False

            # TODO: Autopilot and autopilot runner are redundant
            if config.state['cp_latest_block_index'] - config.state['my_latest_block']['block_index'] > 500:
                # we are safely far from the tip, switch to bulk-everything
                autopilot = True
                if autopilot_runner == 0:
                    autopilot_runner = 500
                autopilot_runner -= 1
            else:
                autopilot = False

            cur_block_index = config.state['my_latest_block']['block_index'] + 1
            try:
                block_data = cache.get_block_info(
                    cur_block_index,
                    prefetch=min(100, (config.state['cp_latest_block_index'] - config.state['my_latest_block']['block_index'])),
                    min_message_index=config.state['last_message_index'] + 1 if config.state['last_message_index'] != -1 else None)
            except Exception as e:
                logger.warn(str(e) + " Waiting 3 seconds before trying again...")
                time.sleep(3)
                continue

            # clean api block cache
            if config.state['cp_latest_block_index'] - cur_block_index <= config.MAX_REORG_NUM_BLOCKS:  # only when we are near the tip
                cache.clean_block_cache(cur_block_index)

            try:
                result = parse_block(block_data)
            except Exception as e:  # if anything bubbles up
                logger.exception("Unhandled exception while processing block. Rolling back, waiting 3 seconds and retrying. Error was: %s" % e)

                # counterparty-server might have gone away...
                my_latest_block = config.mongo_db.processed_blocks.find_one(sort=[("block_index", pymongo.DESCENDING)])
                if my_latest_block:
                    database.rollback(my_latest_block['block_index'])

                # disable autopilot this next iteration to force us to check up against counterparty-server
                # (it will be re-enabled later on in that same iteration if we are far enough from the tip)
                autopilot = False

                time.sleep(3)
                continue
            if result is False:  # reorg, or block processing otherwise not completed
                autopilot = False

            if config.state['cp_latest_block_index'] - cur_block_index < config.MAX_REORG_NUM_BLOCKS:  # only when we are near the tip
                clean_mempool_tx()
        elif config.state['my_latest_block']['block_index'] > config.state['cp_latest_block_index']:
            # should get a reorg message. Just to be on the safe side, prune back MAX_REORG_NUM_BLOCKS blocks
            # before what counterpartyd is saying if we see this
            logger.error(
                "Very odd: Ahead of counterparty-server with block indexes! Pruning back %s blocks to be safe."
                % config.MAX_REORG_NUM_BLOCKS)
            database.rollback(config.state['cp_latest_block_index'] - config.MAX_REORG_NUM_BLOCKS)
        else:
            if config.QUIT_AFTER_CAUGHT_UP:
                sys.exit(0)

            # ...we may be caught up (to counterpartyd), but counterpartyd may not be (to the blockchain). And if it isn't, we aren't
            config.state['caught_up'] = config.state['cp_caught_up']

            # this logic here will cover a case where we shut down counterblockd, then start it up again quickly...
            # in that case, there are no new blocks for it to parse, so config.state['last_message_index'] would otherwise remain 0.
            # With this logic, we will correctly initialize config.state['last_message_index'] to the last message ID of the last processed block
            if config.state['last_message_index'] == -1 or config.state['my_latest_block']['block_index'] == 0:
                if config.state['last_message_index'] == -1:
                    config.state['last_message_index'] = cp_running_info['last_message_index']
                if config.state['my_latest_block']['block_index'] == 0:
                    config.state['my_latest_block']['block_index'] = cp_running_info['last_block']['block_index']
                logger.info("Detected blocks caught up on startup. Setting last message idx to %s, current block index to %s ..." % (
                    config.state['last_message_index'], config.state['my_latest_block']['block_index']))

            if config.state['caught_up'] and not config.state['caught_up_started_events']:
                # start up recurring events that depend on us being fully caught up with the blockchain to run
                CaughtUpProcessor.run_active_functions()

                config.state['caught_up_started_events'] = True

            publish_mempool_tx()
            time.sleep(2)  # counterblockd itself is at least caught up, wait a bit to query again for the latest block from cpd
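
The autopilot heuristic in the loop above (here and in Example #8) batches work when far behind: while more than 500 blocks from the tip, the per-iteration get_running_info poll is skipped for the next 500 iterations. A self-contained restatement of just that state update:

def update_autopilot(autopilot, autopilot_runner, blocks_behind):
    if blocks_behind > 500:
        autopilot = True
        if autopilot_runner == 0:
            autopilot_runner = 500
        autopilot_runner -= 1
    else:
        autopilot = False
    return autopilot, autopilot_runner

ap, runner = update_autopilot(False, 0, 10000)
assert (ap, runner) == (True, 499)   # deep catch-up: polling suspended for a while
ap, runner = update_autopilot(ap, runner, 3)
assert (ap, runner) == (False, 499)  # near the tip: poll every iteration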
Example #11
    autopilot_runner = 0
    iteration = 0

    #start polling counterparty-server for new blocks
    cp_running_info = None
    while True:
        iteration += 1
        if iteration % 10 == 0:
            logger.info("Heartbeat (%s, block: %s, caught up: %s)" % (
                iteration, config.state['my_latest_block']['block_index'], fuzzy_is_caught_up())) 
        logger.info("iteration: ap %s/%s, cp_latest_block_index: %s, my_latest_block: %s" % (autopilot, autopilot_runner,
            config.state['cp_latest_block_index'], config.state['my_latest_block']['block_index']))
        
        if not autopilot or autopilot_runner == 0:
            try:
                cp_running_info = util.jsonrpc_api("get_running_info", abort_on_error=True)['result']
            except Exception as e:
                logger.warn("Cannot contact counterparty-server get_running_info: %s" % e)
                time.sleep(3)
                continue
                
        #wipe our state data if necessary, if counterparty-server has moved on to a new DB version
        wipeState = False
        updatePrefs = False
        
        #Checking appconfig against old running info (when batch-fetching) is redundant 
        if    app_config['counterpartyd_db_version_major'] is None \
           or app_config['counterpartyd_db_version_minor'] is None \
           or app_config['counterpartyd_running_testnet'] is None:
            logger.info("Updating version info from counterparty-server")
            updatePrefs = True