Example #1
def handle_reorg(msg, msg_data):
    if msg['command'] == 'reorg':
        # send out the message to listening clients (but don't forward along while we're catching up)
        if config.state['cp_latest_block_index'] - config.state['my_latest_block']['block_index'] < config.MAX_REORG_NUM_BLOCKS:
            msg_data['_last_message_index'] = config.state['last_message_index']
            store_wallet_message(msg, msg_data)
            event = messages.decorate_message_for_feed(msg, msg_data=msg_data)
Example #2
def store_wallet_message(msg, msg_data, decorate=True):
    wallet_message = messages.decorate_message_for_feed(
        msg, msg_data=msg_data) if decorate else msg

    # use "optimistic loop" pattern to insert a new message with an incrementing seq
    while True:
        last_seq = config.mongo_db.wallet_messages.find_one(
            sort=[("_id", pymongo.DESCENDING)])['_id']
        new_seq = last_seq + 1
        try:
            config.mongo_db.wallet_messages.insert({
                '_id': new_seq,
                'when': calendar.timegm(time.gmtime()),
                'message': wallet_message,
            })
        except pymongo.errors.DuplicateKeyError:
            continue
        else:
            logger.debug("store_wallet_message: stored {}".format(new_seq))
            if config.state['cw_last_message_seq'] < new_seq:
                config.state['cw_last_message_seq'] = new_seq
            break

    # every so often, trim up the table
    if new_seq % 20 == 0:  # for performance, don't do this every iteration
        if config.mongo_db.wallet_messages.count() > FUZZY_MAX_WALLET_MESSAGES_STORED:
            config.mongo_db.wallet_messages.remove(
                {'_id': {'$lte': new_seq - FUZZY_MAX_WALLET_MESSAGES_STORED}})
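Note that this snippet relies on pymongo's legacy insert(), count() and remove() methods, which were deprecated in pymongo 3 and removed in pymongo 4. A minimal sketch of the same "optimistic loop" insert under the modern API (the function name and collection argument are illustrative, not part of the original code):

import calendar
import time

import pymongo

def store_message_optimistic(collection, message):
    # claim the next integer _id; retry if a concurrent writer wins the race
    while True:
        last = collection.find_one(sort=[("_id", pymongo.DESCENDING)])
        new_seq = (last["_id"] + 1) if last else 0
        try:
            collection.insert_one({
                "_id": new_seq,
                "when": calendar.timegm(time.gmtime()),
                "message": message,
            })
        except pymongo.errors.DuplicateKeyError:
            continue  # another writer took this seq; recompute and retry
        return new_seq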
Example #3
def handle_reorg(msg, msg_data):
    if msg['command'] == 'reorg':
        # send out the message to listening clients (but don't forward along while we're catching up)
        if config.state['cp_latest_block_index'] - config.state['my_latest_block']['block_index'] < config.MAX_REORG_NUM_BLOCKS:
            msg_data['_last_message_index'] = config.state['last_message_index']
            event = messages.decorate_message_for_feed(msg, msg_data=msg_data)
            zmq_publisher_eventfeed.send_json(event)
Example #4
def get_messagefeed_messages_by_index(message_indexes):
    msgs = util.call_jsonrpc_api("get_messages_by_index",
                                 {'message_indexes': message_indexes},
                                 abort_on_error=True)['result']
    events = []
    for m in msgs:
        events.append(messages.decorate_message_for_feed(m))
    return events
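A hypothetical call site for this helper, reusing the publisher name from the other examples (the index values are made up):

events = get_messagefeed_messages_by_index([1000, 1001, 1002])  # hypothetical indexes
for event in events:
    zmq_publisher_eventfeed.send_json(event)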
Example #5
def parse_for_socketio(msg, msg_data):
    # if we're catching up beyond MAX_REORG_NUM_BLOCKS blocks out, make sure not to send out any socket.io
    # events, so as not to flood clients on a resync (we may give a 525 to kick the logged-in clients out, but
    # we can't guarantee that the socket.io connection will always be severed as well)
    if config.state['cp_latest_block_index'] - config.state['my_latest_block']['block_index'] < config.MAX_REORG_NUM_BLOCKS:
        # send out the message to listening clients
        event = messages.decorate_message_for_feed(msg, msg_data=msg_data)
        zmq_publisher_eventfeed.send_json(event)
Example #6
def get_last_n_messages(count=100):
    if count > 1000:
        raise Exception("The count is too damn high")
    message_indexes = list(range(max(config.state['last_message_index'] - count, 0) + 1, config.state['last_message_index'] + 1))
    msgs = util.call_jsonrpc_api(
        "get_messages_by_index", {'message_indexes': message_indexes}, abort_on_error=True)['result']
    for i in range(len(msgs)):
        msgs[i] = messages.decorate_message_for_feed(msgs[i])
    return msgs
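To make the index arithmetic concrete, assume config.state['last_message_index'] is 500 (a made-up value):

# hypothetical walkthrough of the range computation above
# max(500 - 100, 0) + 1 == 401
# list(range(401, 501)) == [401, 402, ..., 500]  -> the 100 newest indexes
# the max(..., 0) guard keeps the lower bound at 1 early in a fresh sync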
Example #7
def get_last_n_messages(count=100):
    if count > 1000:
        raise Exception("The count is too damn high")
    message_indexes = range(max(config.state['last_message_index'] - count, 0) + 1, config.state['last_message_index'] + 1)
    msgs = util.call_jsonrpc_api("get_messages_by_index",
        {'message_indexes': message_indexes}, abort_on_error=True)['result']
    for i in xrange(len(msgs)):
        msgs[i] = messages.decorate_message_for_feed(msgs[i])
    return msgs
Example #8
def parse_for_socketio(msg, msg_data):
    # if we're catching up beyond MAX_REORG_NUM_BLOCKS blocks out, make sure not to send out any socket.io
    # events, so as not to flood clients on a resync (we may give a 525 to kick the logged-in clients out, but
    # we can't guarantee that the socket.io connection will always be severed as well)
    if config.state['cp_latest_block_index'] - config.state['my_latest_block']['block_index'] < config.MAX_REORG_NUM_BLOCKS:
        # send out the message to listening clients
        event = messages.decorate_message_for_feed(msg, msg_data=msg_data)
        config.ZMQ_PUBLISHER_EVENTFEED.send_json(event)
Example #9
def handle_invalid(msg, msg_data):
    # don't process invalid messages, but do forward them along to clients
    status = msg_data.get('status', 'valid').lower()
    if status.startswith('invalid'):
        if config.state['cp_latest_block_index'] - config.state['my_latest_block']['block_index'] < config.MAX_REORG_NUM_BLOCKS:
            # forward along via the message feed, except while we're catching up
            event = messages.decorate_message_for_feed(msg, msg_data=msg_data)
            zmq_publisher_eventfeed.send_json(event)
        config.state['last_message_index'] = msg['message_index']
        return 'ABORT_THIS_MESSAGE_PROCESSING'
Example #10
def handle_reorg(msg, msg_data):
    if msg["command"] == "reorg":
        # send out the message to listening clients (but don't forward along while we're catching up)
        if (
            config.state["cp_latest_block_index"] - config.state["my_latest_block"]["block_index"]
            < config.MAX_REORG_NUM_BLOCKS
        ):
            msg_data["_last_message_index"] = config.state["last_message_index"]
            store_wallet_message(msg, msg_data)
            event = messages.decorate_message_for_feed(msg, msg_data=msg_data)
Example #11
def handle_invalid(msg, msg_data):
    # don't process invalid messages, but do forward them along to clients
    status = msg_data.get('status', 'valid').lower()
    if status.startswith('invalid'):
        if config.state['cp_latest_block_index'] - config.state['my_latest_block']['block_index'] < config.MAX_REORG_NUM_BLOCKS:
            # forward along via the message feed, except while we're catching up
            event = messages.decorate_message_for_feed(msg, msg_data=msg_data)
            config.ZMQ_PUBLISHER_EVENTFEED.send_json(event)
        config.state['last_message_index'] = msg['message_index']
        return 'continue'
Example #12
def handle_invalid(msg, msg_data):
    # don't process invalid messages, but do forward them along to clients
    status = msg_data.get('status', 'valid').lower()
    if status.startswith('invalid'):
        if config.state['cp_latest_block_index'] - config.state['my_latest_block']['block_index'] < config.MAX_REORG_NUM_BLOCKS:
            # forward along via the message feed, except while we're catching up
            event = messages.decorate_message_for_feed(msg, msg_data=msg_data)
            zmq_publisher_eventfeed.send_json(event)
        config.state['last_message_index'] = msg['message_index']
        return 'ABORT_THIS_MESSAGE_PROCESSING'
Example #13
def handle_reorg(msg, msg_data):
    if msg['command'] == 'reorg':
        logger.warn("Blockchain reorganization at block %s" % msg_data['block_index'])
        # prune back to and including the specified message_index
        my_latest_block = database.rollback(msg_data['block_index'] - 1)
        config.state['my_latest_block'] = my_latest_block
        assert config.state['my_latest_block']['block_index'] == msg_data['block_index'] - 1

        # for the current last_message_index (which could have gone down after the reorg), query counterpartyd
        running_info = util.jsonrpc_api("get_running_info", abort_on_error=True)['result']
        config.state['last_message_index'] = running_info['last_message_index']

        # send out the message to listening clients (but don't forward along while we're catching up)
        if config.state['cp_latest_block_index'] - config.state['my_latest_block']['block_index'] < config.MAX_REORG_NUM_BLOCKS:
            msg_data['_last_message_index'] = config.state['last_message_index']
            event = messages.decorate_message_for_feed(msg, msg_data=msg_data)
            config.ZMQ_PUBLISHER_EVENTFEED.send_json(event)
        return 'break'  # break out of the inner loop
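All of these handlers share the same catch-up guard: events go out only while the follower is within MAX_REORG_NUM_BLOCKS of the chain tip. A small illustration with invented numbers (MAX_REORG_NUM_BLOCKS = 10 is an assumption, not the project's real setting):

# hypothetical values
cp_latest_block_index = 800000  # chain tip reported by counterparty-server
my_block_index = 799995         # last block this follower processed
MAX_REORG_NUM_BLOCKS = 10       # assumed cap, for illustration only

# lag is 5 (< 10), so the event is published to listening clients;
# during a deep resync the lag exceeds the cap and publishing is skipped
publish = (cp_latest_block_index - my_block_index) < MAX_REORG_NUM_BLOCKS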
Example #14
def store_wallet_message(msg, msg_data, decorate=True):
    wallet_message = messages.decorate_message_for_feed(msg, msg_data=msg_data) if decorate else msg

    # use "optimistic loop" pattern to insert a new message with an incrementing seq
    while True:
        last_seq = config.mongo_db.wallet_messages.find_one(sort=[("_id", pymongo.DESCENDING)])["_id"]
        new_seq = last_seq + 1
        try:
            config.mongo_db.wallet_messages.insert(
                {"_id": new_seq, "when": calendar.timegm(time.gmtime()), "message": wallet_message}
            )
        except pymongo.errors.DuplicateKeyError:
            continue
        else:
            logger.debug("store_wallet_message: stored {}".format(new_seq))
            if config.state["cw_last_message_seq"] < new_seq:
                config.state["cw_last_message_seq"] = new_seq
            break

    # every so often, trim up the table
    if new_seq % 20 == 0:  # for performance, don't do this every iteration
        if config.mongo_db.wallet_messages.count() > FUZZY_MAX_WALLET_MESSAGES_STORED:
            config.mongo_db.wallet_messages.remove({"_id": {"$lte": new_seq - FUZZY_MAX_WALLET_MESSAGES_STORED}})
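The trim at the end keeps roughly the newest FUZZY_MAX_WALLET_MESSAGES_STORED documents: with a cap of 1000 and new_seq == 5000 (both hypothetical values), the remove() call deletes every document whose _id is 4000 or lower.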
Example #15
def get_messagefeed_messages_by_index(message_indexes):
    msgs = util.call_jsonrpc_api("get_messages_by_index", {'message_indexes': message_indexes}, abort_on_error=True)['result']
    events = []
    for m in msgs:
        events.append(messages.decorate_message_for_feed(m))
    return events