Example #1
    def __init__(self, chain_manager: ChainManager, sync_state: SyncState,
                 qrl_node):

        self.master_mr = MessageReceipt()
        self.pow = None
        self.sync_state = sync_state

        self._ntp = ntp
        self._qrl_node = qrl_node
        self._chain_manager = chain_manager
        self._chain_manager.set_broadcast_tx(self.broadcast_tx)

        self._syncing_enabled = False
        self._target_channel = None
        self._target_node_header_hash = None
        self._last_requested_block_number = None

        self._genesis_processed = False
        self._peer_connections = []
        self._txn_processor_running = False

        self.peer_blockheight = dict()

        reactor.callLater(config.user.monitor_connections_interval,
                          self.monitor_connections)

        self.p2p_msg_priority = p2p_msg_priority

        # Maintains the list of ips in the queue that can be tried to form a new p2p connection
        self._peer_q = []
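
Both factory constructors above create a single shared MessageReceipt and immediately schedule monitor_connections on the Twisted reactor via reactor.callLater. The stand-alone sketch below (ConnectionMonitor and MONITOR_INTERVAL are illustrative names, not part of the QRL code) shows the self-rescheduling callLater pattern these constructors rely on.

# Minimal, self-contained sketch of the reactor.callLater pattern used above.
# ConnectionMonitor and MONITOR_INTERVAL are made-up names for illustration.
from twisted.internet import reactor

MONITOR_INTERVAL = 30  # stands in for config.user.monitor_connections_interval


class ConnectionMonitor:
    def __init__(self):
        self.peer_connections = []
        # Scheduled once at construction time, exactly like the factories above.
        reactor.callLater(MONITOR_INTERVAL, self.monitor_connections)

    def monitor_connections(self):
        # Re-arm first so the periodic check keeps running.
        reactor.callLater(MONITOR_INTERVAL, self.monitor_connections)
        if not self.peer_connections:
            print('No connected peer found; a reconnect would be triggered here')


if __name__ == '__main__':
    ConnectionMonitor()
    reactor.callLater(65, reactor.stop)  # let the monitor fire twice, then exit
    reactor.run()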
Example #2
    def __init__(self, chain_manager: ChainManager, sync_state: SyncState,
                 qrl_node):

        self.master_mr = MessageReceipt()
        self.pow = None
        self.sync_state = sync_state

        self._ntp = ntp
        self._qrl_node = qrl_node
        self._chain_manager = chain_manager
        self._chain_manager.set_broadcast_tx(self.broadcast_tx)

        self._syncing_enabled = False
        self._target_peer = None
        self._target_node_header_hash = None
        self._last_requested_block_idx = None

        self._genesis_processed = False
        self._peer_connections = []
        self._synced_peers_protocol = set()
        self._txn_processor_running = False

        self.peer_blockheight = dict()

        reactor.callLater(config.user.monitor_connections_interval,
                          self.monitor_connections)

        self.p2p_msg_priority = p2p_msg_priority
Example #3
    def __init__(self, buffered_chain: BufferedChain, sync_state: SyncState,
                 qrl_node: QRLNode):

        # FIXME: Constructor signature is not consistent with other factory classes
        self.master_mr = MessageReceipt()
        self.pos = None
        self.ntp = ntp
        self.buffered_chain = buffered_chain
        self.sync_state = sync_state

        self.sync = 0

        self.genesis_processed = False  # FIXME: Accessed by every p2pprotocol instance
        self.peer_connections = []  # FIXME: Accessed by every p2pprotocol instance
        self.synced_peers = set()  # FIXME: Accessed by every p2pprotocol instance

        self.qrl_node = qrl_node

        self.txn_processor_running = False

        self.bkmr_blocknumber = 0  # Blocknumber for which bkmr is being tracked
        self.bkmr_priorityq = queue.PriorityQueue()
        # Scheduled and cancel the call, just to initialize with IDelayedCall
        self.bkmr_processor = reactor.callLater(1, lambda: None, pos=None)
        self.bkmr_processor.cancel()
Example #4
    def test_register(self):
        mr = MessageReceipt()

        msg_hash = str2bin("asdf")
        msg_obj = [1, 2, 3, 4]
        msg_type = mr.allowed_types[0]

        mr.register(msg_type, msg_hash, msg_obj)
Example #5
    def test_contains(self):
        mr = MessageReceipt()

        msg_hash = str2bin("hash_valid")
        msg_obj = [1, 2, 3, 4]
        msg_type = mr.allowed_types[0]

        mr.register(msg_type, msg_hash, msg_obj)
        self.assertTrue(mr.contains(msg_hash, msg_type))
Example #6
    def __init__(self,
                 chain_manager: ChainManager,
                 sync_state: SyncState,
                 qrl_node):

        self.master_mr = MessageReceipt()
        self.pow = None
        self.sync_state = sync_state

        self._ntp = ntp
        self._qrl_node = qrl_node
        self._chain_manager = chain_manager
        self._chain_manager.set_broadcast_tx(self.broadcast_tx)

        self._syncing_enabled = False
        self._target_peer = None
        self._target_node_header_hash = None
        self._last_requested_block_idx = None

        self._genesis_processed = False
        self._peer_connections = []
        self._synced_peers_protocol = set()
        self._txn_processor_running = False

        self.peer_blockheight = dict()

        reactor.callLater(config.user.monitor_connections_interval, self.monitor_connections)

        self.p2p_msg_priority = {
            qrllegacy_pb2.LegacyMessage.VE: 0,
            qrllegacy_pb2.LegacyMessage.PL: 0,
            qrllegacy_pb2.LegacyMessage.PONG: 0,

            ######################
            qrllegacy_pb2.LegacyMessage.MR: 2,
            qrllegacy_pb2.LegacyMessage.SFM: 1,

            qrllegacy_pb2.LegacyMessage.BK: 1,
            qrllegacy_pb2.LegacyMessage.FB: 1,
            qrllegacy_pb2.LegacyMessage.PB: 1,
            qrllegacy_pb2.LegacyMessage.BH: 1,

            ############################
            qrllegacy_pb2.LegacyMessage.TX: 1,
            qrllegacy_pb2.LegacyMessage.MT: 1,
            qrllegacy_pb2.LegacyMessage.TK: 1,
            qrllegacy_pb2.LegacyMessage.TT: 1,
            qrllegacy_pb2.LegacyMessage.LT: 1,
            qrllegacy_pb2.LegacyMessage.SL: 1,

            qrllegacy_pb2.LegacyMessage.EPH: 3,

            qrllegacy_pb2.LegacyMessage.SYNC: 0,
            qrllegacy_pb2.LegacyMessage.CHAINSTATE: 0,
            qrllegacy_pb2.LegacyMessage.HEADERHASHES: 1,
            qrllegacy_pb2.LegacyMessage.P2P_ACK: 0,
        }
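
The p2p_msg_priority dictionary above maps legacy message types to small integers, with lower values meaning higher priority (VE/PL/PONG at 0, MR at 2, EPH at 3). The sketch below shows how such a map could feed a queue.PriorityQueue for outbound messages; OutgoingQueue and the string keys are illustrative stand-ins for the qrllegacy_pb2 enum values, not part of the QRL examples.

# Illustrative only: shows how a priority map (lower value = sent earlier)
# can order outbound messages. String keys stand in for the protobuf enums.
import itertools
import queue

p2p_msg_priority = {'VE': 0, 'PL': 0, 'PONG': 0, 'SFM': 1, 'MR': 2, 'EPH': 3}


class OutgoingQueue:
    def __init__(self, priority_map):
        self._priority_map = priority_map
        self._queue = queue.PriorityQueue()
        self._counter = itertools.count()  # tie-breaker keeps FIFO order within a priority

    def push(self, func_name, message):
        priority = self._priority_map.get(func_name, 1)
        self._queue.put((priority, next(self._counter), message))

    def pop(self):
        _, _, message = self._queue.get_nowait()
        return message


outgoing = OutgoingQueue(p2p_msg_priority)
outgoing.push('EPH', 'ephemeral payload')   # priority 3
outgoing.push('PONG', 'pong payload')       # priority 0
assert outgoing.pop() == 'pong payload'     # lower priority value is dequeued first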
Example #7
    def test_register_overflow(self):
        mr = MessageReceipt()

        msg_obj = [1, 2, 3, 4]
        msg_type = mr.allowed_types[0]

        config.dev.message_q_size = 4

        for i in range(config.dev.message_q_size * 2):
            msg_hash = str2bin(str(i))
            mr.register(msg_type, msg_hash, msg_obj)

        self.assertEqual(len(mr._hash_msg), config.dev.message_q_size)
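
test_register_overflow above only asserts the observable behaviour: after registering twice config.dev.message_q_size hashes, MessageReceipt keeps message_q_size entries in _hash_msg. The sketch below (BoundedReceiptRegistry is a made-up name, not the real implementation) illustrates one way such a bounded registry can evict its oldest entries.

# Illustration of the bounded behaviour the overflow test checks; this is not
# the actual MessageReceipt implementation.
from collections import OrderedDict

MESSAGE_Q_SIZE = 4  # stands in for config.dev.message_q_size


class BoundedReceiptRegistry:
    def __init__(self, maxlen=MESSAGE_Q_SIZE):
        self._maxlen = maxlen
        self._hash_msg = OrderedDict()   # msg_hash -> msg_obj
        self._hash_type = OrderedDict()  # msg_hash -> msg_type

    def register(self, msg_type, msg_hash, msg_obj):
        if len(self._hash_msg) >= self._maxlen:
            evicted_hash, _ = self._hash_msg.popitem(last=False)  # drop the oldest entry
            self._hash_type.pop(evicted_hash, None)
        self._hash_msg[msg_hash] = msg_obj
        self._hash_type[msg_hash] = msg_type

    def contains(self, msg_hash, msg_type):
        return msg_hash in self._hash_msg and self._hash_type.get(msg_hash) == msg_type


registry = BoundedReceiptRegistry()
for i in range(MESSAGE_Q_SIZE * 2):
    registry.register('TX', str(i).encode(), [1, 2, 3, 4])
assert len(registry._hash_msg) == MESSAGE_Q_SIZE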
Example #8
    def __init__(self,
                 chain_manager: ChainManager,
                 sync_state: SyncState,
                 qrl_node):

        self.master_mr = MessageReceipt()
        self.pow = None
        self.sync_state = sync_state

        self._ntp = ntp
        self._qrl_node = qrl_node
        self._chain_manager = chain_manager

        self._syncing_enabled = False
        self._target_peer = None
        self._target_node_header_hash = None
        self._last_requested_block_idx = None

        self._genesis_processed = False
        self._peer_connections = []
        self._synced_peers_protocol = set()
        self._txn_processor_running = False

        self.peer_blockheight = dict()

        reactor.callLater(180, self.monitor_connections)

        self.p2p_msg_priority = {
            qrllegacy_pb2.LegacyMessage.VE: 0,
            qrllegacy_pb2.LegacyMessage.PL: 0,
            qrllegacy_pb2.LegacyMessage.PONG: 0,

            ######################
            qrllegacy_pb2.LegacyMessage.MR: 2,
            qrllegacy_pb2.LegacyMessage.SFM: 1,

            qrllegacy_pb2.LegacyMessage.BK: 1,
            qrllegacy_pb2.LegacyMessage.FB: 1,
            qrllegacy_pb2.LegacyMessage.PB: 1,
            qrllegacy_pb2.LegacyMessage.BH: 1,

            ############################
            qrllegacy_pb2.LegacyMessage.TX: 1,
            qrllegacy_pb2.LegacyMessage.MT: 1,
            qrllegacy_pb2.LegacyMessage.TK: 1,
            qrllegacy_pb2.LegacyMessage.TT: 1,
            qrllegacy_pb2.LegacyMessage.LT: 1,
            qrllegacy_pb2.LegacyMessage.SL: 1,

            qrllegacy_pb2.LegacyMessage.EPH: 3,

            qrllegacy_pb2.LegacyMessage.SYNC: 0,
            qrllegacy_pb2.LegacyMessage.CHAINSTATE: 0,
            qrllegacy_pb2.LegacyMessage.HEADERHASHES: 1,
            qrllegacy_pb2.LegacyMessage.P2P_ACK: 0,
        }
Example #9
    def __init__(self, chain, p2pFactory, nodeState, ntp):
        self.master_mr = MessageReceipt()
        self.nodeState = nodeState
        self.ntp = ntp
        self.chain = chain
        self.r1_time_diff = defaultdict(list)
        self.r2_time_diff = defaultdict(list)

        self.incoming_blocks = {}
        self.last_pos_cycle = 0
        self.last_selected_height = 0
        self.last_bk_time = 0
        self.last_pb_time = 0
        self.next_header_hash = None
        self.next_block_number = None
        self.fmbh_allowed_peers = {}
        self.fmbh_blockhash_peers = {}

        self.p2pFactory = p2pFactory
Example #10
    def test_add_contains_remove(self):
        mr = MessageReceipt()
        # FIXME: Hashes are being treated as strings

        msg_hash = str2bin("hash_valid")
        msg_obj = [1, 2, 3, 4]
        msg_type = mr.allowed_types[0]
        peer = '127.0.0.1'

        mr.register(msg_type, msg_hash, msg_obj)
        mr.add_peer(msg_hash, msg_type, peer)

        self.assertTrue(mr.contains(msg_hash, msg_type))
        self.assertFalse(mr.contains(b'hash_invalid', msg_type))
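
The FIXME in the test above notes that hashes are being treated as strings (str2bin("hash_valid") rather than a real digest). In practice the receipt hash would be a digest of the serialized message; the real transaction objects expose get_message_hash() for this. A purely illustrative sketch:

# Illustrative only: shows a bytes digest being used as msg_hash instead of an
# ad-hoc string; the QRL objects compute this via their own get_message_hash().
import hashlib


def message_hash(serialized_message: bytes) -> bytes:
    return hashlib.sha256(serialized_message).digest()


msg_obj = b'\x01\x02\x03\x04'      # stand-in for a serialized transaction
msg_hash = message_hash(msg_obj)   # 32-byte digest, usable as a dictionary key
assert isinstance(msg_hash, bytes) and len(msg_hash) == 32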
Example #11
    def test_create(self):
        mr = MessageReceipt()
        self.assertIsNotNone(mr)
        self.assertEqual(mr.allowed_types, [LegacyMessage.TX,
                                            LegacyMessage.LT,
                                            LegacyMessage.EPH,
                                            LegacyMessage.BK,
                                            LegacyMessage.MT,
                                            LegacyMessage.TK,
                                            LegacyMessage.TT,
                                            LegacyMessage.SL,
                                            LegacyMessage.MC,
                                            LegacyMessage.MS,
                                            LegacyMessage.MV])
Example #12
class POS:
    def __init__(self, chain, p2pFactory, nodeState, ntp):
        self.master_mr = MessageReceipt()
        self.nodeState = nodeState
        self.ntp = ntp
        self.chain = chain
        self.r1_time_diff = defaultdict(list)
        self.r2_time_diff = defaultdict(list)

        self.incoming_blocks = {}
        self.last_pos_cycle = 0
        self.last_selected_height = 0
        self.last_bk_time = 0
        self.last_pb_time = 0
        self.next_header_hash = None
        self.next_block_number = None
        self.fmbh_allowed_peers = {}
        self.fmbh_blockhash_peers = {}

        self.p2pFactory = p2pFactory

    def update_node_state(self, state):
        self.nodeState.state = state
        logger.info('Status changed to %s', self.nodeState.state)
        if self.nodeState.state == NState.synced:
            self.nodeState.epoch_diff = 0
            self.last_pos_cycle = time.time()
            self.restart_post_block_logic()
        elif self.nodeState.state == NState.unsynced:
            self.last_bk_time = time.time()
            self.restart_unsynced_logic()
        elif self.nodeState.state == NState.forked:
            self.stop_post_block_logic()
        elif self.nodeState.state == NState.syncing:
            self.last_pb_time = time.time()

    def stop_monitor_bk(self):
        try:
            reactor.monitor_bk.cancel()
        except Exception:  # No need to log this exception
            pass

    def restart_monitor_bk(self, delay=60):
        self.stop_monitor_bk()
        reactor.monitor_bk = reactor.callLater(delay, self.monitor_bk)

    def monitor_bk(self):
        time_diff = time.time() - self.last_pos_cycle
        if (self.nodeState.state == NState.synced or self.nodeState.state == NState.unsynced) and 90 < time_diff:
            if self.nodeState.state == NState.synced:
                self.stop_post_block_logic()
                self.reset_everything()
                self.update_node_state(NState.unsynced)
                self.epoch_diff = -1
            elif time.time() - self.last_bk_time > 120:
                self.last_pos_cycle = time.time()
                logger.info(' POS cycle activated by monitor_bk() ')
                self.update_node_state(NState.synced)

        if self.nodeState.state == NState.syncing and time.time() - self.last_pb_time > 60:
            self.stop_post_block_logic()
            self.reset_everything()
            self.update_node_state(NState.unsynced)
            self.epoch_diff = -1
        reactor.monitor_bk = reactor.callLater(60, self.monitor_bk)

    def peers_blockheight_headerhash(self):
        for peer in self.p2pFactory.peers:
            peer.fetch_headerhash_n(self.chain.m_blockheight())

    def check_fork_status(self):
        current_height = self.chain.m_blockheight()
        block_hash_counter = Counter()
        for peer in self.p2pFactory.peers:
            if current_height in peer.blocknumber_headerhash.keys():
                block_hash_counter[peer.blocknumber_headerhash[current_height]] += 1

        blockhash = block_hash_counter.most_common(1)
        if blockhash:
            blockhash = blockhash[0][0]
            actual_blockhash = self.chain.m_get_block(current_height).blockheader.headerhash
            if actual_blockhash != blockhash:
                logger.info('Blockhash did not match in peers_blockheight()')
                logger.info('Local blockhash - %s', actual_blockhash)
                logger.info('Consensus blockhash - %s', blockhash)
                fork_recovery(current_height, self.chain, self.randomize_headerhash_fetch)
                return True
        return

    def peers_blockheight(self):
        if self.nodeState.state == NState.syncing:
            return
        if self.check_fork_status():
            return

        block_height_counter = Counter()

        for peer in self.p2pFactory.peers:
            block_height_counter[peer.blockheight] += 1

        blocknumber = block_height_counter.most_common(1)
        if not blocknumber:
            return  # TODO : Re-Schedule with delay

        blocknumber = blocknumber[0][0]

        if blocknumber > self.chain.height():  # chain.m_blockheight():  len(chain.m_blockchain)
            # pending_blocks['target'] = blocknumber
            logger.info('Calling downloader from peers_blockheight due to no POS CYCLE %s', blocknumber)
            logger.info('Download block from %s to %s', self.chain.height() + 1, blocknumber)
            self.last_pb_time = time.time()
            self.update_node_state(NState.syncing)
            self.randomize_block_fetch(self.chain.height() + 1)
        return

    def schedule_peers_blockheight(self, delay=100):
        try:
            reactor.peers_blockheight.cancel()
        except Exception:  # No need to log this exception
            pass

        reactor.peers_blockheight = reactor.callLater(delay, self.peers_blockheight)
        try:
            reactor.peers_blockheight_headerhash.cancel()
        except Exception:  # No need to log this exception
            pass

        reactor.peers_blockheight_headerhash = reactor.callLater(70, self.peers_blockheight_headerhash)

    # pos functions. an asynchronous loop.

    # first block 1 is created with the stake list for epoch 0 decided from circulated st transactions

    def pre_pos_1(self, data=None):  # triggered after genesis for block 1..
        logger.info('pre_pos_1')
        # are we a staker in the stake list?

        if self.chain.mining_address in self.chain.m_blockchain[0].stake_list:
            logger.info('mining address: %s in the genesis.stake_list', self.chain.mining_address)

            hashchain(self.chain.my[0][1], epoch=0)
            self.chain.hash_chain = self.chain.my[0][1].hc
            self.chain.block_chain_buffer.hash_chain[0] = self.chain.my[0][1].hc

            logger.info('hashchain terminator: %s', self.chain.my[0][1].hc_terminator)
            st = StakeTransaction().create(blocknumber=0,
                                           xmss=self.chain.my[0][1],
                                           hashchain_terminator=self.chain.my[0][1].hc_terminator,
                                           first_hash=self.chain.my[0][1].hc[-1][-2],
                                           balance=self.chain.state.state_balance(self.chain.mining_address))
            self.chain.wallet.f_save_winfo()
            self.chain.add_tx_to_pool(st)
            # send the stake tx to generate hashchain terminators for the staker addresses..
            self.p2pFactory.send_st_to_peers(st)
            logger.info('await delayed call to build staker list from genesis')
            reactor.callLater(5, self.pre_pos_2, st)
            return

        logger.info('not in stake list..no further pre_pos_x calls')
        return

    def pre_pos_2(self, data=None):
        logger.info('pre_pos_2')
        if self.chain.height() >= 1:
            return
        # assign hash terminators to addresses and generate a temporary stake list ordered by st.hash..

        tmp_list = []

        for tx in self.chain.transaction_pool:
            if tx.subtype == transaction.TX_SUBTYPE_STAKE:
                if tx.txfrom in self.chain.m_blockchain[0].stake_list:
                    tmp_list.append([tx.txfrom, tx.hash, 0, tx.first_hash, genesis_info[tx.txfrom]])

        # required as doing chain.stake_list.index(s) which will result into different number on different server
        self.chain.block_chain_buffer.epoch_seed = self.chain.state.calc_seed(tmp_list)
        self.chain.stake_list = sorted(tmp_list,
                                       key=lambda staker: self.chain.score(stake_address=staker[0],
                                                                           reveal_one=sha256(str(staker[1])),
                                                                           balance=staker[4],
                                                                           seed=self.chain.block_chain_buffer.epoch_seed))

        logger.info('genesis stakers ready = %s / %s', len(self.chain.stake_list), config.dev.minimum_required_stakers)
        logger.info('node address: %s', self.chain.mining_address)

        if len(self.chain.stake_list) < config.dev.minimum_required_stakers:  # stake pool still not full..reloop..
            self.p2pFactory.send_st_to_peers(data)
            logger.info('waiting for stakers.. retry in 5s')
            reactor.callID = reactor.callLater(5, self.pre_pos_2, data)
            return

        if self.chain.mining_address == self.chain.stake_list[0][0]:
            logger.info('designated to create block 1: building block..')

            # create the genesis block 2 here..
            my_hash_chain, _ = self.chain.select_hashchain(self.chain.m_blockchain[-1].blockheader.headerhash,
                                                           self.chain.mining_address, self.chain.my[0][1].hc,
                                                           blocknumber=1)
            b = self.chain.m_create_block(my_hash_chain[-2])
            self.pre_block_logic(b)
        else:
            logger.info('await block creation by stake validator: %s', self.chain.stake_list[0][0])
            self.last_bk_time = time.time()
            self.restart_unsynced_logic()
        return

    def process_transactions(self, num):
        tmp_num = num
        for tx in self.chain.pending_tx_pool:
            tmp_num -= 1
            tx_peer = tx[1]
            tx = tx[0]
            if not tx.validate_tx():
                logger.info('>>>TX %s failed validate_tx', tx.txhash)
                continue

            block_chain_buffer = self.chain.block_chain_buffer
            tx_state = block_chain_buffer.get_stxn_state(blocknumber=block_chain_buffer.height(),
                                                         addr=tx.txfrom)
            isValidState = tx.state_validate_tx(
                tx_state=tx_state,
                transaction_pool=self.chain.transaction_pool
            )
            if not isValidState:
                logger.info('>>>TX %s failed state_validate', tx.txhash)
                continue

            logger.info('>>>TX - %s from - %s relaying..', tx.txhash, tx_peer.transport.getPeer().host)
            self.chain.add_tx_to_pool(tx)

            txn_msg = tx_peer.wrap_message('TX', tx.transaction_to_json())
            for peer in tx_peer.factory.peer_connections:
                if peer != tx_peer:
                    peer.transport.write(txn_msg)

        for i in range(num - tmp_num):
            del self.chain.pending_tx_pool[0]
            del self.chain.pending_tx_pool_hash[0]

    # create new block..

    def create_new_block(self, winner, reveals, vote_hashes, last_block_number):
        logger.info('create_new_block #%s', (last_block_number + 1))
        block_obj = self.chain.create_stake_block(winner, reveals, vote_hashes, last_block_number)

        return block_obj

    def reset_everything(self, data=None):
        logger.info('** resetting loops and emptying chain.stake_reveal_one and chain.expected_winner ')
        for r in self.chain.stake_reveal_one:
            msg_hash = r[5]
            self.master_mr.deregister(msg_hash, 'R1')

        del self.chain.stake_reveal_one[:]
        return

    def filter_reveal_one_two(self, blocknumber=None):
        if not blocknumber:
            blocknumber = self.chain.m_blockchain[-1].blockheader.blocknumber

        self.chain.stake_reveal_one = filter(lambda s: s[2] > blocknumber,
                                             self.chain.stake_reveal_one)

        return

    # TODO: Incomplete fn, use to select the maximum blockheight by consensus
    def select_blockheight_by_consensus(self):
        block_height_counter = Counter()
        # for identity in self.fmbh_allowed_peers:
        #    block_height_counter[s[2]] += 1
        target_block_height = block_height_counter.most_common(1)

        if len(target_block_height) == 0:
            return None

        last_selected_height = target_block_height[0][0]
        return last_selected_height

    '''
    Unsynced Logic
    1. Request the maximum blockheight and pass block number X
    2. Peers respond with their chain height, headerhash and the headerhash of block number X
    3. The unsynced node selects the most common chain height and matches the headerhash of block number X
    4. If the headerhash of block number X doesn't match, change state to Forked
    5. If the headerhash of block number X matches, download blocks from those selected peers
    '''

    def restart_unsynced_logic(self, delay=0):
        try:
            reactor.unsynced_logic.cancel()
        except Exception:  # No need to log this exception
            pass

        reactor.unsynced_logic = reactor.callLater(delay, self.unsynced_logic)

    def unsynced_logic(self):
        if self.nodeState.state == NState.synced:
            return

        self.fmbh_blockhash_peers = {}
        self.fmbh_allowed_peers = {}
        for peer in self.p2pFactory.peer_connections:
            self.fmbh_allowed_peers[peer.identity] = None
            peer.fetch_FMBH()
        reactor.unsynced_logic = reactor.callLater(20, self.start_download)

    def start_download(self):
        # add peers and their identity to requested list
        # FMBH
        if self.nodeState.state == NState.synced:
            return
        logger.info('Checking Download..')
        '''
        global fmbh_blockhash_peers
        max_height = None
        selected_blockhash = None
        for blockheaderhash in fmbh_blockhash_peers:
            if fmbh_blockhash_peers[blockheaderhash]['blocknumber']>max_height:
                max_height = fmbh_blockhash_peers[blockheaderhash]['blocknumber']
                selected_blockhash = blockheaderhash
        for peer in fmbh_blockhash_peers[selected_blockhash]['peers']:
            f.target_peers = {}
            f.target_peers[peer.identity] = peer
        
        if max_height == None or max_height<=chain.height():
            chain.state.update(NState.synced)
            return
        
        chain.state.update(NState.syncing)
        pending_blocks['start_block'] = chain.m_blockchain[-1].blockheader.blocknumber
        pending_blocks['target'] = fmbh_blockhash_peers[selected_blockhash]['blocknumber']
        pending_blocks['headerhash'] = selected_blockhash
        randomize_block_fetch(chain.height() + 1)
        '''
        tmp_max = -1
        max_headerhash = None
        for headerhash in self.fmbh_blockhash_peers:
            if self.fmbh_blockhash_peers[headerhash]['blocknumber'] > self.chain.height():
                if len(self.fmbh_blockhash_peers[headerhash]['peers']) > tmp_max:
                    tmp_max = len(self.fmbh_blockhash_peers[headerhash]['peers'])
                    max_headerhash = headerhash

        # Adding all peers
        # TODO only trusted peer
        # for peer in self.p2pFactory.peers:
        if not max_headerhash:
            logger.info('No peers responded FMBH request')
            return
        for peer in self.fmbh_blockhash_peers[max_headerhash]['peers']:
            self.p2pFactory.target_peers[peer.identity] = peer
        self.update_node_state(NState.syncing)
        self.randomize_block_fetch(self.chain.height() + 1)

    def pre_block_logic(self, block, peer_identity=None):
        if len(self.chain.m_blockchain) == 0:
            self.chain.m_read_chain()

        blocknumber = block.blockheader.blocknumber
        chain_buffer_height = self.chain.block_chain_buffer.height()

        if blocknumber <= self.chain.height():
            return False

        if self.nodeState.state == NState.synced:
            if self.chain.block_chain_buffer.add_block(block):
                self.p2pFactory.send_block_to_peers(block, peer_identity)
        else:
            if chain_buffer_height + 1 == blocknumber:
                if blocknumber > 1 and self.chain.block_chain_buffer.add_block(block):
                    self.p2pFactory.send_block_to_peers(block, peer_identity)
                elif blocknumber == 1 and self.chain.block_chain_buffer.add_block_mainchain(block):
                    self.p2pFactory.send_block_to_peers(block, peer_identity)
                self.update_node_state(NState.synced)
            else:
                self.chain.block_chain_buffer.add_pending_block(block)

        if self.nodeState.state == NState.synced:
            if chain_buffer_height + 1 == blocknumber:
                self.last_pos_cycle = time.time()
                block_timestamp = int(block.blockheader.timestamp)
                curr_time = int(self.ntp.getTime())
                delay = config.dev.POS_delay_after_block - min(config.dev.POS_delay_after_block,
                                                               max(0, curr_time - block_timestamp))

                self.restart_post_block_logic(delay)

        return True

    def stop_post_block_logic(self, delay=0):
        try:
            reactor.post_block_logic.cancel()
            reactor.prepare_winners.cancel()
        except Exception:  # No need to log this exception
            pass

    def restart_post_block_logic(self, delay=0):
        self.stop_post_block_logic()
        reactor.post_block_logic = reactor.callLater(delay,
                                                     self.post_block_logic)

    def post_block_logic(self):
        """
            In the post block logic we initiate the next POS cycle:
            send R1, send ST, reset POS flags and remove unnecessary
            messages from chain.stake_reveal_one and _two.
        :return:
        """
        self.filter_reveal_one_two()

        our_reveal = None
        blocknumber = self.chain.block_chain_buffer.height() + 1

        if self.p2pFactory.stake:
            tmp_stake_list = [
                s[0] for s in self.chain.block_chain_buffer.stake_list_get(blocknumber)
            ]
            if self.chain.mining_address in tmp_stake_list:
                our_reveal = self.p2pFactory.send_stake_reveal_one(blocknumber)
                self.schedule_prepare_winners(our_reveal, blocknumber - 1, 30)

            next_stake_list = self.chain.block_chain_buffer.next_stake_list_get(blocknumber)
            next_stake_first_hash = {}
            for s in next_stake_list:
                next_stake_first_hash[s[0]] = s[3]

            epoch = blocknumber // config.dev.blocks_per_epoch
            epoch_blocknum = blocknumber - epoch * config.dev.blocks_per_epoch

            if epoch_blocknum < config.dev.stake_before_x_blocks and self.chain.mining_address not in next_stake_first_hash:
                diff = max(1, (
                    (config.dev.stake_before_x_blocks - epoch_blocknum + 1) * int(1 - config.dev.st_txn_safety_margin)))
                if random.randint(1, diff) == 1:
                    self.make_st_tx(blocknumber, None)
            elif epoch_blocknum >= config.dev.stake_before_x_blocks - 1 and self.chain.mining_address in next_stake_first_hash:
                if next_stake_first_hash[self.chain.mining_address] is None:
                    threshold_blocknum = self.chain.state.get_staker_threshold_blocknum(next_stake_list,
                                                                                        self.chain.mining_address)
                    max_threshold_blocknum = config.dev.blocks_per_epoch
                    if threshold_blocknum == config.dev.low_staker_first_hash_block:
                        max_threshold_blocknum = config.dev.high_staker_first_hash_block

                    if threshold_blocknum - 1 <= epoch_blocknum < max_threshold_blocknum - 1:
                        diff = max(1, (
                            (max_threshold_blocknum - epoch_blocknum + 1) * int(1 - config.dev.st_txn_safety_margin)))
                        if random.randint(1, diff) == 1:
                            my = deepcopy(self.chain.my[0][1])
                            hashchain(my, epoch=epoch + 1)
                            self.make_st_tx(blocknumber, my.hc[-1][-2])

        return

    def make_st_tx(self, blocknumber, first_hash):
        balance = self.chain.state.state_balance(self.chain.mining_address)
        if balance < config.dev.minimum_staking_balance_required:
            logger.warning('Staking not allowed due to insufficient balance')
            logger.warning('Balance %s', balance)
            return

        st = StakeTransaction().create(
            blocknumber,
            self.chain.my[0][1],
            first_hash=first_hash,
            balance=balance
        )
        self.p2pFactory.send_st_to_peers(st)
        self.chain.wallet.f_save_winfo()
        for num in range(len(self.chain.transaction_pool)):
            t = self.chain.transaction_pool[num]
            if t.subtype == transaction.TX_SUBTYPE_STAKE and st.hash == t.hash:
                if st.get_message_hash() == t.get_message_hash():
                    return
                self.chain.remove_tx_from_pool(t)
                break

        self.chain.add_tx_to_pool(st)

    def schedule_prepare_winners(self, our_reveal, last_block_number, delay=0):
        try:
            reactor.prepare_winners.cancel()
        except Exception:  # No need to log this Exception
            pass

        reactor.prepare_winners = reactor.callLater(
            delay,
            self.prepare_winners,
            our_reveal=our_reveal,
            last_block_number=last_block_number)

    def prepare_winners(self, our_reveal, last_block_number):
        if not self.nodeState.state == NState.synced:
            return
        filtered_reveal_one = []
        reveals = []
        vote_hashes = []
        next_block_num = last_block_number + 1
        for s in self.chain.stake_reveal_one:
            tmp_strongest_headerhash = self.chain.block_chain_buffer.get_strongest_headerhash(last_block_number)
            if s[1] == tmp_strongest_headerhash and s[2] == next_block_num:
                filtered_reveal_one.append(s)
                reveals.append(s[3])
                vote_hashes.append(s[5])

        self.restart_post_block_logic(30)

        if len(filtered_reveal_one) <= 1:
            logger.info('only received one reveal for this block.. blocknum #%s', next_block_num)
            return

        epoch = next_block_num / config.dev.blocks_per_epoch  # +1 = next block
        seed = self.chain.block_chain_buffer.get_epoch_seed(next_block_num)
        winners = self.chain.select_winners(filtered_reveal_one,
                                            topN=3,
                                            seed=seed)

        # reactor.process_blocks = reactor.callLater(30, process_blocks, winners=winners, our_reveal=our_reveal)

        if not (self.p2pFactory.stake and our_reveal):
            return

        if our_reveal in winners:
            block = self.create_new_block(our_reveal,
                                          reveals,
                                          vote_hashes,
                                          last_block_number)
            self.pre_block_logic(block)  # broadcast this block

        if self.chain.pending_tx_pool:
            if len(self.chain.transaction_pool) < 10:
                logger.info('Processing TXNs if any')
                self.process_transactions(5)

    def randomize_block_fetch(self, blocknumber):
        if self.nodeState.state != NState.syncing or blocknumber <= self.chain.height():
            return

        if len(self.p2pFactory.target_peers.keys()) == 0:
            logger.info(' No target peers found.. stopping download')
            return

        reactor.download_monitor = reactor.callLater(20,
                                                     self.randomize_block_fetch, blocknumber)

        random_peer = self.p2pFactory.target_peers[random.choice(self.p2pFactory.target_peers.keys())]
        random_peer.fetch_block_n(blocknumber)

    def randomize_headerhash_fetch(self, block_number):
        if self.nodeState.state != NState.forked:
            return
        if block_number not in fork.pending_blocks or fork.pending_blocks[block_number][1] <= 10:  # retry only 11 times
            headerhash_monitor = reactor.callLater(15, self.randomize_headerhash_fetch, block_number)
            if len(self.p2pFactory.peers) > 0:
                try:
                    if len(self.p2pFactory.fork_target_peers) == 0:
                        for peer in self.p2pFactory.peers:
                            self.p2pFactory.fork_target_peers[peer.identity] = peer
                    if len(self.p2pFactory.fork_target_peers) > 0:
                        random_peer = self.p2pFactory.fork_target_peers[
                            random.choice(
                                self.p2pFactory.fork_target_peers.keys()
                            )
                        ]
                        count = 0
                        if block_number in fork.pending_blocks:
                            count = fork.pending_blocks[block_number][1] + 1
                        fork.pending_blocks[block_number] = [
                            random_peer.identity, count, None, headerhash_monitor
                        ]
                        random_peer.fetch_headerhash_n(block_number)
                except Exception as e:
                    logger.warning('Exception at randomize_headerhash_fetch %s', e)
            else:
                logger.info('No peers connected.. Will try again... randomize_headerhash_fetch: %s', block_number)
        else:
            self.update_node_state(NState.unsynced)

    def blockheight_map(self):
        """
            Blockheight map for connected nodes: when the blockheight seems up to date after a sync or error,
            we check all connected nodes to ensure they are all on the same chain/height.
            Note: may not return correctly during block propagation.
            Once working, alter to identify forks better.
        :return:
        """
        # i = [block_number, headerhash, self.transport.getPeer().host]

        logger.info('blockheight_map:')
        logger.info(self.chain.blockheight_map)

        # first strip out any laggards..
        self.chain.blockheight_map = filter(
            lambda q: q[0] >= self.chain.m_blockheight(),
            self.chain.blockheight_map
        )

        result = True

        # next identify any node entries which are not exactly correct..

        for s in self.chain.blockheight_map:
            if s[0] == self.chain.m_blockheight():
                if s[1] == self.chain.m_blockchain[-1].blockheader.headerhash:
                    logger.info(('node: ', s[2], '@', s[0], 'w/:', s[1], 'OK'))
            elif s[0] > self.chain.m_blockheight():
                logger.info(('warning..', s[2], 'at blockheight', s[0]))
                result = False

        # wipe it..

        del self.chain.blockheight_map[:]

        return result
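
Several steps in the POS class above (check_fork_status, peers_blockheight, and the Unsynced Logic described in its docstring) reduce to the same pattern: count what the peers report with collections.Counter and act on the most common value. A small self-contained sketch of that selection step, with made-up peer data:

# Self-contained sketch of the "most common value wins" selection used by
# check_fork_status() and peers_blockheight() above; the peer data is made up.
from collections import Counter

peer_headerhash_at_height = {
    'peer-a': b'hash-1',
    'peer-b': b'hash-1',
    'peer-c': b'hash-2',
}

block_hash_counter = Counter(peer_headerhash_at_height.values())
consensus_headerhash, votes = block_hash_counter.most_common(1)[0]

local_headerhash = b'hash-2'
if local_headerhash != consensus_headerhash:
    print('Local chain disagrees with %d peers; fork recovery would start here' % votes)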
Example #13
class P2PFactory(ServerFactory):
    protocol = P2PProtocol

    def __init__(self, chain_manager: ChainManager, sync_state: SyncState,
                 qrl_node):

        self.master_mr = MessageReceipt()
        self.pow = None
        self.sync_state = sync_state

        self._ntp = ntp
        self._qrl_node = qrl_node
        self._chain_manager = chain_manager

        self._syncing_enabled = False
        self._target_peer = None
        self._target_node_header_hash = None
        self._last_requested_block_idx = None

        self._genesis_processed = False
        self._peer_connections = []
        self._synced_peers_protocol = set()
        self._txn_processor_running = False

        self.peer_blockheight = dict()

        reactor.callLater(180, self.monitor_connections)

        self.p2p_msg_priority = {
            qrllegacy_pb2.LegacyMessage.VE: 0,
            qrllegacy_pb2.LegacyMessage.PL: 0,
            qrllegacy_pb2.LegacyMessage.PONG: 0,

            ######################
            qrllegacy_pb2.LegacyMessage.MR: 2,
            qrllegacy_pb2.LegacyMessage.SFM: 1,
            qrllegacy_pb2.LegacyMessage.BK: 1,
            qrllegacy_pb2.LegacyMessage.FB: 1,
            qrllegacy_pb2.LegacyMessage.PB: 1,
            qrllegacy_pb2.LegacyMessage.BH: 1,

            ############################
            qrllegacy_pb2.LegacyMessage.TX: 1,
            qrllegacy_pb2.LegacyMessage.MT: 1,
            qrllegacy_pb2.LegacyMessage.TK: 1,
            qrllegacy_pb2.LegacyMessage.TT: 1,
            qrllegacy_pb2.LegacyMessage.LT: 1,
            qrllegacy_pb2.LegacyMessage.SL: 1,
            qrllegacy_pb2.LegacyMessage.EPH: 3,
            qrllegacy_pb2.LegacyMessage.SYNC: 0,
            qrllegacy_pb2.LegacyMessage.CHAINSTATE: 0,
            qrllegacy_pb2.LegacyMessage.HEADERHASHES: 1,
            qrllegacy_pb2.LegacyMessage.P2P_ACK: 0,
        }

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################

    @property
    def has_synced_peers(self):
        return len(self._synced_peers_protocol) > 0

    def get_random_peer(self):
        max_cumulative_difficulty = 0
        for connection_id in self.peer_blockheight:
            max_cumulative_difficulty = max(
                max_cumulative_difficulty,
                self.peer_blockheight[connection_id][2])

        connection_ids = []
        for connection_id in self.peer_blockheight:
            if self.peer_blockheight[connection_id][2] == max_cumulative_difficulty:
                connection_ids.append(connection_id)

        selected_peer_connections = []
        for connection_id in connection_ids:
            for peer_conn in self._peer_connections:
                if peer_conn.connection_id == connection_id:
                    selected_peer_connections.append(peer_conn)
        if len(selected_peer_connections) == 0 or max_cumulative_difficulty == 0:
            return None

        return random.sample(selected_peer_connections, 1)[0]

    def update_peer_blockheight(self, connection_id, block_number, headerhash,
                                cumulative_difficulty):
        self.peer_blockheight[connection_id] = [
            block_number, headerhash,
            int(UInt256ToString(cumulative_difficulty))
        ]

    def request_peer_blockheight(self):
        for peer in self._peer_connections:
            msg = qrllegacy_pb2.LegacyMessage(
                func_name=qrllegacy_pb2.LegacyMessage.BH,
                bhData=qrl_pb2.BlockHeightData(block_number=0))
            peer.send(msg)

    def set_peer_synced(self, conn_protocol, synced: bool):
        if synced:
            self._synced_peers_protocol.add(conn_protocol)
        else:
            self._synced_peers_protocol.discard(conn_protocol)

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################

    @property
    def connections(self):
        return len(self._peer_connections)

    @property
    def synced(self):
        return self.pow.sync_state.state == ESyncState.synced

    @property
    def reached_conn_limit(self):
        return len(self._peer_connections) >= config.user.max_peers_limit

    def get_connected_peer_ips(self):
        # FIXME: Convert self._peer_connections to set
        return set([peer.peer_ip for peer in self._peer_connections])

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################

    @property
    def chain_height(self):
        return self._chain_manager.height

    def get_last_block(self):
        return self._chain_manager.get_last_block()

    def get_headerhashes(self, start_blocknumber):
        return self._chain_manager.get_headerhashes(start_blocknumber)

    def get_cumulative_difficulty(self):
        return self._chain_manager.get_cumulative_difficulty()

    def get_block(self, block_number):
        return self._chain_manager.get_block_by_number(block_number)

    def block_received(self, source, block: Block):
        self.pow.last_pb_time = time.time()
        logger.info('>>> Received Block #%d %s', block.block_number,
                    bin2hstr(block.headerhash))

        if source != self._target_peer:
            logger.warning('Received block from unexpected peer')
            logger.warning('Expected peer: %s',
                           self._target_peer.connection_id)
            logger.warning('Found peer: %s', source.connection_id)
            return

        if block.block_number != self._last_requested_block_idx:
            logger.warning('Did not match %s', self._last_requested_block_idx)
            return

        target_start_blocknumber = self._target_node_header_hash.block_number
        expected_headerhash = self._target_node_header_hash.headerhashes[
            block.block_number - target_start_blocknumber]
        if block.headerhash != expected_headerhash:
            logger.warning('Did not match headerhash')
            logger.warning('Expected headerhash %s', expected_headerhash)
            logger.warning('Found headerhash %s', block.headerhash)
            return

        # FIXME: This check should not be necessary
        if not self._chain_manager.add_block(block):
            logger.warning('Failed to Add Block')
            return

        try:
            reactor.download_monitor.cancel()
        except Exception as e:
            logger.warning("PB: %s", e)

        if self.is_syncing_finished():
            return

        self._last_requested_block_idx += 1
        if self.is_syncing_finished():
            return

        self.peer_fetch_block()

    def ban_peer(self, peer_obj):
        self._qrl_node.ban_peer(peer_obj)

    def is_syncing(self) -> bool:
        return self._syncing_enabled

    def is_syncing_finished(self, force_finish=False):
        curr_index = self._last_requested_block_idx - self._target_node_header_hash.block_number + 1
        if curr_index == len(self._target_node_header_hash.headerhashes) or force_finish:
            self._last_requested_block_idx = None
            self._target_node_header_hash = None
            self._target_peer = None
            self._syncing_enabled = False
            return True

        return False

    def peer_fetch_block(self, retry=0):
        node_header_hash = self._target_node_header_hash
        curr_index = self._last_requested_block_idx - node_header_hash.block_number

        block_headerhash = node_header_hash.headerhashes[curr_index]
        block = self._chain_manager.state.get_block(block_headerhash)

        if not block:
            if retry >= 5:
                logger.debug('Retry Limit Hit')
                self._qrl_node.ban_peer(self._target_peer)
                self.is_syncing_finished(force_finish=True)
                return
        else:
            while block and curr_index + 1 < len(
                    node_header_hash.headerhashes):
                self._last_requested_block_idx += 1
                curr_index = self._last_requested_block_idx - node_header_hash.block_number
                block_headerhash = node_header_hash.headerhashes[curr_index]
                block = self._chain_manager.state.get_block(block_headerhash)

            retry = 0

        if self.is_syncing_finished():
            return

        self._target_peer.send_fetch_block(self._last_requested_block_idx)
        reactor.download_monitor = reactor.callLater(20, self.peer_fetch_block,
                                                     retry + 1)

    def compare_and_sync(self, peer, node_header_hash: qrl_pb2.NodeHeaderHash):
        if self._syncing_enabled:
            logger.info('>> Ignoring compare_and_sync Syncing Enabled')
            return
        last_block = self._chain_manager.get_last_block()
        node_last_block_number = node_header_hash.block_number + len(
            node_header_hash.headerhashes) - 1
        last_block_number = min(last_block.block_number,
                                node_last_block_number)
        if last_block_number < node_header_hash.block_number:
            return
        fork_block_number = last_block.block_number + 1
        fork_found = False
        for i in range(last_block_number, node_header_hash.block_number - 1,
                       -1):
            block = self._chain_manager.get_block_by_number(i)
            if block.headerhash == node_header_hash.headerhashes[
                    i - node_header_hash.block_number]:
                break
            fork_block_number = i
            fork_found = True

        if fork_found or (last_block.block_number < node_last_block_number):
            self._target_peer = peer
            self._target_node_header_hash = node_header_hash
            self._last_requested_block_idx = fork_block_number
            self._syncing_enabled = True
            self.peer_fetch_block()

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################

    def request_full_message(self, mr_data: qrllegacy_pb2.MRData):
        """
        Request Full Message
        This function requests the full message corresponding to
        the Message Receipt received.
        :return:
        """

        # FIXME: Again, breaking encapsulation
        # FIXME: Huge amount of lookups in dictionaries
        msg_hash = mr_data.hash

        if msg_hash in self.master_mr._hash_msg:
            if msg_hash in self.master_mr.requested_hash:
                del self.master_mr.requested_hash[msg_hash]
            return

        if msg_hash not in self.master_mr.requested_hash:
            return

        peers_list = self.master_mr.requested_hash[
            msg_hash].peers_connection_list
        message_request = self.master_mr.requested_hash[msg_hash]
        for peer in peers_list:
            if peer in message_request.already_requested_peers:
                continue
            message_request.already_requested_peers.append(peer)

            msg = qrllegacy_pb2.LegacyMessage(
                func_name=qrllegacy_pb2.LegacyMessage.SFM,
                mrData=qrllegacy_pb2.MRData(hash=mr_data.hash,
                                            type=mr_data.type))

            peer.send(msg)

            call_later_obj = reactor.callLater(
                config.dev.message_receipt_timeout, self.request_full_message,
                mr_data)

            message_request.callLater = call_later_obj
            return

        # If execution reaches this line, it means no peer was able to provide
        # the full message for this hash, thus the hash has to be deleted.
        # Moreover, negative points could be added to the peers for this behavior.
        if msg_hash in self.master_mr.requested_hash:
            del self.master_mr.requested_hash[msg_hash]

    ##############################################
    ##############################################
    ##############################################
    ##############################################
    # NOTE: PoW related.. broadcasting, etc. OBSOLETE

    def reset_processor_flag(self, _):
        self._txn_processor_running = False

    def reset_processor_flag_with_err(self, msg):
        logger.error('Exception in txn task')
        logger.error('%s', msg)
        self._txn_processor_running = False

    def add_unprocessed_txn(self, tx, ip) -> bool:
        if not self._chain_manager.tx_pool.update_pending_tx_pool(tx, ip):
            return False

        if not self._txn_processor_running:
            txn_processor = TxnProcessor(
                state=self._chain_manager.state,
                transaction_pool_obj=self._chain_manager.tx_pool,
                broadcast_tx=self.broadcast_tx)

            task_defer = TxnProcessor.create_cooperate(
                txn_processor).whenDone()
            task_defer.addCallback(self.reset_processor_flag) \
                .addErrback(self.reset_processor_flag_with_err)
            self._txn_processor_running = True

        return True

    def broadcast_tx(self, tx: TransferTransaction):
        logger.info('<<<Transmitting TX: %s', tx.txhash)

        if isinstance(tx, MessageTransaction):
            legacy_type = qrllegacy_pb2.LegacyMessage.MT
        elif isinstance(tx, TransferTransaction):
            legacy_type = qrllegacy_pb2.LegacyMessage.TX
        elif isinstance(tx, TokenTransaction):
            legacy_type = qrllegacy_pb2.LegacyMessage.TK
        elif isinstance(tx, TransferTokenTransaction):
            legacy_type = qrllegacy_pb2.LegacyMessage.TT
        elif isinstance(tx, LatticePublicKey):
            legacy_type = qrllegacy_pb2.LegacyMessage.LT
        elif isinstance(tx, SlaveTransaction):
            legacy_type = qrllegacy_pb2.LegacyMessage.SL
        else:
            raise ValueError('Invalid Transaction Type')
        self.register_and_broadcast(legacy_type, tx.get_message_hash(),
                                    tx.pbdata)

    def broadcast_ephemeral_message(self, encrypted_ephemeral):
        logger.info('<<<Broadcasting Encrypted Ephemeral Message')
        self._chain_manager.add_ephemeral_message(encrypted_ephemeral)
        self.register_and_broadcast(qrllegacy_pb2.LegacyMessage.EPH,
                                    encrypted_ephemeral.get_message_hash(),
                                    encrypted_ephemeral.pbdata)

    def broadcast_tx_relay(self, source_peer, tx):
        txn_msg = source_peer._wrap_message('TX', tx.to_json())
        for peer in self._peer_connections:
            if peer != source_peer:
                peer.transport.write(txn_msg)

    ##############################################
    ##############################################
    ##############################################
    ##############################################

    def broadcast_block(self, block: Block):
        # logger.info('<<<Transmitting block: ', block.headerhash)
        data = qrllegacy_pb2.MRData()
        data.stake_selector = block.transactions[0].public_key
        data.block_number = block.block_number
        data.prev_headerhash = bytes(block.prev_headerhash)

        self.register_and_broadcast(qrllegacy_pb2.LegacyMessage.BK,
                                    block.headerhash, block.pbdata, data)

    ##############################################
    ##############################################
    ##############################################
    ##############################################

    def register_and_broadcast(self,
                               msg_type,
                               msg_hash: bytes,
                               pbdata,
                               data=None):
        self.master_mr.register(msg_type, msg_hash, pbdata)
        self.broadcast(msg_type, msg_hash, data)

    def broadcast(self, msg_type, msg_hash: bytes, mr_data=None):
        """
        Broadcast
        This function sends the Message Receipt to all connected peers.
        :return:
        """
        ignore_peers = []
        if msg_hash in self.master_mr.requested_hash:
            ignore_peers = self.master_mr.requested_hash[
                msg_hash].peers_connection_list

        if not mr_data:
            mr_data = qrllegacy_pb2.MRData()

        mr_data.hash = msg_hash
        mr_data.type = msg_type
        data = qrllegacy_pb2.LegacyMessage(
            func_name=qrllegacy_pb2.LegacyMessage.MR, mrData=mr_data)

        for peer in self._peer_connections:
            if peer not in ignore_peers:
                peer.send(data)

    def broadcast_get_synced_state(self):
        # Request all peers to update their synced status
        self._synced_peers_protocol = set()
        for peer in self._peer_connections:
            peer.send_sync()

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    # Event handlers / Comms related

    def start_listening(self):
        reactor.listenTCP(9000, self)

    # NOTE: No need to refactor, it is obsolete
    def clientConnectionLost(self, connector, reason):  # noqa
        logger.debug('connection lost: %s', reason)

    def clientConnectionFailed(self, connector, reason):
        logger.debug('connection failed: %s', reason)

    def startedConnecting(self, connector):
        logger.debug('Started connecting: %s', connector)

    def add_connection(self, conn_protocol) -> bool:
        # TODO: Most of this can go the peer manager

        if self._qrl_node.is_banned(conn_protocol.peer_ip):
            conn_protocol.loseConnection()
            return False

        # FIXME: (For AWS) This could be problematic for other users
        # FIXME: identify nodes by a GUID?
        if config.dev.public_ip and conn_protocol.peer_ip == config.dev.public_ip:
            conn_protocol.loseConnection()
            return False

        if self.reached_conn_limit:
            # FIXME: Should we stop listening to avoid unnecessary load due to many connections?
            logger.info('Peer limit hit. Disconnecting client %s',
                        conn_protocol.peer_ip)
            conn_protocol.loseConnection()
            return False

        peer_list = self._qrl_node.peer_addresses
        if conn_protocol.peer_ip == conn_protocol.host_ip:
            if conn_protocol.peer_ip in peer_list:
                logger.info('Self in peer_list, removing..')
                peer_list.remove(conn_protocol.peer_ip)
                self._qrl_node.peer_manager.update_peer_addresses(peer_list)

            conn_protocol.loseConnection()
            return False

        self._peer_connections.append(conn_protocol)

        if conn_protocol.peer_ip not in peer_list:
            logger.debug('Adding to peer_list')
            peer_list.add(conn_protocol.peer_ip)
            self._qrl_node.peer_manager.update_peer_addresses(peer_list)

        logger.debug('>>> new peer connection : %s:%s ', conn_protocol.peer_ip,
                     str(conn_protocol.peer_port))

        return True

    def remove_connection(self, conn_protocol):
        if conn_protocol in self._peer_connections:
            self._peer_connections.remove(conn_protocol)

        if conn_protocol.connection_id in self.peer_blockheight:
            del self.peer_blockheight[conn_protocol.connection_id]

        self._synced_peers_protocol.discard(conn_protocol)

    def monitor_connections(self):
        reactor.callLater(180, self.monitor_connections)

        if len(self._peer_connections) == 0:
            logger.warning('No Connected Peer Found')
            reactor.callLater(60, self._qrl_node.connect_peers)
            return

        connected_peers_set = set()
        for conn_protocol in self._peer_connections:
            connected_peers_set.add(conn_protocol.peer_ip)

        for ip in config.user.peer_list:
            if ip not in connected_peers_set:
                self.connect_peer(ip)

    def connect_peer(self, peer_address):
        if peer_address not in self.get_connected_peer_ips():
            reactor.connectTCP(peer_address, 9000, self)
Exemplo n.º 18
0
class P2PFactory(ServerFactory):
    protocol = P2PProtocol

    def __init__(self, chain_manager: ChainManager, sync_state: SyncState,
                 qrl_node):

        self.master_mr = MessageReceipt()
        self.pow = None
        self.sync_state = sync_state

        self._ntp = ntp
        self._qrl_node = qrl_node
        self._chain_manager = chain_manager
        self._chain_manager.set_broadcast_tx(self.broadcast_tx)

        self._syncing_enabled = False
        self._target_channel = None
        self._target_node_header_hash = None
        self._last_requested_block_number = None

        self._genesis_processed = False
        self._peer_connections = []
        self._txn_processor_running = False

        self.peer_blockheight = dict()

        reactor.callLater(config.user.monitor_connections_interval,
                          self.monitor_connections)

        self.p2p_msg_priority = p2p_msg_priority

        # Maintains the list of ips in the queue that can be tried to form a new p2p connection
        self._peer_q = []

    def add_new_peers_to_peer_q(self, peer_list):
        """
        Adds peers to _peer_q, skipping any ip:port that is already queued
        or that already has an established connection
        """
        peer_set = set(peer_list)
        for peer_conn in self._peer_connections:
            ip_port = peer_conn.peer.full_address
            if ip_port in peer_set:
                peer_set.remove(ip_port)

        for ip_port in self._peer_q:
            if ip_port in peer_set:
                peer_set.remove(ip_port)

        self._peer_q.extend(peer_set)

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################

    def get_random_peer(self):
        # FIXME: Use a named tuple to improve readability?
        # FIXME: This probably can go to the peer manager
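        # Peers advertising the highest cumulative difficulty are candidates; pick one at random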
        max_cumulative_difficulty = 0
        for addr_remote in self.peer_blockheight:
            max_cumulative_difficulty = max(
                max_cumulative_difficulty,
                self.peer_blockheight[addr_remote][2])

        best_connection_ids = []
        for addr_remote in self.peer_blockheight:
            if self.peer_blockheight[addr_remote][2] == max_cumulative_difficulty:
                best_connection_ids.append(addr_remote)

        selected_peer_connections = []
        for addr_remote in best_connection_ids:
            for peer_conn in self._peer_connections:
                if peer_conn.peer.full_address == addr_remote:
                    selected_peer_connections.append(peer_conn)

        if not selected_peer_connections or max_cumulative_difficulty == 0:
            return None

        return random.sample(selected_peer_connections, 1)[0]

    def update_peer_blockheight(self, addr_remote, block_number, headerhash,
                                cumulative_difficulty):
        # FIXME: Use a named tuple to improve readability?
        self.peer_blockheight[addr_remote] = [
            block_number, headerhash,
            int(UInt256ToString(cumulative_difficulty))
        ]

    def request_peer_blockheight(self):
        for peer in self._peer_connections:
            msg = qrllegacy_pb2.LegacyMessage(
                func_name=qrllegacy_pb2.LegacyMessage.BH,
                bhData=qrl_pb2.BlockHeightData(block_number=0))
            peer.send(msg)

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################

    @property
    def num_connections(self):
        return len(self._peer_connections)

    @property
    def connections(self):
        return list(self._peer_connections)

    @property
    def synced(self):
        return self.pow.sync_state.state == ESyncState.synced

    @property
    def reached_conn_limit(self):
        return len(self._peer_connections) >= config.user.max_peers_limit

    def get_connected_peer_addrs(self):
        return set([peer.peer.full_address for peer in self._peer_connections])

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################

    @property
    def chain_height(self):
        return self._chain_manager.height

    def get_last_block(self):
        return self._chain_manager.last_block

    def get_headerhashes(self, start_blocknumber):
        return self._chain_manager.get_headerhashes(start_blocknumber)

    def get_cumulative_difficulty(self):
        return self._chain_manager.get_cumulative_difficulty()

    def get_block_by_number(self, block_number):
        return self._chain_manager.get_block_by_number(block_number)

    def is_block_present(self, header_hash: bytes) -> bool:
        if not self._chain_manager.get_block(header_hash):
            if header_hash not in self.pow.future_blocks:
                return False

        return True

    def block_received(self, source, block: Block):
        self.pow.last_pb_time = ntp.getTime()
        logger.info('>>> Received Block #%d %s', block.block_number,
                    bin2hstr(block.headerhash))

        if source != self._target_channel:
            if self._target_channel is None:
                logger.warning('Received block and target channel is None')
            else:
                logger.warning('Received block from unexpected peer')
                logger.warning('Expected peer: %s', self._target_channel.peer)
                logger.warning('Found peer: %s', source.peer)
            return

        if block.block_number != self._last_requested_block_number:
            logger.warning('Did not match %s',
                           self._last_requested_block_number)
            self._qrl_node.peer_manager.ban_channel(source)
            return

        target_start_blocknumber = self._target_node_header_hash.block_number
        expected_headerhash = self._target_node_header_hash.headerhashes[
            block.block_number - target_start_blocknumber]
        if block.headerhash != expected_headerhash:
            logger.warning('Did not match headerhash')
            logger.warning('Expected headerhash %s', expected_headerhash)
            logger.warning('Found headerhash %s', block.headerhash)
            self._qrl_node.peer_manager.ban_channel(source)
            return

        if not block.validate(self._chain_manager, self.pow.future_blocks):
            logger.warning('Syncing Failed: Block Validation Failed')
            self._qrl_node.peer_manager.ban_channel(source)
            return

        if self._chain_manager.add_block(block, check_stale=False):
            if self._chain_manager.last_block.headerhash == block.headerhash:
                self.pow.suspend_mining_timestamp = (
                    ntp.getTime() + config.dev.sync_delay_mining)
        else:
            logger.warning('Failed to Add Block')
            self._qrl_node.peer_manager.ban_channel(source)
            return

        try:
            reactor.download_monitor.cancel()
        except Exception as e:
            logger.warning("PB: %s", e)

        if self.is_syncing_finished():
            return

        self._last_requested_block_number += 1

        self.peer_fetch_block()

    def is_syncing(self) -> bool:
        return self._syncing_enabled

    def is_syncing_finished(self, force_finish=False):
        curr_index = self._last_requested_block_number - self._target_node_header_hash.block_number + 1
        if curr_index == len(
                self._target_node_header_hash.headerhashes) or force_finish:
            self._last_requested_block_number = None
            self._target_node_header_hash = None
            self._target_channel = None
            self._syncing_enabled = False
            return True

        return False

    def peer_fetch_block(self, retry=0):
        node_header_hash = self._target_node_header_hash
        curr_index = self._last_requested_block_number - node_header_hash.block_number

        block_headerhash = node_header_hash.headerhashes[curr_index]
        block = self._chain_manager.get_block(block_headerhash)

        if retry >= 1:
            logger.debug('Retry Limit Hit')
            self._qrl_node.peer_manager.ban_channel(self._target_channel)
            self.is_syncing_finished(force_finish=True)
            return

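        # Fast-forward past blocks that are already stored locally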
        while block and curr_index + 1 < len(node_header_hash.headerhashes):
            self._last_requested_block_number += 1
            curr_index = self._last_requested_block_number - node_header_hash.block_number
            block_headerhash = node_header_hash.headerhashes[curr_index]
            block = self._chain_manager.get_block(block_headerhash)

        if block and self.is_syncing_finished():
            return

        self._target_channel.send_fetch_block(
            self._last_requested_block_number)
        reactor.download_monitor = reactor.callLater(100,
                                                     self.peer_fetch_block,
                                                     retry + 1)

    def compare_and_sync(self, source_peer,
                         node_header_hash: qrl_pb2.NodeHeaderHash):
        if self._syncing_enabled:
            logger.info('>> Ignoring compare_and_sync Syncing Enabled')
            return
        last_block = self.get_last_block()
        node_last_block_number = node_header_hash.block_number + len(
            node_header_hash.headerhashes) - 1
        last_block_number = min(last_block.block_number,
                                node_last_block_number)
        if last_block_number < node_header_hash.block_number:
            return
        fork_block_number = last_block.block_number + 1
        fork_found = False
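        # Scan backwards for the first headerhash that still matches; blocks above it form the fork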
        for i in range(last_block_number, node_header_hash.block_number - 1,
                       -1):
            block = self._chain_manager.get_block_by_number(i)
            if block:
                if block.headerhash == node_header_hash.headerhashes[
                        i - node_header_hash.block_number]:
                    break
            fork_block_number = i
            fork_found = True

        if fork_found or (last_block.block_number < node_last_block_number):
            self._target_channel = source_peer
            self._target_node_header_hash = node_header_hash
            self._last_requested_block_number = fork_block_number
            self._syncing_enabled = True
            self.peer_fetch_block()

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################

    def request_full_message(self, mr_data: qrllegacy_pb2.MRData):
        """
        Request Full Message
        This function requests the full message corresponding to
        the Message Receipt received.
        :return:
        """

        # FIXME: Again, breaking encapsulation
        # FIXME: Huge amount of lookups in dictionaries
        msg_hash = mr_data.hash

        if msg_hash in self.master_mr._hash_msg:
            if msg_hash in self.master_mr.requested_hash:
                del self.master_mr.requested_hash[msg_hash]
            return

        if msg_hash not in self.master_mr.requested_hash:
            return

        peers_list = self.master_mr.requested_hash[
            msg_hash].peers_connection_list
        message_request = self.master_mr.requested_hash[msg_hash]
        for peer in peers_list:
            if peer in message_request.already_requested_peers:
                continue
            message_request.already_requested_peers.append(peer)

            msg = qrllegacy_pb2.LegacyMessage(
                func_name=qrllegacy_pb2.LegacyMessage.SFM,
                mrData=qrllegacy_pb2.MRData(hash=mr_data.hash,
                                            type=mr_data.type))

            peer.send(msg)

            call_later_obj = reactor.callLater(
                config.dev.message_receipt_timeout, self.request_full_message,
                mr_data)

            message_request.callLater = call_later_obj
            return

        # If execution reaches this line, it means no peer was able to provide
        # the full message for this hash, so the hash has to be deleted.
        # Moreover, negative points could be added to the peers for this behavior
        if msg_hash in self.master_mr.requested_hash:
            del self.master_mr.requested_hash[msg_hash]

    ##############################################
    ##############################################
    ##############################################
    ##############################################

    def reset_processor_flag(self, _):
        self._txn_processor_running = False

    def reset_processor_flag_with_err(self, msg):
        logger.error('Exception in txn task')
        logger.error('%s', msg)
        self._txn_processor_running = False

    def add_unprocessed_txn(self, tx, ip) -> bool:
        if tx.fee < config.user.transaction_minimum_fee:
            logger.info("Dropping Txn %s", bin2hstr(tx.txhash))
            logger.info("Reason: Fee %s is below threshold fee %s", tx.fee,
                        config.user.transaction_minimum_fee)
            return False

        if not self._chain_manager.tx_pool.update_pending_tx_pool(tx, ip):
            return False

        if not self._txn_processor_running:
            txn_processor = TxnProcessor(
                chain_manager=self._chain_manager,
                transaction_pool_obj=self._chain_manager.tx_pool,
                broadcast_tx=self.broadcast_tx)

            task_defer = TxnProcessor.create_cooperate(
                txn_processor).whenDone()
            task_defer.addCallback(self.reset_processor_flag) \
                .addErrback(self.reset_processor_flag_with_err)
            self._txn_processor_running = True

        return True

    ##############################################
    ##############################################
    ##############################################
    ##############################################

    def broadcast_tx(self, tx: TransferTransaction):
        logger.info('<<<Transmitting TX: %s', bin2hstr(tx.txhash))

        if isinstance(tx, MessageTransaction):
            legacy_type = qrllegacy_pb2.LegacyMessage.MT
        elif isinstance(tx, TransferTransaction):
            legacy_type = qrllegacy_pb2.LegacyMessage.TX
        elif isinstance(tx, TokenTransaction):
            legacy_type = qrllegacy_pb2.LegacyMessage.TK
        elif isinstance(tx, TransferTokenTransaction):
            legacy_type = qrllegacy_pb2.LegacyMessage.TT
        elif isinstance(tx, SlaveTransaction):
            legacy_type = qrllegacy_pb2.LegacyMessage.SL
        elif isinstance(tx, LatticeTransaction):
            legacy_type = qrllegacy_pb2.LegacyMessage.LT
        elif isinstance(tx, MultiSigCreate):
            legacy_type = qrllegacy_pb2.LegacyMessage.MC
        elif isinstance(tx, MultiSigSpend):
            legacy_type = qrllegacy_pb2.LegacyMessage.MS
        elif isinstance(tx, MultiSigVote):
            legacy_type = qrllegacy_pb2.LegacyMessage.MV
        else:
            raise ValueError('Invalid Transaction Type')
        self.register_and_broadcast(legacy_type, tx.get_message_hash(),
                                    tx.pbdata)

    def broadcast_block(self, block: Block):
        # logger.info('<<<Transmitting block: ', block.headerhash)
        data = qrllegacy_pb2.MRData()
        data.stake_selector = block.transactions[0].public_key
        data.block_number = block.block_number
        data.prev_headerhash = bytes(block.prev_headerhash)

        self.register_and_broadcast(qrllegacy_pb2.LegacyMessage.BK,
                                    block.headerhash, block.pbdata, data)

    def register_and_broadcast(self,
                               msg_type,
                               msg_hash: bytes,
                               pbdata,
                               data=None):
        self.master_mr.register(msg_type, msg_hash, pbdata)
        self.broadcast(msg_type, msg_hash, data)

    def broadcast(self, msg_type, msg_hash: bytes, mr_data=None):
        """
        Broadcast
        This function sends the Message Receipt to all connected peers.
        :return:
        """
        ignore_peers = []
        if msg_hash in self.master_mr.requested_hash:
            ignore_peers = self.master_mr.requested_hash[
                msg_hash].peers_connection_list

        if not mr_data:
            mr_data = qrllegacy_pb2.MRData()

        mr_data.hash = msg_hash
        mr_data.type = msg_type
        data = qrllegacy_pb2.LegacyMessage(
            func_name=qrllegacy_pb2.LegacyMessage.MR, mrData=mr_data)

        for peer in self._peer_connections:
            if peer not in ignore_peers:
                peer.send(data)

    def broadcast_get_synced_state(self):
        # Request all peers to update their synced status
        for peer in self._peer_connections:
            peer.send_sync()

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    # Event handlers / Comms related

    def start_listening(self):
        reactor.listenTCP(config.user.p2p_local_port, self)

    def clientConnectionLost(self, connector, reason):  # noqa
        logger.debug('connection lost: %s', reason)

    def clientConnectionFailed(self, connector, reason):
        logger.debug('connection failed: %s', reason)

    def startedConnecting(self, connector):
        logger.debug('Started connecting: %s', connector)

    def add_connection(self, conn_protocol) -> bool:
        # TODO: Most of this can go to the peer manager
        if self._qrl_node.peer_manager.is_banned(conn_protocol.peer):
            return False

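        # Count existing connections from the same IP to enforce the redundancy limit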
        redundancy_count = 0
        for conn in self._peer_connections:
            if conn.peer.ip == conn_protocol.peer.ip:
                redundancy_count += 1

        if config.user.max_redundant_connections >= 0:
            if redundancy_count >= config.user.max_redundant_connections:
                logger.info('Redundant Limit. Disconnecting client %s',
                            conn_protocol.peer)
                return False

        if self.reached_conn_limit:
            # FIXME: Should we stop listening to avoid unnecessary load due to many connections?
            logger.info('Peer limit hit. Disconnecting client %s',
                        conn_protocol.peer)
            return False

        # Remove your own ip address from the connection
        if conn_protocol.peer.ip == conn_protocol.host.ip and conn_protocol.peer.port == config.user.p2p_public_port:
            peer_list = [
                p for p in self._qrl_node.peer_manager.known_peer_addresses
                if p != conn_protocol.peer.full_address
            ]
            self._qrl_node.peer_manager.extend_known_peers(peer_list)
            return False

        self._peer_connections.append(conn_protocol)

        logger.debug('>>> new connection: %s ', conn_protocol.peer)
        return True

    def remove_connection(self, conn_protocol):
        if conn_protocol in self._peer_connections:
            self._peer_connections.remove(conn_protocol)

        if conn_protocol.peer.full_address in self.peer_blockheight:
            del self.peer_blockheight[conn_protocol.peer.full_address]

    def monitor_connections(self):
        reactor.callLater(config.user.monitor_connections_interval,
                          self.monitor_connections)

        if len(self._peer_connections) == 0:
            logger.warning('No Connected Peer Found')
            known_peers = self._qrl_node.peer_manager.load_known_peers()
            self._peer_q.extend(known_peers)

        connected_peers_set = set()
        for conn_protocol in self._peer_connections:
            connected_peers_set.add(conn_protocol.peer.full_address)

        for peer_item in config.user.peer_list:
            peer_metadata = IPMetadata.from_full_address(peer_item)
            if peer_metadata.full_address in self._peer_q:
                self._peer_q.remove(peer_metadata.full_address)
            if peer_metadata.full_address not in connected_peers_set:
                self.connect_peer([peer_metadata.full_address])

        if len(self._peer_connections) >= config.user.max_peers_limit:
            return

        if len(self._peer_q) == 0:
            return

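        # Dequeue a small batch of queued peers and try connecting to them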
        peer_address_list = []
        max_length = min(10, config.user.max_peers_limit)
        while len(self._peer_q) > 0 and len(peer_address_list) != max_length:
            peer_address_list.append(self._peer_q.pop(0))

        self.connect_peer(peer_address_list)

    def connect_peer(self, full_address_list):
        for full_address in full_address_list:
            try:
                addr = IPMetadata.from_full_address(full_address)

                connected_peers = self.get_connected_peer_addrs()
                should_connect = addr.full_address not in connected_peers

                if should_connect:
                    reactor.connectTCP(addr.ip, addr.port, self)

            except Exception as e:
                logger.warning("Could not connect to %s - %s", full_address,
                               str(e))
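
The register_and_broadcast/broadcast pair above never pushes full payloads: it stores the full message locally and announces only a Message Receipt (type + hash), skipping peers that already announced the same hash to us. The following is a minimal, self-contained sketch of that dedup step using hypothetical stand-ins (TinyMessageReceipt, EchoPeer) rather than the real MessageReceipt or peer protocol:

from dataclasses import dataclass, field


@dataclass
class RequestedHash:
    # Peers that already announced this hash to us; they do not need the MR back.
    peers_connection_list: list = field(default_factory=list)


class TinyMessageReceipt:
    """Hypothetical stand-in for MessageReceipt: keeps full messages by hash."""

    def __init__(self):
        self.hash_msg = {}        # msg_hash -> (msg_type, payload)
        self.requested_hash = {}  # msg_hash -> RequestedHash

    def register(self, msg_type, msg_hash, payload):
        self.hash_msg[msg_hash] = (msg_type, payload)


class EchoPeer:
    """Hypothetical peer that just records what it was sent."""

    def __init__(self, name):
        self.name = name
        self.outbox = []

    def send(self, msg):
        self.outbox.append(msg)


def register_and_broadcast(master_mr, peers, msg_type, msg_hash, payload):
    master_mr.register(msg_type, msg_hash, payload)
    ignore = set()
    if msg_hash in master_mr.requested_hash:
        ignore = set(master_mr.requested_hash[msg_hash].peers_connection_list)
    for peer in peers:
        if peer not in ignore:
            peer.send(('MR', msg_type, msg_hash))  # announce hash only, not the payload


if __name__ == '__main__':
    mr = TinyMessageReceipt()
    alice, bob = EchoPeer('alice'), EchoPeer('bob')
    mr.requested_hash[b'h1'] = RequestedHash(peers_connection_list=[bob])  # bob told us about h1 already
    register_and_broadcast(mr, [alice, bob], 'TX', b'h1', b'full-payload')
    assert alice.outbox == [('MR', 'TX', b'h1')] and bob.outbox == []
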
Exemplo n.º 19
0
class P2PFactory(ServerFactory):
    protocol = P2PProtocol

    def __init__(self, buffered_chain: BufferedChain, sync_state: SyncState,
                 qrl_node: QRLNode):

        # FIXME: Constructor signature is not consistent with other factory classes
        self.master_mr = MessageReceipt()
        self.pos = None
        self.ntp = ntp
        self.buffered_chain = buffered_chain
        self.sync_state = sync_state

        self.sync = 0

        self.genesis_processed = False  # FIXME: Accessed by every p2pprotocol instance
        self.peer_connections = []  # FIXME: Accessed by every p2pprotocol instance
        self.synced_peers = set()  # FIXME: Accessed by every p2pprotocol instance

        self.qrl_node = qrl_node

        self.txn_processor_running = False

        self.bkmr_blocknumber = 0  # Blocknumber for which bkmr is being tracked
        self.bkmr_priorityq = queue.PriorityQueue()
        # Schedule and cancel the call, just to initialize with an IDelayedCall
        self.bkmr_processor = reactor.callLater(1, lambda: None, pos=None)
        self.bkmr_processor.cancel()

    @property
    def connections(self):
        return len(self.peer_connections)

    ##############################################
    ##############################################
    ##############################################
    ##############################################

    def RFM(self, data):
        """
        Request Full Message
        This function requests the full message corresponding to
        the Message Receipt received.
        :return:
        """

        # FIXME: Again, breaking encapsulation
        # FIXME: Huge amount of lookups in dictionaries

        msg_hash = data.hash

        if msg_hash in self.master_mr.hash_msg:
            if msg_hash in self.master_mr.requested_hash:
                del self.master_mr.requested_hash[msg_hash]
            return

        if msg_hash not in self.master_mr.requested_hash:
            return

        peers_list = self.master_mr.requested_hash[
            msg_hash].peers_connection_list
        message_request = self.master_mr.requested_hash[msg_hash]
        for peer in peers_list:
            if peer in message_request.already_requested_peers:
                continue
            message_request.already_requested_peers.append(peer)

            peer.transport.write(peer.wrap_message('SFM', MessageToJson(data)))
            call_later_obj = reactor.callLater(
                config.dev.message_receipt_timeout, self.RFM, data)
            message_request.callLater = call_later_obj
            return

        # If execution reaches this line, it means no peer was able to provide
        # the full message for this hash, so the hash has to be deleted.
        # Moreover, negative points could be added to the peers for this behavior
        if msg_hash in self.master_mr.requested_hash:
            del self.master_mr.requested_hash[msg_hash]

    def select_best_bkmr(self):
        # FIXME: This seems to be a much higher level behavior
        blocknumber = self.bkmr_blocknumber
        try:
            dscore, dhash = self.bkmr_priorityq.get_nowait()
            if blocknumber <= self.buffered_chain.height:
                oldscore = self.buffered_chain.get_block_score(blocknumber)
                if dscore > oldscore:
                    del self.bkmr_priorityq
                    self.bkmr_priorityq = queue.PriorityQueue()
                    return

            data = qrl_pb2.MR()

            data.hash = dhash
            data.type = 'BK'

            self.RFM(data)
            self.bkmr_processor = reactor.callLater(5, self.select_best_bkmr)
        except queue.Empty:
            return
        except Exception as e:
            logger.error('select_best_bkmr Unexpected Exception')
            logger.error('%s', e)

    ##############################################
    # NOTE: PoS related.. broadcasting, etc. OBSOLETE
    def broadcast_st(self, st: StakeTransaction):
        logger.info('<<<Transmitting ST: %s', st.activation_blocknumber)
        self.register_and_broadcast('ST', st.get_message_hash(), st.to_json())

    def broadcast_vote(self, vote: Vote):
        logger.info('<<<Transmitting Vote Txn: %s', vote.blocknumber)
        self.register_and_broadcast('VT', vote.get_message_hash(),
                                    vote.to_json())

    def broadcast_destake(self, destake_txn: DestakeTransaction):
        logger.info('<<<Transmitting Destake Txn: %s', destake_txn.txfrom)
        self.register_and_broadcast('DST', destake_txn.get_message_hash(),
                                    destake_txn.to_json())

    def broadcast_block(self, block: Block):
        # logger.info('<<<Transmitting block: ', block.headerhash)
        data = qrl_pb2.MR()
        data.stake_selector = block.transactions[0].addr_from
        data.block_number = block.block_number
        data.prev_headerhash = bytes(block.prev_headerhash)

        if block.block_number > 1:
            data.reveal_hash = block.reveal_hash

        self.register_and_broadcast('BK', block.headerhash, block.to_json(),
                                    data)

    def broadcast_tx(self, tx, subtype='TX'):
        logger.info('<<<Transmitting TX: %s', tx.txhash)
        self.register_and_broadcast(subtype, tx.get_message_hash(),
                                    tx.to_json())

    def broadcast_lt(self, lattice_public_key_txn):
        logger.info('<<<Transmitting LATTICE txn: %s',
                    lattice_public_key_txn.txhash)
        self.buffered_chain.add_lattice_public_key(lattice_public_key_txn)
        self.register_and_broadcast('LT',
                                    lattice_public_key_txn.get_message_hash(),
                                    lattice_public_key_txn.to_json())

    def register_and_broadcast(self,
                               msg_type,
                               msg_hash: bytes,
                               msg_json: str,
                               data=None):
        self.master_mr.register(msg_type, msg_hash, msg_json)
        self.broadcast(msg_hash, msg_type, data)

    def broadcast(self,
                  msg_hash: bytes,
                  msg_type,
                  data=None):  # Move to factory
        """
        Broadcast
        This function sends the Message Receipt to all connected peers.
        :return:
        """
        ignore_peers = []
        if msg_hash in self.master_mr.requested_hash:
            ignore_peers = self.master_mr.requested_hash[
                msg_hash].peers_connection_list

        if not data:
            data = qrl_pb2.MR()

        data.hash = msg_hash
        data.type = msg_type

        for peer in self.peer_connections:
            if peer not in ignore_peers:
                peer.transport.write(
                    self.protocol.wrap_message('MR', MessageToJson(data)))

    def broadcast_get_synced_state(self):
        # Request all peers to update their synced status
        self.synced_peers = set()
        for peer in self.peer_connections:
            peer.transport.write(peer.wrap_message('SYNC'))

    ###################################################
    ###################################################
    ###################################################
    # NOTE: tx processor related. Obsolete stage 2?

    def reset_processor_flag(self, _):
        self.txn_processor_running = False

    def reset_processor_flag_with_err(self, msg):
        logger.error('Exception in txn task')
        logger.error('%s', msg)
        self.txn_processor_running = False

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    # Event handlers
    # NOTE: No need to refactor, it is obsolete

    # noinspection PyMethodMayBeStatic
    def clientConnectionLost(self, connector, reason):
        logger.debug('connection lost: %s', reason)
        # TODO: Reconnect has been disabled
        # connector.connect()

    # noinspection PyMethodMayBeStatic
    def clientConnectionFailed(self, connector, reason):
        logger.debug('connection failed: %s', reason)

    # noinspection PyMethodMayBeStatic
    def startedConnecting(self, connector):
        logger.debug('Started connecting: %s', connector)

    def connect_peers(self):
        """
        Will connect to all known peers. This is typically the entry point.
        It results in:
        - connectionMade in each protocol (session)
        - :py:meth:startedConnecting
        - :py:meth:clientConnectionFailed
        - :py:meth:clientConnectionLost
        :return:
        :rtype: None
        """
        logger.info('<<<Reconnecting to peer list: %s',
                    self.qrl_node._peer_addresses)
        for peer_address in self.qrl_node._peer_addresses:
            # FIXME: Refactor search
            found = False
            for peer_conn in self.peer_connections:
                if peer_address == peer_conn.transport.getPeer().host:
                    found = True
                    break
            if found:
                continue
            reactor.connectTCP(peer_address, 9000, self)
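
The select_best_bkmr routine above keeps candidate blocks for the tracked block number in a PriorityQueue and only requests a full block when the best candidate beats the score of the block already on the chain. A rough, standalone sketch of that selection step with hypothetical names, assuming (as the queue ordering above implies) that a lower score is better:

import queue

# Hypothetical local state: block_number -> score of the block we already hold.
chain_scores = {42: 7.0}


def select_best_candidate(block_number, candidates: queue.PriorityQueue):
    """Pop the best (lowest-score) candidate and decide whether it is worth fetching."""
    try:
        score, headerhash = candidates.get_nowait()
    except queue.Empty:
        return None

    current = chain_scores.get(block_number)
    if current is not None and score > current:
        # The best remaining candidate is worse than the block we already have.
        return None

    # In the factory this would trigger a full-message request (RFM) for headerhash.
    return headerhash


if __name__ == '__main__':
    q = queue.PriorityQueue()
    q.put((9.0, b'worse-candidate'))
    q.put((3.5, b'better-candidate'))
    assert select_best_candidate(42, q) == b'better-candidate'
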
Exemplo n.º 20
0
class P2PFactory(ServerFactory):
    protocol = P2PProtocol

    def __init__(self,
                 chain_manager: ChainManager,
                 sync_state: SyncState,
                 qrl_node):

        self.master_mr = MessageReceipt()
        self.pow = None
        self.sync_state = sync_state

        self._ntp = ntp
        self._qrl_node = qrl_node
        self._chain_manager = chain_manager

        self._syncing_enabled = False
        self._target_peer = None
        self._target_node_header_hash = None
        self._last_requested_block_idx = None

        self._genesis_processed = False
        self._peer_connections = []
        self._synced_peers_protocol = set()
        self._txn_processor_running = False

        self.peer_blockheight = dict()

        reactor.callLater(180, self.monitor_connections)

        self.p2p_msg_priority = {
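            # Relative priority per message type (lower value = higher priority)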
            qrllegacy_pb2.LegacyMessage.VE: 0,
            qrllegacy_pb2.LegacyMessage.PL: 0,
            qrllegacy_pb2.LegacyMessage.PONG: 0,

            ######################
            qrllegacy_pb2.LegacyMessage.MR: 2,
            qrllegacy_pb2.LegacyMessage.SFM: 1,

            qrllegacy_pb2.LegacyMessage.BK: 1,
            qrllegacy_pb2.LegacyMessage.FB: 1,
            qrllegacy_pb2.LegacyMessage.PB: 1,
            qrllegacy_pb2.LegacyMessage.BH: 1,

            ############################
            qrllegacy_pb2.LegacyMessage.TX: 1,
            qrllegacy_pb2.LegacyMessage.MT: 1,
            qrllegacy_pb2.LegacyMessage.TK: 1,
            qrllegacy_pb2.LegacyMessage.TT: 1,
            qrllegacy_pb2.LegacyMessage.LT: 1,
            qrllegacy_pb2.LegacyMessage.SL: 1,

            qrllegacy_pb2.LegacyMessage.EPH: 3,

            qrllegacy_pb2.LegacyMessage.SYNC: 0,
            qrllegacy_pb2.LegacyMessage.CHAINSTATE: 0,
            qrllegacy_pb2.LegacyMessage.HEADERHASHES: 1,
            qrllegacy_pb2.LegacyMessage.P2P_ACK: 0,
        }

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################

    @property
    def has_synced_peers(self):
        return len(self._synced_peers_protocol) > 0

    def get_random_peer(self):
        max_cumulative_difficulty = 0
        for connection_id in self.peer_blockheight:
            max_cumulative_difficulty = max(max_cumulative_difficulty, self.peer_blockheight[connection_id][2])

        connection_ids = []
        for connection_id in self.peer_blockheight:
            if self.peer_blockheight[connection_id][2] == max_cumulative_difficulty:
                connection_ids.append(connection_id)

        selected_peer_connections = []
        for connection_id in connection_ids:
            for peer_conn in self._peer_connections:
                if peer_conn.connection_id == connection_id:
                    selected_peer_connections.append(peer_conn)
        if len(selected_peer_connections) == 0 or max_cumulative_difficulty == 0:
            return None

        return random.sample(selected_peer_connections, 1)[0]

    def update_peer_blockheight(self, connection_id, block_number, headerhash, cumulative_difficulty):
        self.peer_blockheight[connection_id] = [block_number, headerhash, int(UInt256ToString(cumulative_difficulty))]

    def request_peer_blockheight(self):
        for peer in self._peer_connections:
            msg = qrllegacy_pb2.LegacyMessage(func_name=qrllegacy_pb2.LegacyMessage.BH,
                                              bhData=qrl_pb2.BlockHeightData(block_number=0))
            peer.send(msg)

    def set_peer_synced(self, conn_protocol, synced: bool):
        if synced:
            self._synced_peers_protocol.add(conn_protocol)
        else:
            self._synced_peers_protocol.discard(conn_protocol)

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################

    @property
    def connections(self):
        return len(self._peer_connections)

    @property
    def synced(self):
        return self.pow.sync_state.state == ESyncState.synced

    @property
    def reached_conn_limit(self):
        return len(self._peer_connections) >= config.user.max_peers_limit

    def get_connected_peer_ips(self):
        # FIXME: Convert self._peer_connections to set
        return set([peer.peer_ip for peer in self._peer_connections])

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################

    @property
    def chain_height(self):
        return self._chain_manager.height

    def get_last_block(self):
        return self._chain_manager.get_last_block()

    def get_headerhashes(self, start_blocknumber):
        return self._chain_manager.get_headerhashes(start_blocknumber)

    def get_cumulative_difficulty(self):
        return self._chain_manager.get_cumulative_difficulty()

    def get_block(self, block_number):
        return self._chain_manager.get_block_by_number(block_number)

    def block_received(self, source, block: Block):
        self.pow.last_pb_time = time.time()
        logger.info('>>> Received Block #%d %s', block.block_number, bin2hstr(block.headerhash))

        if source != self._target_peer:
            logger.warning('Received block from unexpected peer')
            logger.warning('Expected peer: %s', self._target_peer.connection_id)
            logger.warning('Found peer: %s', source.connection_id)
            return

        if block.block_number != self._last_requested_block_idx:
            logger.warning('Did not match %s', self._last_requested_block_idx)
            return

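        # Check the block's headerhash against the list announced by the target peer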
        target_start_blocknumber = self._target_node_header_hash.block_number
        expected_headerhash = self._target_node_header_hash.headerhashes[block.block_number - target_start_blocknumber]
        if block.headerhash != expected_headerhash:
            logger.warning('Did not match headerhash')
            logger.warning('Expected headerhash %s', expected_headerhash)
            logger.warning('Found headerhash %s', block.headerhash)
            return

        # FIXME: This check should not be necessary
        if not self._chain_manager.add_block(block):
            logger.warning('Failed to Add Block')
            return

        try:
            reactor.download_monitor.cancel()
        except Exception as e:
            logger.warning("PB: %s", e)

        if self.is_syncing_finished():
            return

        self._last_requested_block_idx += 1
        if self.is_syncing_finished():
            return

        self.peer_fetch_block()

    def is_syncing(self) -> bool:
        return self._syncing_enabled

    def is_syncing_finished(self, force_finish=False):
        if self._last_requested_block_idx == len(self._target_node_header_hash.headerhashes) or force_finish:
            self._last_requested_block_idx = None
            self._target_node_header_hash = None
            self._target_peer = None
            self._syncing_enabled = False
            return True

        return False

    def peer_fetch_block(self, retry=0):
        node_header_hash = self._target_node_header_hash
        curr_index = self._last_requested_block_idx - node_header_hash.block_number

        block_headerhash = node_header_hash.headerhashes[curr_index]
        block = self._chain_manager.state.get_block(block_headerhash)

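        # Block not present locally yet; after 5 failed attempts, ban the target peer and stop syncing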
        if not block:
            if retry >= 5:
                logger.debug('Retry Limit Hit')
                self._qrl_node.ban_peer(self._target_peer)
                self.is_syncing_finished(force_finish=True)
                return
        else:
            while block and curr_index < len(node_header_hash.headerhashes):
                block_headerhash = node_header_hash.headerhashes[curr_index]
                block = self._chain_manager.state.get_block(block_headerhash)
                self._last_requested_block_idx += 1
                curr_index = self._last_requested_block_idx - node_header_hash.block_number

            retry = 0

        if self.is_syncing_finished():
            return

        self._target_peer.send_fetch_block(self._last_requested_block_idx)
        reactor.download_monitor = reactor.callLater(20, self.peer_fetch_block, retry+1)

    def compare_and_sync(self, peer, node_header_hash: qrl_pb2.NodeHeaderHash):
        if self._syncing_enabled:
            logger.info('>> Ignoring compare_and_sync Syncing Enabled')
            return
        last_block = self._chain_manager.get_last_block()
        node_last_block_number = node_header_hash.block_number + len(node_header_hash.headerhashes) - 1
        last_block_number = min(last_block.block_number, node_last_block_number)
        if last_block_number < node_header_hash.block_number:
            return
        fork_block_number = last_block.block_number + 1
        fork_found = False
        for i in range(last_block_number, node_header_hash.block_number - 1, -1):
            block = self._chain_manager.get_block_by_number(i)
            if block.headerhash == node_header_hash.headerhashes[i-node_header_hash.block_number]:
                break
            fork_block_number = i
            fork_found = True

        if fork_found or (last_block.block_number < node_last_block_number):
            self._target_peer = peer
            self._target_node_header_hash = node_header_hash
            self._last_requested_block_idx = fork_block_number
            self._syncing_enabled = True
            self.peer_fetch_block()

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################
    ###################################################

    def request_full_message(self, mr_data: qrllegacy_pb2.MRData):
        """
        Request Full Message
        This function requests the full message corresponding to
        the Message Receipt received.
        :return:
        """

        # FIXME: Again, breaking encapsulation
        # FIXME: Huge amount of lookups in dictionaries
        msg_hash = mr_data.hash

        if msg_hash in self.master_mr._hash_msg:
            if msg_hash in self.master_mr.requested_hash:
                del self.master_mr.requested_hash[msg_hash]
            return

        if msg_hash not in self.master_mr.requested_hash:
            return

        peers_list = self.master_mr.requested_hash[msg_hash].peers_connection_list
        message_request = self.master_mr.requested_hash[msg_hash]
        for peer in peers_list:
            if peer in message_request.already_requested_peers:
                continue
            message_request.already_requested_peers.append(peer)

            msg = qrllegacy_pb2.LegacyMessage(func_name=qrllegacy_pb2.LegacyMessage.SFM,
                                              mrData=qrllegacy_pb2.MRData(hash=mr_data.hash, type=mr_data.type))

            peer.send(msg)

            call_later_obj = reactor.callLater(config.dev.message_receipt_timeout,
                                               self.request_full_message,
                                               mr_data)

            message_request.callLater = call_later_obj
            return

        # If execution reaches this line, it means no peer was able to provide
        # the full message for this hash, so the hash has to be deleted.
        # Moreover, negative points could be added to the peers for this behavior
        if msg_hash in self.master_mr.requested_hash:
            del self.master_mr.requested_hash[msg_hash]

    ##############################################
    ##############################################
    ##############################################
    ##############################################
    # NOTE: PoW related.. broadcasting, etc. OBSOLETE

    def reset_processor_flag(self, _):
        self._txn_processor_running = False

    def reset_processor_flag_with_err(self, msg):
        logger.error('Exception in txn task')
        logger.error('%s', msg)
        self._txn_processor_running = False

    def add_unprocessed_txn(self, tx, ip):
        self._chain_manager.tx_pool.update_pending_tx_pool(tx, ip)

        if not self._txn_processor_running:
            txn_processor = TxnProcessor(state=self._chain_manager.state,
                                         transaction_pool_obj=self._chain_manager.tx_pool,
                                         broadcast_tx=self.broadcast_tx)

            task_defer = TxnProcessor.create_cooperate(txn_processor).whenDone()
            task_defer.addCallback(self.reset_processor_flag) \
                .addErrback(self.reset_processor_flag_with_err)
            self._txn_processor_running = True

    def broadcast_tx(self, tx: TransferTransaction):
        logger.info('<<<Transmitting TX: %s', tx.txhash)

        if tx.subtype == qrl_pb2.Transaction.MESSAGE:
            legacy_type = qrllegacy_pb2.LegacyMessage.MT
        elif tx.subtype == qrl_pb2.Transaction.TRANSFER:
            legacy_type = qrllegacy_pb2.LegacyMessage.TX
        elif tx.subtype == qrl_pb2.Transaction.TOKEN:
            legacy_type = qrllegacy_pb2.LegacyMessage.TK
        elif tx.subtype == qrl_pb2.Transaction.TRANSFERTOKEN:
            legacy_type = qrllegacy_pb2.LegacyMessage.TT
        elif tx.subtype == qrl_pb2.Transaction.LATTICE:
            legacy_type = qrllegacy_pb2.LegacyMessage.LT
        elif tx.subtype == qrl_pb2.Transaction.SLAVE:
            legacy_type = qrllegacy_pb2.LegacyMessage.SL
        else:
            raise ValueError('Invalid Transaction Type')
        self.register_and_broadcast(legacy_type, tx.get_message_hash(), tx.pbdata)

    def broadcast_ephemeral_message(self, encrypted_ephemeral):
        logger.info('<<<Broadcasting Encrypted Ephemeral Message')
        self._chain_manager.add_ephemeral_message(encrypted_ephemeral)
        self.register_and_broadcast('EPH',
                                    encrypted_ephemeral.get_message_hash(),
                                    encrypted_ephemeral.to_json())

    def broadcast_tx_relay(self, source_peer, tx):
        txn_msg = source_peer._wrap_message('TX', tx.to_json())
        for peer in self._peer_connections:
            if peer != source_peer:
                peer.transport.write(txn_msg)

    ##############################################
    ##############################################
    ##############################################
    ##############################################

    def broadcast_block(self, block: Block):
        # logger.info('<<<Transmitting block: ', block.headerhash)
        data = qrllegacy_pb2.MRData()
        data.stake_selector = block.transactions[0].public_key
        data.block_number = block.block_number
        data.prev_headerhash = bytes(block.prev_headerhash)

        self.register_and_broadcast(qrllegacy_pb2.LegacyMessage.BK, block.headerhash, block.pbdata, data)

    ##############################################
    ##############################################
    ##############################################
    ##############################################

    def register_and_broadcast(self, msg_type, msg_hash: bytes, pbdata, data=None):
        self.master_mr.register(msg_type, msg_hash, pbdata)
        self.broadcast(msg_type, msg_hash, data)

    def broadcast(self, msg_type, msg_hash: bytes, mr_data=None):
        """
        Broadcast
        This function sends the Message Receipt to all connected peers.
        :return:
        """
        ignore_peers = []
        if msg_hash in self.master_mr.requested_hash:
            ignore_peers = self.master_mr.requested_hash[msg_hash].peers_connection_list

        if not mr_data:
            mr_data = qrllegacy_pb2.MRData()

        mr_data.hash = msg_hash
        mr_data.type = msg_type
        data = qrllegacy_pb2.LegacyMessage(func_name=qrllegacy_pb2.LegacyMessage.MR,
                                           mrData=mr_data)

        for peer in self._peer_connections:
            if peer not in ignore_peers:
                peer.send(data)

    def broadcast_get_synced_state(self):
        # Request all peers to update their synced status
        self._synced_peers_protocol = set()
        for peer in self._peer_connections:
            peer.send_sync()

    ###################################################
    ###################################################
    ###################################################
    ###################################################
    # Event handlers / Comms related

    def start_listening(self):
        reactor.listenTCP(9000, self)

    # NOTE: No need to refactor, it is obsolete
    def clientConnectionLost(self, connector, reason):  # noqa
        logger.debug('connection lost: %s', reason)

    def clientConnectionFailed(self, connector, reason):
        logger.debug('connection failed: %s', reason)

    def startedConnecting(self, connector):
        logger.debug('Started connecting: %s', connector)

    def add_connection(self, conn_protocol) -> bool:
        # TODO: Most of this can go to the peer manager

        if self._qrl_node.is_banned(conn_protocol.peer_ip):
            conn_protocol.loseConnection()
            return False

        # FIXME: (For AWS) This could be problematic for other users
        # FIXME: identify nodes by a GUID?
        if config.dev.public_ip and conn_protocol.peer_ip == config.dev.public_ip:
            conn_protocol.loseConnection()
            return False

        if self.reached_conn_limit:
            # FIXME: Should we stop listening to avoid unnecessary load due to many connections?
            logger.info('Peer limit hit. Disconnecting client %s', conn_protocol.peer_ip)
            conn_protocol.loseConnection()
            return False

        peer_list = self._qrl_node.peer_addresses
        if conn_protocol.peer_ip == conn_protocol.host_ip:
            if conn_protocol.peer_ip in peer_list:
                logger.info('Self in peer_list, removing..')
                peer_list.remove(conn_protocol.peer_ip)
                self._qrl_node.peer_manager.update_peer_addresses(peer_list)

            conn_protocol.loseConnection()
            return False

        self._peer_connections.append(conn_protocol)

        if conn_protocol.peer_ip not in peer_list:
            logger.debug('Adding to peer_list')
            peer_list.add(conn_protocol.peer_ip)
            self._qrl_node.peer_manager.update_peer_addresses(peer_list)

        logger.debug('>>> new peer connection : %s:%s ', conn_protocol.peer_ip, str(conn_protocol.peer_port))

        return True

    def remove_connection(self, conn_protocol):
        if conn_protocol in self._peer_connections:
            self._peer_connections.remove(conn_protocol)

        if conn_protocol.connection_id in self.peer_blockheight:
            del self.peer_blockheight[conn_protocol.connection_id]

        self._synced_peers_protocol.discard(conn_protocol)

    def monitor_connections(self):
        reactor.callLater(180, self.monitor_connections)

        if len(self._peer_connections) == 0:
            logger.warning('No Connected Peer Found')
            reactor.callLater(60, self._qrl_node.connect_peers)
            return

        connected_peers_set = set()
        for conn_protocol in self._peer_connections:
            connected_peers_set.add(conn_protocol.peer_ip)

        for ip in config.user.peer_list:
            if ip not in connected_peers_set:
                self.connect_peer(ip)

    def connect_peer(self, peer_address):
        if peer_address not in self.get_connected_peer_ips():
            reactor.connectTCP(peer_address, 9000, self)
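
compare_and_sync in the factories above decides whether to sync by scanning backwards for the first headerhash that still matches the remote node's list; everything above that point is treated as a fork and re-fetched. A standalone sketch of that fork-point scan (the helper name and data shapes are illustrative, not the node's actual API):

def find_fork_point(local_hashes, peer_start, peer_hashes):
    """
    Return the lowest block number whose headerhash disagrees with the peer
    (i.e. where fetching should start), or None if the overlap fully agrees.

    local_hashes: dict of block_number -> headerhash for our chain
    peer_start:   first block number covered by peer_hashes
    peer_hashes:  list of headerhashes starting at peer_start
    """
    local_tip = max(local_hashes)
    peer_tip = peer_start + len(peer_hashes) - 1
    top = min(local_tip, peer_tip)

    fork_block_number = None
    for i in range(top, peer_start - 1, -1):
        if local_hashes.get(i) == peer_hashes[i - peer_start]:
            break  # first agreement found; everything above it is the fork
        fork_block_number = i
    return fork_block_number


if __name__ == '__main__':
    ours = {0: b'a', 1: b'b', 2: b'x'}                 # we diverged at block 2
    assert find_fork_point(ours, 0, [b'a', b'b', b'c', b'd']) == 2
    assert find_fork_point(ours, 0, [b'a', b'b', b'x']) is None
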
Exemplo n.º 21
0
    def test_create(self):
        mr = MessageReceipt()
        self.assertIsNotNone(mr)
        self.assertEqual(mr.allowed_types, ['TX', 'ST', 'BK', 'DT'])