def compare_and_sync(self, source_peer, node_header_hash: xrd_pb2.NodeHeaderHash):
    """Compare our chain against a peer's advertised header hashes and, if we
    diverge or lag behind, start syncing from the fork point.

    ``node_header_hash`` carries a contiguous run of header hashes starting at
    ``node_header_hash.block_number``. We walk downwards from the highest block
    both sides cover, looking for the first header hash that matches ours.
    """
    if self._syncing_enabled:
        logger.info('>> Ignoring compare_and_sync Syncing Enabled')
        return
    last_block = self.get_last_block()
    # Highest block number covered by the peer's advertised header hashes
    node_last_block_number = node_header_hash.block_number + len(
        node_header_hash.headerhashes) - 1
    # Highest block number both we and the peer's window cover
    last_block_number = min(last_block.block_number, node_last_block_number)
    if last_block_number < node_header_hash.block_number:
        # Peer's window starts above everything we have - nothing to compare
        return
    fork_block_number = last_block.block_number + 1
    fork_found = False
    # Scan downwards until we hit a header hash that matches ours; every
    # mismatching height lowers the candidate fork point.
    for i in range(last_block_number, node_header_hash.block_number - 1, -1):
        block = self._chain_manager.get_block_by_number(i)
        if block:
            if block.headerhash == node_header_hash.headerhashes[
                    i - node_header_hash.block_number]:
                break
            fork_block_number = i
            fork_found = True
    if fork_found or (last_block.block_number < node_last_block_number):
        # Either we diverged, or the peer is simply ahead: fetch from fork point
        self._target_channel = source_peer
        self._target_node_header_hash = node_header_hash
        self._last_requested_block_number = fork_block_number
        self._syncing_enabled = True
        self.peer_fetch_block()
def _validate_custom(self):
    """Stateless validation for a TransferTokenTransaction.

    Checks, in order: no zero amounts, non-negative fee, matching
    addrs_to/amounts lengths, and validity of every address involved.
    Returns True when all checks pass, False otherwise.
    """
    if any(amount == 0 for amount in self.amounts):
        logger.warning('Amount cannot be 0 - %s', self.amounts)
        logger.warning('TransferTokenTransaction')
        return False

    if self.fee < 0:
        logger.info('TransferTokenTransaction [%s] Invalid Fee = %d',
                    bin2hstr(self.txhash), self.fee)
        return False

    if len(self.addrs_to) != len(self.amounts):
        logger.warning(
            '[TransferTokenTransaction] Mismatch number of addresses to & amounts'
        )
        logger.warning('>> Length of addresses_to %s', len(self.addrs_to))
        logger.warning('>> Length of amounts %s', len(self.amounts))
        return False

    if not OptimizedAddressState.address_is_valid(self.addr_from):
        logger.warning(
            '[TransferTokenTransaction] Invalid address addr_from: %s',
            bin2hstr(self.addr_from))
        return False

    # First invalid destination address (if any) fails the whole transaction
    bad_destination = next(
        (a for a in self.addrs_to
         if not OptimizedAddressState.address_is_valid(a)), None)
    if bad_destination is not None:
        logger.warning(
            '[TransferTokenTransaction] Invalid address addr_to: %s',
            bin2hstr(bad_destination))
        return False

    return True
def handleEvent(self, event):
    """Handle a miner event; on SOLUTION, clone the mining template, apply the
    winning nonce and hand the block to ``pre_block_logic``.

    Returns False when the lock could not be acquired (event skipped),
    True otherwise.
    """
    # NOTE: This function usually runs in the context of a C++ thread
    if event.type == SOLUTION:
        logger.debug('handleEvent - TRY LOCK')
        # Non-blocking acquire: never stall the mining thread waiting on us
        if not self.lock.acquire(blocking=False):
            logger.debug('handleEvent - SKIP')
            return False
        try:
            logger.debug('handleEvent - LOCKED')
            logger.debug('Solution Found %s', event.nonce)
            logger.info('Hash Rate: %s H/s', self.hashRate())
            # Deep copy so the live template can keep being mined/replaced
            cloned_block = copy.deepcopy(self._mining_block)
            cloned_block.set_nonces(self._dev_config, event.nonce, 0)
            logger.debug("Blob %s", cloned_block)
            logger.info('Block #%s nonce: %s', cloned_block.block_number,
                        event.nonce)
            self.pre_block_logic(cloned_block)
        except Exception as e:
            # Swallow and log: an exception here must not kill the miner thread
            logger.warning("Exception in solutionEvent")
            logger.exception(e)
        finally:
            logger.debug('handleEvent - UNLOCK')
            self.lock.release()
    return True
def handle_block(self, source, message: xrdlegacy_pb2.LegacyMessage):  # block received
    """
    Block
    This function processes any new block received.
    Decodes the serialised block, drops it if it was not requested from this
    peer, then forwards it to the PoW logic and records it in master_mr.
    :return:
    """
    P2PBaseObserver._validate_message(message, xrdlegacy_pb2.LegacyMessage.BK)
    try:
        block = Block(message.block)
    except Exception as e:
        # Undecodable payload: reject but do not crash the handler
        logger.error(
            'block rejected - unable to decode serialised data %s',
            source.peer)
        logger.exception(e)
        return

    logger.info('>>>Received block from %s %s %s',
                source.peer.full_address, block.block_number,
                bin2hstr(block.headerhash))

    # Ignore blocks we did not request from this peer
    if not source.factory.master_mr.isRequested(block.headerhash, source, block):
        return

    source.factory.pow.pre_block_logic(block)  # FIXME: Ignores return value
    # NOTE(review): register presumably caches the message for relay/dedup - confirm
    source.factory.master_mr.register(xrdlegacy_pb2.LegacyMessage.BK,
                                      block.headerhash, message.block)
def __next__(self):
    """Take the next pending transaction, validate it, and move it into the pool.

    Raises StopIteration when no pending transaction is available.
    Returns False when the transaction fails validation, True when it was
    added to the pool and broadcast.
    """
    pending = self.transaction_pool_obj.get_pending_transaction()
    if not pending:
        raise StopIteration

    tx, timestamp = pending

    if not self.chain_manager.validate_all(tx, check_nonce=False):
        return False

    # Reject e.g. OTS key reuse against transactions already in the pool
    if not tx.validate_transaction_pool(self.transaction_pool_obj.transaction_pool):
        logger.info('>>>TX %s failed is_valid_pool_state', bin2hstr(tx.txhash))
        return False

    logger.info('A TXN has been Processed %s', bin2hstr(tx.txhash))
    self.transaction_pool_obj.add_tx_to_pool(
        tx, self.chain_manager.last_block.block_number, timestamp)
    self.broadcast_tx(tx)
    return True
def _validate_extended(self, state_container: StateContainer) -> bool:
    """Stateful validation for a MessageTransaction.

    Checks the hard-fork gate for addressed messages, the maximum message
    length, and that the sender can cover the fee.
    """
    dev_config = state_container.current_dev_config

    if len(self.addr_to) != 0 and \
            state_container.block_number < dev_config.hard_fork_heights[0]:
        logger.warning(
            "[MessageTransaction] Hard Fork Feature not yet activated")
        return False

    if len(self.message_hash) > dev_config.message_max_length:
        logger.warning('Message length cannot be more than %s',
                       dev_config.message_max_length)
        logger.warning('Found message length %s', len(self.message_hash))
        return False

    sender_balance = state_container.addresses_state[self.addr_from].balance
    if sender_balance < self.fee:
        logger.info(
            'State validation failed for %s because: Insufficient funds',
            bin2hstr(self.txhash))
        logger.info('balance: %s, amount: %s', sender_balance, self.fee)
        return False

    return True
def add_connection(self, conn_protocol) -> bool:
    """Accept or reject a new inbound/outbound connection.

    Rejects banned peers, peers exceeding the per-IP redundancy limit, peers
    beyond the global connection limit, and loop-back connections to ourselves
    (in which case our known-peer list is refreshed without our own address).
    Returns True when the connection was registered.
    """
    # TODO: Most of this can go peer manager
    if self._xrd_node.peer_manager.is_banned(conn_protocol.peer):
        return False

    # How many connections we already hold to this same IP
    same_ip_count = sum(
        1 for existing in self._peer_connections
        if existing.peer.ip == conn_protocol.peer.ip)

    # A negative limit means "unlimited"
    if 0 <= config.user.max_redundant_connections <= same_ip_count:
        logger.info('Redundant Limit. Disconnecting client %s',
                    conn_protocol.peer)
        return False

    if self.reached_conn_limit:
        # FIXME: Should we stop listening to avoid unnecessary load due to many connections?
        logger.info('Peer limit hit. Disconnecting client %s',
                    conn_protocol.peer)
        return False

    # Loop-back connection to ourselves: share known peers (minus our own
    # address) and reject the connection.
    connected_to_self = (
        conn_protocol.peer.ip == conn_protocol.host.ip and
        conn_protocol.peer.port == config.user.p2p_public_port)
    if connected_to_self:
        others = [
            p for p in self._xrd_node.peer_manager.known_peer_addresses
            if p != conn_protocol.peer.full_address
        ]
        self._xrd_node.peer_manager.extend_known_peers(others)
        return False

    self._peer_connections.append(conn_protocol)
    logger.debug('>>> new connection: %s ', conn_protocol.peer)
    return True
def broadcast_tx(self, tx: TransferTransaction):
    """Map a transaction to its legacy message type and broadcast it.

    Raises ValueError for unknown transaction types.
    """
    logger.info('<<<Transmitting TX: %s', bin2hstr(tx.txhash))

    # Order matters: isinstance checks must test subclasses before any
    # base classes, so keep this sequence stable.
    dispatch = (
        (MessageTransaction, xrdlegacy_pb2.LegacyMessage.MT),
        (TransferTransaction, xrdlegacy_pb2.LegacyMessage.TX),
        (TokenTransaction, xrdlegacy_pb2.LegacyMessage.TK),
        (TransferTokenTransaction, xrdlegacy_pb2.LegacyMessage.TT),
        (SlaveTransaction, xrdlegacy_pb2.LegacyMessage.SL),
        (LatticeTransaction, xrdlegacy_pb2.LegacyMessage.LT),
        (MultiSigCreate, xrdlegacy_pb2.LegacyMessage.MC),
        (MultiSigSpend, xrdlegacy_pb2.LegacyMessage.MS),
        (MultiSigVote, xrdlegacy_pb2.LegacyMessage.MV),
    )
    for tx_class, msg_type in dispatch:
        if isinstance(tx, tx_class):
            legacy_type = msg_type
            break
    else:
        raise ValueError('Invalid Transaction Type')

    self.register_and_broadcast(legacy_type, tx.get_message_hash(), tx.pbdata)
def test_getKnownPeers(self):
    """GetKnownPeers must return every known peer address.

    Fix: the service was called with the message *class*
    (``xrd_pb2.GetKnownPeersReq``) instead of an instance
    (``xrd_pb2.GetKnownPeersReq()``); gRPC handlers expect a request
    message object.
    """
    p2p_factory = Mock(spec=P2PFactory)
    p2p_factory.sync_state = SyncState()
    p2p_factory.num_connections = 23
    p2p_factory.pow = Mock()

    chain_manager = Mock(spec=ChainManager)
    chain_manager.height = 0
    chain_manager.last_block = Block()

    xrdnode = xrdNode(mining_address=b'')
    xrdnode.set_chain_manager(chain_manager)
    xrdnode._p2pfactory = p2p_factory
    xrdnode._pow = p2p_factory.pow
    xrdnode.peer_manager = Mock()
    xrdnode.peer_manager.known_peer_addresses = ['127.0.0.1', '192.168.1.1']

    service = PublicAPIService(xrdnode)
    # Pass a request instance, not the message class
    response = service.GetKnownPeers(request=xrd_pb2.GetKnownPeersReq(),
                                     context=None)

    self.assertEqual(2, len(response.known_peers))
    self.assertEqual('127.0.0.1', response.known_peers[0].ip)
    self.assertEqual('192.168.1.1', response.known_peers[1].ip)

    logger.info(response)
def _validate_custom(self) -> bool:
    """Stateless check: reject transactions carrying a negative fee."""
    if self.fee >= 0:
        return True
    logger.info(
        'State validation failed for %s because: Negative send',
        bin2hstr(self.txhash))
    return False
def restart_unsynced_logic(self, delay=0):
    """Cancel any pending unsynced-logic timer and reschedule it.

    :param delay: seconds to wait before running ``unsynced_logic`` again
    """
    logger.info('Restarting unsynced logic in %s seconds', delay)
    try:
        reactor.unsynced_logic.cancel()
    except Exception:
        # The timer may be missing, already fired, or already cancelled;
        # all of those are fine and need no logging.
        pass
    reactor.unsynced_logic = reactor.callLater(delay, self.unsynced_logic)
def load(self):
    """Read the wallet file, falling back to the legacy ver0 reader.

    A TypeError from the ver1 reader is taken to mean the file is an old
    ver0 wallet.
    """
    try:
        self._read_wallet_ver1(self.wallet_path)
    except TypeError:
        # Old wallet format: retry with the ver0 reader
        logger.info(
            "ReadWallet: reading ver1 wallet failed, this must be an old wallet"
        )
        self._read_wallet_ver0(self.wallet_path)
def send(self, message: xrdlegacy_pb2.LegacyMessage):
    """Queue an outgoing message by priority, dropping it if the queue is full."""
    outgoing_msg = OutgoingMessage(
        self.factory.p2p_msg_priority[message.func_name], message)

    if self.outgoing_queue.full():
        # Best effort: drop rather than block when the peer is slow
        logger.info("Outgoing Queue Full: Skipping Message Type %s",
                    message.WhichOneof('data'))
        return

    # (priority, timestamp, msg): priority orders, timestamp breaks ties by age
    self.outgoing_queue.put(
        (outgoing_msg.priority, outgoing_msg.timestamp, outgoing_msg))
    self.send_next()
def block_received(self, source, block: Block):
    """Process a block delivered while syncing from the targeted peer.

    Verifies the sender, the expected block number and the advertised header
    hash, validates the block, adds it to the chain, and requests the next
    block. Peers that deliver unexpected or invalid data are banned.
    """
    self.pow.last_pb_time = ntp.getTime()
    logger.info('>>> Received Block #%d %s', block.block_number,
                bin2hstr(block.headerhash))

    # Only accept blocks from the peer we are currently syncing from
    if source != self._target_channel:
        if self._target_channel is None:
            logger.warning('Received block and target channel is None')
        else:
            logger.warning('Received block from unexpected peer')
            logger.warning('Expected peer: %s', self._target_channel.peer)
            logger.warning('Found peer: %s', source.peer)
        return

    if block.block_number != self._last_requested_block_number:
        logger.warning('Did not match %s', self._last_requested_block_number)
        self._xrd_node.peer_manager.ban_channel(source)
        return

    # The peer must deliver exactly the header hash it advertised earlier
    target_start_blocknumber = self._target_node_header_hash.block_number
    expected_headerhash = self._target_node_header_hash.headerhashes[
        block.block_number - target_start_blocknumber]
    if block.headerhash != expected_headerhash:
        logger.warning('Did not match headerhash')
        logger.warning('Expected headerhash %s', expected_headerhash)
        logger.warning('Found headerhash %s', block.headerhash)
        self._xrd_node.peer_manager.ban_channel(source)
        return

    if not block.validate(self._chain_manager, self.pow.future_blocks):
        logger.warning('Syncing Failed: Block Validation Failed')
        self._xrd_node.peer_manager.ban_channel(source)
        return

    if self._chain_manager.add_block(block, check_stale=False):
        if self._chain_manager.last_block.headerhash == block.headerhash:
            # Block extended the main chain: keep mining suspended a bit longer
            self.pow.suspend_mining_timestamp = ntp.getTime(
            ) + config.dev.sync_delay_mining
    else:
        logger.warning('Failed to Add Block')
        self._xrd_node.peer_manager.ban_channel(source)
        return

    try:
        reactor.download_monitor.cancel()
    except Exception as e:
        logger.warning("PB: %s", e)

    if self.is_syncing_finished():
        return

    self._last_requested_block_number += 1
    self.peer_fetch_block()
def send_fetch_block(self, block_idx):
    """
    Fetch Block n
    Sends request for the block number n to this peer.
    :return:
    """
    logger.info('<<<Fetching block: %s from %s', block_idx, self.peer)
    request = xrdlegacy_pb2.LegacyMessage(
        func_name=xrdlegacy_pb2.LegacyMessage.FB,
        fbData=xrdlegacy_pb2.FBData(index=block_idx))
    self.send(request)
def wrap_f(caller_self, request, context):
    """gRPC handler wrapper (closure): delegates to ``f``, mapping ValueError
    to INVALID_ARGUMENT and any other exception to the default error status,
    returning an empty response of the expected type in both cases.

    NOTE: relies on ``f`` and ``self`` captured from the enclosing
    decorator scope (not visible in this chunk).
    """
    try:
        return f(caller_self, request, context)
    except ValueError as e:
        # Client-side error: report invalid argument, log at info level only
        self._set_context(context, e, StatusCode.INVALID_ARGUMENT)
        logger.info(str(e))
        return self.response_type()
    except Exception as e:
        # Unexpected server-side error: full traceback in the log
        self._set_context(context, e)
        logger.exception(e)
        return self.response_type()
def update_node_state(self, new_sync_state: ESyncState):
    """Switch the node to ``new_sync_state`` and invoke its state handler."""
    self.sync_state.state = new_sync_state
    logger.info('Status changed to %s', self.sync_state.state)

    # Dispatch table: each sync state has a dedicated handler
    handlers = {
        ESyncState.unsynced: self._handler_state_unsynced,
        ESyncState.syncing: self._handler_state_syncing,
        ESyncState.synced: self._handler_state_synced,
        ESyncState.forked: self._handler_state_forked,
    }
    handler = handlers[self.sync_state.state]
    handler()
def _validate_extended(self, state_container: StateContainer) -> bool:
    """Stateful validation for a SlaveTransaction.

    Checks list-length limits, the sender's balance against the fee, the
    pre-hard-fork slave PK length limit, and that none of the PKs is already
    registered as a slave of the sender.
    """
    dev_config = state_container.current_dev_config
    limit = dev_config.transaction_multi_output_limit

    if len(self.slave_pks) > limit or len(self.access_types) > limit:
        logger.warning('List has more than %s slave pks or access_types',
                       limit)
        logger.warning('Slave pks len %s', len(self.slave_pks))
        logger.warning('Access types len %s', len(self.access_types))
        return False

    sender_balance = state_container.addresses_state[self.addr_from].balance
    if sender_balance < self.fee:
        logger.info('Slave: State validation failed for %s because: Insufficient funds',
                    bin2hstr(self.txhash))
        logger.info('balance: %s, amount: %s', sender_balance, self.fee)
        return False

    # PK length limit only applies before the hard fork height
    before_hard_fork = (state_container.block_number <
                        dev_config.hard_fork_heights[0])
    for slave_pk in self.slave_pks:
        if before_hard_fork and len(slave_pk) > dev_config.slave_pk_max_length:
            logger.info("[Slave Transaction] Slave PK length is beyond limit")
            return False
        if (self.addr_from, slave_pk) in state_container.slaves.data:
            logger.info("[Slave Transaction] Invalid slave transaction as %s is already a slave for this address",
                        slave_pk)
            return False

    return True
def _validate_extended(self, state_container: StateContainer):
    """Stateful validation for a TransferTransaction.

    Checks the hard-fork gate for message data, output-count and
    message-length limits, existence of any multi-sig destinations, and that
    the sender can cover amount plus fee.
    """
    dev_config = state_container.current_dev_config

    if len(self.message_data) > 0 and \
            state_container.block_number < dev_config.hard_fork_heights[0]:
        logger.warning(
            "[TransferTransaction] Hard Fork Feature not yet activated")
        return False

    if len(self.addrs_to) > dev_config.transaction_multi_output_limit:
        logger.warning(
            '[TransferTransaction] Number of addresses exceeds max limit')
        logger.warning('Number of addresses %s', len(self.addrs_to))
        logger.warning('Number of amounts %s', len(self.amounts))
        return False

    if len(self.message_data) > dev_config.message_max_length:
        logger.warning(
            "[TransferTransaction] Message data is greater than message max length limit"
        )
        logger.warning("Message data length %s", len(self.message_data))
        logger.warning("Message data length limit %s",
                       dev_config.message_max_length)
        return False

    sender_balance = state_container.addresses_state[self.addr_from].balance
    total_amount = self.total_amount

    # Any multi-sig destination must already exist in state
    for addr_to in self.addrs_to:
        if MultiSigAddressState.address_is_valid(addr_to) and \
                addr_to not in state_container.addresses_state:
            logger.warning(
                '[TransferTransaction] Multi Sig Address doesnt exist: %s',
                bin2hstr(addr_to))
            return False

    if sender_balance < total_amount + self.fee:
        logger.info(
            'State validation failed for %s because: Insufficient funds',
            bin2hstr(self.txhash))
        logger.info('balance: %s, fee: %s, amount: %s', sender_balance,
                    self.fee, total_amount)
        return False

    return True
def get_mining_address(mining_address: str):
    """Parse and validate a mining address string.

    Falls back to the configured mining address when none is given.
    Returns the address as bytes, or None when parsing/validation fails.
    """
    try:
        source = mining_address if mining_address else config.user.mining_address
        # Drop the one-character address prefix before hex decoding
        parsed = bytes(hstr2bin(source[1:]))
        if not AddressState.address_is_valid(parsed):
            raise ValueError('Mining Address Validation Failed')
        return parsed
    except Exception as e:
        logger.info('Failed Parsing Mining Address %s', e)
    return None
def validate(self, verify_signature=True) -> bool:
    """
    Boolean wrapper around validate_or_raise: any validation failure is
    logged and reported as False instead of an exception, to accommodate
    legacy callers.
    :return: True if the transaction is valid
    :rtype: bool
    """
    try:
        self.validate_or_raise(verify_signature)
        return True
    except ValueError as e:
        # Expected validation failure
        logger.info('[%s] failed validate_tx', bin2hstr(self.txhash))
        logger.warning(str(e))
    except Exception as e:
        # Unexpected failure: keep the full traceback
        logger.exception(e)
    return False
def validate_transaction_pool(self, transaction_pool):
    """Check the pool for OTS key reuse by another transaction from our PK.

    Returns False when a different transaction with the same PK and the same
    OTS key index is already pooled, True otherwise.
    """
    for pooled in transaction_pool:
        other = pooled[1].transaction
        if other.txhash == self.txhash:
            continue  # the same transaction is not a conflict
        if self.PK != other.PK:
            continue  # different signer, OTS indices are independent
        if other.ots_key == self.ots_key:
            logger.info(
                'State validation failed for %s because: OTS Public key re-use detected',
                bin2hstr(self.txhash))
            logger.info('Subtype %s', self.type)
            return False
    return True
def start_download(self):
    """Begin downloading blocks from peers unless we are already synced."""
    # FIXME: Why PoW is downloading blocks?
    # add peers and their identity to requested list
    # FMBH
    if self.sync_state.state == ESyncState.synced:
        return

    logger.info('Checking Download..')

    if self.p2p_factory.num_connections == 0:
        # Nobody to download from: treat ourselves as synced
        logger.warning('No connected peers. Moving to synced state')
        self.update_node_state(ESyncState.synced)
        return

    self.update_node_state(ESyncState.syncing)
    logger.info('Initializing download from %s', self.chain_manager.height + 1)
    self.p2p_factory.randomize_block_fetch()
def _validate_custom(self) -> bool:
    """Stateless validation for a MessageTransaction: non-empty message,
    valid optional destination address, non-negative fee."""
    if len(self.message_hash) == 0:
        logger.warning('Message cannot be empty')
        return False

    # addr_to is optional; validate only when present
    if len(self.addr_to) > 0 and \
            not OptimizedAddressState.address_is_valid(self.addr_to):
        logger.warning('[MessageTransaction] Invalid address addr_to: %s',
                       bin2hstr(self.addr_to))
        return False

    if self.fee < 0:
        logger.info(
            'State validation failed for %s because: Negative send',
            bin2hstr(self.txhash))
        return False

    return True
def monitor_bk(self):
    """Watchdog rescheduled every 60 seconds that nudges the sync state:

    - unsynced with no PoW cycle for >90s and no block for >120s:
      assume we are synced and restart the PoW cycle
    - syncing with no PB (block delivery) for >60s: fall back to unsynced
    """
    # FIXME: Too many magic numbers / timing constants
    # FIXME: This is obsolete
    time_diff1 = ntp.getTime() - self.last_pow_cycle
    if 90 < time_diff1:
        if self.sync_state.state == ESyncState.unsynced:
            if ntp.getTime() - self.last_bk_time > 120:
                self.last_pow_cycle = ntp.getTime()
                logger.info(' POW cycle activated by monitor_bk() ')
                self.update_node_state(ESyncState.synced)
                # Reschedule and return early: state change already handled
                reactor.monitor_bk = reactor.callLater(60, self.monitor_bk)
                return

    time_diff2 = ntp.getTime() - self.last_pb_time
    if self.sync_state.state == ESyncState.syncing and time_diff2 > 60:
        self.update_node_state(ESyncState.unsynced)
        self.epoch_diff = -1

    reactor.monitor_bk = reactor.callLater(60, self.monitor_bk)
def _validate_extended(self, state_container: StateContainer):
    """Stateful validation for a MultiSigCreate.

    Checks the hard-fork gate, the signatory-count limit, and that the
    creator can cover the fee.

    Fix: the insufficient-funds log line used three ``%s`` placeholders with
    only two arguments, which triggers a logging formatting error; this
    transaction type has no amount, so the format string now matches.
    """
    if state_container.block_number < state_container.current_dev_config.hard_fork_heights[0]:
        logger.warning("[MultiSigCreate] Hard Fork Feature not yet activated")
        return False

    if len(self.signatories) > state_container.current_dev_config.transaction_multi_output_limit:
        logger.warning('[MultiSigCreate] Number of signatories exceeds max limit')
        logger.warning('Number of Signatories %s', len(self.signatories))
        logger.warning('Number of Weights %s', len(self.weights))
        return False

    tx_balance = state_container.addresses_state[self.addr_from].balance
    if tx_balance < self.fee:
        logger.info('State validation failed for %s because: Insufficient funds',
                    bin2hstr(self.txhash))
        # Was: 'balance: %s, fee: %s, amount: %s' with only two arguments
        logger.info('balance: %s, fee: %s', tx_balance, self.fee)
        return False

    return True
def __init__(self, db_dir=None):
    """Open (or create) the LevelDB state database.

    :param db_dir: optional override for the database directory; defaults to
                   config.user.data_dir / config.dev.db_name
    """
    self.db_dir = os.path.join(config.user.data_dir, config.dev.db_name)
    if db_dir:
        self.db_dir = db_dir
    logger.info('DB path: %s', self.db_dir)

    os.makedirs(self.db_dir, exist_ok=True)
    try:
        # Fast path: open an already-existing database
        self.db = plyvel.DB(self.db_dir,
                            max_open_files=1000,
                            lru_cache_size=5 * 1024)
    except Exception:
        # Fallback: database does not exist yet, create it with snappy compression
        self.db = plyvel.DB(self.db_dir,
                            max_open_files=1000,
                            lru_cache_size=5 * 1024,
                            create_if_missing=True,
                            compression='snappy')
    # NOTE(review): version key is (re)written on every open, not only on
    # creation - confirm this is intended
    self.db.put(b'state_version', str(1).encode())
def handle_fetch_block(
        self, source,
        message: xrdlegacy_pb2.LegacyMessage):  # Fetch Request for block
    """
    Fetch Block
    Serves a peer's FB request for a single block by number, replying
    with a PB message. Requests outside (0, chain_height] are ignored.
    :return:
    """
    P2PBaseObserver._validate_message(message, xrdlegacy_pb2.LegacyMessage.FB)

    requested_number = message.fbData.index
    logger.info(' Request for %s by %s', requested_number, source.peer)

    # Serve only block numbers within the range we actually hold
    if not (0 < requested_number <= source.factory.chain_height):
        return

    block = source.factory.get_block_by_number(requested_number)
    reply = xrdlegacy_pb2.LegacyMessage(
        func_name=xrdlegacy_pb2.LegacyMessage.PB,
        pbData=xrdlegacy_pb2.PBData(block=block.pbdata))
    source.send(reply)
def _mine_next(self, parent_block):
    """Prepare a template for the child of ``parent_block`` and start mining,
    unless mining is currently suspended or disabled in the user config."""
    if ntp.getTime() < self.suspend_mining_timestamp:
        return
    if not config.user.mining_enabled:
        return

    logger.debug('try get_block_metadata')
    parent_metadata = self.chain_manager.get_block_metadata(
        parent_block.headerhash)

    logger.debug('try prepare_next_unmined_block_template')
    next_block_number = parent_block.block_number + 1
    dev_config = self.chain_manager.get_config_by_block_number(
        next_block_number)
    self.miner.prepare_next_unmined_block_template(
        mining_address=self.mining_address,
        tx_pool=self.chain_manager.tx_pool,
        parent_block=parent_block,
        parent_difficulty=parent_metadata.block_difficulty,
        dev_config=dev_config)

    logger.info('Mining Block #%s', next_block_number)
    self.miner.start_mining(parent_block, parent_metadata.block_difficulty,
                            dev_config)
def revert(self, state: State, state_container: StateContainer) -> bool:
    """Undo this MultiSigVote's effect on state: roll back the vote stats,
    refund the fee, and revert the OTS/PK bookkeeping.

    :return: True on success, False when the vote could not be reverted
    """
    vote_stats = state_container.votes_stats[self.shared_key]
    multi_sig_address = vote_stats.multi_sig_address

    # The voter's weight in the multi-sig address is needed to back out its vote
    weight, found = state_container.addresses_state[
        multi_sig_address].get_weight_by_signatory(self.addr_from)
    if not found:
        logger.info(
            "[MultiSigVote] Address is not the signatory for the multi sig address"
        )
        return False

    if not vote_stats.revert_vote_stats(self, weight, state_container):
        logger.info("[MultiSigVote] Failed to revert vote_stats")
        return False

    address_state = state_container.addresses_state[self.addr_from]
    # Refund the fee previously paid by the voter
    address_state.update_balance(state_container, self.fee)
    state_container.paginated_tx_hash.remove(address_state, self.txhash)

    return self._revert_state_changes_for_PK(state_container)