def remove_multi_sig_address_state(state: State, multi_sig_address: bytes, batch=None):
    """Delete a multi-sig address entry from the state database.

    Best-effort: a failing delete (missing key, storage error) is logged
    and swallowed so callers are never interrupted.

    :param state: State wrapper around the underlying key-value DB
    :param multi_sig_address: raw multi-sig address used as the DB key
    :param batch: optional write batch to group the delete into
    """
    try:
        state._db.delete(multi_sig_address, batch)
    except Exception as e:
        # Bug fix: the original was logger.warning("Exception ", e) -- the
        # exception was passed as a lazy format argument with no %s
        # placeholder, which breaks log formatting instead of printing it.
        logger.warning("Exception %s", e)
def monitor_connections(self):
    """Periodic maintenance of peer connections.

    Re-schedules itself, refills the candidate queue from known peers when
    no connection exists, always (re)connects configured peers, and tops up
    connections from the queue until the max-peers limit is reached.
    """
    # Re-arm the periodic timer first so a failure below cannot stop monitoring.
    reactor.callLater(config.user.monitor_connections_interval, self.monitor_connections)

    if len(self._peer_connections) == 0:
        logger.warning('No Connected Peer Found')
        # Seed the candidate queue from the persisted known-peers list.
        known_peers = self._xrd_node.peer_manager.load_known_peers()
        self._peer_q.extend(known_peers)

    # Snapshot of currently-connected addresses for O(1) membership checks.
    connected_peers_set = set()
    for conn_protocol in self._peer_connections:
        connected_peers_set.add(conn_protocol.peer.full_address)

    # Explicitly configured peers: never left waiting in the queue, and
    # connected immediately when not already connected.
    for peer_item in config.user.peer_list:
        peer_metadata = IPMetadata.from_full_address(peer_item)
        if peer_metadata.full_address in self._peer_q:
            self._peer_q.remove(peer_metadata.full_address)
        if peer_metadata.full_address not in connected_peers_set:
            self.connect_peer([peer_metadata.full_address])

    if len(self._peer_connections) >= config.user.max_peers_limit:
        return

    if len(self._peer_q) == 0:
        return

    # Drain at most min(10, limit) queued candidates this round and dial them.
    peer_address_list = []
    max_length = min(10, config.user.max_peers_limit)
    while len(self._peer_q) > 0 and len(peer_address_list) != max_length:
        peer_address_list.append(self._peer_q.pop(0))
    self.connect_peer(peer_address_list)
def start_mining(self, mining_block: Block, current_target: bytes, dev_config: DevConfig):
    """Cancel any running mining job and start mining `mining_block`.

    Runs under self.lock so it cannot race handleEvent / cancel.

    :param mining_block: block template to mine
    :param current_target: PoW target the solution hash must meet
    :param dev_config: dev config used to derive the blob and nonce offset
    """
    try:
        logger.debug('start_mining - TRY LOCK')
        with self.lock:
            logger.debug('start_mining - LOCKED')
            self.cancel()  # stop the previous job before reconfiguring

            mining_blob = mining_block.mining_blob(dev_config)
            nonce_offset = mining_block.mining_nonce_offset(dev_config)

            # Keep the template and config for when a solution event arrives.
            self._dev_config = dev_config
            self._mining_block = mining_block
            work_seq_id = self.start(input=mining_blob,
                                     nonceOffset=nonce_offset,
                                     target=current_target,
                                     thread_count=self._mining_thread_count)

            logger.debug("MINING START [{}]".format(work_seq_id))

    except Exception as e:
        logger.warning("Exception in start_mining")
        logger.exception(e)

    # Reached on both the success and the exception path.
    logger.debug('start_mining - UNLOCKED')
def handleEvent(self, event):
    """Handle a solution event emitted by the native miner.

    Returns False when the lock is busy (event skipped), True otherwise.
    """
    # NOTE: This function usually runs in the context of a C++ thread
    if event.type == SOLUTION:
        logger.debug('handleEvent - TRY LOCK')
        # Non-blocking acquire: if start_mining/cancel holds the lock this
        # solution belongs to a stale job, so it is deliberately dropped.
        if not self.lock.acquire(blocking=False):
            logger.debug('handleEvent - SKIP')
            return False

        try:
            logger.debug('handleEvent - LOCKED')
            logger.debug('Solution Found %s', event.nonce)
            logger.info('Hash Rate: %s H/s', self.hashRate())
            # Deep copy so the template can keep being reused/mutated safely.
            cloned_block = copy.deepcopy(self._mining_block)
            cloned_block.set_nonces(self._dev_config, event.nonce, 0)
            logger.debug("Blob %s", cloned_block)
            logger.info('Block #%s nonce: %s', cloned_block.block_number, event.nonce)
            self.pre_block_logic(cloned_block)
        except Exception as e:
            logger.warning("Exception in solutionEvent")
            logger.exception(e)
        finally:
            logger.debug('handleEvent - UNLOCK')
            self.lock.release()

    return True
def _validate_custom(self): if self.fee < 0: logger.warning('MultiSigVote [%s] Invalid Fee = %d', bin2hstr(self.txhash), self.fee) return False return True
def handle_block_height(self, source, message: xrdlegacy_pb2.LegacyMessage):
    """
    Sends / Receives Blockheight

    A block_number of 0 in the payload is a request: reply with our own
    height/headerhash/cumulative difficulty. Otherwise the payload is the
    peer's answer: validate it and record the peer's blockheight.

    :param source: connection the message arrived on
    :param message: legacy BH message
    :return:
    """
    if message.bhData.block_number == 0:
        block = source.factory.last_block
        cumulative_difficulty = source.factory.get_cumulative_difficulty()
        # Nothing useful to report while still at genesis.
        if block.block_number == 0:
            return
        bhdata = xrd_pb2.BlockHeightData(block_number=block.block_number,
                                         block_headerhash=block.headerhash,
                                         cumulative_difficulty=bytes(cumulative_difficulty))
        msg = xrdlegacy_pb2.LegacyMessage(func_name=xrdlegacy_pb2.LegacyMessage.BH,
                                          bhData=bhdata)
        source.send(msg)
        return

    # Sanity-check the peer-supplied difficulty; a malformed value means a
    # misbehaving peer, so drop the connection.
    try:
        UInt256ToString(message.bhData.cumulative_difficulty)
    except ValueError:
        logger.warning('Invalid Block Height Data')
        source.loseConnection()
        return

    source.factory.update_peer_blockheight(source.peer.full_address,
                                           message.bhData.block_number,
                                           message.bhData.block_headerhash,
                                           message.bhData.cumulative_difficulty)
def remove_tx_metadata(state: State, txn, batch) -> bool:
    """Delete the stored metadata for ``txn`` (keyed by its txhash).

    :param state: State wrapper around the key-value DB
    :param txn: transaction whose metadata entry is removed
    :param batch: write batch the delete is grouped into
    :return: True on success, False when the key was missing (logged).
    """
    try:
        state._db.delete(txn.txhash, batch)
        return True
    except KeyError:
        logger.warning("Error removing tx metadata")
        return False
def handle_message_received(source, message: xrdlegacy_pb2.LegacyMessage):
    """
    Message Receipt
    This function accepts message receipt from peer,
    checks if the message hash already been received or not.
    In case its a already received message, it is ignored.
    Otherwise the request is made to get the full message.
    :return:
    """
    mr_data = message.mrData
    msg_hash = mr_data.hash

    # FIXME: Separate into respective message handlers

    if mr_data.type not in MessageReceipt.allowed_types:
        return

    # Transactions are only interesting once we are fully synced.
    if mr_data.type == xrdlegacy_pb2.LegacyMessage.TX and source.factory.sync_state.state != ESyncState.synced:
        return

    if mr_data.type == xrdlegacy_pb2.LegacyMessage.TX:
        # Mining is suspended: ignore incoming tx receipts for now.
        if ntp.getTime() < source.factory.pow.suspend_mining_timestamp:
            return

        if source.factory._chain_manager.tx_pool.is_full_pending_transaction_pool():
            logger.warning('TX pool size full, incoming tx dropped. mr hash: %s',
                           bin2hstr(msg_hash))
            return

    if mr_data.type == xrdlegacy_pb2.LegacyMessage.BK:
        # Reject blocks too far ahead of or behind our chain tip.
        if mr_data.block_number > source.factory.chain_height + config.dev.max_margin_block_number:
            logger.debug('Skipping block #%s as beyond lead limit', mr_data.block_number)
            return
        if mr_data.block_number < source.factory.chain_height - config.dev.min_margin_block_number:
            logger.debug('Skipping block #%s as beyond the limit', mr_data.block_number)
            return
        # Without the parent header the block cannot be connected.
        if not source.factory.is_block_present(mr_data.prev_headerhash):
            logger.debug('Skipping block #%s as prev_headerhash not found', mr_data.block_number)
            return

    # Already have the full message: nothing to request.
    if source.factory.master_mr.contains(msg_hash, mr_data.type):
        return

    # Remember this peer as a source for the message before requesting it.
    source.factory.master_mr.add_peer(msg_hash, mr_data.type, source, mr_data)

    if source.factory.master_mr.is_callLater_active(msg_hash):  # Ignore if already requested
        return

    source.factory.request_full_message(mr_data)
def remove(self, addr) -> bool:
    """Remove the wallet entry whose qaddress matches ``addr``.

    The wallet file is re-saved immediately after a successful removal.

    :param addr: qaddress string identifying the entry to drop
    :return: True when the entry was removed and the wallet saved,
             False when no entry matched or the removal failed.
    """
    for item in self._address_items:
        if item.qaddress == addr:
            try:
                self._address_items.remove(item)
                self.save_wallet(self.wallet_path)
                return True
            except ValueError:
                # list.remove failed (item no longer present); log and keep
                # scanning the remaining items.
                logger.warning("Could not remove address from wallet")
    return False
def _parse_buffer(self, total_read):
    # FIXME: This parsing/wire protocol needs to be replaced
    """
    Generator: parse length-prefixed LegacyMessages out of self._buffer.

    Each frame is a 4-byte big-endian length followed by the serialized
    message. Consumed bytes are accumulated into total_read[0] so the
    caller can acknowledge them.

    >>> from pyxrdlib.pyxrdlib import hstr2bin
    >>> p=P2PProtocol()
    >>> p._buffer = bytes(hstr2bin('000000191a170a0776657273696f6e120c67656e657369735f68617368'+ \
                                   '000000191a170a0776657273696f6e120c67656e657369735f68617368'))
    >>> messages = p._parse_buffer([0])
    >>> len(list(messages))
    2
    """
    chunk_size = 0

    while self._buffer:
        if len(self._buffer) < 5:
            # Buffer is still incomplete as it doesn't have message size
            return

        ignore_skip = False

        try:
            chunk_size_raw = self._buffer[:4]
            chunk_size = struct.unpack('>L', chunk_size_raw)[0]  # is m length encoded correctly?

            if chunk_size <= 0:
                logger.debug("<X< %s", bin2hstr(self._buffer))
                raise Exception("Invalid chunk size <= 0")

            if chunk_size > config.dev.message_buffer_size:
                raise Exception("Invalid chunk size > message_buffer_size")

            if len(self._buffer) - 4 < chunk_size:  # As 4 bytes includes chunk_size_raw
                ignore_skip = True  # Buffer is still incomplete as it doesn't have message so skip moving buffer
                return

            message_raw = self._buffer[4:4 + chunk_size]
            message = xrdlegacy_pb2.LegacyMessage()
            message.ParseFromString(message_raw)
            yield message

        except Exception as e:  # no qa
            # Any malformed frame is treated as hostile: ban and drop.
            logger.warning("Problem parsing message. Banning+Dropping connection")
            logger.exception(e)
            self.peer_manager.ban_channel(self)

        finally:
            # Advance past the frame (header + payload) unless the frame was
            # incomplete; also runs when the generator is closed mid-yield.
            if not ignore_skip:
                skip = 4 + chunk_size
                self._buffer = self._buffer[skip:]
                total_read[0] += skip
def add_tx_from_block_to_pool(self, block: Block, current_block_number):
    """
    Move every non-coinbase transaction of a block into the transaction pool.

    Stops at the first transaction the pool refuses to accept.

    :param block: block whose transactions are recycled into the pool
    :param current_block_number: height used for pool admission
    :return:
    """
    for raw_tx in block.transactions[1:]:  # index 0 is the coinbase
        tx = Transaction.from_pbdata(raw_tx)
        if self.add_tx_to_pool(tx, current_block_number):
            continue
        logger.warning('Failed to Add transaction into transaction pool')
        logger.warning('Block #%s %s', block.block_number, bin2hstr(block.headerhash))
        return
def put_tx_metadata(state: State, txn: Transaction, block_number: int,
                    timestamp: int, batch) -> bool:
    """Persist metadata for ``txn`` keyed by its txhash.

    :param state: State wrapper around the key-value DB
    :param txn: transaction being recorded
    :param block_number: height the transaction was included at
    :param timestamp: block timestamp
    :param batch: write batch the put is grouped into
    :return: True on success; False when creation/serialization/storage
             failed (the failure is logged, never raised).
    """
    try:
        metadata = TransactionMetadata.create(tx=txn,
                                              block_number=block_number,
                                              timestamp=timestamp)
        state._db.put_raw(txn.txhash, metadata.serialize(), batch)
        return True
    except Exception:
        logger.warning("Error writing tx metadata")
        return False
def check_stale_txn(self, new_state_container, update_state_container, current_block_number):
    """Re-validate stale pool transactions; drop invalid ones, rebroadcast the rest.

    :param new_state_container: factory/state used to build validation context
    :param update_state_container: callable updating the container per tx
    :param current_block_number: current chain height
    """
    # Manual index instead of a for-loop because remove_tx_from_pool mutates
    # the list mid-iteration; on removal the index is intentionally NOT
    # advanced so the element shifted into slot i is examined next.
    i = 0
    while i < len(self.transaction_pool):
        tx_info = self.transaction_pool[i][1]
        if tx_info.is_stale(current_block_number):
            if not tx_info.validate(new_state_container, update_state_container, current_block_number):
                logger.warning('Txn validation failed for tx in tx_pool')
                self.remove_tx_from_pool(tx_info.transaction)
                continue
            # Still valid: refresh its height and announce it again.
            tx_info.update_block_number(current_block_number)
            self.broadcast_tx(tx_info.transaction)
        i += 1
def connect_peer(self, full_address_list):
    """Open a TCP connection to every address not already connected.

    Malformed addresses and connection-setup errors are logged and the
    remaining addresses are still attempted.

    :param full_address_list: iterable of 'ip:port' strings
    """
    for candidate in full_address_list:
        try:
            meta = IPMetadata.from_full_address(candidate)
            if meta.full_address not in self.get_connected_peer_addrs():
                reactor.connectTCP(meta.ip, meta.port, self)
        except Exception as e:
            logger.warning("Could not connect to %s - %s", candidate, str(e))
def remove(self, address_state, value: bytes):
    """Pop ``value`` from the top of this address's paginated storage.

    :param address_state: owner whose per-name counter tracks the stack size
    :param value: value expected at the top of the current page
    :raises Exception: when the stored top value does not match ``value``
    """
    # Decrement first: the counter then points at the slot holding the value
    # being removed, and generate_key below uses that decremented count.
    address_state.update_counter_by_name(self.name, value=1, subtract=True)
    key = address_state.address
    count = address_state.get_counter_by_name(self.name)
    storage_key = self.generate_key(address_state.address, count)
    # Lazily load the page into the in-memory cache on first touch.
    if storage_key not in self.key_value:
        self.key_value[storage_key] = self.get_paginated_data(key, count)
    # Removal is only legal for the most recently appended value.
    if self.key_value[storage_key][-1] != value:
        logger.warning("Expected value %s", self.key_value[storage_key][-1])
        logger.warning("Found value %s", value)
        raise Exception("Unexpected value into storage")
    del self.key_value[storage_key][-1]
def get_last_txs(state: State):
    """Return the most recent transactions stored under b'last_txn'.

    :param state: State wrapper around the key-value DB
    :return: list of Transaction objects; empty when the key is missing or
             any storage/deserialization error occurs (logged).
    """
    try:
        last_txn = LastTransactions.deserialize(state._db.get_raw(b'last_txn'))
    except KeyError:
        # No last-transactions record has been written yet.
        return []
    except Exception as e:  # noqa
        logger.warning("[get_last_txs] Exception during call %s", e)
        return []

    return [Transaction.from_pbdata(md.transaction) for md in last_txn.tx_metadata]
def validate(self, verify_signature=True) -> bool:
    """
    Wrap validate_or_raise, logging any failure and returning a bool.

    The main purpose is to avoid exceptions and accommodate legacy code.

    :param verify_signature: forwarded to validate_or_raise
    :return: True if the transaction is valid
    :rtype: bool
    """
    try:
        self.validate_or_raise(verify_signature)
        return True
    except ValueError as e:
        # Expected validation failure: log which tx failed and why.
        logger.info('[%s] failed validate_tx', bin2hstr(self.txhash))
        logger.warning(str(e))
        return False
    except Exception as e:
        # Unexpected failure: keep the traceback but still report invalid.
        logger.exception(e)
        return False
def verify_wallet(self):
    """
    Confirm the wallet's JSON address data is correct and valid.

    Verification rebuilds the XMSS trees, so it is time consuming.
    Encrypted wallets cannot be verified this way and pass trivially.

    :return: True if valid
    """
    total = len(self._address_items)

    if self.encrypted:
        return True

    try:
        for idx in range(total):
            self._get_xmss_by_index_no_cache(idx)
        return True
    except Exception as e:
        logger.warning(e)
        return False
def state_migration_step_2(self, state: State):
    """
    Migration Step from State Version 0 to 1

    Swaps the migrated DB directory ("<db_name>2") into place: the old DB
    is parked as "<db_name>3" and the migrated one is renamed to the live
    db_name, then the state's DB handle is reopened.
    :return:
    """
    # Drop the temporary migration state and close the old DB handle so the
    # underlying files can be moved.
    del self._tmp_state
    self._tmp_state = None
    del state._db
    # Park the old (pre-migration) DB directory as "<db_name>3".
    tmp_db_dir = os.path.join(config.user.data_dir, config.dev.db_name + "3")
    db_dir = os.path.join(config.user.data_dir, config.dev.db_name)
    shutil.move(db_dir, tmp_db_dir)
    # Promote the migrated DB ("<db_name>2") to the live directory.
    tmp_db_dir = os.path.join(config.user.data_dir, config.dev.db_name + "2")
    shutil.move(tmp_db_dir, db_dir)
    # Reopen the DB on the now-live directory.
    state._db = db.DB()
    logger.warning("State Migration Finished")
def get_ntp_response():
    """Query the configured NTP servers round-robin until one answers.

    :return: the first successful NTP response
    Exits the process (-1) when every retry fails.
    """
    for attempt in range(NTP_RETRIES):
        servers = config.user.ntp_servers
        server = servers[attempt % len(servers)]
        try:
            return NTPClient().request(server,
                                       version=NTP_VERSION,
                                       timeout=config.user.ntp_request_timeout)
        except Exception as e:
            logger.warning(e)

    # FIXME: Provide some proper clean before exiting
    logger.fatal("Could not contact NTP servers after %d retries", NTP_RETRIES)
    sys.exit(-1)
def _validate_custom(self) -> bool: if len(self.message_hash) == 0: logger.warning('Message cannot be empty') return False if len(self.addr_to) > 0 and not ( OptimizedAddressState.address_is_valid(self.addr_to)): logger.warning('[MessageTransaction] Invalid address addr_to: %s', bin2hstr(self.addr_to)) return False if self.fee < 0: logger.info( 'State validation failed for %s because: Negative send', bin2hstr(self.txhash)) return False return True
def start_download(self):
    """Enter the syncing state and start fetching blocks from peers."""
    # FIXME: Why PoW is downloading blocks?
    # add peers and their identity to requested list
    # FMBH
    if self.sync_state.state == ESyncState.synced:
        return

    logger.info('Checking Download..')

    # Nobody to download from: treat the node as synced for now.
    if self.p2p_factory.num_connections == 0:
        logger.warning('No connected peers. Moving to synced state')
        self.update_node_state(ESyncState.synced)
        return

    self.update_node_state(ESyncState.syncing)
    logger.info('Initializing download from %s', self.chain_manager.height + 1)
    self.p2p_factory.randomize_block_fetch()
def update_pending_tx_pool(self, tx, ip, ignore_reserve=True) -> bool:
    """Queue ``tx`` into the fee-ordered pending pool.

    :param tx: incoming transaction (CoinBase is always rejected here)
    :param ip: address of the peer that sent the transaction
    :param ignore_reserve: forwarded to the pool-full check
    :return: True when queued; False when the pool is full or the tx is a
             duplicate/CoinBase.
    """
    if self.is_full_pending_transaction_pool(ignore_reserve):
        return False

    if self.get_tx_index_from_pool(tx.txhash) > -1:
        return False

    if isinstance(tx, CoinBase):
        logger.warning('Rejected CoinBase Transaction as received without block')
        return False

    if tx.txhash in self.pending_tx_pool_hash:
        return False

    # Since its a min heap giving priority to lower number
    # So -1 multiplied to give higher priority to higher txn
    entry = [tx.fee * -1, TransactionInfo(tx, -1), ip]
    heapq.heappush(self.pending_tx_pool, entry)
    self.pending_tx_pool_hash.add(tx.txhash)

    return True
def dataReceived(self, data: bytes) -> None:
    """Twisted callback: buffer incoming bytes, dispatch parsed messages,
    then acknowledge the bytes consumed.

    :param data: raw bytes received from the peer
    """
    self._buffer += data
    total_read = len(self._buffer)

    # Oversized buffer means a misbehaving peer: disconnect immediately.
    if total_read > config.dev.max_bytes_out:
        logger.warning('Disconnecting peer %s', self.peer)
        logger.warning('Buffer Size %s', len(self._buffer))
        self.loseConnection()
        return

    # One-element list so _parse_buffer can report consumed bytes by mutation.
    read_bytes = [0]

    msg = None
    for msg in self._parse_buffer(read_bytes):
        self.update_counters()
        self.in_counter += 1
        if self.in_counter > self.rate_limit * IN_FACTOR:
            logger.warning("Rate Limit hit by %s %s", self.peer.ip, self.peer.port)
            self.peer_manager.ban_channel(self)
            return

        if self._valid_message_count < config.dev.trust_min_msgcount * 2:
            # Avoid overflows
            self._valid_message_count += 1

        self._observable.notify(msg)

    # Acknowledge consumed bytes, but never ack an ack (would ping-pong).
    if msg is not None and read_bytes[0] and msg.func_name != xrdlegacy_pb2.LegacyMessage.P2P_ACK:
        p2p_ack = xrd_pb2.P2PAcknowledgement(bytes_processed=read_bytes[0])
        msg = xrdlegacy_pb2.LegacyMessage(func_name=xrdlegacy_pb2.LegacyMessage.P2P_ACK,
                                          p2pAckData=p2p_ack)
        self.send(msg)
def validate_or_raise(self, verify_signature=True) -> bool:
    """
    Validate the transaction, raising ValueError on the first failure.

    :param verify_signature: when True, also verify the transaction hash
                             and the XMSS signature
    :return: True when every check passes
    :rtype: bool
    :raises ValueError: custom-validation, hash, or signature failure
    """
    if not self._validate_custom():
        raise ValueError("Custom validation failed")

    self._coinbase_filter()

    expected_transaction_hash = self.generate_txhash()

    if verify_signature:
        if self.txhash != expected_transaction_hash:
            logger.warning('Invalid Transaction hash')
            logger.warning('Expected Transaction hash %s', bin2hstr(expected_transaction_hash))
            logger.warning('Found Transaction hash %s', bin2hstr(self.txhash))
            raise ValueError("Invalid Transaction Hash")

        # Temporarily disabled following new added lines.
        # TODO: Review Juan
        # if not XMSS.validate_signature(self.signature, self.PK):
        #     raise ValueError("Invalid xmss signature")
        if not XmssFast.verify(self.get_data_hash(), self.signature, self.PK):
            raise ValueError("Invalid xmss signature")

    return True
def _validate_extended(self, state_container: StateContainer) -> bool:
    """Stateful validation for a Slave transaction.

    Checks list-size limits, fee affordability, (pre-hard-fork) slave PK
    length, and that none of the PKs is already a slave of this address.

    :param state_container: current chain state snapshot
    :return: True when all checks pass
    """
    limit = state_container.current_dev_config.transaction_multi_output_limit
    if len(self.slave_pks) > limit or len(self.access_types) > limit:
        logger.warning('List has more than %s slave pks or access_types', limit)
        logger.warning('Slave pks len %s', len(self.slave_pks))
        logger.warning('Access types len %s', len(self.access_types))
        return False

    tx_balance = state_container.addresses_state[self.addr_from].balance
    if tx_balance < self.fee:
        logger.info('Slave: State validation failed for %s because: Insufficient funds',
                    bin2hstr(self.txhash))
        logger.info('balance: %s, amount: %s', tx_balance, self.fee)
        return False

    # The PK-length restriction only applies before the first hard fork.
    before_hard_fork = (state_container.block_number <
                        state_container.current_dev_config.hard_fork_heights[0])
    for slave_pk in self.slave_pks:
        if before_hard_fork:
            if len(slave_pk) > state_container.current_dev_config.slave_pk_max_length:
                logger.info("[Slave Transaction] Slave PK length is beyond limit")
                return False
        if (self.addr_from, slave_pk) in state_container.slaves.data:
            logger.info("[Slave Transaction] Invalid slave transaction as %s is already a slave for this address",
                        slave_pk)
            return False

    return True
def _validate_extended(self, state_container: StateContainer) -> bool:
    """Stateful validation for a MessageTransaction.

    Rejects addr_to usage before the hard fork, over-long messages, and
    insufficient balance for the fee.

    :param state_container: current chain state snapshot
    :return: True when all checks pass
    """
    dev_config = state_container.current_dev_config

    # A non-empty addr_to is a hard-fork feature.
    if self.addr_to:
        if state_container.block_number < dev_config.hard_fork_heights[0]:
            logger.warning("[MessageTransaction] Hard Fork Feature not yet activated")
            return False

    if len(self.message_hash) > dev_config.message_max_length:  # TODO: Move to dev config
        logger.warning('Message length cannot be more than %s',
                       dev_config.message_max_length)
        logger.warning('Found message length %s', len(self.message_hash))
        return False

    tx_balance = state_container.addresses_state[self.addr_from].balance
    if tx_balance < self.fee:
        logger.info('State validation failed for %s because: Insufficient funds',
                    bin2hstr(self.txhash))
        logger.info('balance: %s, amount: %s', tx_balance, self.fee)
        return False

    return True
def pre_block_logic(self, block: Block):
    """Validate and apply an incoming/mined block under the miner lock.

    Future blocks are deferred; accepted blocks are broadcast and may
    re-trigger mining on the new chain tip.

    :param block: block to process
    :return: True when accepted (or deferred as a future block), False
             when validation or chain insertion failed
    """
    logger.debug('LOCK - TRY - pre_block_logic')
    # The miner lock serializes block application against solution handling.
    with self.miner.lock:
        logger.debug('LOCK - LOCKED - pre_block_logic')

        if not block.validate(self.chain_manager, self.future_blocks):
            logger.warning('Block Validation failed for #%s %s',
                           block.block_number, bin2hstr(block.headerhash))
            return False

        dev_config = self.chain_manager.get_config_by_block_number(block.block_number)
        # Block timestamp is ahead of our clock: park it and retry later.
        if block.is_future_block(dev_config):
            delay = abs(block.timestamp - ntp.getTime()) + 1
            reactor.callLater(delay, self.process_future_blocks)
            self.add_future_block(block)
            return True

        logger.debug('Inside add_block')
        result = self.chain_manager.add_block(block)
        logger.debug('trigger_miner %s', self.chain_manager.trigger_miner)

        # Restart mining on the new tip even when this block was rejected,
        # since add_block may still have changed the chain state.
        if self.chain_manager.trigger_miner:
            logger.debug('try last block')
            last_block = self.chain_manager.last_block
            logger.debug('got last block')
            self._mine_next(last_block)

        if not result:
            logger.debug('Block Rejected %s %s',
                         block.block_number, bin2hstr(block.headerhash))
            return False

        # Broadcast outside the current call stack via the reactor.
        reactor.callLater(0, self.broadcast_block, block)

        logger.debug('LOCK - RELEASE - pre_block_logic')
        return result
def prepare_next_unmined_block_template(self, mining_address, tx_pool,
                                        parent_block: Block, parent_difficulty,
                                        dev_config: DevConfig):
    """Build the next block template and derive its difficulty/target.

    Cancels any in-flight mining job under the lock, creates a fresh
    candidate block on top of ``parent_block``, and computes the new
    difficulty/target from the measured block interval.

    :param mining_address: address receiving the coinbase reward
    :param tx_pool: transaction pool used to fill the template
    :param parent_block: block the new template extends
    :param parent_difficulty: difficulty of the parent block
    :param dev_config: dev config active at the template's height
    """
    miner = self.get_miner(parent_block.block_number + 1, dev_config)
    try:
        logger.debug('Miner-Try - prepare_next_unmined_block_template')
        with self.lock:
            logger.debug('Miner-Locked - prepare_next_unmined_block_template')
            logger.debug('Miner-TryCancel - prepare_next_unmined_block_template')
            miner.cancel()
            logger.debug('Miner-Cancel - prepare_next_unmined_block_template')

            self._mining_block = self.create_block(last_block=parent_block,
                                                   mining_nonce=0,
                                                   tx_pool=tx_pool,
                                                   miner_address=mining_address)

            parent_metadata = self._chain_manager.get_block_metadata(parent_block.headerhash)
            self._measurement = self._chain_manager.get_measurement(dev_config,
                                                                    self._mining_block.timestamp,
                                                                    self._mining_block.prev_headerhash,
                                                                    parent_metadata)

            self._current_difficulty, self._current_target = DifficultyTracker.get(
                measurement=self._measurement,
                parent_difficulty=parent_difficulty,
                dev_config=dev_config)
    except Exception as e:
        # Bug fix: this previously logged "Exception in start_mining"
        # (copy-pasted from start_mining), misattributing failures here
        # to the wrong method.
        logger.warning("Exception in prepare_next_unmined_block_template")
        logger.exception(e)
def _validate_extended(self, state_container: StateContainer) -> bool:
    """Stateful validation for a TransferTokenTransaction.

    Checks output-list limits, non-negative amounts/fee, fee affordability,
    token ownership, and a sufficient token balance.

    :param state_container: current chain state snapshot
    :return: True when all checks pass
    """
    # Both the destination list and the amount list are bounded.
    if (len(self.addrs_to) > state_container.current_dev_config.
            transaction_multi_output_limit
            or len(self.amounts) > state_container.current_dev_config.
            transaction_multi_output_limit):
        logger.warning(
            '[TransferTokenTransaction] Number of addresses or amounts exceeds max limit'
        )
        logger.warning('Number of addresses %s', len(self.addrs_to))
        logger.warning('Number of amounts %s', len(self.amounts))
        return False

    if len(self.addrs_to) == 0:
        logger.warning("[TransferTokenTransaction] No Addrs To found")
        return False

    tx_balance = state_container.addresses_state[self.addr_from].balance
    total_amount = self.total_amount

    if self.fee < 0 or total_amount < 0:
        logger.info(
            '[TransferTokenTransaction] State validation failed for %s because: ',
            bin2hstr(self.txhash))
        logger.info('Txn amount: %s, Fee: %s', total_amount, self.fee)
        return False

    # The fee is paid in the native coin, not in the token being moved.
    if tx_balance < self.fee:
        logger.info(
            '[TransferTokenTransaction] State validation failed for %s because: Insufficient funds',
            bin2hstr(self.txhash))
        logger.info('balance: %s, Fee: %s', tx_balance, self.fee)
        return False

    # The sender must hold a balance record for this specific token.
    if (self.addr_from,
            self.token_txhash) not in state_container.tokens.data:
        logger.info('%s doesnt own any such token %s ',
                    bin2hstr(self.addr_from), bin2hstr(self.token_txhash))
        return False

    token_balance = state_container.tokens.data[(self.addr_from,
                                                 self.token_txhash)]
    if token_balance.balance < total_amount:
        logger.info('Insufficient amount of token')
        logger.info('Token Balance: %s, Sent Token Amount: %s',
                    token_balance.balance, total_amount)
        return False

    return True