def add_connection(self, conn_protocol) -> bool:
    """Register a new channel, enforcing ban / redundancy / connection limits.

    Returns True when the connection was accepted and tracked.
    """
    # TODO: Most of this can go peer manager
    if self._qrl_node.peer_manager.is_banned(conn_protocol.peer):
        return False

    # Count existing connections that share this peer's IP.
    same_ip_count = sum(1 for c in self._peer_connections
                        if c.peer.ip == conn_protocol.peer.ip)

    if config.user.max_redundant_connections >= 0:
        if same_ip_count >= config.user.max_redundant_connections:
            logger.info('Redundant Limit. Disconnecting client %s', conn_protocol.peer)
            return False

    if self.reached_conn_limit:
        # FIXME: Should we stop listening to avoid unnecessary load due to many connections?
        logger.info('Peer limit hit. Disconnecting client %s', conn_protocol.peer)
        return False

    # Remove your own ip address from the connection
    if conn_protocol.peer.ip == conn_protocol.host.ip and conn_protocol.peer.port == config.user.p2p_public_port:
        peer_list = [p for p in self._qrl_node.peer_manager.known_peer_addresses
                     if p != conn_protocol.peer.full_address]
        self._qrl_node.peer_manager.extend_known_peers(peer_list)
        return False

    self._peer_connections.append(conn_protocol)
    logger.debug('>>> new connection: %s ', conn_protocol.peer)
    return True
def add_connection(self, conn_protocol) -> bool:
    """Accept a new P2P channel unless the peer is banned, the limit is hit,
    or the connection is to ourselves.

    BUG FIX: the self-connection test compared ``conn_protocol.peer_ip`` against
    ``config.user.p2p_port`` — an IP string against a port number — so the
    branch could never trigger. It now compares the peer's *port* against the
    configured p2p port (matching the sibling implementations of this method).
    """
    # TODO: Most of this can go peer manager
    if self._qrl_node.is_banned(conn_protocol.addr_remote):
        conn_protocol.loseConnection()
        return False

    if self.reached_conn_limit:
        # FIXME: Should we stop listening to avoid unnecessary load due to many connections?
        logger.info('Peer limit hit. Disconnecting client %s', conn_protocol.addr_remote)
        conn_protocol.loseConnection()
        return False

    # Self-connection: same IP as our host and our own configured p2p port.
    if conn_protocol.peer_ip == conn_protocol.host_ip and conn_protocol.peer_port == config.user.p2p_port:
        peer_list = [p for p in self._qrl_node.peer_addresses
                     if p != conn_protocol.addr_remote]
        self._qrl_node.peer_manager.update_peer_addresses(peer_list)
        conn_protocol.loseConnection()
        return False

    self._peer_connections.append(conn_protocol)
    self._qrl_node.add_peer(conn_protocol)
    logger.debug('>>> new connection: %s ', conn_protocol.addr_remote)
    return True
def destroy_fork_states(self, block_number, headerhash):
    """
    Removes all the cache state, which are created further, the current
    blocknumber. Usually done when a new branch found as main branch.
    :param block_number:
    :param headerhash:
    :return:
    """
    str_headerhash = bin2hstr(headerhash).encode()
    remaining = len(self._state_loaders)
    idx = 0
    while idx < remaining:
        loader = self._state_loaders[idx]
        logger.debug('Comparing #%s>%s', loader.block_number, block_number)
        if loader.block_number > block_number:
            # Fork state beyond the new main chain tip — drop it.
            logger.debug('Destroyed State #%s', loader.block_number)
            self.destroy_state_loader(idx)
            remaining -= 1
            continue
        if loader.block_number == block_number and loader.state_code != str_headerhash:
            # Same height but different headerhash — a competing branch.
            self.destroy_state_loader(idx)
            remaining -= 1
            continue
        idx += 1
def peer_fetch_block(self, retry=0):
    """Request the next missing block from the sync target.

    Bans the target peer after 5 fruitless retries; reschedules itself as a
    20-second download monitor otherwise.
    """
    headers = self._target_node_header_hash
    idx = self._last_requested_block_idx - headers.block_number

    blk = self._chain_manager.state.get_block(headers.headerhashes[idx])

    if not blk:
        if retry >= 5:
            logger.debug('Retry Limit Hit')
            self._qrl_node.ban_peer(self._target_peer)
            self.is_syncing_finished(force_finish=True)
            return
    else:
        # Skip forward over blocks we already have locally.
        while blk and idx + 1 < len(headers.headerhashes):
            self._last_requested_block_idx += 1
            idx = self._last_requested_block_idx - headers.block_number
            blk = self._chain_manager.state.get_block(headers.headerhashes[idx])
        retry = 0

    if self.is_syncing_finished():
        return

    self._target_peer.send_fetch_block(self._last_requested_block_idx)
    reactor.download_monitor = reactor.callLater(20, self.peer_fetch_block, retry + 1)
def connectionLost(self, reason=connectionDone):
    """Twisted callback: log the drop and deregister this channel.

    # NOTE(review): the count is logged before remove_connection runs, so it
    # still includes this channel — confirm that is intended.
    """
    logger.debug('%s disconnected. remainder connected: %d',
                 self.peer_ip,
                 self.factory.connections)
    self.factory.remove_connection(self)
    if self.peer_manager:
        self.peer_manager.remove_channel(self)
def peer_fetch_block(self, retry=0):
    """Request the next needed block from the target channel during sync."""
    headers = self._target_node_header_hash
    idx = self._last_requested_block_number - headers.block_number

    blk = self._chain_manager.get_block(headers.headerhashes[idx])

    # NOTE(review): the retry cutoff is applied unconditionally (not only when
    # the block is missing) — the scheduled callLater acts as a timeout, so a
    # single re-entry with retry >= 1 bans the channel. Confirm intended.
    if retry >= 1:
        logger.debug('Retry Limit Hit')
        self._qrl_node.peer_manager.ban_channel(self._target_channel)
        self.is_syncing_finished(force_finish=True)
        return

    # Advance past blocks already present locally.
    while blk and idx + 1 < len(headers.headerhashes):
        self._last_requested_block_number += 1
        idx = self._last_requested_block_number - headers.block_number
        blk = self._chain_manager.get_block(headers.headerhashes[idx])

    if blk and self.is_syncing_finished():
        return

    self._target_channel.send_fetch_block(self._last_requested_block_number)
    reactor.download_monitor = reactor.callLater(100, self.peer_fetch_block, retry + 1)
def _wrap_message(protobuf_obj) -> Optional[bytes]: """ Receives a protobuf object and encodes it as (length)(data) :return: the encoded message :rtype: bytes >>> veData = qrllegacy_pb2.VEData(version="version", genesis_prev_hash=b'genesis_hash') >>> msg = qrllegacy_pb2.LegacyMessage(func_name=qrllegacy_pb2.LegacyMessage.VE, veData=veData) >>> bin2hstr(P2PProtocol._wrap_message(msg)) '000000191a170a0776657273696f6e120c67656e657369735f68617368' msg = qrllegacy_pb2.LegacyMessage(func_name=qrllegacy_pb2.LegacyMessage.PL, plData=qrllegacy_pb2.PLData(peer_ips=trusted_peers, public_port=config.user.p2p_public_port)) >>> plData = qrllegacy_pb2.PLData(peer_ips=[]) >>> msg = qrllegacy_pb2.LegacyMessage(func_name=qrllegacy_pb2.LegacyMessage.PL, plData=plData) >>> bin2hstr(P2PProtocol._wrap_message(msg)) '0000000408012200' """ data = protobuf_obj.SerializeToString() if len(data) == 0: logger.debug("Skipping message. Zero bytes. %s", MessageToJson(protobuf_obj, sort_keys=True)) return None str_data_len = struct.pack('>L', len(data)) return str_data_len + data
def GetBlockByNumber(self, request: qrl_pb2.GetBlockByNumberReq, context) -> qrl_pb2.GetBlockByNumberResp:
    """Return the block at the requested height, or an empty response when absent.

    FIX: the debug label previously said "GetBlockFromNumber", which does not
    match this RPC's name and made log correlation misleading.
    """
    logger.debug("[PublicAPI] GetBlockByNumber")
    block = self.qrlnode.get_block_from_index(request.block_number)
    if block:
        return qrl_pb2.GetBlockByNumberResp(block=block.pbdata)
    return qrl_pb2.GetBlockByNumberResp()
def GetBlock(self, request: qrl_pb2.GetBlockReq, context) -> qrl_pb2.GetBlockResp:
    """Return the block matching the requested headerhash, or an empty response."""
    logger.debug("[PublicAPI] GetBlock")
    found = self.qrlnode.get_block_from_hash(request.header_hash)
    if not found:
        return qrl_pb2.GetBlockResp()
    return qrl_pb2.GetBlockResp(block=found.pbdata)
def peer_fetch_block(self, retry=0):
    """Fetch the next missing block during sync; ban the peer after 5 retries.

    BUG FIX: the skip-ahead loop previously fetched the block at the current
    index and incremented *afterwards*, so when a missing block was found the
    request index had already moved one past it and ``send_fetch_block``
    skipped the first block actually needed. The index is now advanced before
    each lookup with a ``curr_index + 1`` bound (matching the corrected
    sibling implementation), leaving it pointing at the first missing block.
    """
    node_header_hash = self._target_node_header_hash
    curr_index = self._last_requested_block_idx - node_header_hash.block_number

    block_headerhash = node_header_hash.headerhashes[curr_index]
    block = self._chain_manager.state.get_block(block_headerhash)

    if not block:
        if retry >= 5:
            logger.debug('Retry Limit Hit')
            self._qrl_node.ban_peer(self._target_peer)
            self.is_syncing_finished(force_finish=True)
            return
    else:
        # Skip over blocks already stored locally.
        while block and curr_index + 1 < len(node_header_hash.headerhashes):
            self._last_requested_block_idx += 1
            curr_index = self._last_requested_block_idx - node_header_hash.block_number
            block_headerhash = node_header_hash.headerhashes[curr_index]
            block = self._chain_manager.state.get_block(block_headerhash)
        retry = 0

    if self.is_syncing_finished():
        return

    self._target_peer.send_fetch_block(self._last_requested_block_idx)
    reactor.download_monitor = reactor.callLater(20, self.peer_fetch_block, retry + 1)
def GetMultiSigSpendTxsByAddress(self,
                                 request: qrl_pb2.GetMultiSigSpendTxsByAddressReq,
                                 context) -> qrl_pb2.GetMultiSigSpendTxsByAddressResp:
    """Paginated multi-sig spend transactions for an address, optionally filtered."""
    logger.debug("[PublicAPI] GetMultiSigSpendTxsByAddress")
    return self.qrlnode.get_multi_sig_spend_txs_by_address(request.address,
                                                           request.item_per_page,
                                                           request.page_number,
                                                           request.filter_type)
def GetBalance(self, request: qrl_pb2.GetBalanceReq, context) -> qrl_pb2.GetBalanceResp:
    """Return the balance of a single address from its optimized state."""
    logger.debug("[PublicAPI] GetBalance")
    state = self.qrlnode.get_optimized_address_state(request.address)
    return qrl_pb2.GetBalanceResp(balance=state.balance)
def GetLatticePKsByAddress(self,
                           request: qrl_pb2.GetTransactionsByAddressReq,
                           context) -> qrl_pb2.GetLatticePKsByAddressResp:
    """Paginated lattice public keys registered by an address."""
    logger.debug("[PublicAPI] GetLatticePKsByAddress")
    return self.qrlnode.get_lattice_pks_by_address(request.address,
                                                   request.item_per_page,
                                                   request.page_number)
def start_mining(self, parent_block: Block, parent_difficulty):
    """Kick off the miner threads on the pre-built mining block.

    Silently returns when no mining XMSS is available; any miner-start
    failure is logged rather than propagated.
    """
    mining_xmss = self.get_mining_xmss()
    if not mining_xmss:
        logger.warning('No Mining XMSS Found')
        return

    try:
        self.cancel()

        blob = self._mining_block.mining_blob
        offset = self._mining_block.mining_nonce_offset

        logger.debug('!!! Mine #{} | {} ({}) | {} -> {} | {}'.format(
            self._mining_block.block_number,
            self._measurement,
            self._mining_block.timestamp - parent_block.timestamp,
            UInt256ToString(parent_difficulty),
            UInt256ToString(self._current_difficulty),
            self._current_target))

        self.start(input=blob,
                   nonceOffset=offset,
                   target=self._current_target,
                   thread_count=self._mining_thread_count)
    except Exception as e:
        logger.warning("Exception in start_mining")
        logger.exception(e)
def PushTransaction(self, request: qrl_pb2.PushTransactionReq, context) -> qrl_pb2.PushTransactionResp:
    """Submit a signed transaction; answer SUBMITTED, VALIDATION_FAILED or ERROR."""
    logger.debug("[PublicAPI] PushTransaction")
    answer = qrl_pb2.PushTransactionResp()
    try:
        tx = Transaction.from_pbdata(request.transaction_signed)
        tx.update_txhash()

        # FIXME: Full validation takes too much time. At least verify there is a signature
        # the validation happens later in the tx pool
        if len(tx.signature) <= 1000:
            answer.error_description = 'Signature too short'
            answer.error_code = qrl_pb2.PushTransactionResp.VALIDATION_FAILED
        else:
            self.qrlnode.submit_send_tx(tx)
            answer.error_code = qrl_pb2.PushTransactionResp.SUBMITTED
            answer.tx_hash = tx.txhash
    except Exception as e:
        trace = traceback.format_exception(None, e, e.__traceback__)
        answer.error_description = str(''.join(trace))
        answer.error_code = qrl_pb2.PushTransactionResp.ERROR
    return answer
def CollectEphemeralMessage(self,
                            request: qrl_pb2.CollectEphemeralMessageReq,
                            context) -> qrl_pb2.CollectEphemeralMessageResp:
    """Fetch the stored ephemeral message metadata for the given message id."""
    logger.debug("[PublicAPI] CollectEphemeralMessage")
    metadata = self.qrlnode.collect_ephemeral_message(request.msg_id)
    return qrl_pb2.CollectEphemeralMessageResp(ephemeral_metadata=metadata.pbdata)
def GetOTS(self, request: qrl_pb2.GetOTSReq, context) -> qrl_pb2.GetOTSResp:
    """Return the OTS bitfield and the next unused OTS index for an address."""
    logger.debug("[PublicAPI] GetOTS")
    state = self.qrlnode.get_address_state(request.address)
    return qrl_pb2.GetOTSResp(ots_bitfield=state.ots_bitfield,
                              next_unused_ots_index=state.get_unused_ots_index())
def main():
    """Node entry point: set up paths, chain, services and run the reactor."""
    args = parse_arguments()

    logger.debug("=====================================================================================")
    logger.info("QRL Path: %s", args.qrl_dir)
    config.user.qrl_dir = expanduser(args.qrl_dir)
    config.create_path(config.user.qrl_dir)
    logger.debug("=====================================================================================")

    config.create_path(config.user.wallet_dir)

    mining_address = None
    if config.user.mining_enabled:
        mining_address = get_mining_address(args.mining_address)
        if not mining_address:
            logger.warning('Invalid Mining Credit Wallet Address')
            logger.warning('%s', args.mining_address)
            return False

    ntp.setDrift()

    if args.debug:
        logger.warning("FAULT HANDLER ENABLED")
        faulthandler.enable()

    logger.info('Initializing chain..')
    persistent_state = State()

    # Test hook: force a fixed measurement value when requested on the CLI.
    if args.measurement > -1:
        persistent_state.get_measurement = MagicMock(return_value=args.measurement)

    chain_manager = ChainManager(state=persistent_state)
    chain_manager.load(Block.from_json(GenesisBlock().to_json()))

    qrlnode = QRLNode(db_state=persistent_state, mining_address=mining_address)
    qrlnode.set_chain_manager(chain_manager)

    set_logger(args, qrlnode.sync_state)

    #######
    # NOTE: Keep assigned to a variable or might get collected
    admin_service, grpc_service, mining_service = start_services(qrlnode)

    qrlnode.start_listening()
    qrlnode.connect_peers()
    qrlnode.start_pow(args.mining_thread_count)

    logger.info('QRL blockchain ledger %s', config.dev.version)
    logger.info('mining/staking address %s', args.mining_address)

    # FIXME: This will be removed once we move away from Twisted
    reactor.run()
def GetObject(self, request: qrl_pb2.GetObjectReq, context) -> qrl_pb2.GetObjectResp:
    """Resolve `query` as an address, a transaction hash, or a block hash/index.

    BUG FIX: ``answer.found`` used to be set to True *before* the index-based
    block lookup was known to succeed; when ``get_block_from_index`` returned
    None the resulting AttributeError was swallowed by the except clause and
    the response claimed found=True with no payload. ``found`` is now only set
    once a block is actually in hand.
    """
    logger.debug("[PublicAPI] GetObject")
    answer = qrl_pb2.GetObjectResp()
    answer.found = False

    # FIXME: We need a unified way to access and validate data.
    query = bytes(request.query)  # query will be as a string, if Q is detected convert, etc.

    if AddressState.address_is_valid(query):
        if self.qrlnode.get_address_is_used(query):
            address_state = self.qrlnode.get_address_state(query)
            if address_state is not None:
                answer.found = True
                answer.address_state.CopyFrom(address_state.pbdata)
                return answer

    transaction, block_number = self.qrlnode.get_transaction(query)
    if transaction is not None:
        answer.found = True
        blockheader = None
        if block_number is not None:
            block = self.qrlnode.get_block_from_index(block_number)
            blockheader = block.blockheader.pbdata
        txextended = qrl_pb2.TransactionExtended(header=blockheader,
                                                 tx=transaction.pbdata,
                                                 addr_from=transaction.addr_from,
                                                 size=transaction.size)
        answer.transaction.CopyFrom(txextended)
        return answer

    # NOTE: This is temporary, indexes are accepted for blocks
    try:
        block = self.qrlnode.get_block_from_hash(query)
        if block is None:
            # Fall back to interpreting the query as a decimal block index.
            query_index = int(query.decode())
            block = self.qrlnode.get_block_from_index(query_index)

        if block is not None:
            block_extended = qrl_pb2.BlockExtended()
            block_extended.header.CopyFrom(block.blockheader.pbdata)
            block_extended.size = block.size
            for transaction in block.transactions:
                tx = Transaction.from_pbdata(transaction)
                extended_tx = qrl_pb2.TransactionExtended(tx=transaction,
                                                          addr_from=tx.addr_from,
                                                          size=tx.size)
                block_extended.extended_transactions.extend([extended_tx])
            answer.block_extended.CopyFrom(block_extended)
            answer.found = True
    except Exception:
        pass

    return answer
def notify(self, message):
    """Dispatch `message` to every observer registered for its func_name.

    Observer exceptions are logged and swallowed so one bad handler cannot
    break the others.
    """
    # FIXME: Add mutexes
    for observer in self._observers.get(message.func_name, []):
        try:
            observer(self.source, message)
        except Exception as e:
            logger.debug("[%s] executing %s", self.source, message)
            logger.exception(e)
def get_dev_config_current_state_key(self):
    """Return the raw current dev-config state key, or None when missing or unreadable."""
    try:
        return self._db.get_raw(b'dev_config_current_state_key')
    except KeyError:
        # Key absent: a fresh database has no dev config yet.
        logger.debug('[get_dev_config_current_state_key] Dev Config not found')
    except Exception as e:
        logger.error('[get_dev_config_current_state_key] %s', e)
    return None
def GetLatestData(self, request: qrl_pb2.GetLatestDataReq, context) -> qrl_pb2.GetLatestDataResp:
    """Return the most recent block headers, confirmed and/or unconfirmed txs."""
    logger.debug("[PublicAPI] GetLatestData")

    response = qrl_pb2.GetLatestDataResp()
    all_requested = request.filter == qrl_pb2.GetLatestDataReq.ALL
    quantity = min(request.quantity, self.MAX_REQUEST_QUANTITY)

    if all_requested or request.filter == qrl_pb2.GetLatestDataReq.BLOCKHEADERS:
        headers = []
        for blk in self.qrlnode.get_latest_blocks(offset=request.offset, count=quantity):
            transaction_count = qrl_pb2.TransactionCount()
            for tx in blk.transactions:
                transaction_count.count[CODEMAP[tx.WhichOneof('transactionType')]] += 1
            headers.append(qrl_pb2.BlockHeaderExtended(header=blk.blockheader.pbdata,
                                                       transaction_count=transaction_count))
        response.blockheaders.extend(headers)

    if all_requested or request.filter == qrl_pb2.GetLatestDataReq.TRANSACTIONS:
        confirmed = []
        for tx in self.qrlnode.get_latest_transactions(offset=request.offset, count=quantity):
            # FIXME: Improve this once we have a proper database schema
            block_index = self.qrlnode.get_blockidx_from_txhash(tx.txhash)
            block = self.qrlnode.get_block_from_index(block_index)
            header = block.blockheader.pbdata if block else None
            confirmed.append(qrl_pb2.TransactionExtended(header=header,
                                                         tx=tx.pbdata,
                                                         addr_from=tx.addr_from,
                                                         size=tx.size))
        response.transactions.extend(confirmed)

    if all_requested or request.filter == qrl_pb2.GetLatestDataReq.TRANSACTIONS_UNCONFIRMED:
        pending = []
        for tx_info in self.qrlnode.get_latest_transactions_unconfirmed(offset=request.offset,
                                                                        count=quantity):
            tx = tx_info.transaction
            pending.append(qrl_pb2.TransactionExtended(header=None,
                                                       tx=tx.pbdata,
                                                       addr_from=tx.addr_from,
                                                       size=tx.size,
                                                       timestamp_seconds=tx_info.timestamp))
        response.transactions_unconfirmed.extend(pending)

    return response
def GetTotalBalance(self, request: qrl_pb2.GetTotalBalanceReq, context) -> qrl_pb2.GetTotalBalanceResp:
    """Sum the balances of every address in the request.

    BUG FIX: the response was constructed as ``GetBalanceResp``, contradicting
    this RPC's declared ``GetTotalBalanceResp`` return type; gRPC would reject
    the mismatched message. It now builds the correct response type.
    """
    logger.debug("[PublicAPI] GetTotalBalance")
    response = qrl_pb2.GetTotalBalanceResp(balance=0)
    for address in request.addresses:
        address_state = self.qrlnode.get_address_state(address)
        response.balance += address_state.balance
    return response
def GetTransactionsByAddress(self,
                             request: qrl_pb2.GetTransactionsByAddressReq,
                             context) -> qrl_pb2.GetTransactionsByAddressResp:
    """Return mini transactions and the resulting balance for an address."""
    logger.debug("[PublicAPI] GetTransactionsByAddress")
    mini_transactions, balance = self.qrlnode.get_transactions_by_address(request.address)
    response = qrl_pb2.GetTransactionsByAddressResp()
    response.mini_transactions.extend(mini_transactions)
    response.balance = balance
    return response
def get_block(state: State, header_hash: bytes):
    """Load and deserialize the block stored under `header_hash`, or return None."""
    try:
        raw = state._db.get_raw(header_hash)
        return Block.deserialize(raw)
    except KeyError:
        logger.debug('[get_block] Block header_hash %s not found',
                     bin2hstr(header_hash).encode())
    except Exception as e:
        logger.error('[get_block] %s', e)
    return None
def get_block(self, header_hash: bytes) -> Optional[Block]:
    """Load the block stored under the hex-encoded `header_hash` key, or None."""
    key = bin2hstr(header_hash).encode()
    try:
        return Block.from_json(self._db.get_raw(key))
    except KeyError:
        logger.debug('[get_block] Block header_hash %s not found', key)
    except Exception as e:
        logger.error('[get_block] %s', e)
    return None
def GetSlaveTxn(self, request: qrl_pb2.SlaveTxnReq, context) -> qrl_pb2.TransferCoinsResp:
    """Build an unsigned slave transaction from the request parameters."""
    logger.debug("[PublicAPI] GetSlaveTxn")
    tx = self.qrlnode.create_slave_tx(addr_from=request.address_from,
                                      slave_pks=request.slave_pks,
                                      access_types=request.access_types,
                                      fee=request.fee,
                                      xmss_pk=request.xmss_pk,
                                      xmss_ots_index=request.xmss_ots_index)
    return qrl_pb2.TransferCoinsResp(transaction_unsigned=tx.pbdata)
def CollectEphemeralMessage(self,
                            request: qrl_pb2.CollectEphemeralMessageReq,
                            context) -> qrl_pb2.CollectEphemeralMessageResp:
    """Fetch ephemeral message metadata for the requested message id."""
    logger.debug("[PublicAPI] CollectEphemeralMessage")
    metadata = self.qrlnode.collect_ephemeral_message(request.msg_id)
    return qrl_pb2.CollectEphemeralMessageResp(ephemeral_metadata=metadata.pbdata)
def GetSlaveTxn(self, request: qrl_pb2.SlaveTxnReq, context) -> qrl_pb2.TransferCoinsResp:
    """Build an unsigned slave transaction (OTS index chosen by the signer)."""
    logger.debug("[PublicAPI] GetSlaveTxn")
    tx = self.qrlnode.create_slave_tx(addr_from=request.address_from,
                                      slave_pks=request.slave_pks,
                                      access_types=request.access_types,
                                      fee=request.fee,
                                      xmss_pk=request.xmss_pk)
    return qrl_pb2.TransferCoinsResp(transaction_unsigned=tx.pbdata)
def TransferCoins(self, request: qrl_pb2.TransferCoinsReq, context) -> qrl_pb2.TransferCoinsResp:
    """Build an unsigned single-recipient transfer transaction."""
    logger.debug("[PublicAPI] TransferCoins")
    tx = self.qrlnode.create_send_tx(addr_from=request.address_from,
                                     addr_to=request.address_to,
                                     amount=request.amount,
                                     fee=request.fee,
                                     xmss_pk=request.xmss_pk,
                                     xmss_ots_index=request.xmss_ots_index)
    return qrl_pb2.TransferCoinsResp(transaction_unsigned=tx.pbdata)
def TransferCoins(self, request: qrl_pb2.TransferCoinsReq, context) -> qrl_pb2.TransferCoinsResp:
    """Build an unsigned multi-recipient transfer transaction."""
    logger.debug("[PublicAPI] TransferCoins")
    tx = self.qrlnode.create_send_tx(addr_from=request.address_from,
                                     addrs_to=request.addresses_to,
                                     amounts=request.amounts,
                                     fee=request.fee,
                                     xmss_pk=request.xmss_pk)
    return qrl_pb2.TransferCoinsResp(transaction_unsigned=tx.pbdata)
def PushTransaction(self, request: qrl_pb2.PushTransactionReq, context) -> qrl_pb2.PushTransactionResp:
    """Submit a signed transaction and echo back the submission result."""
    logger.debug("[PublicAPI] PushTransaction")
    tx = Transaction.from_pbdata(request.transaction_signed)
    submitted = self.qrlnode.submit_send_tx(tx)

    # FIXME: Improve response type
    # Prepare response
    answer = qrl_pb2.PushTransactionResp()
    answer.some_response = str(submitted)
    return answer
def _parse_buffer(self, total_read):
    # FIXME: This parsing/wire protocol needs to be replaced
    """
    Generator yielding complete LegacyMessages framed as a 4-byte big-endian
    length followed by the serialized payload.

    >>> from pyqrllib.pyqrllib import hstr2bin
    >>> p=P2PProtocol()
    >>> p._buffer = bytes(hstr2bin('000000191a170a0776657273696f6e120c67656e657369735f68617368'+ \
                                   '000000191a170a0776657273696f6e120c67656e657369735f68617368'))
    >>> messages = p._parse_buffer([0])
    >>> len(list(messages))
    2
    """
    chunk_size = 0
    while self._buffer:
        if len(self._buffer) < 5:
            # Buffer is still incomplete as it doesn't have message size
            return

        ignore_skip = False
        try:
            chunk_size_raw = self._buffer[:4]
            chunk_size = struct.unpack('>L', chunk_size_raw)[0]  # is m length encoded correctly?

            if chunk_size <= 0:
                logger.debug("<X< %s", bin2hstr(self._buffer))
                raise Exception("Invalid chunk size <= 0")

            if chunk_size > config.dev.message_buffer_size:
                raise Exception("Invalid chunk size > message_buffer_size")

            if len(self._buffer) - 4 < chunk_size:  # As 4 bytes includes chunk_size_raw
                ignore_skip = True  # Buffer is still incomplete as it doesn't have message so skip moving buffer
                return

            message_raw = self._buffer[4:4 + chunk_size]
            message = qrllegacy_pb2.LegacyMessage()
            message.ParseFromString(message_raw)
            yield message
        except Exception as e:  # no qa
            logger.warning("Problem parsing message. Banning+Dropping connection")
            logger.exception(e)
            self.peer_manager.ban_channel(self)
        finally:
            if not ignore_skip:
                # Consume the frame (or the garbage) from the buffer.
                skip = 4 + chunk_size
                self._buffer = self._buffer[skip:]
                total_read[0] += skip
def get_block_number_mapping(state: State, block_number: int):
    """Return the BlockNumberMapping stored under `block_number`, or None."""
    try:
        raw = state._db.get_raw(str(block_number).encode())
        mapping = qrl_pb2.BlockNumberMapping()
        return Parse(raw, mapping)
    except KeyError:
        logger.debug('[get_block_number_mapping] Block #%s not found', block_number)
    except Exception as e:
        logger.error('[get_block_number_mapping] %s', e)
    return None
def get_block_number_mapping(self, block_number: int) -> Optional[qrl_pb2.BlockNumberMapping]:
    """Return the BlockNumberMapping stored under `block_number`, or None.

    FIX: the parameter was annotated ``bytes``, but it is a block height —
    it is formatted with str() as the DB key and with ``#%s`` in the log,
    and the state-level twin of this helper annotates it ``int``.
    """
    try:
        json_data = self._db.get_raw(str(block_number).encode())
        block_number_mapping = qrl_pb2.BlockNumberMapping()
        return Parse(json_data, block_number_mapping)
    except KeyError:
        logger.debug('[get_block_number_mapping] Block #%s not found', block_number)
    except Exception as e:
        logger.error('[get_block_number_mapping] %s', e)
    return None
def PushEphemeralMessage(self,
                         request: qrl_pb2.PushEphemeralMessageReq,
                         context) -> qrl_pb2.PushTransactionResp:
    """Broadcast an encrypted ephemeral message when the node accepts them."""
    logger.debug("[PublicAPI] PushEphemeralMessageReq")
    submitted = False
    if config.user.accept_ephemeral:
        encrypted = EncryptedEphemeralMessage(request.ephemeral_message)
        submitted = self.qrlnode.broadcast_ephemeral_message(encrypted)

    answer = qrl_pb2.PushTransactionResp()
    answer.some_response = str(submitted)
    return answer
def monitor_chain_state(self):
    """Drop channels that never sent, or stopped sending, chain-state updates.

    FIX: iterate over a snapshot of ``self._channels`` — ``loseConnection()``
    can trigger disconnect callbacks that remove the channel from the same
    list, which would corrupt iteration over the live list.
    """
    current_timestamp = time.time()
    for channel in list(self._channels):
        if channel not in self._peer_node_status:
            # Never reported any chain state — drop it.
            channel.loseConnection()
            continue
        delta = current_timestamp - self._peer_node_status[channel].timestamp
        if delta > config.user.chain_state_timeout:
            del self._peer_node_status[channel]
            logger.debug('>>>> No State Update [%18s] %2.2f (TIMEOUT)', channel.connection_id, delta)
            channel.loseConnection()
def get(self, key_obj):
    """Fetch `key_obj` from the underlying DB and return its decoded 'value' field.

    Returns None (implicitly) when the value lacks 'value' or cannot be decoded.
    """
    if not isinstance(key_obj, bytes):
        key_obj = key_obj.encode()

    raw = self.db.Get(key_obj)
    try:
        # FIXME: This is a massive bottleneck as start up.
        return json.loads(raw.decode())['value']
    except KeyError:
        logger.debug("Key not found %s", key_obj)
    except Exception as e:
        logger.exception(e)
def add_block(self, block: Block) -> bool:
    """Attach `block` to the chain; reject anything beyond the re-org limit."""
    if block.block_number < self.height - config.dev.reorg_limit:
        logger.debug('Skipping block #%s as beyond re-org limit', block.block_number)
        return False

    batch = self.state.get_batch()
    if not self._add_block(block, batch=batch):
        return False

    # Commit the whole block in one write, then refresh child links.
    self.state.write_batch(batch)
    self.update_child_metadata(block.headerhash)
    return True
def GetTransferTokenTxn(self, request: qrl_pb2.TransferTokenTxnReq, context) -> qrl_pb2.TransferCoinsResp:
    """Build an unsigned token-transfer transaction."""
    logger.debug("[PublicAPI] GetTransferTokenTxn")

    # Token tx hash arrives hex-encoded; convert to raw bytes.
    bin_token_txhash = bytes(hstr2bin(request.token_txhash.decode()))
    tx = self.qrlnode.create_transfer_token_txn(addr_from=request.address_from,
                                                addr_to=request.address_to,
                                                token_txhash=bin_token_txhash,
                                                amount=request.amount,
                                                fee=request.fee,
                                                xmss_pk=request.xmss_pk,
                                                xmss_ots_index=request.xmss_ots_index)
    return qrl_pb2.TransferCoinsResp(transaction_unsigned=tx.pbdata)
def solutionEvent(self, nonce):
    """Handle a found PoW solution: stamp the nonce and hand off the block."""
    # NOTE: This function usually runs in the context of a C++ thread
    try:
        logger.debug('Solution Found %s', nonce)
        self._mining_block.set_mining_nonce(nonce)
        logger.info('Block #%s nonce: %s', self._mining_block.block_number,
                    StringToUInt256(str(nonce))[-4:])
        logger.info('Hash Rate: %s H/s', self.hashRate())
        # Work on a deep copy so the miner can keep mutating its own block.
        cloned_block = copy.deepcopy(self._mining_block)
        self.pre_block_logic(cloned_block)
    except Exception as e:
        logger.warning("Exception in solutionEvent")
        logger.exception(e)
def GetTokenTxn(self, request: qrl_pb2.TokenTxnReq, context) -> qrl_pb2.TransferCoinsResp:
    """Build an unsigned token-creation transaction."""
    logger.debug("[PublicAPI] GetTokenTxn")
    tx = self.qrlnode.create_token_txn(addr_from=request.address_from,
                                       symbol=request.symbol,
                                       name=request.name,
                                       owner=request.owner,
                                       decimals=request.decimals,
                                       initial_balances=request.initial_balances,
                                       fee=request.fee,
                                       xmss_pk=request.xmss_pk,
                                       xmss_ots_index=request.xmss_ots_index)
    return qrl_pb2.TransferCoinsResp(transaction_unsigned=tx.pbdata)
def monitor_miner(self):
    """Periodic watchdog: restart mining when idle, flag pending solutions."""
    reactor.callLater(60, self.monitor_miner)

    # No mining while the node is syncing the chain.
    if self.p2p_factory.is_syncing():
        return

    if not self.miner.isRunning() or self.miner_toggler:
        logger.debug('Mine next called by monitor_miner')
        self.miner_toggler = False
        self.mine_next(self.chain_manager.last_block)
    elif self.miner.solutionAvailable():
        # Solution pending — restart on the next tick if still unhandled.
        self.miner_toggler = True
    else:
        self.miner_toggler = False
def save_wallet(self):
    """Serialize every address bundle into the wallet protobuf and write it to disk."""
    logger.debug('Syncing wallet file')
    wallet_store = qrl_pb2.WalletStore()
    wallet_store.wallets.extend(
        qrl_pb2.Wallet(address=bundle.address,
                       mnemonic=bundle.xmss.get_mnemonic(),
                       xmss_index=bundle.xmss.get_index())
        for bundle in self.address_bundle)
    with open(self.wallet_dat_filename, "wb") as outfile:
        outfile.write(wallet_store.SerializeToString())
def send_peer_list(self):
    """
    Get Peers
    Sends the peers list.
    :return:
    """
    peer_ips = self.factory.get_connected_peer_ips()
    logger.debug('<<< Sending connected peers to %s [%s]', self.peer_ip, peer_ips)
    self.send(qrllegacy_pb2.LegacyMessage(func_name=qrllegacy_pb2.LegacyMessage.PL,
                                          plData=qrllegacy_pb2.PLData(peer_ips=peer_ips)))
def GetObject(self, request: qrl_pb2.GetObjectReq, context) -> qrl_pb2.GetObjectResp:
    """Resolve `query` as an address, a tx hash, a block hash, or a block index."""
    logger.debug("[PublicAPI] GetObject")
    answer = qrl_pb2.GetObjectResp()
    answer.found = False

    # FIXME: We need a unified way to access and validate data.
    query = bytes(request.query)  # query will be as a string, if Q is detected convert, etc.

    if AddressState.address_is_valid(query):
        if self.qrlnode.get_address_is_used(query):
            address_state = self.qrlnode.get_address_state(query)
            if address_state is not None:
                answer.found = True
                # NOTE(review): CopyFrom is fed the state object directly here,
                # while sibling versions pass .pbdata — confirm its type.
                answer.address_state.CopyFrom(address_state)
                return answer

    transaction, block_number = self.qrlnode.get_transaction(query)
    if transaction is not None:
        answer.found = True
        blockheader = None
        if block_number is not None:
            block = self.qrlnode.get_block_from_index(block_number)
            blockheader = block.blockheader.pbdata
        txextended = qrl_pb2.TransactionExtended(header=blockheader,
                                                 tx=transaction.pbdata)
        answer.transaction.CopyFrom(txextended)
        return answer

    block = self.qrlnode.get_block_from_hash(query)
    if block is not None:
        answer.found = True
        answer.block.CopyFrom(block.pbdata)
        return answer

    # NOTE: This is temporary, indexes are accepted for blocks
    try:
        query_index = int(query.decode())
        block = self.qrlnode.get_block_from_index(query_index)
        if block is not None:
            answer.found = True
            answer.block.CopyFrom(block.pbdata)
            return answer
    except Exception:
        pass

    return answer
def pre_block_logic(self, block: Block):
    """Serialize block insertion under the miner lock; broadcast on success."""
    logger.debug('Checking miner lock')
    with self._miner_lock:
        logger.debug('Inside add_block')
        result = self.chain_manager.add_block(block)

        logger.debug('trigger_miner %s', self.chain_manager.trigger_miner)
        logger.debug('is_syncing %s', self.p2p_factory.is_syncing())

        if not self.p2p_factory.is_syncing():
            if self.chain_manager.trigger_miner or not self.miner.isRunning():
                self.mine_next(self.chain_manager.last_block)

        if not result:
            logger.debug('Block Rejected %s %s', block.block_number, bin2hstr(block.headerhash))
            return

    reactor.callLater(0, self.broadcast_block, block)
def _add_block(self, block, ignore_duplicate=False, batch=None): block_size_limit = self.state.get_block_size_limit(block) if block_size_limit and block.size > block_size_limit: logger.info('Block Size greater than threshold limit %s > %s', block.size, block_size_limit) return False if not self._pre_check(block, ignore_duplicate): logger.debug('Failed pre_check') return False if self._try_orphan_add_block(block, batch): return True if self._try_branch_add_block(block, batch): return True return False
def GetLatestData(self, request: qrl_pb2.GetLatestDataReq, context) -> qrl_pb2.GetLatestDataResp:
    """Return recent block headers, confirmed txs and/or unconfirmed txs."""
    logger.debug("[PublicAPI] GetLatestData")

    response = qrl_pb2.GetLatestDataResp()
    all_requested = request.filter == qrl_pb2.GetLatestDataReq.ALL
    quantity = min(request.quantity, self.MAX_REQUEST_QUANTITY)

    if all_requested or request.filter == qrl_pb2.GetLatestDataReq.BLOCKHEADERS:
        headers = []
        for blk in self.qrlnode.get_latest_blocks(offset=request.offset, count=quantity):
            transaction_count = qrl_pb2.TransactionCount()
            for tx in blk.transactions:
                transaction_count.count[tx.type] += 1
            headers.append(qrl_pb2.BlockHeaderExtended(header=blk.blockheader.pbdata,
                                                       transaction_count=transaction_count))
        response.blockheaders.extend(headers)

    if all_requested or request.filter == qrl_pb2.GetLatestDataReq.TRANSACTIONS:
        confirmed = []
        for tx in self.qrlnode.get_latest_transactions(offset=request.offset, count=quantity):
            # FIXME: Improve this once we have a proper database schema
            block_index = self.qrlnode.get_blockidx_from_txhash(tx.txhash)
            block = self.qrlnode.get_block_from_index(block_index)
            header = block.blockheader.pbdata if block else None
            confirmed.append(qrl_pb2.TransactionExtended(header=header, tx=tx.pbdata))
        response.transactions.extend(confirmed)

    if all_requested or request.filter == qrl_pb2.GetLatestDataReq.TRANSACTIONS_UNCONFIRMED:
        pending = [qrl_pb2.TransactionExtended(header=None, tx=tx.pbdata)
                   for tx in self.qrlnode.get_latest_transactions_unconfirmed(offset=request.offset,
                                                                              count=quantity)]
        response.transactions_unconfirmed.extend(pending)

    return response
def start_mining(self,
                 tx_pool,
                 parent_block,
                 parent_difficulty,
                 thread_count=config.user.mining_thread_count):
    """Create a fresh candidate block on top of `parent_block` and start mining it."""
    mining_xmss = self.get_mining_xmss()
    if not mining_xmss:
        logger.warning('No Mining XMSS Found')
        return

    try:
        self.cancel()

        self._mining_block = self.create_block(last_block=parent_block,
                                               mining_nonce=0,
                                               tx_pool=tx_pool,
                                               signing_xmss=self._mining_xmss,
                                               master_address=self._master_address)

        measurement = self.state.get_measurement(self._mining_block.timestamp,
                                                 self._mining_block.prev_headerhash)
        current_difficulty, current_target = self._difficulty_tracker.get(
            measurement=measurement,
            parent_difficulty=parent_difficulty)

        input_bytes, nonce_offset = self._get_mining_data(self._mining_block)

        logger.debug('!!! Mine #{} | {} ({}) | {} -> {} | {}'.format(
            self._mining_block.block_number,
            measurement,
            self._mining_block.timestamp - parent_block.timestamp,
            UInt256ToString(parent_difficulty),
            UInt256ToString(current_difficulty),
            current_target))
        logger.debug('!!! {}'.format(current_target))

        self.start(input=input_bytes,
                   nonceOffset=nonce_offset,
                   target=current_target,
                   thread_count=thread_count)
    except Exception as e:
        logger.warning("Exception in start_mining")
        logger.exception(e)
def add_connection(self, conn_protocol) -> bool:
    """Decide whether to accept an incoming/outgoing peer connection.

    Rejects banned peers, our own public IP, self-connections and any
    connection beyond the configured limit; otherwise registers the
    protocol and records the peer address. Returns True on acceptance.
    """
    # TODO: Most of this can go the peer manager
    remote_ip = conn_protocol.peer_ip

    if self._qrl_node.is_banned(remote_ip):
        conn_protocol.loseConnection()
        return False

    # FIXME: (For AWS) This could be problematic for other users
    # FIXME: identify nodes by an GUID?
    if config.dev.public_ip and remote_ip == config.dev.public_ip:
        conn_protocol.loseConnection()
        return False

    if self.reached_conn_limit:
        # FIXME: Should we stop listening to avoid unnecessary load due to many connections?
        logger.info('Peer limit hit. Disconnecting client %s', remote_ip)
        conn_protocol.loseConnection()
        return False

    known_peers = self._qrl_node.peer_addresses

    # A connection to ourselves: drop it, and scrub our own address
    # from the known-peer list if it sneaked in.
    if remote_ip == conn_protocol.host_ip:
        if remote_ip in known_peers:
            logger.info('Self in peer_list, removing..')
            known_peers.remove(remote_ip)
            self._qrl_node.peer_manager.update_peer_addresses(known_peers)
        conn_protocol.loseConnection()
        return False

    self._peer_connections.append(conn_protocol)

    if remote_ip not in known_peers:
        logger.debug('Adding to peer_list')
        known_peers.add(remote_ip)
        self._qrl_node.peer_manager.update_peer_addresses(known_peers)

    logger.debug('>>> new peer connection : %s:%s ', remote_ip, str(conn_protocol.peer_port))

    return True
def main():
    """Node entry point: parse args, initialize state/chain, start services and the reactor."""
    args = parse_arguments()

    # Wallet dir must exist before the mining wallet checks read it.
    config.create_path(config.user.wallet_dir)
    slaves = mining_wallet_checks(args)

    logger.debug("=====================================================================================")
    logger.info("Data Path: %s", args.data_dir)

    # Point the runtime config at the requested data dir and make sure it exists.
    config.user.data_dir = args.data_dir
    config.create_path(config.user.data_dir)

    # Synchronize against NTP so block timestamps are sane.
    ntp.setDrift()

    logger.info('Initializing chain..')
    persistent_state = State()

    chain_manager = ChainManager(state=persistent_state)
    # Bootstrap the chain from the genesis block.
    chain_manager.load(Block.from_json(GenesisBlock().to_json()))

    qrlnode = QRLNode(db_state=persistent_state, slaves=slaves)
    qrlnode.set_chain(chain_manager)

    set_logger(args, qrlnode.sync_state)

    #######
    # NOTE: Keep assigned to a variable or might get collected
    admin_service, grpc_service = start_services(qrlnode)

    qrlnode.start_listening()
    qrlnode.connect_peers()

    qrlnode.start_pow()

    logger.info('QRL blockchain ledger %s', config.dev.version)
    logger.info('mining/staking address %s', slaves[0])

    # FIXME: This will be removed once we move away from Twisted
    # Blocks until the reactor is stopped; nothing after this runs during normal operation.
    reactor.run()
def validate_mining_nonce(self, block, enable_logging=False):
    """Check that a block's mining nonce satisfies the PoW target.

    Recomputes the expected difficulty/target from the parent block's
    metadata and verifies the block's mining input against it.
    Returns True when the proof of work is valid.
    """
    parent_metadata = self.state.get_block_metadata(block.prev_headerhash)
    parent_block = self.state.get_block(block.prev_headerhash)

    # Mining input = last 4 words of the nonce (as UInt256) + the mining hash.
    input_bytes = StringToUInt256(str(block.mining_nonce))[-4:] + tuple(block.mining_hash)

    measurement = self.state.get_measurement(block.timestamp, block.prev_headerhash)
    diff, target = self._difficulty_tracker.get(
        measurement=measurement,
        parent_difficulty=parent_metadata.block_difficulty)

    if enable_logging:
        logger.debug('-----------------START--------------------')
        logger.debug('Validate #%s', block.block_number)
        logger.debug('block.timestamp %s', block.timestamp)
        logger.debug('parent_block.timestamp %s', parent_block.timestamp)
        logger.debug('parent_block.difficulty %s', UInt256ToString(parent_metadata.block_difficulty))
        logger.debug('input_bytes %s', UInt256ToString(input_bytes))
        logger.debug('diff : %s | target : %s', UInt256ToString(diff), target)
        logger.debug('-------------------END--------------------')

    if PoWHelper.verifyInput(input_bytes, target):
        return True

    # PoW failed: optionally dump the offending hash and block for diagnosis.
    if enable_logging:
        logger.warning("PoW verification failed")
        qn = Qryptonight()
        tmp_hash = qn.hash(input_bytes)
        logger.warning("{}".format(tmp_hash))
        logger.debug('%s', block.to_json())

    return False
def GetTokenDetailedList(self, request: qrl_pb2.Empty, context) -> qrl_pb2.TokenDetailedList:
    """Serve the TokenDetailedList public API call by delegating to the node."""
    logger.debug("[PublicAPI] TokenDetailedList")
    return self.qrlnode.get_token_detailed_list()
def startedConnecting(self, connector):
    """Twisted factory hook: an outgoing connection attempt has begun."""
    logger.debug('Started connecting: %s', connector)
def clientConnectionFailed(self, connector, reason):
    """Twisted factory hook: an outgoing connection could not be established."""
    logger.debug('connection failed: %s', reason)
def clientConnectionLost(self, connector, reason):  # noqa
    """Twisted factory hook: an established connection was dropped."""
    logger.debug('connection lost: %s', reason)