def _get_previous_block_state_root(self, block):
    block_header = BlockHeader()
    block_header.ParseFromString(block.header)
    if block_header.previous_block_id == NULL_BLOCK_IDENTIFIER:
        return INIT_ROOT_KEY

    try:
        block = next(
            self._block_manager.get([block_header.previous_block_id]))
    except StopIteration:
        return None

    block_header = BlockHeader()
    block_header.ParseFromString(block.header)
    return block_header.state_root_hash
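# A dict-backed sketch of the three-way contract above: the genesis
# predecessor maps to INIT_ROOT_KEY, a missing previous block yields None,
# and otherwise the previous block's state root is returned. The names and
# values below are illustrative stand-ins, not the validator's real types.
NULL_BLOCK_IDENTIFIER = 'ffffffffffffffff'  # placeholder sentinel
INIT_ROOT_KEY = ''                          # placeholder initial root

def toy_previous_state_root(previous_block_id, state_roots):
    if previous_block_id == NULL_BLOCK_IDENTIFIER:
        return INIT_ROOT_KEY
    # state_roots plays the role of the block manager lookup.
    return state_roots.get(previous_block_id)  # None when unknown

roots = {'b1': 'root-of-b1'}
assert toy_previous_state_root(NULL_BLOCK_IDENTIFIER, roots) == INIT_ROOT_KEY
assert toy_previous_state_root('b1', roots) == 'root-of-b1'
assert toy_previous_state_root('missing', roots) is None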
def handle_request(self, request, response, connection_id):
    try:
        blocks = []
        for block in self._proxy.blocks_get(request.block_ids):
            block_header = BlockHeader()
            block_header.ParseFromString(block.header)
            blocks.append(
                consensus_pb2.ConsensusBlock(
                    block_id=bytes.fromhex(block.header_signature),
                    previous_id=bytes.fromhex(
                        block_header.previous_block_id),
                    signer_id=bytes.fromhex(
                        block_header.signer_public_key),
                    block_num=block_header.block_num,
                    payload=block_header.consensus))
        response.blocks.extend(blocks)
    except UnknownBlock:
        response.status = \
            consensus_pb2.ConsensusBlocksGetResponse.UNKNOWN_BLOCK
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception("ConsensusBlocksGet")
        response.status = \
            consensus_pb2.ConsensusBlocksGetResponse.SERVICE_ERROR

    return HandlerStatus.RETURN
def handle(self, identity, message_content):
    helper = _ClientHelper(
        message_content,
        client_pb2.ClientBlockListRequest,
        client_pb2.ClientBlockListResponse,
        validator_pb2.Message.CLIENT_BLOCK_LIST_RESPONSE,
        block_store=self._block_store)

    helper.set_head_id()
    if helper.has_response():
        return helper.result

    # Build the block list by walking back from the head block
    current_id = helper.head_id
    blocks = []
    while current_id in self._block_store:
        block = self._block_store[current_id].block
        blocks.append(block)
        header = BlockHeader()
        header.ParseFromString(block.header)
        current_id = header.previous_block_id

    if blocks:
        helper.set_response(
            helper.status.OK,
            head_id=helper.head_id,
            blocks=blocks)
    else:
        helper.set_response(helper.status.NO_ROOT)

    return helper.result
def handle_request(self, request, response):
    try:
        LOGGER.debug(
            'ConsensusChainHeadGetHandler: proxy parent_id=(%s) '
            'new_parent=(%s) is_new=%s',
            request.parent_id.hex()[:8],
            request.new_parent_id.hex()[:8],
            request.is_new)
        chain_head = self._proxy.chain_head_get(
            request.parent_id, request.new_parent_id, request.is_new)

        block_header = BlockHeader()
        # For the Rust validator, use chain_head.header instead
        block_header.ParseFromString(chain_head.block.header)

        response.block.block_id = bytes.fromhex(
            chain_head.header_signature)
        response.block.previous_id = \
            bytes.fromhex(block_header.previous_block_id)
        response.block.signer_id = \
            bytes.fromhex(block_header.signer_public_key)
        response.block.block_num = block_header.block_num
        response.block.payload = block_header.consensus
    except TooManyBranch:
        response.status = \
            consensus_pb2.ConsensusChainHeadGetResponse.TOO_MANY_BRANCH
        # Change bgx.publisher.max_batches_per_block after nests were made
        self._proxy.reset_max_batches_per_block()
    except UnknownBlock:
        response.status = \
            consensus_pb2.ConsensusChainHeadGetResponse.NO_CHAIN_HEAD
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception("ConsensusChainHeadGet")
        response.status = \
            consensus_pb2.ConsensusChainHeadGetResponse.SERVICE_ERROR
def set_root(self):
    """Used by handlers that fetch data from the merkle tree. Sets the
    tree with the proper root, and returns the chain head id if used.
    """
    if self.has_response():
        return

    if self.request.merkle_root:
        self._tree.set_merkle_root(self.request.merkle_root)
        return

    if self.request.head_id:
        try:
            head = self._block_store[self.request.head_id].block
        except KeyError as e:
            LOGGER.debug('Unable to find block "%s" in store', e)
            self.set_response(self.status.NO_ROOT)
    else:
        head = self.get_genesis()

    if self.has_response():
        return

    header = BlockHeader()
    header.ParseFromString(head.header)
    self._tree.set_merkle_root(header.state_root_hash)
    self.head_id = head.header_signature
def activate_if_configured(self, engine_name, engine_version):
    # Wait until the chain head is committed
    chain_head = None
    while chain_head is None:
        try:
            chain_head = self.chain_head_get()
        except UnknownBlock:
            pass

    header = BlockHeader()
    header.ParseFromString(chain_head.header)

    settings_view = self._settings_view_factory.create_settings_view(
        header.state_root_hash)
    conf_name = settings_view.get_setting(
        'sawtooth.consensus.algorithm.name')
    conf_version = settings_view.get_setting(
        'sawtooth.consensus.algorithm.version')

    if engine_name == conf_name and engine_version == conf_version:
        self._consensus_registry.activate_engine(
            engine_name, engine_version)
        self._consensus_notifier.notify_engine_activated(chain_head)
        LOGGER.info(
            "Consensus engine activated: %s %s",
            engine_name, engine_version)
def handle_request(self, request, response):
    try:
        blocks = []
        for block in self._proxy.blocks_get(request.block_ids):
            LOGGER.debug(
                'ConsensusBlocksGetHandler: block %s', type(block.header))
            # The block manager returns blocks from the store, where the
            # header is a serialized string that must be decoded; when the
            # block comes from the chain controller, the header is already
            # a parsed object.
            if not isinstance(block.header, BlockHeader):
                block_header = BlockHeader()
                block_header.ParseFromString(block.header)
            else:
                block_header = block.header
            blocks.append(consensus_pb2.ConsensusBlock(
                block_id=bytes.fromhex(block.header_signature),
                previous_id=bytes.fromhex(block_header.previous_block_id),
                signer_id=bytes.fromhex(block_header.signer_public_key),
                block_num=block_header.block_num,
                payload=block_header.consensus))
        response.blocks.extend(blocks)
    except UnknownBlock:
        LOGGER.debug('ConsensusBlocksGetHandler: proxy UNKNOWN_BLOCK')
        response.status = \
            consensus_pb2.ConsensusBlocksGetResponse.UNKNOWN_BLOCK
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception("ConsensusBlocksGet")
        response.status = \
            consensus_pb2.ConsensusBlocksGetResponse.SERVICE_ERROR
def state_get(self, block_id, addresses):
    '''Returns a list of address/data pairs (str, bytes).'''
    block = self._get_blocks([block_id.hex()])[0]
    block_header = BlockHeader()
    block_header.ParseFromString(block.header)

    state_view = self._state_view_factory.create_view(
        block_header.state_root_hash)

    result = []
    for address in addresses:
        # A fully specified address
        if len(address) == 70:
            try:
                value = state_view.get(address)
            except KeyError:
                # If the key is missing, leave it out of the response
                continue
            result.append((address, value))
            continue

        # An address prefix
        leaves = state_view.leaves(address)
        for leaf in leaves:
            result.append(leaf)

    return result
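# A minimal, self-contained sketch of the address semantics above, with a
# plain dict standing in for the real state view. The 70-hex-character
# length for fully specified addresses comes from the code above; the
# sample addresses themselves are made up for illustration.
def toy_state_get(state, addresses):
    result = []
    for address in addresses:
        if len(address) == 70:
            # Exact lookup; missing keys are silently skipped.
            if address in state:
                result.append((address, state[address]))
        else:
            # Prefix lookup returns every leaf under the prefix.
            result.extend(
                (addr, value) for addr, value in state.items()
                if addr.startswith(address))
    return result

state = {
    '000000' + '0' * 63 + 'a': b'alpha',
    '000000' + '0' * 63 + 'b': b'beta',
}
assert len(toy_state_get(state, ['000000'])) == 2          # prefix query
assert len(toy_state_get(state, ['000000' + '0' * 64])) == 0  # full miss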
def notify_block_new(self, block):
    """A new block was received and passed initial consensus validation.
    In federation mode, send only to the nodes of this peer's own cluster.
    """
    summary = hashlib.sha256()
    for batch in block.batches:
        summary.update(batch.header_signature.encode())
    LOGGER.debug(
        'ConsensusNotifier: notify_block_new BLOCK=%s SUMMARY=%s\n',
        block.header_signature[:8],
        summary.digest().hex()[:10])
    block_header = BlockHeader()
    block_header.ParseFromString(block.header)
    self._notify(
        validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_NEW,
        consensus_pb2.ConsensusNotifyBlockNew(
            block=consensus_pb2.ConsensusBlock(
                block_id=bytes.fromhex(block.header_signature),
                previous_id=bytes.fromhex(block_header.previous_block_id),
                signer_id=bytes.fromhex(block_header.signer_public_key),
                block_num=block_header.block_num,
                payload=block_header.consensus,
                summary=summary.digest())))
def settings_get(self, block_id, settings):
    '''Returns a list of key/value pairs (str, str).'''
    block = self._get_blocks([block_id.hex()])[0]
    block_header = BlockHeader()
    block_header.ParseFromString(block.header)

    try:
        settings_view = self._settings_view_factory.create_settings_view(
            block_header.state_root_hash)
    except KeyError:
        LOGGER.error(
            'Settings from block %s requested, but root hash %s was '
            'missing. Returning no setting values.',
            block_id.hex(),
            block_header.state_root_hash)
        # The state root does not exist, which may indicate a pruned root
        # from a dropped fork or an invalid state.
        return []

    result = []
    for setting in settings:
        try:
            value = settings_view.get_setting(setting)
        except KeyError:
            # If the key is missing, leave it out of the response
            continue
        result.append((setting, value))

    return result
def handle(self, connection_id, message_content):
    message = GossipMessage()
    message.ParseFromString(message_content)
    if message.content_type == "BLOCK":
        public_key = \
            self._network.connection_id_to_public_key(connection_id)
        block = Block()
        block.ParseFromString(message.content)
        header = BlockHeader()
        header.ParseFromString(block.header)
        if header.signer_public_key == public_key:
            permitted = \
                self._permission_verifier.check_network_consensus_role(
                    public_key)
            if not permitted:
                LOGGER.debug(
                    "Public key is not permitted to publish block, "
                    "remove connection: %s", connection_id)
                self._gossip.unregister_peer(connection_id)
                violation = AuthorizationViolation(
                    violation=RoleType.Value("NETWORK"))
                return HandlerResult(
                    HandlerStatus.RETURN_AND_CLOSE,
                    message_out=violation,
                    message_type=validator_pb2.Message.
                    AUTHORIZATION_VIOLATION)
    # If allowed, pass the message on
    return HandlerResult(HandlerStatus.PASS)
def get_configured_engine(block, settings_view_factory):
    header = BlockHeader()
    header.ParseFromString(block.header)
    settings_view = settings_view_factory.create_settings_view(
        header.state_root_hash)

    conf_name = settings_view.get_setting(
        'sawtooth.consensus.algorithm.name')
    conf_version = settings_view.get_setting(
        'sawtooth.consensus.algorithm.version')

    # For backwards compatibility with 1.0:
    # - Use version "0.1" if sawtooth.consensus.algorithm.version is unset
    # - Use sawtooth.consensus.algorithm if
    #   sawtooth.consensus.algorithm.name is unset
    # - Use "Devmode" if sawtooth.consensus.algorithm is unset
    if conf_version is not None:
        version = conf_version
    else:
        version = "0.1"

    if conf_name is not None:
        name = conf_name
    else:
        algorithm = settings_view.get_setting(
            'sawtooth.consensus.algorithm')
        if algorithm is not None:
            name = algorithm
        else:
            name = "Devmode"

    return name, version
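# A minimal sketch of the backwards-compatibility fallback above, with a
# plain dict standing in for the settings view. The setting keys are the
# real ones used in the function; the dict-based lookup is illustrative.
def toy_configured_engine(settings):
    version = settings.get('sawtooth.consensus.algorithm.version', '0.1')
    name = settings.get(
        'sawtooth.consensus.algorithm.name',
        settings.get('sawtooth.consensus.algorithm', 'Devmode'))
    return name, version

# Unset settings fall back to the 1.0 defaults.
assert toy_configured_engine({}) == ('Devmode', '0.1')
# A legacy 'sawtooth.consensus.algorithm' value is honored when the newer
# name setting is absent.
assert toy_configured_engine(
    {'sawtooth.consensus.algorithm': 'poet'}) == ('poet', '0.1')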
def _set_root(self, request):
    """Sets the root of the merkle tree, returning any head id used.

    Note:
        This method will fail if `_tree` has not been set

    Args:
        request (object): The parsed protobuf request object

    Returns:
        None: if a merkle_root is specified directly, no id is returned
        str: the id of the head block used to specify the root

    Raises:
        ResponseFailed: Failed to set the root of the merkle tree
    """
    if request.merkle_root:
        root = request.merkle_root
        head_id = None
    else:
        head = self._get_head_block(request)
        header = BlockHeader()
        header.ParseFromString(head.header)
        root = header.state_root_hash
        head_id = head.header_signature

    try:
        self._tree.set_merkle_root(root)
    except KeyError as e:
        LOGGER.debug('Unable to find root "%s" in database', e)
        raise _ResponseFailed(self._status.NO_ROOT)

    return head_id
def get_configured_engine(block, settings_view_factory):
    header = BlockHeader()
    header.ParseFromString(block.header)
    settings_view = settings_view_factory.create_settings_view(
        header.state_root_hash)

    conf_name = settings_view.get_setting(
        'sawtooth.consensus.algorithm.name')
    conf_version = settings_view.get_setting(
        'sawtooth.consensus.algorithm.version')

    # Fall back to devmode if nothing else is set
    name = "Devmode"
    version = "0.1"

    # If the name and version settings aren't set, check for PoET
    if conf_name is None or conf_version is None:
        algorithm = settings_view.get_setting(
            'sawtooth.consensus.algorithm')
        if algorithm and (algorithm.lower() == 'poet'):
            name = "PoET"
    # Otherwise use the name and version settings
    else:
        name = conf_name
        version = conf_version

    return name, version
def on_block_received(self, block):
    with self._lock:
        if block.header_signature in self._block_store:
            # We already have this block
            return

        header = BlockHeader()
        header.ParseFromString(block.header)
        block = BlockWrapper(header, block)
        block_state = BlockState(
            block_wrapper=block, weight=0, status=BlockStatus.Unknown)
        self._block_store[block.header_signature] = block_state
        self._blocks_pending[block.header_signature] = []
        if block.header_signature in self._blocks_requested:
            # This is a requested block; route it to the validator
            # that requested it.
            validator = self._blocks_requested.pop(block.header_signature)
            if validator.chain_head.block.header_signature != \
                    self._chain_head.block.header_signature:
                # The head of the chain has changed; start over.
                self._verify_block(validator.new_block)
            else:
                self._executor.submit(validator.run)
        elif block.previous_block_id in self._blocks_processing:
            # The previous block is still being processed, so put this
            # block in a wait queue.
            pending_blocks = \
                self._blocks_pending.get(block.previous_block_id, [])
            pending_blocks.append(block_state)
            self._blocks_pending[block.previous_block_id] = \
                pending_blocks
        else:
            # Schedule this block for validation.
            self._verify_block(block_state)
def handle(self, identity, message_content):
    helper = _ClientHelper(
        message_content,
        client_pb2.ClientBlockListRequest,
        client_pb2.ClientBlockListResponse,
        validator_pb2.Message.CLIENT_BLOCK_LIST_RESPONSE,
        block_store=self._block_store)

    blocks = [helper.get_head_block()]
    if helper.has_response():
        return helper.result

    # Build the block list by following previous ids through the store
    while True:
        header = BlockHeader()
        header.ParseFromString(blocks[-1].header)
        previous_id = header.previous_block_id
        if previous_id not in self._block_store:
            break
        blocks.append(self._block_store[previous_id].block)

    helper.set_response(
        helper.status.OK,
        head_id=helper.head_id,
        blocks=blocks)
    return helper.result
def __init__(self, head_id, block_manager, block_store):
    """The constructor should be passed the previous block id of the
    block being validated."""
    uncommitted_block_ids = list()
    uncommitted_batch_ids = set()
    uncommitted_txn_ids = set()

    # Find the most recent ancestor of this block that is in the block
    # store. Batches and transactions that are in a block that is in the
    # block store and that has a greater block number than this block
    # must be ignored.
    if head_id != NULL_BLOCK_IDENTIFIER:
        head = next(block_manager.get([head_id]))
        ancestor = head
        while ancestor.header_signature not in block_store:
            # For every block not in the block store, we need to track
            # all its batch ids and transaction ids separately to ensure
            # there are no duplicates.
            for batch in ancestor.batches:
                uncommitted_batch_ids.add(batch.header_signature)
                for txn in batch.transactions:
                    uncommitted_txn_ids.add(txn.header_signature)
            uncommitted_block_ids.append(ancestor.header_signature)

            ancestor_header = BlockHeader()
            ancestor_header.ParseFromString(ancestor.header)
            previous_block_id = ancestor_header.previous_block_id
            if previous_block_id == NULL_BLOCK_IDENTIFIER:
                break
            ancestor = next(block_manager.get([previous_block_id]))
    else:
        ancestor = None

    self.block_store = block_store

    ancestor_header = None
    if ancestor:
        ancestor_header = BlockHeader()
        ancestor_header.ParseFromString(ancestor.header)

    self.common_ancestor = ancestor_header
    self.uncommitted_block_ids = uncommitted_block_ids
    self.uncommitted_batch_ids = uncommitted_batch_ids
    self.uncommitted_txn_ids = uncommitted_txn_ids
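# A self-contained sketch of the ancestor walk above: dicts stand in for
# the block manager and block store, and blocks are reduced to their ids.
# All names and the toy chain are made up; only the traversal logic
# mirrors the original.
def find_uncommitted(head_id, parents, committed):
    """Walk parent links from head_id until hitting a committed block,
    collecting the ids of every uncommitted block along the way."""
    uncommitted = []
    block_id = head_id
    while block_id not in committed:
        uncommitted.append(block_id)
        block_id = parents[block_id]  # previous_block_id
    return uncommitted, block_id  # (uncommitted ids, common ancestor)

parents = {'b3': 'b2', 'b2': 'b1', 'b1': 'b0'}
committed = {'b0', 'b1'}
assert find_uncommitted('b3', parents, committed) == (['b3', 'b2'], 'b1')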
def _add_block_to_pending(self, block):
    with self._lock:
        self._pending.add(block.header_signature)
        block_header = BlockHeader()
        block_header.ParseFromString(block.header)
        previous = block_header.previous_block_id
        if previous not in self._descendants:
            self._descendants[previous] = [block]
        else:
            if block not in self._descendants[previous]:
                self._descendants[previous].append(block)
def test_block_missing_batch_not_in_cache(self):
    """The block has a missing batch and the batch is not in the cache.
    The batch will be requested and the block will not be passed to
    on_block_received.
    """
    block = self._create_blocks(
        1, 3, missing_batch=True, find_batch=False)[0]
    self.completer.add_block(block)
    header = BlockHeader()
    header.ParseFromString(block.header)
    self.assertIn(header.batch_ids[-1], self.gossip.requested_batches)
def _list_blocks(self):
    blocks = []
    current_id = self._block_store['chain_head_id']
    while current_id in self._block_store:
        block = self._block_store[current_id].block.get_block()
        blocks.append(block)
        header = BlockHeader()
        header.ParseFromString(block.header)
        current_id = header.previous_block_id
    return blocks
def is_valid_block(block):
    # Block structure verification
    header = BlockHeader()
    header.ParseFromString(block.header)

    if len(header.batch_ids) != len(set(header.batch_ids)):
        LOGGER.debug("Block has duplicate batches. Dropping block: %s",
                     block.header_signature)
        return False

    if not all(map(is_valid_batch, block.batches)):
        return False

    return True
def _validate_on_chain_rules(self, block, prev_state_root):
    """Validate that the block conforms to all validation rules stored
    in state. If the block breaks any of the stored rules, the block is
    invalid.
    """
    block_header = BlockHeader()
    block_header.ParseFromString(block.header)
    if block_header.block_num != 0:
        return enforce_validation_rules(
            self._settings_view_factory.create_settings_view(
                prev_state_root),
            block_header.signer_public_key,
            block.batches)
    return True
def validate_block(block):
    # Validate the block signature
    header = BlockHeader()
    header.ParseFromString(block.header)
    valid = signing.verify(block.header,
                           block.header_signature,
                           header.signer_pubkey)

    # Validate all batches in the block. These are not all batches in the
    # batch_ids stored in the block header, only those sent with the block.
    total = len(block.batches)
    index = 0
    while valid and index < total:
        valid = validate_batch(block.batches[index])
        index += 1

    return valid
def _validate_permissions(self, block, prev_state_root):
    """Validate that all of the batch signers and transaction signers
    for the batches in the block are permitted by the transactor
    permissioning roles stored in state as of the previous block. If a
    transactor is found to not be permitted, the block is invalid.
    """
    block_header = BlockHeader()
    block_header.ParseFromString(block.header)
    if block_header.block_num != 0:
        for batch in block.batches:
            if not self._permission_verifier.is_batch_signer_authorized(
                    batch, prev_state_root, from_state=True):
                return False
    return True
def notify_block_new(self, block):
    """A new block was received and passed initial consensus validation"""
    summary = hashlib.sha256()
    for batch in block.batches:
        summary.update(batch.header_signature.encode())
    block_header = BlockHeader()
    block_header.ParseFromString(block.header)
    self._notify(
        validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_NEW,
        consensus_pb2.ConsensusNotifyBlockNew(
            block=consensus_pb2.ConsensusBlock(
                block_id=bytes.fromhex(block.header_signature),
                previous_id=bytes.fromhex(block_header.previous_block_id),
                signer_id=bytes.fromhex(block_header.signer_public_key),
                block_num=block_header.block_num,
                payload=block_header.consensus,
                summary=summary.digest())))
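# A small, runnable sketch of how the block summary above is computed: a
# single SHA-256 over the batch header signatures, fed in order. The
# sample signatures are made up; only the hashing pattern is from the
# code above.
import hashlib

batch_signatures = ['aabb01', 'ccdd02', 'eeff03']
summary = hashlib.sha256()
for sig in batch_signatures:
    summary.update(sig.encode())
# Equivalent one-shot form over the concatenation:
assert summary.digest() == hashlib.sha256(
    ''.join(batch_signatures).encode()).digest()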
def is_valid_block(block):
    # Validate the block signature
    header = BlockHeader()
    header.ParseFromString(block.header)

    if not signing.verify(block.header,
                          block.header_signature,
                          header.signer_public_key):
        LOGGER.debug("block failed signature validation: %s",
                     block.header_signature)
        return False

    # Validate all batches in the block. These are not all batches in the
    # batch_ids stored in the block header, only those sent with the block.
    if not all(map(is_valid_batch, block.batches)):
        return False

    return True
def is_valid_block(block):
    # Validate the block signature
    header = BlockHeader()
    header.ParseFromString(block.header)

    context = create_context('secp256k1')
    public_key = Secp256k1PublicKey.from_hex(header.signer_public_key)
    if not context.verify(block.header_signature,
                          block.header,
                          public_key):
        LOGGER.debug("block failed signature validation: %s",
                     block.header_signature)
        return False

    # Validate all batches in the block. These are not all batches in the
    # batch_ids stored in the block header, only those sent with the block.
    if not all(map(is_valid_batch, block.batches)):
        return False

    return True
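# A round-trip sketch of the signature check above using sawtooth_signing
# (assuming that package is installed): generate a key, sign a message,
# and verify it with the same secp256k1 context the validator uses.
from sawtooth_signing import create_context

context = create_context('secp256k1')
private_key = context.new_random_private_key()
public_key = context.get_public_key(private_key)

message = b'example-block-header-bytes'  # illustrative payload
signature = context.sign(message, private_key)

# context.verify takes (signature, message, public_key), matching the
# is_valid_block call above.
assert context.verify(signature, message, public_key)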
def run(self):
    while True:
        if self._exit:
            return
        try:
            block = self.block_queue.get(block=False)
            block_header = BlockHeader()
            block_header.ParseFromString(block.header)
            status = self.check_block(block, block_header)
            if status:
                self.on_block_complete(block)
                LOGGER.debug("Block passed to journal %s",
                             block.header_signature)
            else:
                self.block_queue.put(block)
        except queue.Empty:
            with self.condition:
                self.condition.wait()
def generate_block(self, previous_block=None, add_to_store=False,
                   batch_count=0, status=BlockStatus.Unknown,
                   invalid_consensus=False, invalid_batch=False,
                   invalid_signature=False, weight=1):
    previous = self._get_block(previous_block)
    if previous is None:
        previous = BlockWrapper(
            BlockHeader(
                block_num=0,
                previous_block_id="0000000000000000",
            ))
        previous.set_signature(_generate_id())
        previous_block_state = BlockState(
            block_wrapper=previous, weight=0, status=BlockStatus.Valid)
        self.block_store[previous.header_signature] = previous_block_state
        self.block_publisher.on_chain_updated(previous)

    while self._new_block is None:
        self.block_publisher.on_batch_received(Batch())
        self.block_publisher.on_check_publish_block(True)

    block = self._new_block
    self._new_block = None
    header = BlockHeader()
    header.ParseFromString(block.header)
    block = BlockWrapper(header, block)

    if invalid_signature:
        block.set_signature("BAD")

    if add_to_store:
        block_state = BlockState(block_wrapper=block, weight=0)
        block_state.status = status
        self.block_store[block.header_signature] = block_state

    return block
def handle_request(self, request, response):
    startup_info = self._proxy.register()
    if startup_info is None:
        # Not ready to work with a consensus engine yet
        response.status = consensus_pb2.ConsensusRegisterResponse.NOT_READY
        if self._last_status != response.status:
            LOGGER.debug(
                'ConsensusRegisterHandler: NOT READY yet for working '
                'with consensus engine!\n')
            self._last_status = response.status
        return

    # if self._proxy.is_recovery:  # recovery mode
    #     response.status = consensus_pb2.ConsensusRegisterResponse.RECOVERY

    chain_head = startup_info.chain_head
    # NOTE: when resources are scarce, some peers may not be connected.
    peers = [bytes.fromhex(peer_id)
             for peer_id in startup_info.peers
             if peer_id is not None]
    local_peer_info = startup_info.local_peer_info
    LOGGER.debug(
        'ConsensusRegisterHandler: peers=%s local=%s chain_head[%s]=%s '
        'header=%s block=%s',
        peers, local_peer_info, type(chain_head), chain_head,
        type(chain_head.header), type(chain_head.block))

    block_header = BlockHeader()
    # The latest (Rust) validator uses chain_head.header, because there
    # chain_head is a Block rather than a BlockWrapper as in the Python
    # validator.
    block_header.ParseFromString(chain_head.block.header)

    response.chain_head.block_id = bytes.fromhex(
        chain_head.header_signature)
    response.chain_head.previous_id = \
        bytes.fromhex(block_header.previous_block_id)
    response.chain_head.signer_id = \
        bytes.fromhex(block_header.signer_public_key)
    response.chain_head.block_num = block_header.block_num
    response.chain_head.payload = block_header.consensus

    response.peers.extend([
        consensus_pb2.ConsensusPeerInfo(peer_id=peer_id)
        for peer_id in peers
    ])
    response.local_peer_info.peer_id = local_peer_info
    response.peering_mode = startup_info.peering_mode

    self._consensus_notifier.add_registered_engine(
        request.name, request.version)
    LOGGER.info(
        "Consensus engine registered: %s %s",
        request.name, request.version)