def handle(self, connection_id, message_content):
    message = GossipMessage()
    message.ParseFromString(message_content)
    if message.content_type == GossipMessage.BLOCK:
        public_key = \
            self._network.connection_id_to_public_key(connection_id)
        block = Block()
        block.ParseFromString(message.content)
        header = BlockHeader()
        header.ParseFromString(block.header)
        if header.signer_public_key == public_key:
            permitted = \
                self._permission_verifier.check_network_consensus_role(
                    public_key)
            if not permitted:
                LOGGER.debug(
                    "Public key is not permitted to publish block, "
                    "remove connection: %s", connection_id)
                self._gossip.unregister_peer(connection_id)
                violation = AuthorizationViolation(
                    violation=RoleType.Value("NETWORK"))
                return HandlerResult(
                    HandlerStatus.RETURN_AND_CLOSE,
                    message_out=violation,
                    message_type=validator_pb2.Message.
                    AUTHORIZATION_VIOLATION)

    # if allowed pass message
    return HandlerResult(HandlerStatus.PASS)
def header(self):
    """
    Returns the header of the block
    """
    if self._block_header is None:
        self._block_header = BlockHeader()
        self._block_header.ParseFromString(self.block.header)
    return self._block_header
def is_valid_block(block):
    # block structure verification
    header = BlockHeader()
    header.ParseFromString(block.header)

    if len(header.batch_ids) != len(set(header.batch_ids)):
        LOGGER.debug("Block has duplicate batches. Dropping block: %s",
                     block.header_signature)
        return False

    if not all(map(is_valid_batch, block.batches)):
        return False

    return True
def is_valid_block(block):
    # validate block signature
    header = BlockHeader()
    header.ParseFromString(block.header)

    context = create_context('secp256k1')
    public_key = Secp256k1PublicKey.from_hex(header.signer_public_key)
    if not context.verify(block.header_signature, block.header, public_key):
        LOGGER.debug("block failed signature validation: %s",
                     block.header_signature)
        return False

    # validate all batches in block. These are not all batches in the
    # batch_ids stored in the block header, only those sent with the block.
    if not all(map(is_valid_batch, block.batches)):
        return False

    return True
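# A minimal sketch of producing a block whose header signature passes the
# check above, assuming a sawtooth_signing-style API. The Block/BlockHeader
# protobufs are the same ones used by is_valid_block; the import paths and
# all field values below are illustrative assumptions, not validator code.
from sawtooth_signing import CryptoFactory, create_context
from sawtooth_signing.secp256k1 import Secp256k1PublicKey

context = create_context('secp256k1')
signer = CryptoFactory(context).new_signer(context.new_random_private_key())

header = BlockHeader(
    block_num=1,
    previous_block_id='00' * 64,
    signer_public_key=signer.get_public_key().as_hex())
header_bytes = header.SerializeToString()

block = Block(
    header=header_bytes,
    header_signature=signer.sign(header_bytes))

# The same verification performed in is_valid_block:
public_key = Secp256k1PublicKey.from_hex(header.signer_public_key)
assert context.verify(block.header_signature, block.header, public_key)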
def block_start(self, previous_block_id):
    """Returns an ordered list of batches to inject at the beginning of the
    block. Can also return None if no batches should be injected.

    Args:
        previous_block_id (str): The signature of the previous block.

    Returns:
        A list of batches to inject.
    """
    previous_block = self._block_store[previous_block_id].get_block()

    previous_header = BlockHeader()
    previous_header.ParseFromString(previous_block.header)

    block_info = BlockInfo(
        block_num=previous_header.block_num,
        previous_block_id=previous_header.previous_block_id,
        signer_public_key=previous_header.signer_public_key,
        header_signature=previous_block.header_signature,
        timestamp=int(time.time()))

    return [self.create_batch(block_info)]
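# A hedged usage sketch of the injector above. 'injector' is an instance of
# the class this method belongs to, and 'candidate' stands in for whatever
# object accumulates batches for the in-progress block; both names (and
# 'chain_head') are illustrative, not taken from the validator.
injected = injector.block_start(chain_head.header_signature)
for batch in injected or []:
    candidate.add_batch(batch)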
def _list_store_resources(self, request, head_id, filter_ids,
                          resource_fetcher, block_xform):
    """Builds a list of blocks or resources derived from blocks,
    handling multiple possible filter requests:
        - filtered by a set of ids
        - filtered by head block
        - filtered by both id and head block
        - not filtered (all current resources)

    Note:
        This method will fail if `_block_store` has not been set

    Args:
        request (object): The parsed protobuf request object
        head_id (str): Either request.head_id, or the current chain head
        filter_ids (list of str): the resource ids (if any) to filter by
        resource_fetcher (function): Fetches a resource by its id
            Expected args:
                resource_id: The id of the resource to be fetched
            Expected return:
                object: The resource to be appended to the results
        block_xform (function): Transforms a block into a list of resources
            Expected args:
                block: A block object from the block store
            Expected return:
                list: To be concatenated to the end of the results

    Returns:
        list: List of blocks or data from blocks. If filtered by ids,
            they will be listed in the same order as the id filters,
            otherwise they will be ordered from newest to oldest
    """
    resources = []

    # Simply fetch by id if filtered by id but not by head block
    if filter_ids and not request.head_id:
        for resource_id in filter_ids:
            try:
                resources.append(resource_fetcher(resource_id))
            except (KeyError, ValueError, TypeError):
                # Invalid ids should be omitted, not raise an exception
                pass

    # Traverse block chain to build results for most scenarios
    else:
        current_id = head_id
        while current_id in self._block_store:
            block = self._block_store[current_id].block
            resources += block_xform(block)
            header = BlockHeader()
            header.ParseFromString(block.header)
            current_id = header.previous_block_id

    # If filtering by head AND ids, the traverse results must be winnowed
    if request.head_id and filter_ids:
        matches = {
            r.header_signature: r
            for r in resources
            if r.header_signature in filter_ids
        }
        resources = [matches[i] for i in filter_ids if i in matches]

    return resources
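# A hedged sketch of the two callback shapes the docstring above describes.
# The request fields, lambdas, and the fetch-by-id helper are illustrative
# assumptions, not the handlers' actual implementations.

# Listing blocks: fetch a block by id, and each block maps to one result.
blocks = self._list_store_resources(
    request,
    head_id,
    request.block_ids,
    lambda block_id: self._block_store[block_id].block,
    lambda block: [block])

# Listing batches: fetch a batch by id, and each block contributes all of
# the batches it carries.
batches = self._list_store_resources(
    request,
    head_id,
    request.batch_ids,
    lambda batch_id: self._block_store.get_batch(batch_id),  # assumed helper
    lambda block: list(block.batches))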
def _build_candidate_block(self, chain_head):
    """ Build a candidate block and construct the consensus object to
    validate it.
    :param chain_head: The block to build on top of.
    :return: (BlockBuilder) - The candidate block in a BlockBuilder wrapper.
    """
    state_view = BlockWrapper.state_view_for_block(
        chain_head,
        self._state_view_factory)
    consensus_module = ConsensusFactory.get_configured_consensus_module(
        chain_head.header_signature,
        state_view)

    # using chain_head so we can use the setting_cache
    max_batches = int(
        self._settings_cache.get_setting(
            'suomi.publisher.max_batches_per_block',
            chain_head.state_root_hash,
            default_value=0))

    public_key = self._identity_signer.get_public_key().as_hex()
    consensus = consensus_module.BlockPublisher(
        block_cache=self._block_cache,
        state_view_factory=self._state_view_factory,
        batch_publisher=self._batch_publisher,
        data_dir=self._data_dir,
        config_dir=self._config_dir,
        validator_id=public_key)

    batch_injectors = []
    if self._batch_injector_factory is not None:
        batch_injectors = self._batch_injector_factory.create_injectors(
            chain_head.identifier)
        if batch_injectors:
            LOGGER.debug("Loaded batch injectors: %s", batch_injectors)

    block_header = BlockHeader(
        block_num=chain_head.block_num + 1,
        previous_block_id=chain_head.header_signature,
        signer_public_key=public_key)
    block_builder = BlockBuilder(block_header)

    if not consensus.initialize_block(block_builder.block_header):
        if not self._logging_states.consensus_not_ready:
            self._logging_states.consensus_not_ready = True
            LOGGER.debug("Consensus not ready to build candidate block.")
        return None

    if self._logging_states.consensus_not_ready:
        self._logging_states.consensus_not_ready = False
        LOGGER.debug("Consensus is ready to build candidate block.")

    # create a new scheduler
    scheduler = self._transaction_executor.create_scheduler(
        self._squash_handler, chain_head.state_root_hash)

    # build the TransactionCommitCache
    committed_txn_cache = TransactionCommitCache(
        self._block_cache.block_store)

    self._transaction_executor.execute(scheduler)
    self._candidate_block = _CandidateBlock(
        self._block_cache.block_store,
        consensus,
        scheduler,
        committed_txn_cache,
        block_builder,
        max_batches,
        batch_injectors,
        SettingsView(state_view),
        public_key)

    for batch in self._pending_batches:
        if self._candidate_block.can_add_batch:
            self._candidate_block.add_batch(batch)
        else:
            break
class BlockWrapper(object):
    """
    Utility class to make accessing block members more convenient.
    This also adds storage of the weight and status used by the Journal
    components to track the state of a block. This is the object type
    stored in the Block Cache.
    """

    def __init__(self, block, weight=0, status=BlockStatus.Unknown):
        self.block = block
        self._block_header = None
        self.weight = weight  # the block weight calculated by the
        # consensus algorithm.
        self.status = status  # One of the BlockStatus types.
        self.execution_results = []
        self.num_transactions = 0

    @staticmethod
    def wrap(block, weight=0, status=BlockStatus.Unknown):
        if isinstance(block, BlockWrapper):
            return block
        return BlockWrapper(block, weight=weight, status=status)

    @property
    def batches(self):
        """
        Returns the batches of the block.
        """
        return self.block.batches

    @property
    def consensus(self):
        """
        Returns the consensus field of the block header.
        """
        return self.header.consensus

    def get_block(self):
        """
        Return the wrapped block object.
        """
        return self.block

    @property
    def header(self):
        """
        Returns the header of the block
        """
        if self._block_header is None:
            self._block_header = BlockHeader()
            self._block_header.ParseFromString(self.block.header)
        return self._block_header

    @property
    def header_signature(self):
        """
        Returns the header signature of the block
        """
        return self.block.header_signature

    @property
    def identifier(self):
        """
        Returns the identifier of the block, currently the header signature
        """
        return self.block.header_signature

    @property
    def block_num(self):
        """
        Returns the depth or block_number
        """
        return self.header.block_num

    @property
    def state_root_hash(self):
        """
        Returns the state root hash
        """
        return self.header.state_root_hash

    @property
    def previous_block_id(self):
        """
        Returns the identifier of the previous block.
        """
        return self.header.previous_block_id

    @staticmethod
    def state_view_for_block(block_wrapper, state_view_factory):
        """
        Returns the state view for an arbitrary block.

        Args:
            block_wrapper (BlockWrapper): The block for which a state
                view is to be returned
            state_view_factory (StateViewFactory): The state view factory
                used to create the StateView object

        Returns:
            StateView object associated with the block
        """
        state_root_hash = \
            block_wrapper.state_root_hash \
            if block_wrapper is not None else None

        return state_view_factory.create_view(state_root_hash)

    def get_state_view(self, state_view_factory):
        """
        Returns the state view associated with this block

        Args:
            state_view_factory (StateViewFactory): The state view factory
                used to create the StateView object

        Returns:
            StateView object
        """
        return BlockWrapper.state_view_for_block(self, state_view_factory)

    def __repr__(self):
        return "{}({}, S:{}, P:{})".format(
            self.identifier,
            self.block_num,
            self.state_root_hash,
            self.previous_block_id)

    def __str__(self):
        return "{} (block_num:{}, state:{}, previous_block_id:{})".format(
            self.identifier,
            self.block_num,
            self.state_root_hash,
            self.previous_block_id,
        )
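# A minimal usage sketch of BlockWrapper, assuming the Block/BlockHeader
# protobufs used elsewhere in these snippets; all field values here are
# illustrative.
header = BlockHeader(
    block_num=5,
    previous_block_id='aa' * 64,
    signer_public_key='02' + 'bb' * 32,
    state_root_hash='cc' * 32)
block = Block(
    header=header.SerializeToString(),
    header_signature='dd' * 64)

wrapper = BlockWrapper.wrap(block)
assert wrapper.block_num == 5                  # header is parsed lazily
assert wrapper.previous_block_id == 'aa' * 64
assert wrapper.identifier == block.header_signature
assert BlockWrapper.wrap(wrapper) is wrapper   # wrapping is idempotent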