Example #1
    def _sequential_add_batch_after_all_results_set(self,
                                                    scheduler,
                                                    context_manager):
        """Tests that adding a new batch only after setting all of the
        txn results will produce only expected state roots. Here no state
        roots were specified, so similar to block publishing use of scheduler.
        Basically:
            1) Create 3 batches, the last being marked as having an invalid
               transaction. Add one batch and then while the scheduler keeps
               on returning transactions, set the txn result, and then
               call next_transaction.
            2) Call finalize, and then assert that the scheduler is complete
            3) Assert that the first batch is valid and has no state hash,
               the second batch is valid and since it is the last valid batch
               in the scheduler has a state hash, and that the third batch
               is invalid and consequently has no state hash.
        """

        private_key = self._context.new_random_private_key()
        signer = self._crypto_factory.new_signer(private_key)

        # 1)
        batch_signatures = []
        batches = []
        for names in [['a', 'b'], ['d', 'e'], ['invalid', 'c']]:
            batch_txns = []
            for name in names:
                txn, _ = create_transaction(
                    payload=name.encode(),
                    signer=signer)

                batch_txns.append(txn)

            batch = create_batch(
                transactions=batch_txns,
                signer=signer)
            batches.append(batch)
            batch_signatures.append(batch.header_signature)
        invalid_payload_sha = hashlib.sha512(
            'invalid'.encode()).hexdigest()
        for batch in batches:
            scheduler.add_batch(batch=batch)
            txn_info = scheduler.next_transaction()
            while txn_info is not None:
                txn_header = transaction_pb2.TransactionHeader()
                txn_header.ParseFromString(txn_info.txn.header)
                inputs_outputs = list(txn_header.inputs)
                c_id = context_manager.create_context(
                    state_hash=context_manager.get_first_root(),
                    base_contexts=txn_info.base_context_ids,
                    inputs=inputs_outputs,
                    outputs=list(txn_header.outputs))
                context_manager.set(
                    context_id=c_id,
                    address_value_list=[{inputs_outputs[0]: b'5'}])
                if txn_header.payload_sha512 == invalid_payload_sha:
                    scheduler.set_transaction_execution_result(
                        txn_info.txn.header_signature,
                        is_valid=False,
                        context_id=None)
                else:
                    scheduler.set_transaction_execution_result(
                        txn_info.txn.header_signature,
                        is_valid=True,
                        context_id=c_id)
                txn_info = scheduler.next_transaction()

        # 2)
        scheduler.finalize()
        self.assertTrue(scheduler.complete(block=False),
                        "The scheduler has had all txn results set, so "
                        "after calling finalize the scheduler is complete")
        # 3)
        first_batch_id = batch_signatures.pop(0)
        result1 = scheduler.get_batch_execution_result(first_batch_id)
        self.assertEqual(
            result1.is_valid,
            True,
            "The first batch is valid")
        self.assertIsNone(result1.state_hash,
                          "The first batch doesn't produce"
                          " a state hash")
        second_batch_id = batch_signatures.pop(0)
        result2 = scheduler.get_batch_execution_result(second_batch_id)
        self.assertEqual(
            result2.is_valid,
            True,
            "The second batch is valid")
        self.assertIsNotNone(result2.state_hash, "The second batch is the "
                                                 "last valid batch in the "
                                                 "scheduler")

        third_batch_id = batch_signatures.pop(0)
        result3 = scheduler.get_batch_execution_result(third_batch_id)
        self.assertEqual(result3.is_valid, False)
        self.assertIsNone(result3.state_hash,
                          "The last batch is invalid so "
                          "doesn't have a state hash")
Example #2
def do_genesis(args):
    """Executes the `poet genesis` subcommand.

    This command generates a validator registry transaction and saves it to a
    file, whose location is determined by the args.  The signup data, generated
    by the selected enclave, is also stored in a well-known location.
    """
    public_key, signing_key = _read_signing_keys(args.key)

    public_key_hash = sha256(public_key.encode()).hexdigest()

    with PoetEnclaveModuleWrapper(
            enclave_module=args.enclave_module,
            config_dir=config.get_config_dir(),
            data_dir=config.get_data_dir()) as poet_enclave_module:
        signup_info = SignupInfo.create_signup_info(
            poet_enclave_module=poet_enclave_module,
            originator_public_key_hash=public_key_hash,
            nonce=SignupInfo.block_id_to_nonce(NULL_BLOCK_IDENTIFIER))

    print(
        'Writing key state for PoET public key: {}...{}'.format(
            signup_info.poet_public_key[:8],
            signup_info.poet_public_key[-8:]))

    # Store the newly-created PoET key state, associating it with its
    # corresponding public key
    poet_key_state_store = \
        PoetKeyStateStore(
            data_dir=config.get_data_dir(),
            validator_id=public_key)
    poet_key_state_store[signup_info.poet_public_key] = \
        PoetKeyState(
            sealed_signup_data=signup_info.sealed_signup_data,
            has_been_refreshed=False)

    # Create the validator registry payload
    payload = \
        vr_pb.ValidatorRegistryPayload(
            verb='register',
            name='validator-{}'.format(public_key[:8]),
            id=public_key,
            signup_info=vr_pb.SignUpInfo(
                poet_public_key=signup_info.poet_public_key,
                proof_data=signup_info.proof_data,
                anti_sybil_id=signup_info.anti_sybil_id,
                nonce=NULL_BLOCK_IDENTIFIER))
    serialized = payload.SerializeToString()

    # Create the address that will be used to look up this validator
    # registry transaction.  Seems like a candidate for refactoring.
    validator_entry_address = \
        VR_NAMESPACE + sha256(public_key.encode()).hexdigest()

    # Create a transaction header and transaction for the validator
    # registry update, and then wrap the transaction in a batch to be
    # written out to the batch file.
    output_addresses = [validator_entry_address, VALIDATOR_MAP_ADDRESS]
    input_addresses = \
        output_addresses + \
        [SettingsView.setting_address('sawtooth.poet.report_public_key_pem'),
         SettingsView.setting_address('sawtooth.poet.'
                                      'valid_enclave_measurements'),
         SettingsView.setting_address('sawtooth.poet.valid_enclave_basenames')]

    header = \
        txn_pb.TransactionHeader(
            signer_public_key=public_key,
            family_name='sawtooth_validator_registry',
            family_version='1.0',
            inputs=input_addresses,
            outputs=output_addresses,
            dependencies=[],
            payload_sha512=sha512(serialized).hexdigest(),
            batcher_public_key=public_key,
            nonce=time.time().hex().encode()).SerializeToString()
    signature = signing.sign(header, signing_key)

    transaction = \
        txn_pb.Transaction(
            header=header,
            payload=serialized,
            header_signature=signature)

    batch = _create_batch(public_key, signing_key, [transaction])
    batch_list = batch_pb.BatchList(batches=[batch])
    try:
        print('Generating {}'.format(args.output))
        with open(args.output, 'wb') as batch_file:
            batch_file.write(batch_list.SerializeToString())
    except IOError as e:
        raise CliException(
            'Unable to write to batch file: {}'.format(str(e)))
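A sketch of the inverse operation, reading back the batch file that
do_genesis writes; it assumes the same batch_pb protobuf module imported by
the example (ParseFromString is the standard protobuf deserializer):

def read_genesis_batches(path):
    # Deserialize the BatchList written by do_genesis and return the
    # contained batches (here, a single validator-registry batch).
    batch_list = batch_pb.BatchList()
    with open(path, 'rb') as batch_file:
        batch_list.ParseFromString(batch_file.read())
    return list(batch_list.batches)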
Example #3
    def _add_valid_batch_invalid_batch(self, scheduler, context_manager):
        """Tests the squash function. That the correct state hash is found
        at the end of valid and invalid batches, similar to block publishing.

         Basically:
            1. Adds two batches, one where all the txns are valid,
               and one where one of the txns is invalid.
            2. Run through the scheduler executor interaction
               as txns are processed.
            3. Verify that the state root obtained through the squash function
               is the same as directly updating the merkle tree.
            4. Verify that correct batch statuses are set

        This test should work for both a serial and parallel scheduler.
        """
        private_key = self._context.new_random_private_key()
        signer = self._crypto_factory.new_signer(private_key)

        # 1)
        batch_signatures = []
        for names in [['a', 'b'], ['invalid', 'c'], ['d', 'e']]:
            batch_txns = []
            for name in names:
                txn, _ = create_transaction(
                    payload=name.encode(),
                    signer=signer)

                batch_txns.append(txn)

            batch = create_batch(
                transactions=batch_txns,
                signer=signer)

            batch_signatures.append(batch.header_signature)
            scheduler.add_batch(batch)
        scheduler.finalize()
        # 2)
        sched1 = iter(scheduler)
        invalid_payload = hashlib.sha512('invalid'.encode()).hexdigest()
        while not scheduler.complete(block=False):
            try:
                txn_info = next(sched1)
            except StopIteration:
                break
            txn_header = transaction_pb2.TransactionHeader()
            txn_header.ParseFromString(txn_info.txn.header)
            inputs_or_outputs = list(txn_header.inputs)
            c_id = context_manager.create_context(
                state_hash=txn_info.state_hash,
                inputs=inputs_or_outputs,
                outputs=inputs_or_outputs,
                base_contexts=txn_info.base_context_ids)
            if txn_header.payload_sha512 == invalid_payload:
                scheduler.set_transaction_execution_result(
                    txn_info.txn.header_signature, False, None)
            else:
                context_manager.set(c_id, [{inputs_or_outputs[0]: b"1"}])
                scheduler.set_transaction_execution_result(
                    txn_info.txn.header_signature, True, c_id)

        sched2 = iter(scheduler)
        # 3)
        txn_info_a = next(sched2)

        address_a = _get_address_from_txn(txn_info_a)

        txn_info_b = next(sched2)
        address_b = _get_address_from_txn(txn_info_b)

        next(sched2)

        txn_info_d = next(sched2)
        address_d = _get_address_from_txn(txn_info_d)

        txn_info_e = next(sched2)
        address_e = _get_address_from_txn(txn_info_e)

        merkle_database = MerkleDatabase(dict_database.DictDatabase())
        state_root_end = merkle_database.update(
            {address_a: b"1", address_b: b"1",
             address_d: b"1", address_e: b"1"},
            virtual=False)

        # 4)
        batch1_result = scheduler.get_batch_execution_result(
            batch_signatures[0])
        self.assertTrue(batch1_result.is_valid)

        batch2_result = scheduler.get_batch_execution_result(
            batch_signatures[1])
        self.assertFalse(batch2_result.is_valid)

        batch3_result = scheduler.get_batch_execution_result(
            batch_signatures[2])
        self.assertTrue(batch3_result.is_valid)
        self.assertEqual(batch3_result.state_hash, state_root_end)
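Step 3 hinges on MerkleDatabase.update returning the same root that the
scheduler's squash function produces. A minimal sketch of that
direct-update path, assuming the same MerkleDatabase and dict_database
imports used by the example:

def expected_root(address_value_pairs):
    # Apply the writes of all valid transactions to a fresh in-memory
    # merkle tree and return the resulting state root for comparison.
    merkle_database = MerkleDatabase(dict_database.DictDatabase())
    return merkle_database.update(dict(address_value_pairs), virtual=False)

# e.g. expected_root([(address_a, b"1"), (address_d, b"1")])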
Example #4
def _get_address_from_txn(txn_info):
    # Parse the serialized header and return its first input address.
    txn_header = transaction_pb2.TransactionHeader()
    txn_header.ParseFromString(txn_info.txn.header)
    inputs_or_outputs = list(txn_header.inputs)
    return inputs_or_outputs[0]
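The helper only needs the serialized header bytes, so the same extraction
works on any transaction. A usage sketch with a hand-built header (the
70-character address is illustrative):

header = transaction_pb2.TransactionHeader(
    inputs=['1cf126' + '0' * 64],
    outputs=['1cf126' + '0' * 64])

parsed = transaction_pb2.TransactionHeader()
parsed.ParseFromString(header.SerializeToString())
first_input = list(parsed.inputs)[0]  # what _get_address_from_txn returns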
Example #5
    def _register_signup_information(self, block_header, poet_enclave_module):
        # Create signup information for this validator, putting the block ID
        # of the block previous to the block referenced by block_header in the
        # nonce.  Block ID is better than wait certificate ID for testing
        # freshness as we need to account for non-PoET blocks.
        public_key_hash = \
            hashlib.sha256(
                block_header.signer_pubkey.encode()).hexdigest()
        signup_info = \
            SignupInfo.create_signup_info(
                poet_enclave_module=poet_enclave_module,
                validator_address=block_header.signer_pubkey,
                originator_public_key_hash=public_key_hash,
                nonce=block_header.previous_block_id)

        # Create the validator registry payload
        payload = \
            vr_pb.ValidatorRegistryPayload(
                verb='register',
                name='validator-{}'.format(block_header.signer_pubkey[:8]),
                id=block_header.signer_pubkey,
                signup_info=vr_pb.SignUpInfo(
                    poet_public_key=signup_info.poet_public_key,
                    proof_data=signup_info.proof_data,
                    anti_sybil_id=signup_info.anti_sybil_id,
                    nonce=block_header.previous_block_id),
            )
        serialized = payload.SerializeToString()

        # Create the address that will be used to look up this validator
        # registry transaction.  Seems like a candidate for refactoring.
        validator_entry_address = \
            PoetBlockPublisher._validator_registry_namespace + \
            hashlib.sha256(block_header.signer_pubkey.encode()).hexdigest()

        # Create a transaction header and transaction for the validator
        # registry update and then hand it off to the batch publisher to
        # send out.
        addresses = \
            [validator_entry_address,
             PoetBlockPublisher._validator_map_address]

        header = \
            txn_pb.TransactionHeader(
                signer_pubkey=block_header.signer_pubkey,
                family_name='sawtooth_validator_registry',
                family_version='1.0',
                inputs=addresses,
                outputs=addresses,
                dependencies=[],
                payload_encoding="application/protobuf",
                payload_sha512=hashlib.sha512(serialized).hexdigest(),
                batcher_pubkey=block_header.signer_pubkey,
                nonce=time.time().hex().encode()).SerializeToString()
        signature = \
            signing.sign(header, self._batch_publisher.identity_signing_key)

        transaction = \
            txn_pb.Transaction(
                header=header,
                payload=serialized,
                header_signature=signature)

        LOGGER.info(
            'Register Validator Name=%s, ID=%s...%s, PoET public key=%s...%s, '
            'Nonce=%s', payload.name, payload.id[:8], payload.id[-8:],
            payload.signup_info.poet_public_key[:8],
            payload.signup_info.poet_public_key[-8:],
            block_header.previous_block_id[:8])

        self._batch_publisher.send([transaction])

        # Store the key state so that we can look it up later if need be
        LOGGER.info('Save key state PPK=%s...%s => SSD=%s...%s',
                    signup_info.poet_public_key[:8],
                    signup_info.poet_public_key[-8:],
                    signup_info.sealed_signup_data[:8],
                    signup_info.sealed_signup_data[-8:])
        self._poet_key_state_store[signup_info.poet_public_key] = \
            PoetKeyState(
                sealed_signup_data=signup_info.sealed_signup_data,
                has_been_refreshed=False)

        # Cache the PoET public key on the class to indicate that this
        # is the current public key for the PoET enclave
        PoetBlockPublisher._poet_public_key = signup_info.poet_public_key
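Both this method and do_genesis derive the validator registry address the
same way: a fixed namespace prefix followed by the SHA-256 of the signer's
public key. A stdlib-only sketch of that derivation (the prefix value here
is an assumption, not taken from the example):

import hashlib

def validator_entry_address(namespace_prefix, signer_public_key):
    # 6-hex-char namespace prefix + 64-hex-char SHA-256 digest of the
    # public key yields a 70-character Sawtooth address.
    return namespace_prefix + hashlib.sha256(
        signer_public_key.encode()).hexdigest()

address = validator_entry_address('6a4372', 'some-signer-public-key-hex')
assert len(address) == 70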
Example #6
    def _future_done_callback(self, request, result):
        """
        :param request (bytes):the serialized request
        :param result (FutureResult):
        """
        self._in_process_transactions_count.dec()
        req = processor_pb2.TpProcessRequest()
        req.ParseFromString(request)
        # The request carries its header either as an already-parsed
        # message or as raw header_bytes; recover the transaction family
        # name and version from whichever form was sent.
        if req.header_bytes == b'':
            # When header_bytes field is empty, the header field will
            # be populated, so we can use that directly.
            request_header = req.header
        else:
            # Deserialize the header_bytes
            request_header = transaction_pb2.TransactionHeader()
            request_header.ParseFromString(req.header_bytes)
        response = processor_pb2.TpProcessResponse()
        response.ParseFromString(result.content)

        processor_type = ProcessorType(request_header.family_name,
                                       request_header.family_version)

        self._processor_manager[processor_type].get_processor(
            result.connection_id).dec_occupancy()
        self._processor_manager.notify()

        self._get_tp_process_response_counter(
            response.Status.Name(response.status)).inc()

        if result.connection_id in self._open_futures and \
                req.signature in self._open_futures[result.connection_id]:
            del self._open_futures[result.connection_id][req.signature]

        if response.status == processor_pb2.TpProcessResponse.OK:
            state_sets, state_deletes, events, data = \
                self._context_manager.get_execution_results(req.context_id)

            state_changes = [
                transaction_receipt_pb2.StateChange(
                    address=addr,
                    value=value,
                    type=transaction_receipt_pb2.StateChange.SET)
                for addr, value in state_sets.items()
            ] + [
                transaction_receipt_pb2.StateChange(
                    address=addr,
                    type=transaction_receipt_pb2.StateChange.DELETE)
                for addr in state_deletes
            ]

            self._scheduler.set_transaction_execution_result(
                txn_signature=req.signature,
                is_valid=True,
                context_id=req.context_id,
                state_changes=state_changes,
                events=events,
                data=data)

        elif response.status == processor_pb2.TpProcessResponse.INTERNAL_ERROR:
            LOGGER.error(
                "Transaction processor internal error: %s "
                "(transaction: %s, name: %s, version: %s)", response.message,
                req.signature, request_header.family_name,
                request_header.family_version)

            # Make sure that the transaction wasn't unscheduled in the interim
            if self._scheduler.is_transaction_in_schedule(req.signature):
                self._execute(processor_type=processor_type,
                              process_request=req)

        else:
            self._context_manager.delete_contexts(
                context_id_list=[req.context_id])

            self._fail_transaction(txn_signature=req.signature,
                                   context_id=req.context_id,
                                   error_message=response.message,
                                   error_data=response.extended_data)
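The header recovery at the top of the callback can be distilled into a
small standalone helper; a sketch assuming the same transaction_pb2 module
used by the example:

def extract_header(req):
    # Mirror the callback's branching: when header_bytes is empty the
    # parsed header field is populated and can be used directly;
    # otherwise the raw bytes must be deserialized.
    if req.header_bytes == b'':
        return req.header
    header = transaction_pb2.TransactionHeader()
    header.ParseFromString(req.header_bytes)
    return header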
Example #7
    def _execute_schedule(self):
        for txn_info in self._scheduler:
            self._transaction_execution_count.inc()

            txn = txn_info.txn
            header = transaction_pb2.TransactionHeader()
            header.ParseFromString(txn.header)

            processor_type = ProcessorType(header.family_name,
                                           header.family_version)

            config = self._settings_view_factory.create_settings_view(
                txn_info.state_hash)

            transaction_families = config.get_setting(
                key=self._tp_settings_key, default_value="[]")

            # After reading the transaction families required by the
            # configuration, try to json.loads them into a Python object.
            # If there is a misconfiguration, proceed as if there were no
            # configuration.
            try:
                transaction_families = json.loads(transaction_families)
                required_transaction_processors = [
                    ProcessorType(d.get('family'), d.get('version'))
                    for d in transaction_families
                ]
            except ValueError:
                LOGGER.error(
                    "sawtooth.validator.transaction_families "
                    "misconfigured. Expecting a json array, found"
                    " %s", transaction_families)
                required_transaction_processors = []

            # First check if the transaction should be failed
            # based on configuration
            if required_transaction_processors and \
                    processor_type not in required_transaction_processors:
                # The txn processor type is not in the required
                # transaction processors, so fail the transaction
                # right away
                LOGGER.debug(
                    "failing transaction %s of type (name=%s,"
                    "version=%s) since its type is not among"
                    " the required transaction processors in the"
                    " configuration", txn.header_signature,
                    processor_type.name, processor_type.version)

                self._fail_transaction(txn.header_signature)
                continue

            if processor_type in required_transaction_processors:
                # The txn processor type is in the required
                # transaction processors: check that every output of
                # the transaction matches one of the listed namespaces
                transaction_family = \
                    next(t for t in transaction_families
                         if t.get('family') == header.family_name
                         and t.get('version') == header.family_version)

                # if no namespaces are indicated, then the empty prefix is
                # inserted by default
                namespaces = transaction_family.get('namespaces', [''])
                if not isinstance(namespaces, list):
                    LOGGER.error(
                        "namespaces should be a list for "
                        "transaction family (name=%s, version=%s)",
                        processor_type.name, processor_type.version)
                    # Guard the prefix check below against iterating a
                    # non-list value by falling back to the default.
                    namespaces = ['']
                prefixes = header.outputs
                bad_prefixes = [
                    prefix for prefix in prefixes
                    if not any(prefix.startswith(n) for n in namespaces)
                ]
                for prefix in bad_prefixes:
                    # Log each offending output prefix
                    LOGGER.debug(
                        "failing transaction %s of type (name=%s,"
                        "version=%s) because no namespace listed "
                        "in %s from the configuration settings "
                        "matches the prefix %s", txn.header_signature,
                        processor_type.name, processor_type.version,
                        namespaces, prefix)

                if bad_prefixes:
                    self._fail_transaction(txn.header_signature)
                    continue

            try:
                context_id = self._context_manager.create_context(
                    state_hash=txn_info.state_hash,
                    base_contexts=txn_info.base_context_ids,
                    inputs=list(header.inputs),
                    outputs=list(header.outputs))
            except KeyError:
                LOGGER.error(
                    "Error creating context for transaction %s, "
                    "scheduler provided a base context that was not "
                    "in the context manager.", txn.header_signature)
                self._scheduler.set_transaction_execution_result(
                    txn_signature=txn.header_signature,
                    is_valid=False,
                    context_id=None)
                continue
            except CreateContextException:
                LOGGER.exception("Exception creating context")
                self._scheduler.set_transaction_execution_result(
                    txn_signature=txn.header_signature,
                    is_valid=False,
                    context_id=None)
                continue

            process_request = processor_pb2.TpProcessRequest(
                header=header,
                payload=txn.payload,
                signature=txn.header_signature,
                context_id=context_id,
                header_bytes=txn.header)

            # Since we have already checked whether the transaction should
            # be failed, all remaining cases should either be executed or
            # waited for.
            self._execute(processor_type=processor_type,
                          process_request=process_request)

        self._done = True
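The required-processor check above expects the
sawtooth.validator.transaction_families setting to hold a JSON array of
objects with 'family', 'version', and optional 'namespaces' keys. An
illustrative value and the parse it feeds (the family names and namespace
prefix are assumptions, not taken from the example):

import json

setting_value = json.dumps([
    {"family": "intkey", "version": "1.0", "namespaces": ["1cf126"]},
    # No 'namespaces' key here: the loop falls back to [''], which every
    # output prefix matches.
    {"family": "sawtooth_settings", "version": "1.0"},
])

transaction_families = json.loads(setting_value)
required = [(d.get('family'), d.get('version'))
            for d in transaction_families]  # fed to ProcessorType above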