def __init__(self, with_genesis=True):
        self.block_sender = MockBlockSender()
        self.batch_sender = MockBatchSender()
        self.block_store = BlockStore(DictDatabase())
        self.block_cache = BlockCache(self.block_store)
        self.state_db = {}

        # add the mock reference to the consensus
        consensus_setting_addr = SettingsView.setting_address(
            'sawtooth.consensus.algorithm')
        self.state_db[consensus_setting_addr] = _setting_entry(
            'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')

        self.state_view_factory = MockStateViewFactory(self.state_db)
        self.signing_key = signing.generate_privkey()
        self.public_key = signing.generate_pubkey(self.signing_key)

        self.identity_signing_key = signing.generate_privkey()
        chain_head = None
        if with_genesis:
            self.genesis_block = self.generate_genesis_block()
            self.set_chain_head(self.genesis_block)
            chain_head = self.genesis_block

        self.block_publisher = BlockPublisher(
            transaction_executor=MockTransactionExecutor(),
            block_cache=self.block_cache,
            state_view_factory=self.state_view_factory,
            block_sender=self.block_sender,
            batch_sender=self.batch_sender,
            squash_handler=None,
            chain_head=chain_head,
            identity_signing_key=self.identity_signing_key,
            data_dir=None,
            config_dir=None)
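
For context, the _setting_entry helper used above typically serializes a
single key/value pair as a Settings protobuf. A minimal sketch, assuming the
validator's setting_pb2 message definitions:

from sawtooth_validator.protobuf.setting_pb2 import Setting

def _setting_entry(key, value):
    # Serialize one key/value pair in the format SettingsView expects to
    # find at the computed settings address.
    return Setting(
        entries=[Setting.Entry(key=key, value=value)]
    ).SerializeToString()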
Example #2
 def __init__(self, delegate, args):
     super(NoopWorkload, self).__init__(delegate, args)
     self._urls = []
     self._lock = threading.Lock()
     self._delegate = delegate
     self._private_key = signing.generate_privkey()
     self._public_key = signing.generate_pubkey(self._private_key)
Example #3
    def _create_key(self, key_name='validator.priv'):
        privkey = signing.generate_privkey()
        priv_file = os.path.join(self._temp_dir, key_name)
        with open(priv_file, 'w') as priv_fd:
            priv_fd.write(privkey)

        return signing.generate_pubkey(privkey)
def do_populate(args, batches, keys):
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    total_txn_count = 0
    txns = []
    for name in list(keys):
        txn = create_intkey_transaction(
            verb='set',
            name=name,
            value=random.randint(9000, 100000),
            deps=[],
            private_key=private_key,
            public_key=public_key)
        total_txn_count += 1
        txns.append(txn)
        # Establish the signature of the txn associated with the word
        # so we can create good dependencies later
        keys[name] = txn.header_signature

    batch = create_batch(
        transactions=txns,
        private_key=private_key,
        public_key=public_key)

    batches.append(batch)
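
A hypothetical driver tying this populate step to the dependency-aware
do_generate(args, batches, keys) shown later in this listing; the args
fields below are assumptions for illustration:

import argparse

args = argparse.Namespace(count=10, max_batch_size=5)
batches = []
keys = {name: None for name in ('alpha', 'beta', 'gamma')}

do_populate(args, batches, keys)   # fills keys[name] with txn signatures
do_generate(args, batches, keys)   # emits 'inc'/'dec' txns depending on them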
Example #5
def do_populate(args):
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    words = generate_word_list(args.pool_size)

    batches = []
    total_txn_count = 0
    txns = []
    for word in words:
        txn = create_intkey_transaction(
            verb='set',
            name=word,
            value=random.randint(9000, 100000),
            private_key=private_key,
            public_key=public_key)
        total_txn_count += 1
        txns.append(txn)

    batch = create_batch(
        transactions=txns,
        private_key=private_key,
        public_key=public_key)

    batches.append(batch)

    batch_list = batch_pb2.BatchList(batches=batches)

    print("Writing to {}...".format(args.output))
    with open(args.output, "wb") as fd:
        fd.write(batch_list.SerializeToString())
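
To sanity-check the output, the serialized BatchList can be parsed back from
disk; a sketch assuming the same batch_pb2 module this script imports:

def count_txns(path):
    # Round-trip the serialized BatchList and count its transactions.
    batch_list = batch_pb2.BatchList()
    with open(path, 'rb') as fd:
        batch_list.ParseFromString(fd.read())
    return sum(len(batch.transactions) for batch in batch_list.batches)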
Example #6
def do_keygen(args):
    if args.key_name is not None:
        key_name = args.key_name
    else:
        key_name = getpass.getuser()

    if args.key_dir is not None:
        key_dir = args.key_dir
        if not os.path.exists(key_dir):
            raise CliException('no such directory: {}'.format(key_dir))
    else:
        key_dir = os.path.join(os.path.expanduser('~'), '.sawtooth', 'keys')
        if not os.path.exists(key_dir):
            if not args.quiet:
                print('creating key directory: {}'.format(key_dir))
            try:
                os.makedirs(key_dir)
            except IOError as e:
                raise CliException('IOError: {}'.format(str(e)))

    priv_filename = os.path.join(key_dir, key_name + '.priv')
    pub_filename = os.path.join(key_dir, key_name + '.pub')

    if not args.force:
        file_exists = False
        for filename in [priv_filename, pub_filename]:
            if os.path.exists(filename):
                file_exists = True
                print('file exists: {}'.format(filename), file=sys.stderr)
        if file_exists:
            raise CliException(
                'files exist, rerun with --force to overwrite existing files')

    privkey = signing.generate_privkey()
    pubkey = signing.generate_pubkey(privkey)

    try:
        priv_exists = os.path.exists(priv_filename)
        with open(priv_filename, 'w') as priv_fd:
            if not args.quiet:
                if priv_exists:
                    print('overwriting file: {}'.format(priv_filename))
                else:
                    print('writing file: {}'.format(priv_filename))
            priv_fd.write(privkey)
            priv_fd.write('\n')

        pub_exists = os.path.exists(pub_filename)
        with open(pub_filename, 'w') as pub_fd:
            if not args.quiet:
                if pub_exists:
                    print('overwriting file: {}'.format(pub_filename))
                else:
                    print('writing file: {}'.format(pub_filename))
            pub_fd.write(pubkey)
            pub_fd.write('\n')

    except IOError as ioe:
        raise CliException('IOError: {}'.format(str(ioe)))
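
For reference, a minimal argparse wiring consistent with the attributes
do_keygen reads (key_name, key_dir, force, quiet); the real CLI may define
these flags differently:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('key_name', nargs='?', default=None)
parser.add_argument('--key-dir', dest='key_dir', default=None)
parser.add_argument('--force', action='store_true')
parser.add_argument('-q', '--quiet', action='store_true')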
Example #7
def do_keygen(args):
    """Executes the key generation operation, given the parsed arguments.

    Args:
        args (:obj:`Namespace`): The parsed args.
    """
    if args.key_name is not None:
        key_name = args.key_name
    else:
        key_name = 'validator'

    key_dir = get_key_dir()

    if not os.path.exists(key_dir):
        raise CliException(
            "Key directory does not exist: {}".format(key_dir))

    priv_filename = os.path.join(key_dir, key_name + '.priv')
    pub_filename = os.path.join(key_dir, key_name + '.pub')

    if not args.force:
        file_exists = False
        for filename in [priv_filename, pub_filename]:
            if os.path.exists(filename):
                file_exists = True
                print('file exists: {}'.format(filename), file=sys.stderr)
        if file_exists:
            raise CliException(
                'files exist, rerun with --force to overwrite existing files')

    privkey = signing.generate_privkey()
    pubkey = signing.generate_pubkey(privkey)

    try:
        priv_exists = os.path.exists(priv_filename)
        with open(priv_filename, 'w') as priv_fd:
            if not args.quiet:
                if priv_exists:
                    print('overwriting file: {}'.format(priv_filename))
                else:
                    print('writing file: {}'.format(priv_filename))
            priv_fd.write(privkey)
            priv_fd.write('\n')

        pub_exists = os.path.exists(pub_filename)
        with open(pub_filename, 'w') as pub_fd:
            if not args.quiet:
                if pub_exists:
                    print('overwriting file: {}'.format(pub_filename))
                else:
                    print('writing file: {}'.format(pub_filename))
            pub_fd.write(pubkey)
            pub_fd.write('\n')

    except IOError as ioe:
        raise CliException('IOError: {}'.format(str(ioe)))
Example #8
 def __init__(self, delegate, args):
     super(IntKeyWorkload, self).__init__(delegate, args)
     self._auth_info = args.auth_info
     self._urls = []
     self._pending_batches = {}
     self._lock = threading.Lock()
     self._delegate = delegate
     self._deps = {}
     self._private_key = signing.generate_privkey()
     self._public_key = signing.generate_pubkey(self._private_key)
Example #9
 def setUp(self):
     self.block_store = BlockStore({})
     self.gossip = MockGossip()
     self.completer = Completer(self.block_store, self.gossip)
     self.completer._on_block_received = self._on_block_received
     self.completer._on_batch_received = self._on_batch_received
     self.private_key = signing.generate_privkey()
     self.public_key = signing.generate_pubkey(self.private_key)
     self.blocks = []
     self.batches = []
Example #10
def do_generate(args):
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    words = generate_word_list(args.pool_size)

    batches = []
    start = time.time()
    total_txn_count = 0
    for i in range(0, args.count):
        txns = []
        for _ in range(0, random.randint(1, args.batch_max_size)):
            txn = create_intkey_transaction(
                verb=random.choice(['inc', 'dec']),
                name=random.choice(words),
                value=1,
                private_key=private_key,
                public_key=public_key)
            total_txn_count += 1
            txns.append(txn)

        batch = create_batch(
            transactions=txns,
            private_key=private_key,
            public_key=public_key)

        batches.append(batch)

        if i % 100 == 0 and i != 0:
            stop = time.time()

            txn_count = 0
            for batch in batches[-100:]:
                txn_count += len(batch.transactions)

            fmt = 'batches {}, batch/sec: {:.2f}, txns: {}, txns/sec: {:.2f}'
            print(fmt.format(
                str(i),
                100 / (stop - start),
                str(total_txn_count),
                txn_count / (stop - start)))
            start = stop

    batch_list = batch_pb2.BatchList(batches=batches)

    print("Writing to {}...".format(args.output))
    with open(args.output, "wb") as fd:
        fd.write(batch_list.SerializeToString())
    def _create_batches(self):
        test_yaml = self._yaml_from_file()
        priv_key = signing.generate_privkey()
        pub_key = signing.generate_pubkey(priv_key)

        batches, batch_results, batches_waiting = self._process_batches(
            yaml_batches=test_yaml,
            priv_key=priv_key,
            pub_key=pub_key)
        # if there aren't any explicit dependencies that need to be created
        # based on the transaction 'id' listed in the yaml, the next two
        # code blocks won't be run.
        while len(batches_waiting) > 0:
            b, b_r, b_w = self._process_prev_batches(
                unprocessed_batches=batches_waiting,
                priv_key=priv_key,
                pub_key=pub_key)
            if len(batches_waiting) == len(b_w):
                # If any process attempt doesn't produce a new batch,
                # there is probably a cyclic dependency
                break
            if b:
                for batch, key in b:
                    ind = batches.index(key)
                    batches[ind] = batch
                batch_results.update(b_r)
            batches_waiting = b_w
        # Process any remaining batches whose transaction dependencies
        # could not be computed, stripping those dependencies out.
        if batches_waiting:
            b, b_r, b_w = self._process_prev_batches(
                batches_waiting,
                priv_key=priv_key,
                pub_key=pub_key,
                strip_deps=True)
            for batch, key in b:
                ind = batches.index(key)
                batches[ind] = batch
            batch_results.update(b_r)

        self._batch_results = batch_results
        self._batches = batches
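
The waiting-batch loop above is a fixed-point pattern: reprocess the waiting
set until it stops shrinking, and treat a pass that makes no progress as a
probable dependency cycle. A generic sketch, with a hypothetical process()
callback standing in for _process_prev_batches:

def resolve(waiting, process):
    # process(waiting) -> (finished, still_waiting)
    finished_all = []
    while waiting:
        finished, still_waiting = process(waiting)
        if len(still_waiting) == len(waiting):
            break  # no progress: probable cyclic dependency
        finished_all.extend(finished)
        waiting = still_waiting
    return finished_all, waiting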
def do_generate(args, batches, keys):
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    start = time.time()
    total_txn_count = 0
    for i in range(0, args.count):
        txns = []
        for _ in range(0, random.randint(1, args.max_batch_size)):
            name = random.choice(list(keys))
            txn = create_intkey_transaction(
                verb=random.choice(['inc', 'dec']),
                name=name,
                value=random.randint(1, 10),
                deps=[keys[name]],
                private_key=private_key,
                public_key=public_key)
            total_txn_count += 1
            txns.append(txn)

        batch = create_batch(
            transactions=txns,
            private_key=private_key,
            public_key=public_key)

        batches.append(batch)

        if i % 100 == 0 and i != 0:
            stop = time.time()

            txn_count = 0
            for batch in batches[-100:]:
                txn_count += len(batch.transactions)

            fmt = 'batches {}, batch/sec: {:.2f}, txns: {}, txns/sec: {:.2f}'
            print(fmt.format(
                str(i),
                100 / (stop - start),
                str(total_txn_count),
                txn_count / (stop - start)))
            start = stop
def do_init(args, config):
    username = config.get('DEFAULT', 'username')
    if args.username is not None:
        username = args.username

    config.set('DEFAULT', 'username', username)
    print("set username: {}".format(username))

    save_config(config)

    priv_filename = config.get('DEFAULT', 'key_file')
    if priv_filename.endswith(".priv"):
        addr_filename = priv_filename[0:-len(".priv")] + ".addr"
    else:
        addr_filename = priv_filename + ".addr"

    if not os.path.exists(priv_filename):
        try:
            if not os.path.exists(os.path.dirname(priv_filename)):
                os.makedirs(os.path.dirname(priv_filename))

            privkey = signing.generate_privkey()
            pubkey = signing.generate_pubkey(privkey)
            addr = signing.generate_identifier(pubkey)

            with open(priv_filename, "w") as priv_fd:
                print("writing file: {}".format(priv_filename))
                priv_fd.write(privkey)
                priv_fd.write("\n")

            with open(addr_filename, "w") as addr_fd:
                print("writing file: {}".format(addr_filename))
                addr_fd.write(addr)
                addr_fd.write("\n")
        except IOError as ioe:
            raise BattleshipException("IOError: {}".format(str(ioe)))
    def test_no_validator_registry(
            self, mock_utils, mock_validator_registry_view,
            mock_consensus_state, mock_poet_enclave_factory,
            mock_consensus_state_store, mock_poet_key_state_store,
            mock_signup_info, mock_poet_config_view, mock_block_wrapper):
        """ Test verifies that PoET Block Publisher fails
        if a validator doesn't have any signup info
        in the validator registry (the validator is not listed
        in the validator registry)
        """

        # create a mock_validator_registry_view that throws KeyError
        mock_validator_registry_view.return_value.get_validator_info. \
            side_effect = KeyError('Non-existent validator')

        # create a mock_wait_certificate that does nothing in check_valid
        mock_wait_certificate = mock.Mock()
        mock_wait_certificate.check_valid.return_value = None

        mock_utils.deserialize_wait_certificate.return_value = \
            mock_wait_certificate

        # create a mock_consensus_state that returns a mock with
        # the following settings:
        mock_state = mock.Mock()
        mock_state.validator_signup_was_committed_too_late.return_value = False
        mock_state.validator_has_claimed_block_limit.return_value = False
        mock_state.validator_is_claiming_too_early.return_value = False
        mock_state.validator_is_claiming_too_frequently.return_value = False

        mock_consensus_state.consensus_state_for_block_id.return_value = \
            mock_state

        mock_poet_key_state_store.return_value = \
            _MockPoetKeyStateStore(active_key=None)

        # create mock_signup_info
        mock_signup_info.create_signup_info.return_value = \
            mock.Mock(
                poet_public_key='poet public key',
                proof_data='proof data',
                anti_sybil_id='anti-sybil ID',
                sealed_signup_data='sealed signup data')

        # create mock_batch_publisher
        mock_batch_publisher = mock.Mock(
            identity_signing_key=signing.generate_privkey())

        mock_block_cache = mock.MagicMock()
        mock_state_view_factory = mock.Mock()

        # create mock_block_header with the following fields
        mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
        mock_block.header.signer_pubkey = '90834587139405781349807435098745'
        mock_block.header.previous_block_id = '2'
        mock_block.header.block_num = 1
        mock_block.header.state_root_hash = '6'
        mock_block.header.batch_ids = '4'

        # check test
        block_publisher = \
            poet_block_publisher.PoetBlockPublisher(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                batch_publisher=mock_batch_publisher,
                data_dir=self._temp_dir,
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')

        self.assertFalse(
            block_publisher.initialize_block(block_header=mock_block.header))

        # check that batch publisher was called to send out
        # the txn header and txn for the validator registry update
        self.assertTrue(mock_batch_publisher.send.called)
Example #15
def do_generate(args):
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    words = generate_word_list(args.pool_size)

    txns = []
    batches = []
    bytecode = ""
    with open(args.contract, "rb") as fd:
        byte = fd.readline()
        while byte != "":
            bytecode += byte
            byte = fd.readline()

    byte_addr = get_address("jvm_sc", bytecode)

    txn = create_jvm_sc_transaction(verb='store',
                                    private_key=private_key,
                                    public_key=public_key,
                                    bytecode=bytecode,
                                    methods=["set", "inc", "dec"])
    txns.append(txn)

    keys = []
    addresses = []
    for i in range(20):
        if len(txns) < 10:
            key = random.choice(words)
            keys.append(key)
            value = str(random.randint(0, 1000))
            key_addr = get_address("intkey", key)
            addresses.append(key_addr)
            txn = create_jvm_sc_transaction(verb='run',
                                            private_key=private_key,
                                            public_key=public_key,
                                            byte_addr=byte_addr,
                                            method="set",
                                            parameters=[
                                                "key," + key, "value," + value,
                                                "&check," + key_addr
                                            ],
                                            addresses=addresses)
            txns.append(txn)
            addresses = []
        else:
            batch = create_batch(txns, private_key, public_key)
            batches.append(batch)
            txns = []

    for i in range(20):
        if len(txns) < 10:
            key = random.choice(keys)
            key_addr = get_address("intkey", key)
            addresses.append(key_addr)
            txn = create_jvm_sc_transaction(
                verb='run',
                private_key=private_key,
                public_key=public_key,
                byte_addr=byte_addr,
                method=random.choice(["inc", "dec"]),
                parameters=["key," + key, "&value," + key_addr, "diff,2"],
                addresses=addresses)
            txns.append(txn)
            addresses = []
        else:
            batch = create_batch(txns, private_key, public_key)
            batches.append(batch)
            txns = []

    batch_list = batch_pb2.BatchList(batches=batches)
    print "Writing to {}...".format(args.output)
    with open(args.output, "w") as fd:
        fd.write(batch_list.SerializeToString())
USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'

# Rest Frameworks

REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticatedOrReadOnly'
    ]
}

STL_REST_URL = os.environ.get('STL_REST_URL', 'http://rest_api:8080')
STL_PRIVKEY_FILE = os.path.join(BASE_DIR, 'omi.privkey')
if os.path.isfile(STL_PRIVKEY_FILE):
    with open(STL_PRIVKEY_FILE) as f:
        STL_PRIVKEY = f.read()
else:
    import sawtooth_signing as signing
    STL_PRIVKEY = signing.generate_privkey()
    with open(STL_PRIVKEY_FILE, "w") as f:
        f.write(STL_PRIVKEY)
def _private():
    return signing.generate_privkey()
Example #18
 def setUp(self):
     self._temp_dir = tempfile.mkdtemp()
     self._identity_key = signing.generate_privkey()
 def _create_random_key(cls):
     return signing.generate_privkey()
Example #20
    def test_valid_batch_invalid_batch(self):
        """Tests the squash function. That the correct hash is being used
        for each txn and that the batch ending state hash is being set.

         Basically:
            1. Adds two batches, one where all the txns are valid,
               and one where one of the txns is invalid.
            2. Run through the scheduler executor interaction
               as txns are processed.
            3. Verify that the valid state root is obtained
               through the squash function.
            4. Verify that correct batch statuses are set
        """
        private_key = signing.generate_privkey()
        public_key = signing.generate_pubkey(private_key)

        # 1)
        batch_signatures = []
        for names in [['a', 'b'], ['invalid', 'c']]:
            batch_txns = []
            for name in names:
                txn = create_transaction(
                    name=name,
                    private_key=private_key,
                    public_key=public_key)

                batch_txns.append(txn)

            batch = create_batch(
                transactions=batch_txns,
                private_key=private_key,
                public_key=public_key)

            batch_signatures.append(batch.header_signature)
            self.scheduler.add_batch(batch)
        self.scheduler.finalize()
        # 2)
        sched1 = iter(self.scheduler)
        invalid_payload = hashlib.sha512('invalid'.encode()).hexdigest()
        while not self.scheduler.complete(block=False):
            txn_info = next(sched1)
            txn_header = transaction_pb2.TransactionHeader()
            txn_header.ParseFromString(txn_info.txn.header)
            inputs_or_outputs = list(txn_header.inputs)
            c_id = self.context_manager.create_context(
                state_hash=txn_info.state_hash,
                inputs=inputs_or_outputs,
                outputs=inputs_or_outputs,
                base_contexts=txn_info.base_context_ids)
            if txn_header.payload_sha512 == invalid_payload:
                self.scheduler.set_transaction_execution_result(
                    txn_info.txn.header_signature, False, c_id)
            else:
                self.context_manager.set(c_id, [{inputs_or_outputs[0]: 1}])
                self.scheduler.set_transaction_execution_result(
                    txn_info.txn.header_signature, True, c_id)

        sched2 = iter(self.scheduler)
        # 3)
        txn_info_a = next(sched2)
        self.assertEqual(self.first_state_root, txn_info_a.state_hash)

        txn_a_header = transaction_pb2.TransactionHeader()
        txn_a_header.ParseFromString(txn_info_a.txn.header)
        inputs_or_outputs = list(txn_a_header.inputs)
        address_a = inputs_or_outputs[0]
        c_id_a = self.context_manager.create_context(
            state_hash=self.first_state_root,
            inputs=inputs_or_outputs,
            outputs=inputs_or_outputs,
            base_contexts=txn_info_a.base_context_ids)
        self.context_manager.set(c_id_a, [{address_a: 1}])
        state_root2 = self.context_manager.commit_context([c_id_a], virtual=False)
        txn_info_b = next(sched2)

        self.assertEqual(txn_info_b.state_hash, state_root2)

        txn_b_header = transaction_pb2.TransactionHeader()
        txn_b_header.ParseFromString(txn_info_b.txn.header)
        inputs_or_outputs = list(txn_b_header.inputs)
        address_b = inputs_or_outputs[0]
        c_id_b = self.context_manager.create_context(
            state_hash=state_root2,
            inputs=inputs_or_outputs,
            outputs=inputs_or_outputs,
            base_contexts=txn_info_b.base_context_ids)
        self.context_manager.set(c_id_b, [{address_b: 1}])
        state_root3 = self.context_manager.commit_context([c_id_b], virtual=False)
        txn_infoInvalid = next(sched2)

        self.assertEqual(txn_infoInvalid.state_hash, state_root3)

        txn_info_c = next(sched2)
        self.assertEqual(txn_info_c.state_hash, state_root3)
        # 4)
        batch1_result = self.scheduler.get_batch_execution_result(
            batch_signatures[0])
        self.assertTrue(batch1_result.is_valid)
        self.assertEqual(batch1_result.state_hash, state_root3)

        batch2_result = self.scheduler.get_batch_execution_result(
            batch_signatures[1])
        self.assertFalse(batch2_result.is_valid)
        self.assertIsNone(batch2_result.state_hash)
class _PoetEnclaveSimulator(object):
    # A lock to protect threaded access
    _lock = threading.Lock()

    # The private key we generate to sign the certificate ID when creating
    # the random wait timeout value
    _seal_private_key = signing.generate_privkey()
    _seal_public_key = signing.generate_pubkey(_seal_private_key)

    # The basename and enclave measurement values we will put into and verify
    # are in the enclave quote in the attestation verification report.
    __VALID_BASENAME__ = \
        bytes.fromhex(
            'b785c58b77152cbe7fd55ee3851c4990'
            '00000000000000000000000000000000')
    __VALID_ENCLAVE_MEASUREMENT__ = \
        bytes.fromhex(
            'c99f21955e38dbb03d2ca838d3af6e43'
            'ef438926ed02db4cc729380c8c7a174e')

    # We use the report private key PEM to create the private key used to
    # sign attestation verification reports.  On the flip side, the report
    # public key PEM is used to create the public key used to verify the
    # signature on the attestation verification reports.
    __REPORT_PRIVATE_KEY_PEM__ = \
        '-----BEGIN PRIVATE KEY-----\n' \
        'MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCsy/NmLwZP6Uj0\n' \
        'p5mIiefgK8VOK7KJ34g3h0/X6aFOd/Ff4j+e23wtQpkxsjVHWLM5SjElGhfpVDhL\n' \
        '1WAMsQI9bpCWR4sjV6p7gOJhv34nkA2Grj5eSHCAJRQXCl+pJ9dYIeKaNoaxkdtq\n' \
        '+Xme//ohtkkv/ZjMTfsjMl0RLXokJ+YhSuTpNSovRaCtZfLB5MihVJuV3Qzb2ROh\n' \
        'KQxcuyPy9tBtOIrBWJaFiXOLRxAijs+ICyzrqUBbRfoAztkljIBx9KNItHiC4zPv\n' \
        'o6DxpGSO2yMQSSrs13PkfyGWVZSgenEYOouEz07X+H5B29PPuW5mCl4nkoH3a9gv\n' \
        'rI6VLEx9AgMBAAECggEAImfFge4RCq4/eX85gcc7pRXyBjuLJAqe+7d0fWAmXxJg\n' \
        'vB+3XTEEi5p8GDoMg7U0kk6kdGe6pRnAz9CffEduU78FCPcbzCCzcD3cVWwkeUok\n' \
        'd1GQV4OC6vD3DBNjsrGdHg45KU18CjUphCZCQhdjvXynG+gZmWxZecuYXkg4zqPT\n' \
        'LwOkcdWBPhJ9CbjtiYOtKDZbhcbdfnb2fkxmvnAoz1OWNfVFXh+x7651FrmL2Pga\n' \
        'xGz5XoxFYYT6DWW1fL6GNuVrd97wkcYUcjazMgunuUMC+6XFxqK+BoqnxeaxnsSt\n' \
        'G2r0sdVaCyK1sU41ftbEQsc5oYeQ3v5frGZL+BgrYQKBgQDgZnjqnVI/B+9iarx1\n' \
        'MjAFyhurcKvFvlBtGKUg9Q62V6wI4VZvPnzA2zEaR1J0cZPB1lCcMsFACpuQF2Mr\n' \
        '3VDyJbnpSG9q05POBtfLjGQdXKtGb8cfXY2SwjzLH/tvxHm3SP+RxvLICQcLX2/y\n' \
        'GTJ+mY9C6Hs6jIVLOnMWkRWamQKBgQDFITE3Qs3Y0ZwkKfGQMKuqJLRw29Tyzw0n\n' \
        'XKaVmO/pEzYcXZMPBrFhGvdmNcJLo2fcsmGZnmit8RP4ChwHUlD11dH1Ffqw9FWc\n' \
        '387i0chlE5FhQPirSM8sWFVmjt2sxC4qFWJoAD/COQtKHgEaVKVc4sH/yRostL1C\n' \
        'r+7aWuqzhQKBgQDcuC5LJr8VPGrbtPz1kY3mw+r/cG2krRNSm6Egj6oO9KFEgtCP\n' \
        'zzjKQU9E985EtsqNKI5VdR7cLRLiYf6r0J6j7zO0IAlnXADP768miUqYDuRw/dUw\n' \
        'JsbwCZneefDI+Mp325d1/egjla2WJCNqUBp4p/Zf62f6KOmbGzzEf6RuUQKBgG2y\n' \
        'E8YRiaTOt5m0MXUwcEZk2Hg5DF31c/dkalqy2UYU57aPJ8djzQ8hR2x8G9ulWaWJ\n' \
        'KiCm8s9gaOFNFt3II785NfWxPmh7/qwmKuUzIdWFNxAsbHQ8NvURTqyccaSzIpFO\n' \
        'hw0inlhBEBQ1cB2r3r06fgQNb2BTT0Itzrd5gkNVAoGBAJcMgeKdBMukT8dKxb4R\n' \
        '1PgQtFlR3COu2+B00pDyUpROFhHYLw/KlUv5TKrH1k3+E0KM+winVUIcZHlmFyuy\n' \
        'Ilquaova1YSFXP5cpD+PKtxRV76Qlqt6o+aPywm81licdOAXotT4JyJhrgz9ISnn\n' \
        'J13KkHoAZ9qd0rX7s37czb3O\n' \
        '-----END PRIVATE KEY-----'

    _report_private_key = \
        serialization.load_pem_private_key(
            __REPORT_PRIVATE_KEY_PEM__.encode(),
            password=None,
            backend=backends.default_backend())

    # The anti-sybil ID for this particular validator.  This will get set when
    # the enclave is initialized
    _anti_sybil_id = None

    # The PoET keys will remain unset until signup info is either created or
    # unsealed
    _poet_public_key = None
    _poet_private_key = None
    _active_wait_timer = None

    @classmethod
    def initialize(cls, config_dir):
        # See if our configuration file exists.  If so, then we are going to
        # see if there is a configuration value for the validator ID.  If so,
        # then we'll use that when constructing the simulated anti-Sybil ID.
        # Otherwise, we are going to fall back on trying to create one that is
        # unique.
        validator_id = datetime.datetime.now().isoformat()

        config_file = os.path.join(config_dir, 'poet_enclave_simulator.toml')
        if os.path.exists(config_file):
            LOGGER.info(
                'Loading PoET enclave simulator config from : %s',
                config_file)

            try:
                with open(config_file) as fd:
                    toml_config = toml.loads(fd.read())
            except IOError as e:
                LOGGER.info(
                    'Error loading PoET enclave simulator configuration: %s',
                    e)
                LOGGER.info('Continuing with default configuration')

            invalid_keys = set(toml_config.keys()).difference(['validator_id'])
            if invalid_keys:
                LOGGER.warning(
                    'Ignoring invalid keys in PoET enclave simulator config: '
                    '%s',
                    ', '.join(sorted(list(invalid_keys))))

            validator_id = toml_config.get('validator_id', validator_id)

        LOGGER.debug(
            'PoET enclave simulator creating anti-Sybil ID from: %s',
            validator_id)

        # Create an anti-Sybil ID that is unique for this validator
        cls._anti_sybil_id = hashlib.sha256(validator_id.encode()).hexdigest()

    @classmethod
    def create_signup_info(cls,
                           originator_public_key_hash,
                           nonce):
        with cls._lock:
            # First we need to create a public/private key pair for the PoET
            # enclave to use.
            cls._poet_private_key = signing.generate_privkey()
            cls._poet_public_key = \
                signing.generate_pubkey(cls._poet_private_key)
            cls._active_wait_timer = None

            # Simulate sealing (encrypting) the signup data.
            signup_data = {
                'poet_public_key': cls._poet_public_key,
                'poet_private_key': cls._poet_private_key
            }
            sealed_signup_data = \
                base64.b64encode(bytes(dict2json(signup_data).encode()))

            # Build up a fake SGX quote containing:
            # 1. The basename
            # 2. The report body that contains:
            #    a. The enclave measurement
            #    b. The report data SHA256(SHA256(OPK)|PPK)
            sgx_basename = \
                sgx_structs.SgxBasename(name=cls.__VALID_BASENAME__)
            sgx_measurement = \
                sgx_structs.SgxMeasurement(
                    m=cls.__VALID_ENCLAVE_MEASUREMENT__)

            hash_input = \
                '{0}{1}'.format(
                    originator_public_key_hash.upper(),
                    cls._poet_public_key.upper()).encode()
            report_data = hashlib.sha256(hash_input).digest()
            sgx_report_data = sgx_structs.SgxReportData(d=report_data)
            sgx_report_body = \
                sgx_structs.SgxReportBody(
                    mr_enclave=sgx_measurement,
                    report_data=sgx_report_data)

            sgx_quote = \
                sgx_structs.SgxQuote(
                    basename=sgx_basename,
                    report_body=sgx_report_body)

            # Create a fake PSE manifest.  A base64 encoding of the
            # originator public key hash should suffice.
            pse_manifest = \
                base64.b64encode(originator_public_key_hash.encode())

            timestamp = datetime.datetime.now().isoformat()

            # Fake our "proof" data.
            verification_report = {
                'epidPseudonym': cls._anti_sybil_id,
                'id': base64.b64encode(
                    hashlib.sha256(
                        timestamp.encode()).hexdigest().encode()).decode(),
                'isvEnclaveQuoteStatus': 'OK',
                'isvEnclaveQuoteBody':
                    base64.b64encode(sgx_quote.serialize_to_bytes()).decode(),
                'pseManifestStatus': 'OK',
                'pseManifestHash':
                    base64.b64encode(
                        hashlib.sha256(
                            pse_manifest).hexdigest().encode()).decode(),
                'nonce': nonce,
                'timestamp': timestamp
            }

            # Serialize the verification report, sign it, and then put
            # in the proof data
            verification_report_json = dict2json(verification_report)
            signature = \
                cls._report_private_key.sign(
                    verification_report_json.encode(),
                    padding.PKCS1v15(),
                    hashes.SHA256())

            proof_data_dict = {
                'evidence_payload': {
                    'pse_manifest': pse_manifest.decode()
                },
                'verification_report': verification_report_json,
                'signature': base64.b64encode(signature).decode()
            }
            proof_data = dict2json(proof_data_dict)

            return \
                EnclaveSignupInfo(
                    poet_public_key=signup_data['poet_public_key'],
                    proof_data=proof_data,
                    anti_sybil_id=cls._anti_sybil_id,
                    sealed_signup_data=sealed_signup_data)

    @classmethod
    def deserialize_signup_info(cls, serialized_signup_info):
        return \
            EnclaveSignupInfo.signup_info_from_serialized(
                serialized_signup_info=serialized_signup_info)

    @classmethod
    def unseal_signup_data(cls, sealed_signup_data):
        """

        Args:
            sealed_signup_data: Sealed signup data that was returned
                previously in a EnclaveSignupInfo object from a call to
                create_signup_info

        Returns:
            A string The hex encoded PoET public key that was extracted from
            the sealed data
        """

        # Reverse the process we used in creating "sealed" signup info.
        # Specifically, we will do a base 64 decode, which gives us JSON
        # we can convert back to a dictionary we can use to get the
        # data we need
        signup_data = \
            json2dict(base64.b64decode(sealed_signup_data).decode())

        with cls._lock:
            cls._poet_public_key = str(signup_data.get('poet_public_key'))
            cls._poet_private_key = str(signup_data.get('poet_private_key'))
            cls._active_wait_timer = None

            return signup_data.get('poet_public_key')

    @classmethod
    def create_wait_timer(cls,
                          validator_address,
                          previous_certificate_id,
                          local_mean,
                          minimum_wait_time):
        with cls._lock:
            # If we don't have a PoET private key, then the enclave has not
            # been properly initialized (either by calling create_signup_info
            # or unseal_signup_data)
            if cls._poet_private_key is None:
                raise \
                    ValueError(
                        'Enclave must be initialized before attempting to '
                        'create a wait timer')

            # Create some value from the cert ID.  We are just going to use
            # the seal key to sign the cert ID.  We will then use the
            # low-order 64 bits to change that to a number in [0, 1].
            tag = \
                base64.b64decode(
                    signing.sign(
                        previous_certificate_id,
                        cls._seal_private_key))

            tagd = float(struct.unpack('Q', tag[-8:])[0]) / (2**64 - 1)
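
            # Dividing the low-order 64 bits by 2**64 - 1 makes tagd
            # approximately uniform on (0, 1], so -local_mean * math.log(tagd)
            # is inverse-CDF sampling of an exponential distribution with
            # mean local_mean.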

            # Now compute the duration with a minimum wait time guaranteed
            duration = minimum_wait_time - local_mean * math.log(tagd)

            # Create and sign the wait timer
            wait_timer = \
                EnclaveWaitTimer(
                    validator_address=validator_address,
                    duration=duration,
                    previous_certificate_id=previous_certificate_id,
                    local_mean=local_mean)
            wait_timer.signature = \
                signing.sign(
                    wait_timer.serialize(),
                    cls._poet_private_key)

            # Keep track of the active wait timer
            cls._active_wait_timer = wait_timer

            return wait_timer

    @classmethod
    def deserialize_wait_timer(cls, serialized_timer, signature):
        with cls._lock:
            # Verify the signature before trying to deserialize
            if not signing.verify(
                    serialized_timer,
                    signature,
                    cls._poet_public_key):
                return None

        return \
            EnclaveWaitTimer.wait_timer_from_serialized(
                serialized_timer=serialized_timer,
                signature=signature)

    @classmethod
    def create_wait_certificate(cls,
                                wait_timer,
                                block_hash):
        with cls._lock:
            # If we don't have a PoET private key, then the enclave has not
            # been properly initialized (either by calling create_signup_info
            # or unseal_signup_data)
            if cls._poet_private_key is None:
                raise \
                    ValueError(
                        'Enclave must be initialized before attempting to '
                        'create a wait certificate')

            # Several criteria need to be met before we can create a wait
            # certificate:
            # 1. We have an active timer
            # 2. The caller's wait timer is the active wait timer.  We are not
            #    going to rely on the objects being the same, but will compute
            #    a signature over the object and verify that the signatures
            #    are the same.
            # 3. The active timer has expired
            # 4. The active timer has not timed out
            #
            # Note - we make a concession for the genesis block (i.e., a wait
            # timer for which the previous certificate ID is the Null
            # identifier) in that we don't require the timer to have expired
            # and we don't worry about the timer having timed out.
            if cls._active_wait_timer is None:
                raise \
                    ValueError(
                        'There is not a current enclave active wait timer')

            if wait_timer is None or \
                    cls._active_wait_timer.signature != \
                    signing.sign(
                        wait_timer.serialize(),
                        cls._poet_private_key):
                raise \
                    ValueError(
                        'Validator is not using the current wait timer')

            is_not_genesis_block = \
                (cls._active_wait_timer.previous_certificate_id !=
                 NULL_BLOCK_IDENTIFIER)

            now = time.time()
            expire_time = \
                cls._active_wait_timer.request_time + \
                cls._active_wait_timer.duration

            if is_not_genesis_block and now < expire_time:
                raise \
                    ValueError(
                        'Cannot create wait certificate because timer has '
                        'not expired')

            time_out_time = \
                cls._active_wait_timer.request_time + \
                cls._active_wait_timer.duration + \
                TIMER_TIMEOUT_PERIOD

            if is_not_genesis_block and time_out_time < now:
                raise \
                    ValueError(
                        'Cannot create wait certificate because timer '
                        'has timed out')

            # Create a random nonce for the certificate.  For our "random"
            # nonce we will take the timer signature, concat that with the
            # current time, JSON-ize it and create a SHA-256 hash over it.
            # Probably not considered random by security professionals'
            # standards, but it is good enough for the simulator.
            random_string = \
                dict2json({
                    'wait_timer_signature': cls._active_wait_timer.signature,
                    'now': datetime.datetime.utcnow().isoformat()
                })
            nonce = hashlib.sha256(random_string.encode()).hexdigest()

            # First create a new enclave wait certificate using the data
            # provided and then sign the certificate with the PoET private key
            wait_certificate = \
                EnclaveWaitCertificate.wait_certificate_with_wait_timer(
                    wait_timer=cls._active_wait_timer,
                    nonce=nonce,
                    block_hash=block_hash)
            wait_certificate.signature = \
                signing.sign(
                    wait_certificate.serialize(),
                    cls._poet_private_key)

            # Now that we have created the certificate, we no longer have an
            # active timer
            cls._active_wait_timer = None

            return wait_certificate

    @classmethod
    def deserialize_wait_certificate(cls, serialized_certificate, signature):
        return \
            EnclaveWaitCertificate.wait_certificate_from_serialized(
                serialized_certificate=serialized_certificate,
                signature=signature)

    @classmethod
    def verify_wait_certificate(cls, certificate, poet_public_key):
        # Since the signing module uses a hex-encoded string as the canonical
        # format for public keys and we should be handed a public key that was
        # part of signup information created by us, don't bother decoding
        # the public key.
        if not \
            signing.verify(
                certificate.serialize(),
                certificate.signature,
                poet_public_key):
            raise ValueError('Wait certificate signature does not match')
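
A hypothetical end-to-end use of the simulator above, assuming this module's
imports (NULL_BLOCK_IDENTIFIER, signing, etc.) are in scope; the null
identifier marks a genesis timer, so the certificate can be created without
waiting for the timer to expire:

_PoetEnclaveSimulator.initialize(config_dir='/tmp')
signup_info = _PoetEnclaveSimulator.create_signup_info(
    originator_public_key_hash='deadbeef' * 8,
    nonce='0123')
wait_timer = _PoetEnclaveSimulator.create_wait_timer(
    validator_address='validator_001',
    previous_certificate_id=NULL_BLOCK_IDENTIFIER,
    local_mean=5.0,
    minimum_wait_time=1.0)
wait_certificate = _PoetEnclaveSimulator.create_wait_certificate(
    wait_timer=wait_timer,
    block_hash='00' * 32)
_PoetEnclaveSimulator.verify_wait_certificate(
    wait_certificate, signup_info.poet_public_key)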
Example #22
 def setUp(self):
     self.private_key = signing.generate_privkey()
     self.public_key = signing.generate_pubkey(self.private_key)
 def _create_random_key(cls):
     return signing.generate_privkey()
    def test_block_publisher_success_case(
            self, mock_utils, mock_validator_registry_view,
            mock_consensus_state, mock_poet_enclave_factory,
            mock_consensus_state_store, mock_poet_key_state_store,
            mock_signup_info, mock_poet_config_view, mock_block_wrapper):
        """ Test verifies that PoET Block Publisher succeeds
        if a validator successfully passes all criteria necessary
        to publish a block
        """

        # create a mock_validator_registry_view with
        # get_validator_info that does nothing
        mock_validator_registry_view.return_value.get_validator_info. \
            return_value = \
            ValidatorInfo(
                name='validator_001',
                id='validator_deadbeef',
                signup_info=SignUpInfo(
                    poet_public_key='00112233445566778899aabbccddeeff'))

        # create a mock_wait_certificate that does nothing in check_valid
        mock_wait_certificate = mock.Mock()
        mock_wait_certificate.check_valid.return_value = None

        mock_utils.deserialize_wait_certificate.return_value = \
            mock_wait_certificate

        # create a mock_consensus_state that returns a mock with
        # the following settings:
        mock_state = mock.Mock()
        mock_state.validator_signup_was_committed_too_late.return_value = False
        mock_state.validator_has_claimed_block_limit.return_value = False
        mock_state.validator_is_claiming_too_early.return_value = False
        mock_state.validator_is_claiming_too_frequently.return_value = False

        mock_consensus_state.consensus_state_for_block_id.return_value = \
            mock_state

        mock_consensus_state_store.return_value.__getitem__.return_value = \
            mock_consensus_state

        # create mock_signup_info
        mock_signup_info.unseal_signup_data.return_value = \
            '00112233445566778899aabbccddeeff'

        # create mock_batch_publisher
        mock_batch_publisher = mock.Mock(
            identity_signing_key=signing.generate_privkey())

        mock_block_cache = mock.MagicMock()
        mock_state_view_factory = mock.Mock()

        # create mock_block_header with the following fields
        mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
        mock_block.header.signer_pubkey = '90834587139405781349807435098745'
        mock_block.header.previous_block_id = '2'
        mock_block.header.block_num = 1
        mock_block.header.state_root_hash = '6'
        mock_block.header.batch_ids = '4'

        # check test
        block_publisher = \
            poet_block_publisher.PoetBlockPublisher(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                batch_publisher=mock_batch_publisher,
                data_dir=self._temp_dir,
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')

        self.assertTrue(
            block_publisher.initialize_block(block_header=mock_block.header))
    def test_z_policy(self, mock_utils, mock_validator_registry_view,
                      mock_consensus_state, mock_poet_enclave_factory,
                      mock_consensus_state_store, mock_poet_key_state_store,
                      mock_signup_info, mock_poet_config_view,
                      mock_block_wrapper):
        """ Z Policy: Test verifies that PoET Block Publisher fails
        if a validator attempts to claim more blocks frequently than is allowed
        """

        # create a mock_validator_registry_view with
        # get_validator_info that does nothing
        mock_validator_registry_view.return_value.get_validator_info. \
            return_value = \
            ValidatorInfo(
                name='validator_001',
                id='validator_deadbeef',
                signup_info=SignUpInfo(
                    poet_public_key='00112233445566778899aabbccddeeff'))

        # create a mock_wait_certificate that does nothing in check_valid
        mock_wait_certificate = mock.Mock()
        mock_wait_certificate.check_valid.return_value = None

        mock_utils.deserialize_wait_certificate.return_value = \
            mock_wait_certificate

        # create a mock_consensus_state that returns a mock with
        # the following settings:
        mock_state = mock.Mock()
        mock_state.validator_signup_was_committed_too_late.return_value = False
        mock_state.validator_has_claimed_block_limit.return_value = False
        mock_state.validator_is_claiming_too_early.return_value = False
        mock_state.validator_is_claiming_too_frequently.return_value = True

        mock_consensus_state.consensus_state_for_block_id.return_value = \
            mock_state

        mock_consensus_state_store.return_value.__getitem__.return_value = \
            mock_consensus_state

        # create mock_signup_info
        mock_signup_info.unseal_signup_data.return_value = \
            '00112233445566778899aabbccddeeff'

        # create mock_batch_publisher
        mock_batch_publisher = mock.Mock(
            identity_signing_key=signing.generate_privkey())

        mock_block_cache = mock.MagicMock()
        mock_state_view_factory = mock.Mock()

        # create mock_block_header with the following fields
        mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
        mock_block.header.signer_pubkey = '90834587139405781349807435098745'
        mock_block.header.previous_block_id = '2'
        mock_block.header.block_num = 1
        mock_block.header.state_root_hash = '6'
        mock_block.header.batch_ids = '4'

        # check test
        with mock.patch('sawtooth_poet.poet_consensus.poet_block_publisher.'
                        'LOGGER') as mock_logger:
            block_publisher = \
                poet_block_publisher.PoetBlockPublisher(
                    block_cache=mock_block_cache,
                    state_view_factory=mock_state_view_factory,
                    batch_publisher=mock_batch_publisher,
                    data_dir=self._temp_dir,
                    config_dir=self._temp_dir,
                    validator_id='validator_deadbeef')

            self.assertFalse(
                block_publisher.initialize_block(
                    block_header=mock_block.header))

            # Could be a hack, but verify that the appropriate log message is
            # generated - so we at least have some faith that the failure was
            # because of what we are testing and not something else.  I know
            # that this is fragile if the log message is changed, so would
            # accept any suggestions on a better way to verify that the
            # function fails for the reason we expect.

            (message, *_), _ = mock_logger.error.call_args
            self.assertTrue('Validator is claiming blocks too '
                            'frequently' in message)
Example #26
def create_random_private_key():
    return signing.generate_privkey()
class _PoetEnclaveSimulator(object):
    # A lock to protect threaded access
    _lock = threading.Lock()

    # The private key we generate to sign the certificate ID when creating
    # the random wait timeout value
    _seal_private_key = signing.generate_privkey()
    _seal_public_key = signing.generate_pubkey(_seal_private_key)

    # The basename and enclave measurement values we will put into and verify
    # are in the enclave quote in the attestation verification report.
    __VALID_BASENAME__ = \
        bytes.fromhex(
            'b785c58b77152cbe7fd55ee3851c4990'
            '00000000000000000000000000000000')
    __VALID_ENCLAVE_MEASUREMENT__ = \
        bytes.fromhex(
            'c99f21955e38dbb03d2ca838d3af6e43'
            'ef438926ed02db4cc729380c8c7a174e')

    # We use the report private key PEM to create the private key used to
    # sign attestation verification reports.  On the flip side, the report
    # public key PEM is used to create the public key used to verify the
    # signature on the attestation verification reports.
    __REPORT_PRIVATE_KEY_PEM__ = \
        '-----BEGIN PRIVATE KEY-----\n' \
        'MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCsy/NmLwZP6Uj0\n' \
        'p5mIiefgK8VOK7KJ34g3h0/X6aFOd/Ff4j+e23wtQpkxsjVHWLM5SjElGhfpVDhL\n' \
        '1WAMsQI9bpCWR4sjV6p7gOJhv34nkA2Grj5eSHCAJRQXCl+pJ9dYIeKaNoaxkdtq\n' \
        '+Xme//ohtkkv/ZjMTfsjMl0RLXokJ+YhSuTpNSovRaCtZfLB5MihVJuV3Qzb2ROh\n' \
        'KQxcuyPy9tBtOIrBWJaFiXOLRxAijs+ICyzrqUBbRfoAztkljIBx9KNItHiC4zPv\n' \
        'o6DxpGSO2yMQSSrs13PkfyGWVZSgenEYOouEz07X+H5B29PPuW5mCl4nkoH3a9gv\n' \
        'rI6VLEx9AgMBAAECggEAImfFge4RCq4/eX85gcc7pRXyBjuLJAqe+7d0fWAmXxJg\n' \
        'vB+3XTEEi5p8GDoMg7U0kk6kdGe6pRnAz9CffEduU78FCPcbzCCzcD3cVWwkeUok\n' \
        'd1GQV4OC6vD3DBNjsrGdHg45KU18CjUphCZCQhdjvXynG+gZmWxZecuYXkg4zqPT\n' \
        'LwOkcdWBPhJ9CbjtiYOtKDZbhcbdfnb2fkxmvnAoz1OWNfVFXh+x7651FrmL2Pga\n' \
        'xGz5XoxFYYT6DWW1fL6GNuVrd97wkcYUcjazMgunuUMC+6XFxqK+BoqnxeaxnsSt\n' \
        'G2r0sdVaCyK1sU41ftbEQsc5oYeQ3v5frGZL+BgrYQKBgQDgZnjqnVI/B+9iarx1\n' \
        'MjAFyhurcKvFvlBtGKUg9Q62V6wI4VZvPnzA2zEaR1J0cZPB1lCcMsFACpuQF2Mr\n' \
        '3VDyJbnpSG9q05POBtfLjGQdXKtGb8cfXY2SwjzLH/tvxHm3SP+RxvLICQcLX2/y\n' \
        'GTJ+mY9C6Hs6jIVLOnMWkRWamQKBgQDFITE3Qs3Y0ZwkKfGQMKuqJLRw29Tyzw0n\n' \
        'XKaVmO/pEzYcXZMPBrFhGvdmNcJLo2fcsmGZnmit8RP4ChwHUlD11dH1Ffqw9FWc\n' \
        '387i0chlE5FhQPirSM8sWFVmjt2sxC4qFWJoAD/COQtKHgEaVKVc4sH/yRostL1C\n' \
        'r+7aWuqzhQKBgQDcuC5LJr8VPGrbtPz1kY3mw+r/cG2krRNSm6Egj6oO9KFEgtCP\n' \
        'zzjKQU9E985EtsqNKI5VdR7cLRLiYf6r0J6j7zO0IAlnXADP768miUqYDuRw/dUw\n' \
        'JsbwCZneefDI+Mp325d1/egjla2WJCNqUBp4p/Zf62f6KOmbGzzEf6RuUQKBgG2y\n' \
        'E8YRiaTOt5m0MXUwcEZk2Hg5DF31c/dkalqy2UYU57aPJ8djzQ8hR2x8G9ulWaWJ\n' \
        'KiCm8s9gaOFNFt3II785NfWxPmh7/qwmKuUzIdWFNxAsbHQ8NvURTqyccaSzIpFO\n' \
        'hw0inlhBEBQ1cB2r3r06fgQNb2BTT0Itzrd5gkNVAoGBAJcMgeKdBMukT8dKxb4R\n' \
        '1PgQtFlR3COu2+B00pDyUpROFhHYLw/KlUv5TKrH1k3+E0KM+winVUIcZHlmFyuy\n' \
        'Ilquaova1YSFXP5cpD+PKtxRV76Qlqt6o+aPywm81licdOAXotT4JyJhrgz9ISnn\n' \
        'J13KkHoAZ9qd0rX7s37czb3O\n' \
        '-----END PRIVATE KEY-----'

    __REPORT_PUBLIC_KEY_PEM__ = \
        '-----BEGIN PUBLIC KEY-----\n' \
        'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArMvzZi8GT+lI9KeZiInn\n' \
        '4CvFTiuyid+IN4dP1+mhTnfxX+I/ntt8LUKZMbI1R1izOUoxJRoX6VQ4S9VgDLEC\n' \
        'PW6QlkeLI1eqe4DiYb9+J5ANhq4+XkhwgCUUFwpfqSfXWCHimjaGsZHbavl5nv/6\n' \
        'IbZJL/2YzE37IzJdES16JCfmIUrk6TUqL0WgrWXyweTIoVSbld0M29kToSkMXLsj\n' \
        '8vbQbTiKwViWhYlzi0cQIo7PiAss66lAW0X6AM7ZJYyAcfSjSLR4guMz76Og8aRk\n' \
        'jtsjEEkq7Ndz5H8hllWUoHpxGDqLhM9O1/h+QdvTz7luZgpeJ5KB92vYL6yOlSxM\n' \
        'fQIDAQAB\n' \
        '-----END PUBLIC KEY-----'

    _report_private_key = \
        serialization.load_pem_private_key(
            __REPORT_PRIVATE_KEY_PEM__.encode(),
            password=None,
            backend=backends.default_backend())
    _report_public_key = \
        serialization.load_pem_public_key(
            __REPORT_PUBLIC_KEY_PEM__.encode(),
            backend=backends.default_backend())

    # The anti-sybil ID for this particular validator.  This will get set when
    # the enclave is initialized
    _anti_sybil_id = None

    # The PoET keys will remain unset until signup info is either created or
    # unsealed
    _poet_public_key = None
    _poet_private_key = None
    _active_wait_timer = None

    @classmethod
    def initialize(cls, **kwargs):
        # Create an anti-Sybil ID that is unique for this validator
        cls._anti_sybil_id = \
            hashlib.sha256(
                kwargs.get(
                    'NodeName',
                    datetime.datetime.now().isoformat()).encode()).hexdigest()

    @classmethod
    def create_signup_info(cls, originator_public_key_hash, nonce):
        with cls._lock:
            # First we need to create a public/private key pair for the PoET
            # enclave to use.
            cls._poet_private_key = signing.generate_privkey()
            cls._poet_public_key = \
                signing.generate_pubkey(cls._poet_private_key)
            cls._active_wait_timer = None

            # Simulate sealing (encrypting) the signup data.
            signup_data = {
                'poet_public_key': cls._poet_public_key,
                'poet_private_key': cls._poet_private_key
            }
            sealed_signup_data = \
                base64.b64encode(bytes(dict2json(signup_data).encode()))

            # Build up a fake SGX quote containing:
            # 1. The basename
            # 2. The report body that contains:
            #    a. The enclave measurement
            #    b. The report data SHA256(SHA256(OPK)|PPK)
            sgx_basename = \
                sgx_structs.SgxBasename(name=cls.__VALID_BASENAME__)
            sgx_measurement = \
                sgx_structs.SgxMeasurement(
                    m=cls.__VALID_ENCLAVE_MEASUREMENT__)

            hash_input = \
                '{0}{1}'.format(
                    originator_public_key_hash.upper(),
                    cls._poet_public_key.upper()).encode()
            report_data = hashlib.sha256(hash_input).digest()
            sgx_report_data = sgx_structs.SgxReportData(d=report_data)
            sgx_report_body = \
                sgx_structs.SgxReportBody(
                    mr_enclave=sgx_measurement,
                    report_data=sgx_report_data)

            sgx_quote = \
                sgx_structs.SgxQuote(
                    basename=sgx_basename,
                    report_body=sgx_report_body)

            # Create a fake PSE manifest.  A base64 encoding of the
            # originator public key hash should suffice.
            pse_manifest = \
                base64.b64encode(originator_public_key_hash.encode())

            timestamp = datetime.datetime.now().isoformat()

            # Fake our "proof" data.
            verification_report = {
                'epidPseudonym': originator_public_key_hash,
                'id': base64.b64encode(
                    hashlib.sha256(
                        timestamp.encode()).hexdigest().encode()).decode(),
                'isvEnclaveQuoteStatus': 'OK',
                'isvEnclaveQuoteBody':
                    base64.b64encode(sgx_quote.serialize_to_bytes()).decode(),
                'pseManifestStatus': 'OK',
                'pseManifestHash':
                    base64.b64encode(
                        hashlib.sha256(
                            pse_manifest).hexdigest().encode()).decode(),
                'nonce': nonce,
                'timestamp': timestamp
            }

            # Serialize the verification report, sign it, and then put
            # in the proof data
            verification_report_json = dict2json(verification_report)
            signature = \
                cls._report_private_key.sign(
                    verification_report_json.encode(),
                    padding.PKCS1v15(),
                    hashes.SHA256())

            proof_data_dict = {
                'evidence_payload': {
                    'pse_manifest': pse_manifest.decode()
                },
                'verification_report': verification_report_json,
                'signature': base64.b64encode(signature).decode()
            }
            proof_data = dict2json(proof_data_dict)

            return \
                EnclaveSignupInfo(
                    poet_public_key=signup_data['poet_public_key'],
                    proof_data=proof_data,
                    anti_sybil_id=originator_public_key_hash,
                    sealed_signup_data=sealed_signup_data)

    @classmethod
    def deserialize_signup_info(cls, serialized_signup_info):
        return \
            EnclaveSignupInfo.signup_info_from_serialized(
                serialized_signup_info=serialized_signup_info)

    @classmethod
    def unseal_signup_data(cls, sealed_signup_data):
        """

        Args:
            sealed_signup_data: Sealed signup data that was returned
                previously in a EnclaveSignupInfo object from a call to
                create_signup_info

        Returns:
            A string The hex encoded PoET public key that was extracted from
            the sealed data
        """

        # Reverse the process we used in creating "sealed" signup info.
        # Specifically, we will do a base 64 decode, which gives us JSON
        # we can convert back to a dictionary we can use to get the
        # data we need
        signup_data = \
            json2dict(base64.b64decode(sealed_signup_data).decode())

        with cls._lock:
            cls._poet_public_key = str(signup_data.get('poet_public_key'))
            cls._poet_private_key = str(signup_data.get('poet_private_key'))
            cls._active_wait_timer = None

            return signup_data.get('poet_public_key')
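
    # Illustrative round trip: the sealed blob produced by create_signup_info
    # above is just base64-encoded JSON, so calling unseal_signup_data() on
    # signup_info.sealed_signup_data restores the PoET key pair into the
    # enclave state and returns the same hex-encoded public key that
    # create_signup_info() generated.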

    @classmethod
    def verify_signup_info(cls, signup_info, originator_public_key_hash):
        # Verify the attestation verification report signature
        proof_data_dict = json2dict(signup_info.proof_data)
        verification_report = proof_data_dict.get('verification_report')
        if verification_report is None:
            raise ValueError('Verification report is missing from proof data')

        signature = proof_data_dict.get('signature')
        if signature is None:
            raise ValueError('Signature is missing from proof data')

        try:
            cls._report_public_key.verify(base64.b64decode(signature.encode()),
                                          verification_report.encode(),
                                          padding.PKCS1v15(), hashes.SHA256())
        except InvalidSignature:
            raise ValueError('Verification report signature is invalid')

        verification_report_dict = json2dict(verification_report)

        # Verify that the verification report contains an ID field
        if 'id' not in verification_report_dict:
            raise ValueError('Verification report does not contain an ID')

        # Verify that the verification report contains an EPID pseudonym and
        # that it matches the anti-Sybil ID
        epid_pseudonym = verification_report_dict.get('epidPseudonym')
        if epid_pseudonym is None:
            raise \
                ValueError(
                    'Verification report does not contain an EPID pseudonym')

        if epid_pseudonym != signup_info.anti_sybil_id:
            raise \
                ValueError(
                    'The anti-Sybil ID in the verification report [{0}] does '
                    'not match the one contained in the signup information '
                    '[{1}]'.format(
                        epid_pseudonym,
                        signup_info.anti_sybil_id))

        # Verify that the verification report contains a PSE manifest status
        # and it is OK
        pse_manifest_status = \
            verification_report_dict.get('pseManifestStatus')
        if pse_manifest_status is None:
            raise \
                ValueError(
                    'Verification report does not contain a PSE manifest '
                    'status')
        if pse_manifest_status.upper() != 'OK':
            raise \
                ValueError(
                    'PSE manifest status is {} (i.e., not OK)'.format(
                        pse_manifest_status))

        # Verify that the verification report contains a PSE manifest hash
        pse_manifest_hash = \
            verification_report_dict.get('pseManifestHash')
        if pse_manifest_hash is None:
            raise \
                ValueError(
                    'Verification report does not contain a PSE manifest '
                    'hash')

        # Verify that the proof data contains evidence payload
        evidence_payload = proof_data_dict.get('evidence_payload')
        if evidence_payload is None:
            raise ValueError('Evidence payload is missing from proof data')

        # Verify that the evidence payload contains a PSE manifest and then
        # use it to make sure that the PSE manifest hash is what we expect
        pse_manifest = evidence_payload.get('pse_manifest')
        if pse_manifest is None:
            raise ValueError('Evidence payload does not include PSE manifest')

        expected_pse_manifest_hash = \
            base64.b64encode(
                hashlib.sha256(
                    pse_manifest.encode()).hexdigest().encode()).decode()

        if pse_manifest_hash.upper() != expected_pse_manifest_hash.upper():
            raise \
                ValueError(
                    'PSE manifest hash {0} does not match {1}'.format(
                        pse_manifest_hash,
                        expected_pse_manifest_hash))

        # Verify that the verification report contains an enclave quote status
        # and the status is OK
        enclave_quote_status = \
            verification_report_dict.get('isvEnclaveQuoteStatus')
        if enclave_quote_status is None:
            raise \
                ValueError(
                    'Verification report does not contain an enclave quote '
                    'status')
        if enclave_quote_status.upper() != 'OK':
            raise \
                ValueError(
                    'Enclave quote status is {} (i.e., not OK)'.format(
                        enclave_quote_status))

        # Verify that the verification report contains an enclave quote
        enclave_quote = verification_report_dict.get('isvEnclaveQuoteBody')
        if enclave_quote is None:
            raise \
                ValueError(
                    'Verification report does not contain an enclave quote')

        # The ISV enclave quote body is base 64 encoded, so decode it and then
        # create an SGX quote structure from it so we can inspect
        sgx_quote = sgx_structs.SgxQuote()
        sgx_quote.parse_from_bytes(base64.b64decode(enclave_quote))

        # The report body should be SHA256(SHA256(OPK)|PPK)
        #
        # NOTE - since the code that created the report data is in the enclave
        # code, this code needs to be kept in sync with it.  Any changes to how
        # the report data is created, needs to be reflected in how we re-create
        # the report data for verification.

        hash_input = \
            '{0}{1}'.format(
                originator_public_key_hash.upper(),
                cls._poet_public_key.upper()).encode()
        hash_value = hashlib.sha256(hash_input).digest()
        expected_report_data = \
            hash_value + \
            (b'\x00' *
             (sgx_structs.SgxReportData.STRUCT_SIZE - len(hash_value)))

        if sgx_quote.report_body.report_data.d != expected_report_data:
            raise \
                ValueError(
                    'AVR report data [{0}] not equal to [{1}]'.format(
                        sgx_quote.report_body.report_data.d.hex(),
                        expected_report_data.hex()))

        # Compare the enclave measurement against the expected valid enclave
        # measurement.
        #
        # NOTE - this is only a temporary check.  Instead of checking against
        # a predefined enclave measurement value, we should be configured with
        # a set of one or more enclave measurement values that we will
        # consider as valid.

        if sgx_quote.report_body.mr_enclave.m != \
                cls.__VALID_ENCLAVE_MEASUREMENT__:
            raise \
                ValueError(
                    'AVR enclave measurement [{0}] not equal to [{1}]'.format(
                        sgx_quote.report_body.mr_enclave.m.hex(),
                        cls.__VALID_ENCLAVE_MEASUREMENT__.hex()))

        # Compare the enclave basename in the verification report against the
        # expected enclave basename.
        #
        # NOTE - this is only a temporary check.  Instead of checking against
        # a predefined enclave basename value, we should be configured with a
        # set of one or more enclave basenames that we will consider as valid.

        if sgx_quote.basename.name != cls.__VALID_BASENAME__:
            raise \
                ValueError(
                    'AVR enclave basename [{0}] not equal to [{1}]'.format(
                        sgx_quote.basename.name.hex(),
                        cls.__VALID_BASENAME__.hex()))

    @classmethod
    def create_wait_timer(cls, validator_address, previous_certificate_id,
                          local_mean, minimum_wait_time):
        with cls._lock:
            # If we don't have a PoET private key, then the enclave has not
            # been properly initialized (either by calling create_signup_info
            # or unseal_signup_data)
            if cls._poet_private_key is None:
                raise \
                    ValueError(
                        'Enclave must be initialized before attempting to '
                        'create a wait timer')

            # Create some value from the cert ID.  We are just going to use
            # the seal key to sign the cert ID.  We will then use the
            # low-order 64 bits of the signature to derive a number in [0, 1]
            tag = \
                base64.b64decode(
                    signing.sign(
                        previous_certificate_id,
                        cls._seal_private_key))

            tagd = float(struct.unpack('Q', tag[-8:])[0]) / (2**64 - 1)

            # Now compute the duration with a minimum wait time guaranteed
            duration = minimum_wait_time - local_mean * math.log(tagd)
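
            # Worked example (illustrative values): with tagd = 0.5,
            # local_mean = 20.0 and minimum_wait_time = 1.0, this gives
            # 1.0 - 20.0 * ln(0.5) ~= 1.0 + 13.86 = 14.86 seconds.  Since
            # -ln(U) is exponentially distributed for U uniform on (0, 1],
            # durations are exponential with mean local_mean, shifted up by
            # minimum_wait_time.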

            # Create and sign the wait timer
            wait_timer = \
                EnclaveWaitTimer(
                    validator_address=validator_address,
                    duration=duration,
                    previous_certificate_id=previous_certificate_id,
                    local_mean=local_mean)
            wait_timer.signature = \
                signing.sign(
                    wait_timer.serialize(),
                    cls._poet_private_key)

            # Keep track of the active wait timer
            cls._active_wait_timer = wait_timer

            return wait_timer

    @classmethod
    def deserialize_wait_timer(cls, serialized_timer, signature):
        with cls._lock:
            # Verify the signature before trying to deserialize
            if not signing.verify(serialized_timer, signature,
                                  cls._poet_public_key):
                return None

        return \
            EnclaveWaitTimer.wait_timer_from_serialized(
                serialized_timer=serialized_timer,
                signature=signature)

    @classmethod
    def create_wait_certificate(cls, wait_timer, block_hash):
        with cls._lock:
            # If we don't have a PoET private key, then the enclave has not
            # been properly initialized (either by calling create_signup_info
            # or unseal_signup_data)
            if cls._poet_private_key is None:
                raise \
                    ValueError(
                        'Enclave must be initialized before attempting to '
                        'create a wait certificate')

            # Several criteria need to be met before we can create a wait
            # certificate:
            # 1. We have an active timer
            # 2. The caller's wait timer is the active wait timer.  We are
            #    not going to rely on the objects being the same, but will
            #    compute a signature over the caller's timer and verify that
            #    it matches the active timer's signature.
            # 3. The active timer has expired
            # 4. The active timer has not timed out
            #
            # Note - we make a concession for the genesis block (i.e., a wait
            # timer for which the previous certificate ID is the Null
            # identifier) in that we don't require the timer to have expired
            # and we don't worry about the timer having timed out.
            if cls._active_wait_timer is None:
                raise \
                    ValueError(
                        'There is not a current enclave active wait timer')

            if wait_timer is None or \
                    cls._active_wait_timer.signature != \
                    signing.sign(
                        wait_timer.serialize(),
                        cls._poet_private_key):
                raise \
                    ValueError(
                        'Validator is not using the current wait timer')

            is_not_genesis_block = \
                (cls._active_wait_timer.previous_certificate_id !=
                 NULL_BLOCK_IDENTIFIER)

            now = time.time()
            expire_time = \
                cls._active_wait_timer.request_time + \
                cls._active_wait_timer.duration

            if is_not_genesis_block and now < expire_time:
                raise \
                    ValueError(
                        'Cannot create wait certificate because timer has '
                        'not expired')

            time_out_time = \
                cls._active_wait_timer.request_time + \
                cls._active_wait_timer.duration + \
                TIMER_TIMEOUT_PERIOD

            if is_not_genesis_block and time_out_time < now:
                raise \
                    ValueError(
                        'Cannot create wait certificate because timer '
                        'has timed out')

            # Create a random nonce for the certificate.  For our "random"
            # nonce we will take the timer signature, concat that with the
            # current time, JSON-ize it and create a SHA-256 hash over it.
            # Probably not considered random by security professionals'
            # standards, but it is good enough for the simulator.
            random_string = \
                dict2json({
                    'wait_timer_signature': cls._active_wait_timer.signature,
                    'now': datetime.datetime.utcnow().isoformat()
                })
            nonce = hashlib.sha256(random_string.encode()).hexdigest()

            # First create a new enclave wait certificate using the data
            # provided and then sign the certificate with the PoET private key
            wait_certificate = \
                EnclaveWaitCertificate.wait_certificate_with_wait_timer(
                    wait_timer=cls._active_wait_timer,
                    nonce=nonce,
                    block_hash=block_hash)
            wait_certificate.signature = \
                signing.sign(
                    wait_certificate.serialize(),
                    cls._poet_private_key)

            # Now that we have created the certificate, we no longer have an
            # active timer
            cls._active_wait_timer = None

            return wait_certificate

    @classmethod
    def deserialize_wait_certificate(cls, serialized_certificate, signature):
        return \
            EnclaveWaitCertificate.wait_certificate_from_serialized(
                serialized_certificate=serialized_certificate,
                signature=signature)

    @classmethod
    def verify_wait_certificate(cls, certificate, poet_public_key):
        # Since the signing module uses a hex-encoded string as the canonical
        # format for public keys and we should be handed a public key that was
        # part of signup information created by us, don't bother decoding
        # the public key.
        if not \
            signing.verify(
                certificate.serialize(),
                certificate.signature,
                poet_public_key):
            raise ValueError('Wait certificate signature does not match')
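
The enclave methods above compose into a simple lifecycle: sign up once, then
repeatedly create a wait timer, let it expire, and exchange it for a wait
certificate that peers can verify.  A minimal sketch of that flow, assuming
the class above is importable as PoetEnclaveSimulator (a hypothetical name;
the class statement sits above this excerpt) and that NULL_BLOCK_IDENTIFIER
marks the genesis case:

import hashlib
import time

enclave = PoetEnclaveSimulator  # every method above is a classmethod

enclave.initialize(NodeName='validator_001')
opk_hash = hashlib.sha256(b'originator public key').hexdigest()
signup_info = enclave.create_signup_info(opk_hash, nonce='0123456789abcdef')

timer = enclave.create_wait_timer(
    validator_address='validator_001',
    previous_certificate_id=NULL_BLOCK_IDENTIFIER,
    local_mean=5.0,
    minimum_wait_time=1.0)

# For the genesis case the timer need not expire; for any other previous
# certificate ID, create_wait_certificate() requires the wait to elapse.
time.sleep(timer.duration)

certificate = enclave.create_wait_certificate(
    wait_timer=timer,
    block_hash='00ff' * 16)

# Raises ValueError if the certificate signature does not match.
enclave.verify_wait_certificate(certificate, signup_info.poet_public_key)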

    @classmethod
    def create_signup_info(cls,
                           originator_public_key_hash,
                           nonce):
        with cls._lock:
            # First we need to create a public/private key pair for the PoET
            # enclave to use.
            cls._poet_private_key = signing.generate_privkey()
            cls._poet_public_key = \
                signing.generate_pubkey(cls._poet_private_key)
            cls._active_wait_timer = None

            # Simulate sealing (encrypting) the signup data.
            signup_data = {
                'poet_public_key': cls._poet_public_key,
                'poet_private_key': cls._poet_private_key
            }
            sealed_signup_data = \
                base64.b64encode(bytes(dict2json(signup_data).encode()))

            # Build up a fake SGX quote containing:
            # 1. The basename
            # 2. The report body that contains:
            #    a. The enclave measurement
            #    b. The report data SHA256(SHA256(OPK)|PPK)
            sgx_basename = \
                sgx_structs.SgxBasename(name=cls.__VALID_BASENAME__)
            sgx_measurement = \
                sgx_structs.SgxMeasurement(
                    m=cls.__VALID_ENCLAVE_MEASUREMENT__)

            hash_input = \
                '{0}{1}'.format(
                    originator_public_key_hash.upper(),
                    cls._poet_public_key.upper()).encode()
            report_data = hashlib.sha256(hash_input).digest()
            sgx_report_data = sgx_structs.SgxReportData(d=report_data)
            sgx_report_body = \
                sgx_structs.SgxReportBody(
                    mr_enclave=sgx_measurement,
                    report_data=sgx_report_data)

            sgx_quote = \
                sgx_structs.SgxQuote(
                    basename=sgx_basename,
                    report_body=sgx_report_body)

            # Create a fake PSE manifest.  A base64 encoding of the
            # originator public key hash should suffice.
            pse_manifest = \
                base64.b64encode(originator_public_key_hash.encode())

            timestamp = datetime.datetime.now().isoformat()

            # Fake our "proof" data.
            verification_report = {
                'epidPseudonym': cls._anti_sybil_id,
                'id': base64.b64encode(
                    hashlib.sha256(
                        timestamp.encode()).hexdigest().encode()).decode(),
                'isvEnclaveQuoteStatus': 'OK',
                'isvEnclaveQuoteBody':
                    base64.b64encode(sgx_quote.serialize_to_bytes()).decode(),
                'pseManifestStatus': 'OK',
                'pseManifestHash':
                    base64.b64encode(
                        hashlib.sha256(
                            pse_manifest).hexdigest().encode()).decode(),
                'nonce': nonce,
                'timestamp': timestamp
            }

            # Serialize the verification report, sign it, and then put
            # in the proof data
            verification_report_json = dict2json(verification_report)
            signature = \
                cls._report_private_key.sign(
                    verification_report_json.encode(),
                    padding.PKCS1v15(),
                    hashes.SHA256())

            proof_data_dict = {
                'evidence_payload': {
                    'pse_manifest': pse_manifest.decode()
                },
                'verification_report': verification_report_json,
                'signature': base64.b64encode(signature).decode()
            }
            proof_data = dict2json(proof_data_dict)

            return \
                EnclaveSignupInfo(
                    poet_public_key=signup_data['poet_public_key'],
                    proof_data=proof_data,
                    anti_sybil_id=cls._anti_sybil_id,
                    sealed_signup_data=sealed_signup_data)
    def test_block_publisher_doesnt_claim_readiness(
            self, mock_utils, mock_validator_registry_view,
            mock_consensus_state, mock_poet_enclave_factory,
            mock_consensus_state_store, mock_poet_key_state_store,
            mock_signup_info, mock_wait_time, mock_poet_config_view,
            mock_block_wrapper):
        """ Test verifies that PoET Block Publisher doesn't
         claims readiness if the wait timer hasn't expired
        """

        # create a mock_validator_registry_view with
        # get_validator_info that does nothing
        mock_validator_registry_view.return_value.get_validator_info. \
            return_value = \
            ValidatorInfo(
                name='validator_001',
                id='validator_deadbeef',
                signup_info=SignUpInfo(
                    poet_public_key='00112233445566778899aabbccddeeff'))

        # create a mock_consensus_state that returns a mock with
        # the following settings:
        mock_state = mock.Mock()
        mock_state.validator_signup_was_committed_too_late.return_value = False
        mock_state.validator_has_claimed_block_limit.return_value = False
        mock_state.validator_is_claiming_too_early.return_value = False
        mock_state.validator_is_claiming_too_frequently.return_value = False

        mock_consensus_state.consensus_state_for_block_id.return_value = \
            mock_state

        mock_consensus_state_store.return_value.__getitem__.return_value = \
            mock_consensus_state

        # create mock_signup_info
        mock_signup_info.unseal_signup_data.return_value = \
            '00112233445566778899aabbccddeeff'

        # create mock_batch_publisher
        mock_batch_publisher = mock.Mock(
            identity_signing_key=signing.generate_privkey())

        mock_block_cache = mock.MagicMock()
        mock_state_view_factory = mock.Mock()

        # create mock_block_header with the following fields
        mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
        mock_block.header.signer_pubkey = '90834587139405781349807435098745'
        mock_block.header.previous_block_id = '2'
        mock_block.header.block_num = 1
        mock_block.header.state_root_hash = '6'
        mock_block.header.batch_ids = '4'

        # create a mock_wait_timer that hasn't expired yet
        my_wait_time = mock.Mock()
        my_wait_time.has_expired.return_value = False

        mock_wait_time.create_wait_timer.return_value = my_wait_time

        # create mock_poet_enclave_module
        mock_poet_enclave_module = mock.Mock()
        mock_poet_enclave_module.return_value = \
            mock_poet_enclave_factory.get_poet_enclave_module.return_value

        # check test
        block_publisher = \
            poet_block_publisher.PoetBlockPublisher(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                batch_publisher=mock_batch_publisher,
                data_dir=self._temp_dir,
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')

        # check initialize_block() first to set wait_timer
        self.assertTrue(
            block_publisher.initialize_block(block_header=mock_block.header))

        # check that block_publisher only claims readiness
        # when the wait_timer has expired
        self.assertFalse(
            block_publisher.check_publish_block(
                block_header=mock_block.header))
Example #30
    def test_signup_info_not_committed_within_allowed_delay(
            self, mock_utils, mock_validator_registry_view,
            mock_consensus_state, mock_poet_enclave_factory,
            mock_consensus_state_store, mock_poet_key_state_store,
            mock_signup_info, mock_poet_config_view, mock_block_wrapper):
        """ Test verifies that PoET Block Publisher fails if
        a validator's signup info was not committed to
        the block chain within the allowed configured delay
        """

        # create a mock_validator_registry_view with
        # get_validator_info that does nothing
        mock_validator_registry_view.return_value.get_validator_info. \
            return_value = \
            ValidatorInfo(
                name='validator_001',
                id='validator_deadbeef',
                signup_info=SignUpInfo(
                    poet_public_key='00112233445566778899aabbccddeeff',
                    nonce='nonce'))

        # create a mock_wait_certificate that does nothing in check_valid
        mock_wait_certificate = mock.Mock()
        mock_wait_certificate.check_valid.return_value = None

        mock_utils.deserialize_wait_certificate.return_value = \
            mock_wait_certificate

        # create a mock_consensus_state that returns a mock with
        # the following settings:
        mock_state = MockConsensusState.create_mock_consensus_state(
            committed_too_late=True)
        mock_consensus_state.consensus_state_for_block_id.return_value = \
            mock_state

        mock_consensus_state_store.return_value.__getitem__.return_value = \
            mock_consensus_state

        # create mock_signup_info
        mock_signup_info.create_signup_info.return_value = \
            mock.Mock(
                poet_public_key='poet public key',
                proof_data='proof data',
                anti_sybil_id='anti-sybil ID',
                sealed_signup_data='sealed signup data')
        mock_signup_info.block_id_to_nonce.return_value = 'nonce'
        mock_signup_info.unseal_signup_data.return_value = \
            '00112233445566778899aabbccddeeff'

        # create mock_batch_publisher
        mock_batch_publisher = mock.Mock(
            identity_signing_key=signing.generate_privkey())

        mock_block_cache = mock.MagicMock()
        mock_state_view_factory = mock.Mock()

        # create mock_block_header with the following fields
        mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
        mock_block.header.signer_pubkey = '90834587139405781349807435098745'
        mock_block.header.previous_block_id = '2'
        mock_block.header.block_num = 1
        mock_block.header.state_root_hash = '6'
        mock_block.header.batch_ids = '4'

        # check test
        with mock.patch('sawtooth_poet.poet_consensus.poet_block_publisher.'
                        'LOGGER') as mock_logger:
            block_publisher = \
                poet_block_publisher.PoetBlockPublisher(
                    block_cache=mock_block_cache,
                    state_view_factory=mock_state_view_factory,
                    batch_publisher=mock_batch_publisher,
                    data_dir=self._temp_dir,
                    config_dir=self._temp_dir,
                    validator_id='validator_deadbeef')

            self.assertFalse(
                block_publisher.initialize_block(
                    block_header=mock_block.header))

            # Could be a hack, but verify that the appropriate log message is
            # generated - so we at least have some faith that the failure was
            # because of what we are testing and not something else.  I know
            # that this is fragile if the log message is changed, so would
            # accept any suggestions on a better way to verify that the
            # function fails for the reason we expect.

            (message, *_), _ = mock_logger.error.call_args
            self.assertTrue('Validator signup information '
                            'not committed in a timely manner.' in message)

            # check that create.signup_info() was called to create
            # the validator registry payload with new set of keys
            self.assertTrue(mock_signup_info.create_signup_info.called)
Example #31
    def setUp(self):
        self._temp_dir = tempfile.mkdtemp()
        self._identity_key = signing.generate_privkey()

    def test_block_publisher_doesnt_finalize_block(
            self, mock_utils, mock_validator_registry_view,
            mock_consensus_state, mock_poet_enclave_factory,
            mock_consensus_state_store, mock_poet_key_state_store,
            mock_signup_info, mock_wait_certificate, mock_poet_settings_view,
            mock_block_wrapper):
        """ Test verifies that PoET Block Publisher doesn't finalize
            a candidate block that doesn't have a valid wait certificate.
        """

        # create a mock_validator_registry_view with
        # get_validator_info that does nothing
        mock_validator_registry_view.return_value.get_validator_info. \
            return_value = \
            ValidatorInfo(
                name='validator_001',
                id='validator_deadbeef',
                signup_info=SignUpInfo(
                    poet_public_key='00112233445566778899aabbccddeeff'))

        # create a mock_wait_certificate that pretends to fail
        mock_wait_certificate.create_wait_certificate.side_effect = \
            ValueError('Unit test fake failure')

        # create a mock_consensus_state that returns a mock with
        # the following settings:
        mock_state = MockConsensusState.create_mock_consensus_state()

        mock_consensus_state.consensus_state_for_block_id.return_value = \
            mock_state

        # create mock_batch_publisher
        mock_batch_publisher = mock.Mock(
            identity_signing_key=signing.generate_privkey())

        mock_block_cache = mock.MagicMock()
        mock_state_view_factory = mock.Mock()

        # create mock_block_header with the following fields
        mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
        mock_block.header.signer_pubkey = '90834587139405781349807435098745'
        mock_block.header.previous_block_id = '2'
        mock_block.header.block_num = 1
        mock_block.header.state_root_hash = '6'
        mock_block.header.batch_ids = '4'

        # check test
        with mock.patch('sawtooth_poet.poet_consensus.poet_block_publisher.'
                        'LOGGER') as mock_logger:
            block_publisher = \
                poet_block_publisher.PoetBlockPublisher(
                    block_cache=mock_block_cache,
                    state_view_factory=mock_state_view_factory,
                    batch_publisher=mock_batch_publisher,
                    data_dir=self._temp_dir,
                    config_dir=self._temp_dir,
                    validator_id='validator_deadbeef')

            with mock.patch('sawtooth_poet.poet_consensus.'
                            'poet_block_publisher.json') as _:
                self.assertFalse(
                    block_publisher.finalize_block(
                        block_header=mock_block.header))

            # Could be a hack, but verify that the appropriate log message is
            # generated - so we at least have some faith that the failure was
            # because of what we are testing and not something else.  I know
            # that this is fragile if the log message is changed, so would
            # accept any suggestions on a better way to verify that the
            # function fails for the reason we expect.

            (message, *_), _ = mock_logger.error.call_args
            self.assertTrue('Failed to create wait certificate: ' in message)
Example #33
    def _sequential_add_batch_after_all_results_set(self,
                                                    scheduler,
                                                    context_manager):
        """Tests that adding a new batch only after setting all of the
        txn results will produce only expected state roots. Here no state
        roots were specified, so similar to block publishing use of scheduler.
        Basically:
            1) Create 3 batches, the last being marked as having an invalid
               transaction. Add one batch and then while the scheduler keeps
               on returning transactions, set the txn result, and then
               call next_transaction.
            2) Call finalize, and then assert that the scheduler is complete
            3) Assert that the first batch is valid and has no state hash,
               the second batch is valid and since it is the last valid batch
               in the scheduler has a state hash, and that the third batch
               is invalid and consequently has no state hash.
        """

        private_key = signing.generate_privkey()
        public_key = signing.generate_pubkey(private_key)

        # 1)
        batch_signatures = []
        batches = []
        for names in [['a', 'b'], ['d', 'e'], ['invalid', 'c']]:
            batch_txns = []
            for name in names:
                txn, _ = create_transaction(
                    payload=name.encode(),
                    private_key=private_key,
                    public_key=public_key)

                batch_txns.append(txn)

            batch = create_batch(
                transactions=batch_txns,
                private_key=private_key,
                public_key=public_key)
            batches.append(batch)
            batch_signatures.append(batch.header_signature)
        invalid_payload_sha = hashlib.sha512(
            'invalid'.encode()).hexdigest()
        for batch in batches:
            scheduler.add_batch(batch=batch)
            txn_info = scheduler.next_transaction()
            while txn_info is not None:
                txn_header = transaction_pb2.TransactionHeader()
                txn_header.ParseFromString(txn_info.txn.header)
                inputs_outputs = list(txn_header.inputs)
                c_id = context_manager.create_context(
                    state_hash=context_manager.get_first_root(),
                    base_contexts=txn_info.base_context_ids,
                    inputs=list(txn_header.inputs),
                    outputs=list(txn_header.outputs))
                context_manager.set(
                    context_id=c_id,
                    address_value_list=[{inputs_outputs[0]: b'5'}])
                if txn_header.payload_sha512 == invalid_payload_sha:
                    scheduler.set_transaction_execution_result(
                        txn_info.txn.header_signature,
                        is_valid=False,
                        context_id=None)
                else:
                    scheduler.set_transaction_execution_result(
                        txn_info.txn.header_signature,
                        is_valid=True,
                        context_id=c_id)
                txn_info = scheduler.next_transaction()

        # 2)
        scheduler.finalize()
        self.assertTrue(scheduler.complete(block=False),
                        "The scheduler has had all txn results set so after "
                        "calling finalize the scheduler is complete")
        # 3)
        first_batch_id = batch_signatures.pop(0)
        result1 = scheduler.get_batch_execution_result(first_batch_id)
        self.assertEqual(
            result1.is_valid,
            True,
            "The first batch is valid")
        self.assertIsNone(result1.state_hash,
                          "The first batch doesn't produce"
                          " a state hash")
        second_batch_id = batch_signatures.pop(0)
        result2 = scheduler.get_batch_execution_result(second_batch_id)
        self.assertEqual(
            result2.is_valid,
            True,
            "The second batch is valid")
        self.assertIsNotNone(result2.state_hash, "The second batch is the "
                                                 "last valid batch in the "
                                                 "scheduler")

        third_batch_id = batch_signatures.pop(0)
        result3 = scheduler.get_batch_execution_result(third_batch_id)
        self.assertEqual(result3.is_valid, False)
        self.assertIsNone(result3.state_hash,
                          "The last batch is invalid so "
                          "doesn't have a state hash")

    @classmethod
    def create_signup_info(cls,
                           originator_public_key_hash,
                           nonce):
        with cls._lock:
            # First we need to create a public/private key pair for the PoET
            # enclave to use.
            cls._poet_private_key = signing.generate_privkey()
            cls._poet_public_key = \
                signing.generate_pubkey(cls._poet_private_key)
            cls._active_wait_timer = None

            # Simulate sealing (encrypting) the signup data.
            signup_data = {
                'poet_public_key': cls._poet_public_key,
                'poet_private_key': cls._poet_private_key
            }
            sealed_signup_data = \
                base64.b64encode(
                    dict2json(signup_data).encode()).decode('utf-8')

            # Build up a fake SGX quote containing:
            # 1. The basename
            # 2. The report body that contains:
            #    a. The enclave measurement
            #    b. The report data SHA256(SHA256(OPK)|PPK)
            sgx_basename = \
                sgx_structs.SgxBasename(name=cls.__VALID_BASENAME__)
            sgx_measurement = \
                sgx_structs.SgxMeasurement(
                    m=cls.__VALID_ENCLAVE_MEASUREMENT__)

            hash_input = \
                '{0}{1}'.format(
                    originator_public_key_hash.upper(),
                    cls._poet_public_key.upper()).encode()
            report_data = hashlib.sha256(hash_input).digest()
            sgx_report_data = sgx_structs.SgxReportData(d=report_data)
            sgx_report_body = \
                sgx_structs.SgxReportBody(
                    mr_enclave=sgx_measurement,
                    report_data=sgx_report_data)

            sgx_quote = \
                sgx_structs.SgxQuote(
                    basename=sgx_basename,
                    report_body=sgx_report_body)

            # Create a fake PSE manifest.  A base64 encoding of the
            # originator public key hash should suffice.
            pse_manifest = \
                base64.b64encode(originator_public_key_hash.encode())

            timestamp = datetime.datetime.now().isoformat()

            # Fake our "proof" data.
            verification_report = {
                'epidPseudonym': cls._anti_sybil_id,
                'id': base64.b64encode(
                    hashlib.sha256(
                        timestamp.encode()).hexdigest().encode()).decode(),
                'isvEnclaveQuoteStatus': 'OK',
                'isvEnclaveQuoteBody':
                    base64.b64encode(sgx_quote.serialize_to_bytes()).decode(),
                'pseManifestStatus': 'OK',
                'pseManifestHash':
                    hashlib.sha256(base64.b64decode(pse_manifest)).hexdigest(),
                'nonce': nonce,
                'timestamp': timestamp
            }

            # Serialize the verification report, sign it, and then put
            # in the proof data
            verification_report_json = dict2json(verification_report)
            signature = \
                cls._report_private_key.sign(
                    verification_report_json.encode(),
                    padding.PKCS1v15(),
                    hashes.SHA256())

            proof_data_dict = {
                'evidence_payload': {
                    'pse_manifest': pse_manifest.decode()
                },
                'verification_report': verification_report_json,
                'signature': base64.b64encode(signature).decode()
            }
            proof_data = dict2json(proof_data_dict)

            return \
                EnclaveSignupInfo(
                    poet_public_key=signup_data['poet_public_key'],
                    proof_data=proof_data,
                    anti_sybil_id=cls._anti_sybil_id,
                    sealed_signup_data=sealed_signup_data)
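
Both this create_signup_info() and the verify_signup_info() shown earlier in
this collection sign and check the serialized verification report with
PKCS#1 v1.5 padding and SHA-256.  A self-contained sketch of that sign and
verify round trip using the cryptography package, with a freshly generated
throwaway RSA key standing in for the hard-coded report key:

from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

report_key = rsa.generate_private_key(
    public_exponent=65537,
    key_size=2048,
    backend=backends.default_backend())

report = b'{"isvEnclaveQuoteStatus": "OK"}'
signature = report_key.sign(report, padding.PKCS1v15(), hashes.SHA256())

# verify() returns None on success and raises
# cryptography.exceptions.InvalidSignature on any mismatch.
report_key.public_key().verify(
    signature, report, padding.PKCS1v15(), hashes.SHA256())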
Example #35
    def test_transaction_order_with_dependencies(self):
        """Tests the that transactions are returned in the expected order given
        dependencies implied by state.

        Creates one batch with four transactions.
        """
        private_key = signing.generate_privkey()
        public_key = signing.generate_pubkey(private_key)

        txns = []
        headers = []

        txn, header = create_transaction(
            payload='a'.encode(),
            private_key=private_key,
            public_key=public_key)
        txns.append(txn)
        headers.append(header)

        txn, header = create_transaction(
            payload='b'.encode(),
            private_key=private_key,
            public_key=public_key)
        txns.append(txn)
        headers.append(header)

        txn, header = create_transaction(
            payload='aa'.encode(),
            private_key=private_key,
            public_key=public_key,
            inputs=['000000' + hashlib.sha512('a'.encode()).hexdigest()[:64]],
            outputs=['000000' + hashlib.sha512('a'.encode()).hexdigest()[:64]])
        txns.append(txn)
        headers.append(header)

        txn, header = create_transaction(
            payload='bb'.encode(),
            private_key=private_key,
            public_key=public_key,
            inputs=['000000' + hashlib.sha512('b'.encode()).hexdigest()[:64]],
            outputs=['000000' + hashlib.sha512('b'.encode()).hexdigest()[:64]])
        txns.append(txn)
        headers.append(header)

        batch = create_batch(
            transactions=txns,
            private_key=private_key,
            public_key=public_key)

        self.scheduler.add_batch(batch)
        self.scheduler.finalize()
        self.assertFalse(self.scheduler.complete(block=False))

        iterable = iter(self.scheduler)
        scheduled_txn_info = []

        self.assertEqual(2, self.scheduler.available())
        scheduled_txn_info.append(next(iterable))
        self.assertIsNotNone(scheduled_txn_info[0])
        self.assertEqual(txns[0].payload, scheduled_txn_info[0].txn.payload)
        self.assertFalse(self.scheduler.complete(block=False))

        self.assertEqual(1, self.scheduler.available())
        scheduled_txn_info.append(next(iterable))
        self.assertIsNotNone(scheduled_txn_info[1])
        self.assertEqual(txns[1].payload, scheduled_txn_info[1].txn.payload)
        self.assertFalse(self.scheduler.complete(block=False))

        self.assertEqual(0, self.scheduler.available())
        context_id1 = self.context_manager.create_context(
            state_hash=self.first_state_root,
            inputs=list(headers[1].inputs),
            outputs=list(headers[1].outputs),
            base_contexts=[])
        self.scheduler.set_transaction_execution_result(
            txns[1].header_signature, True, context_id1)

        self.assertEqual(1, self.scheduler.available())
        scheduled_txn_info.append(next(iterable))
        self.assertIsNotNone(scheduled_txn_info[2])
        self.assertEqual(txns[3].payload, scheduled_txn_info[2].txn.payload)
        self.assertFalse(self.scheduler.complete(block=False))

        self.assertEqual(0, self.scheduler.available())
        context_id2 = self.context_manager.create_context(
            state_hash=self.first_state_root,
            inputs=list(headers[0].inputs),
            outputs=list(headers[0].outputs),
            base_contexts=[context_id1])
        self.scheduler.set_transaction_execution_result(
            txns[0].header_signature, True, context_id2)

        self.assertEqual(1, self.scheduler.available())
        scheduled_txn_info.append(next(iterable))
        self.assertIsNotNone(scheduled_txn_info[3])
        self.assertEqual(txns[2].payload, scheduled_txn_info[3].txn.payload)
        self.assertFalse(self.scheduler.complete(block=False))

        self.assertEqual(0, self.scheduler.available())
        context_id3 = self.context_manager.create_context(
            state_hash=self.first_state_root,
            inputs=list(headers[2].inputs),
            outputs=list(headers[2].outputs),
            base_contexts=[context_id2])
        self.scheduler.set_transaction_execution_result(
            txns[2].header_signature, True, context_id3)
        context_id4 = self.context_manager.create_context(
            state_hash=self.first_state_root,
            inputs=list(headers[3].inputs),
            outputs=list(headers[3].outputs),
            base_contexts=[context_id3])
        self.scheduler.set_transaction_execution_result(
            txns[3].header_signature, True, context_id4)

        self.assertEqual(0, self.scheduler.available())
        self.assertTrue(self.scheduler.complete(block=False))
        with self.assertRaises(StopIteration):
            next(iterable)

        result = self.scheduler.get_batch_execution_result(
            batch.header_signature)
        self.assertIsNotNone(result)
        self.assertTrue(result.is_valid)
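
The ordering asserted above is implied entirely by address overlap: the
observed schedule implies create_transaction() derives default inputs and
outputs from the payload, so 'aa' touches the same address as 'a' and 'bb'
the same address as 'b'.  A small sketch of the toy address scheme the
explicit inputs/outputs above use (the '000000' namespace prefix is just
this test's convention):

import hashlib

def make_address(name):
    # 6-hex-char namespace prefix plus the first 64 hex characters of the
    # SHA-512 of the key name, matching the inputs/outputs built above.
    return '000000' + hashlib.sha512(name.encode()).hexdigest()[:64]

# 'aa' declares make_address('a') as both an input and an output, so the
# scheduler cannot release it until the transaction writing that address
# has an execution result.
assert make_address('a') != make_address('b')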
    def test_k_policy(self, mock_utils, mock_validator_registry_view,
                      mock_consensus_state, mock_poet_enclave_factory,
                      mock_consensus_state_store, mock_poet_key_state_store,
                      mock_signup_info, mock_poet_settings_view,
                      mock_block_wrapper):
        """ K Policy: Test verifies that PoET Block Publisher fails if
            if a validator attempts to claim more blocks than is allowed
            by the key block claim limit
        """

        # create a mock_validator_registry_view with
        # get_validator_info that does nothing
        mock_validator_registry_view.return_value.get_validator_info. \
            return_value = \
            ValidatorInfo(
                name='validator_001',
                id='validator_deadbeef',
                signup_info=SignUpInfo(
                    poet_public_key='00112233445566778899aabbccddeeff',
                    anti_sybil_id='Sally Fields',
                    proof_data="You can't handle the truth"))

        # create a mock_wait_certificate that does nothing in check_valid
        mock_wait_certificate = mock.Mock()
        mock_wait_certificate.check_valid.return_value = None

        mock_utils.deserialize_wait_certificate.return_value = \
            mock_wait_certificate

        # create a mock_consensus_state that returns a mock with
        # the following settings:
        mock_state = MockConsensusState.create_mock_consensus_state(
            claimed_block_limit=True)

        mock_consensus_state.consensus_state_for_block_id.return_value = \
            mock_state

        mock_consensus_state_store.return_value.__getitem__.return_value = \
            mock_consensus_state

        # Create mock key state
        mock_poet_key_state_store.return_value.__getitem__.return_value = \
            mock.Mock(
                sealed_signup_data='sealed signup data',
                has_been_refreshed=False)

        # create mock_signup_info
        mock_signup_info.block_id_to_nonce.return_value = 'nonce'
        mock_signup_info.create_signup_info.return_value = \
            mock.Mock(
                poet_public_key='00112233445566778899aabbccddeeff',
                anti_sybil_id='Sally Fields',
                proof_data="You can't handle the truth",
                sealed_signup_data='00112233445566778899aabbccddee')
        mock_signup_info.unseal_signup_data.return_value = \
            '00112233445566778899aabbccddeeff'

        # create mock_batch_publisher
        mock_batch_publisher = mock.Mock(
            identity_signing_key=signing.generate_privkey())

        mock_block_cache = mock.MagicMock()
        mock_state_view_factory = mock.Mock()

        # create mock_block_header with the following fields
        mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
        mock_block.header.signer_pubkey = '90834587139405781349807435098745'
        mock_block.header.previous_block_id = '2'
        mock_block.header.block_num = 1
        mock_block.header.state_root_hash = '6'
        mock_block.header.batch_ids = '4'

        # check test
        with mock.patch('sawtooth_poet.poet_consensus.poet_block_publisher.'
                        'LOGGER') as mock_logger:
            block_publisher = \
                poet_block_publisher.PoetBlockPublisher(
                    block_cache=mock_block_cache,
                    state_view_factory=mock_state_view_factory,
                    batch_publisher=mock_batch_publisher,
                    data_dir=self._temp_dir,
                    config_dir=self._temp_dir,
                    validator_id='validator_deadbeef')

            self.assertFalse(
                block_publisher.initialize_block(
                    block_header=mock_block.header))

            # Could be a hack, but verify that the appropriate log message is
            # generated - so we at least have some faith that the failure was
            # because of what we are testing and not something else.  I know
            # that this is fragile if the log message is changed, so would
            # accept any suggestions on a better way to verify that the
            # function fails for the reason we expect.

            (message, *_), _ = mock_logger.info.call_args
            self.assertTrue('Validator has reached maximum number of '
                            'blocks with key pair' in message)

            # check that create.signup_info() was called to renew the set of
            # keys
            self.assertTrue(mock_signup_info.create_signup_info.called)
Example #37
def _private():
    return signing.generate_privkey()
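
A matching helper for the public half, using the same signing module (an
add-on sketch, not part of the original example):

def _public(private_key):
    # Derive the hex-encoded public key for a previously generated
    # private key, as the examples above do with generate_pubkey().
    return signing.generate_pubkey(private_key)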
Example #38
    def _add_batch_after_empty_iteration(self, scheduler):
        """Tests that iterations will continue as result of add_batch().
        This test calls next() on a scheduler iterator in a separate thread
        called the IteratorThread.  The test waits until the IteratorThread
        is waiting in next(); internal to the scheduler, it will be waiting on
        a condition variable as there are no transactions to return and the
        scheduler is not finalized.  Then, the test continues by running
        add_batch(), which should cause the next() running in the
        IteratorThread to return a transaction.
        This demonstrates the scheduler's ability to wait on an empty iterator
        but continue as transactions become available via add_batch.

        This test should work for both a serial and parallel scheduler.
        """
        private_key = signing.generate_privkey()
        public_key = signing.generate_pubkey(private_key)

        # Create a basic transaction and batch.
        txn, _ = create_transaction(
            payload='a'.encode(),
            private_key=private_key,
            public_key=public_key)
        batch = create_batch(
            transactions=[txn],
            private_key=private_key,
            public_key=public_key)

        # This class is used to run the scheduler's iterator.
        class IteratorThread(threading.Thread):
            def __init__(self, iterable):
                threading.Thread.__init__(self)
                self._iterable = iterable
                self.ready = False
                self.condition = threading.Condition()
                self.txn_info = None

            def run(self):
                # Even with this lock here, there is a race condition between
                # exit of the lock and entry into the iterable.  That is solved
                # by sleep later in the test.
                with self.condition:
                    self.ready = True
                    self.condition.notify()
                txn_info = next(self._iterable)
                with self.condition:
                    self.txn_info = txn_info
                    self.condition.notify()

        # This is the iterable we are testing, which we will use in the
        # IteratorThread.  We also use it in this thread below to test
        # for StopIteration.
        iterable = iter(scheduler)

        # Create and startup thread.
        thread = IteratorThread(iterable=iterable)
        thread.start()

        # Pause here to make sure the thread is absolutely as far along as
        # possible; in other words, right before we call next() in its run()
        # method.  When this returns, there should be very little time until
        # the iterator is blocked on a condition variable.
        with thread.condition:
            while not thread.ready:
                thread.condition.wait()

        # May the daemons stay away during this dark time, and may we be
        # forgiven upon our return.
        time.sleep(1)

        # At this point, the IteratorThread should be waiting in next(), so
        # we go ahead and give it a batch.
        scheduler.add_batch(batch)

        # If all goes well, thread.txn_info will get set to the result of the
        # next() call.  If not, the wait will time out and thread.txn_info
        # will remain None.
        with thread.condition:
            if thread.txn_info is None:
                thread.condition.wait(5)

        # If thread.txn_info is still None, the test fails because iteration
        # did not continue after add_batch().
        self.assertIsNotNone(thread.txn_info, "iterable failed to return txn")
        self.assertEqual(txn.payload, thread.txn_info.txn.payload)

        # Continue with normal shutdown/cleanup.
        scheduler.finalize()
        scheduler.set_transaction_execution_result(
            txn.header_signature, False, None)
        with self.assertRaises(StopIteration):
            next(iterable)
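
The behavior exercised above can be distilled to an iterator whose next() blocks on a condition variable until an item arrives or the source is finalized. The BlockingIterator below is a minimal, made-up sketch of that pattern, not Sawtooth's actual scheduler (which additionally tracks execution completion before raising StopIteration):

import threading


class BlockingIterator:
    """Sketch of a scheduler-style iterator: next() waits for work."""

    def __init__(self):
        self._condition = threading.Condition()
        self._items = []
        self._finalized = False

    def add(self, item):
        # Analogous to scheduler.add_batch(): wake any blocked next().
        with self._condition:
            self._items.append(item)
            self._condition.notify()

    def finalize(self):
        # Analogous to scheduler.finalize(): no more items will arrive.
        with self._condition:
            self._finalized = True
            self._condition.notify_all()

    def __iter__(self):
        return self

    def __next__(self):
        with self._condition:
            while not self._items:
                if self._finalized:
                    raise StopIteration()
                self._condition.wait()
            return self._items.pop(0)


it = BlockingIterator()
threading.Timer(0.1, it.add, args=['batch']).start()
print(next(it))  # blocks briefly, then prints 'batch'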
Example #39
    def setUp(self):
        self.private_key = signing.generate_privkey()
        self.public_key = signing.generate_pubkey(self.private_key)
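
The key pair created in setUp() is what later signs transaction and batch headers. A minimal sketch of the sign/verify round trip follows; the import path and the sign()/verify() names are assumptions about the 0.8-era signing module, not confirmed by this excerpt:

import sawtooth_signing as signing  # import path assumed

private_key = signing.generate_privkey()
public_key = signing.generate_pubkey(private_key)

# Sign some serialized header bytes and check the signature against the
# public key (sign() and verify() are assumed API names).
header_bytes = b'serialized-transaction-header'
signature = signing.sign(header_bytes, private_key)
assert signing.verify(header_bytes, signature, public_key)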
Example #40
    def _add_valid_batch_invalid_batch(self, scheduler, context_manager):
        """Tests the squash function: that the correct state hash is found
        at the end of valid and invalid batches, similar to block
        publishing.

        Basically:
            1. Adds three batches, two where all the txns are valid and
               one where one of the txns is invalid.
            2. Runs through the scheduler/executor interaction as txns
               are processed.
            3. Verifies that the state root obtained through the squash
               function is the same as directly updating the merkle tree.
            4. Verifies that the correct batch statuses are set.

        This test should work for both a serial and parallel scheduler.
        """
        private_key = signing.generate_privkey()
        public_key = signing.generate_pubkey(private_key)

        # 1)
        batch_signatures = []
        for names in [['a', 'b'], ['invalid', 'c'], ['d', 'e']]:
            batch_txns = []
            for name in names:
                txn, _ = create_transaction(
                    payload=name.encode(),
                    private_key=private_key,
                    public_key=public_key)

                batch_txns.append(txn)

            batch = create_batch(
                transactions=batch_txns,
                private_key=private_key,
                public_key=public_key)

            batch_signatures.append(batch.header_signature)
            scheduler.add_batch(batch)
        scheduler.finalize()
        # 2)
        sched1 = iter(scheduler)
        invalid_payload = hashlib.sha512('invalid'.encode()).hexdigest()
        while not scheduler.complete(block=False):
            txn_info = next(sched1)
            txn_header = transaction_pb2.TransactionHeader()
            txn_header.ParseFromString(txn_info.txn.header)
            inputs_or_outputs = list(txn_header.inputs)
            c_id = context_manager.create_context(
                state_hash=txn_info.state_hash,
                inputs=inputs_or_outputs,
                outputs=inputs_or_outputs,
                base_contexts=txn_info.base_context_ids)
            if txn_header.payload_sha512 == invalid_payload:
                scheduler.set_transaction_execution_result(
                    txn_info.txn.header_signature, False, None)
            else:
                context_manager.set(c_id, [{inputs_or_outputs[0]: b"1"}])
                scheduler.set_transaction_execution_result(
                    txn_info.txn.header_signature, True, c_id)

        sched2 = iter(scheduler)
        # 3)
        txn_info_a = next(sched2)
        txn_a_header = transaction_pb2.TransactionHeader()
        txn_a_header.ParseFromString(txn_info_a.txn.header)
        inputs_or_outputs = list(txn_a_header.inputs)
        address_a = inputs_or_outputs[0]

        txn_info_b = next(sched2)
        address_b = _get_address_from_txn(txn_info_b)

        # The invalid batch's txns are consumed but contribute no state.
        txn_info_invalid = next(sched2)
        txn_info_c = next(sched2)

        txn_info_d = next(sched2)
        address_d = _get_address_from_txn(txn_info_d)

        txn_info_e = next(sched2)
        address_e = _get_address_from_txn(txn_info_e)

        merkle_database = MerkleDatabase(dict_database.DictDatabase())
        state_root_end = merkle_database.update(
            {address_a: b"1", address_b: b"1",
             address_d: b"1", address_e: b"1"},
            virtual=False)

        # 4)
        batch1_result = scheduler.get_batch_execution_result(
            batch_signatures[0])
        self.assertTrue(batch1_result.is_valid)

        batch2_result = scheduler.get_batch_execution_result(
            batch_signatures[1])
        self.assertFalse(batch2_result.is_valid)

        batch3_result = scheduler.get_batch_execution_result(
            batch_signatures[2])
        self.assertTrue(batch3_result.is_valid)
        self.assertEqual(batch3_result.state_hash, state_root_end)
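
Step 3 works because a merkle state root is a pure function of the address/value pairs written, so the squash function and a direct MerkleDatabase.update() must agree. A small sketch of that determinism; the import paths are assumptions, and it assumes update() returns the new root hash in both modes while virtual=True leaves the stored tree unchanged:

from sawtooth_validator.database import dict_database  # path assumed
from sawtooth_validator.state.merkle import MerkleDatabase  # path assumed

merkle_database = MerkleDatabase(dict_database.DictDatabase())
address = 'ab' * 35  # a 70-hex-character state address

# Computing the root virtually and then persisting the same write
# should yield the identical hash.
root_virtual = merkle_database.update({address: b'1'}, virtual=True)
root_real = merkle_database.update({address: b'1'}, virtual=False)
assert root_virtual == root_real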
Example #41
    def test_block_publisher_finalize_block(
            self, mock_utils, mock_validator_registry_view,
            mock_consensus_state, mock_poet_enclave_factory,
            mock_consensus_state_store, mock_poet_key_state_store,
            mock_signup_info, mock_wait_certificate, mock_poet_settings_view,
            mock_block_wrapper):
        """ Test verifies that PoET Block Publisher finalizes the block,
            meaning that the candidate block is good and should be generated.
        """

        # create a mock_validator_registry_view with
        # get_validator_info that does nothing
        mock_validator_registry_view.return_value.get_validator_info. \
            return_value = \
            ValidatorInfo(
                name='validator_001',
                id='validator_deadbeef',
                signup_info=SignUpInfo(
                    poet_public_key='00112233445566778899aabbccddeeff'))

        # create a mock_wait_certificate that does nothing in check_valid
        my_wait_certificate = mock.Mock()
        my_wait_certificate.check_valid.return_value = None
        mock_wait_certificate.create_wait_certificate.return_value = \
            my_wait_certificate

        # create a mock_consensus_state that returns a mock with
        # the following settings:
        mock_state = MockConsensusState().create_mock_consensus_state()

        mock_consensus_state.consensus_state_for_block_id.return_value = \
            mock_state

        # create mock_batch_publisher
        mock_batch_publisher = mock.Mock(
            identity_signing_key=signing.generate_privkey())

        mock_block_cache = mock.MagicMock()
        mock_state_view_factory = mock.Mock()

        # create mock_block_header with the following fields
        mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
        mock_block.header.signer_pubkey = '90834587139405781349807435098745'
        mock_block.header.previous_block_id = '2'
        mock_block.header.block_num = 1
        mock_block.header.state_root_hash = '6'
        mock_block.header.batch_ids = '4'

        # check test
        block_publisher = \
            poet_block_publisher.PoetBlockPublisher(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                batch_publisher=mock_batch_publisher,
                data_dir=self._temp_dir,
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')

        with mock.patch('sawtooth_poet.poet_consensus.'
                        'poet_block_publisher.json') as _:
            self.assertTrue(
                block_publisher.finalize_block(block_header=mock_block.header))
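
This excerpt has lost the test's decorators: a parameter list like (self, mock_utils, ..., mock_block_wrapper) is produced by a stack of mock.patch decorators, which are applied bottom-up, so the innermost (lowest) decorator supplies the first mock argument after self. A self-contained sketch of that ordering, patching real stdlib names purely for illustration:

import os
import unittest
from unittest import mock


class PatchOrderTest(unittest.TestCase):
    # Decorators apply bottom-up: the patch closest to the function
    # supplies the first mock argument after self.
    @mock.patch('os.getcwd')         # outermost patch -> last argument
    @mock.patch('os.path.exists')    # innermost patch -> first argument
    def test_patch_order(self, mock_exists, mock_getcwd):
        mock_exists.return_value = True
        mock_getcwd.return_value = '/tmp'
        self.assertTrue(os.path.exists('/no/such/path'))
        self.assertEqual('/tmp', os.getcwd())


if __name__ == '__main__':
    unittest.main()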