Example #1
0
    async def _create_rev_reg(self, rr_id: str, rr_size: int = None) -> None:
        """
        Build a new revocation registry plus its tails file for the given
        revocation registry identifier, link the tails file to its revocation
        registry definition via symbolic link, and publish the definition and
        initial entry to the ledger.

        :param rr_id: revocation registry identifier
        :param rr_size: revocation registry size (defaults to 256)
        """

        LOGGER.debug('Issuer._create_rev_reg >>> rr_id: %s, rr_size: %s',
                     rr_id, rr_size)

        if not ok_rev_reg_id(rr_id):
            LOGGER.debug('Issuer._create_rev_reg <!< Bad rev reg id %s', rr_id)
            raise BadIdentifier('Bad rev reg id {}'.format(rr_id))

        if not rr_size:
            rr_size = 256
        (cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id)

        LOGGER.info(
            'Creating revocation registry (capacity %s) for rev reg id %s',
            rr_size, rr_id)

        tails_config = {
            'base_dir': Tails.dir(self._dir_tails, rr_id),
            'uri_pattern': ''
        }
        tails_writer_handle = await blob_storage.open_writer(
            'default', json.dumps(tails_config))

        # Snapshot unlinked tails files so the new one can be identified below
        tails_before = Tails.unlinked(self._dir_tails)
        rr_config = {
            'max_cred_num': rr_size,
            'issuance_type': 'ISSUANCE_ON_DEMAND'
        }
        (rr_id, rrd_json,
         rre_json) = await anoncreds.issuer_create_and_store_revoc_reg(
             self.wallet.handle, self.did, 'CL_ACCUM', tag, cd_id,
             json.dumps(rr_config), tails_writer_handle)

        new_tails = Tails.unlinked(self._dir_tails) - tails_before
        if len(new_tails) != 1:  # exactly one tails file must have appeared
            LOGGER.debug(
                'Issuer._create_rev_reg: <!< Could not create tails file for rev reg id: %s',
                rr_id)
            raise CorruptTails(
                'Could not create tails file for rev reg id {}'.format(rr_id))
        Tails.associate(self._dir_tails, rr_id, basename(new_tails.pop()))

        # Publish rev reg definition to the ledger, then prime the cache
        with REVO_CACHE.lock:
            rrd_req_json = await ledger.build_revoc_reg_def_request(
                self.did, rrd_json)
            await self._sign_submit(rrd_req_json)
            await self._get_rev_reg_def(rr_id)  # add to cache en passant

        # Publish the initial rev reg entry (accumulator state) to the ledger
        rre_req_json = await ledger.build_revoc_reg_entry_request(
            self.did, rr_id, 'CL_ACCUM', rre_json)
        await self._sign_submit(rre_req_json)

        LOGGER.debug('Issuer._create_rev_reg <<<')
Example #2
0
async def test_von_tails(pool_ip, genesis_txn_file, path_cli_ini, cli_ini,
                         path_setnym_ini, setnym_ini):
    """
    Exercise the tails server end to end: start the server, set anchor nyms
    on the ledger, issue revocable credentials to create tails files, then
    drive the sync, multisync, and admin-delete scripts and verify the
    server's listing views after each step.

    All parameters are pytest fixtures: the node pool IP, the genesis
    transaction file, and per-profile paths/contents of tails-client CLI
    and setnym configuration ini files.
    """

    print(
        Ink.YELLOW('\n\n== Testing tails server vs. IP {} =='.format(pool_ip)))

    # Set config for tails clients
    config = {}
    i = 0
    for profile in path_cli_ini:
        cli_config = inis2dict(str(path_cli_ini[profile]))
        config[profile] = cli_config
        with open(path_cli_ini[profile], 'r') as fh_cfg:
            print('\n\n== 0.{} == {} tails sync configuration:\n{}'.format(
                i, profile, fh_cfg.read()))
        i += 1

    # Start tails server
    print('\n\n== 1 == Starting tails server on port {}'.format(
        config['issuer']['Tails Server']['port']))
    tsrv = TailsServer(config['issuer']['Tails Server']['port'])
    started = tsrv.start()
    if not started:
        # a pre-existing server would invalidate the from-scratch assertions below
        print(
            '\n\n== X == Server already running - stop it to run test from scratch'
        )
        assert False

    assert tsrv.is_up()
    print(
        '\n\n== 2 == Started tails server, docker-compose port-forwarded via localhost:{}'
        .format(tsrv.port))
    # NOTE(review): shutdown is defined elsewhere in this module — presumably
    # stops the tails server; registered to run at interpreter exit
    atexit.register(shutdown)

    # Set nyms (operation creates pool if need be)
    i = 0
    setnym_config = {}
    for profile in path_setnym_ini:
        cli_config = inis2dict(str(path_setnym_ini[profile]))
        if profile == 'admin':  # tails server anchor on ledger a priori
            continue
        setnym_config[profile] = cli_config
        with open(path_setnym_ini[profile], 'r') as fh_cfg:
            print('\n\n== 3.{} == {} setnym configuration:\n{}'.format(
                i, profile, fh_cfg.read()))
        # von_anchor_setnym console script publishes the anchor's nym to the ledger
        sub_proc = subprocess.run(
            ['von_anchor_setnym',
             str(path_setnym_ini[profile])],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL)
        # zero return code signifies success
        assert not sub_proc.returncode
        i += 1
    print('\n\n== 4 == Setnym ops completed OK')

    # wallets = {profile: Wallet(setnym_config[profile]['VON Anchor']['name']) for profile in setnym_config}
    # wallets['admin'] = Wallet(config['admin']['VON Anchor']['name'])
    wallets = await get_wallets(
        {
            **{
                profile: setnym_config[profile]['VON Anchor']
                for profile in setnym_config
            }, 'admin': config['admin']['VON Anchor']
        },
        open_all=False)

    # Open pool and anchors, issue creds to create tails files
    async with wallets['issuer'] as w_issuer, (
        wallets['prover']) as w_prover, (NodePool(
            config['issuer']['Node Pool']['name'])) as pool, (RegistrarAnchor(
                w_issuer, pool)) as ian, (OrgBookAnchor(w_prover,
                                                        pool)) as pan:

        # Get nyms from ledger for display
        i = 0
        for an in (ian, pan):
            print('\n\n== 5.{} == {} nym on ledger: {}'.format(
                i, an.wallet.name, ppjson(await an.get_nym())))
            i += 1

        # Publish schema to ledger
        S_ID = schema_id(ian.did, 'rainbow', '{}.0'.format(int(time())))
        schema_data = {
            'name': schema_key(S_ID).name,
            'version': schema_key(S_ID).version,
            'attr_names': ['numeric', 'sha256']
        }

        S_KEY = schema_key(S_ID)
        try:
            await ian.get_schema(S_KEY)  # may exist (almost certainly not)
        except AbsentSchema:
            await ian.send_schema(json.dumps(schema_data))
        schema_json = await ian.get_schema(S_KEY)
        schema = json.loads(schema_json)
        print('\n\n== 6 == SCHEMA [{} v{}]: {}'.format(S_KEY.name,
                                                       S_KEY.version,
                                                       ppjson(schema)))
        assert schema  # should exist now

        # Setup link secret for creation of cred req or proof
        await pan.create_link_secret('LinkSecret')

        # Issuer anchor create, store, publish cred definitions to ledger; create cred offers
        await ian.send_cred_def(S_ID, revo=True)

        cd_id = cred_def_id(S_KEY.origin_did, schema['seqNo'], pool.protocol)

        # revocable cred def creation must have produced and linked a tails file
        assert ((not Tails.unlinked(ian.dir_tails)) and
                [f for f in Tails.links(ian.dir_tails, ian.did) if cd_id in f])

        cred_def_json = await ian.get_cred_def(cd_id)  # ought to exist now
        cred_def = json.loads(cred_def_json)
        print('\n\n== 7.0 == Cred def [{} v{}]: {}'.format(
            S_KEY.name, S_KEY.version, ppjson(json.loads(cred_def_json))))
        assert cred_def.get('schemaId', None) == str(schema['seqNo'])

        cred_offer_json = await ian.create_cred_offer(schema['seqNo'])
        cred_offer = json.loads(cred_offer_json)
        print('\n\n== 7.1 == Credential offer [{} v{}]: {}'.format(
            S_KEY.name, S_KEY.version, ppjson(cred_offer_json)))

        (cred_req_json, cred_req_metadata_json) = await pan.create_cred_req(
            cred_offer_json, cd_id)
        cred_req = json.loads(cred_req_json)
        print('\n\n== 8 == Credential request [{} v{}]: metadata {}, cred {}'.
              format(S_KEY.name, S_KEY.version, ppjson(cred_req_metadata_json),
                     ppjson(cred_req_json)))
        assert json.loads(cred_req_json)

        # Issuer anchor issues creds and stores at HolderProver: get cred req, create cred, store cred
        cred_data = []

        CREDS = 450  # enough to build 4 rev regs
        print('\n\n== 9 == creating and storing {} credentials:'.format(CREDS))
        for number in range(CREDS):
            (cred_json, _) = await ian.create_cred(
                cred_offer_json, cred_req_json, {
                    'numeric': str(number),
                    'sha256': sha256(str(number).encode()).hexdigest(),
                })

            cred_id = await pan.store_cred(cred_json, cred_req_metadata_json)
            # progress dots; full count printed every 100 creds
            print('.',
                  end='' if (number + 1) % 100 else '{}\n'.format(number + 1),
                  flush=True)

        # Exercise list view, least to most specific
        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert not r.json()  # nothing synced to the server yet
        rr_ids_up = {
            basename(link)
            for link in Tails.links(ian.dir_tails, ian.did)
        }
        for rr_id in rr_ids_up:
            url = url_for(tsrv.port, 'tails/list/{}'.format(rr_id))
            r = requests.get(url)
            assert r.status_code == 200
            assert not r.json()
        print(
            '\n\n== 10 == All listing views at server come back OK and empty as expected'
        )

        # Issuer sync script uploads local tails files to the tails server
        rv = pexpect.run('python ../src/sync/sync.py {}'.format(
            path_cli_ini['issuer']))
        print('\n\n== 11 == Issuer sync uploaded local tails files')

        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert {rr for rr in r.json()} == rr_ids_up
        for rr_id in rr_ids_up:
            url = url_for(tsrv.port, 'tails/list/{}'.format(rr_id))
            r = requests.get(url)
            assert r.status_code == 200
            assert r.json() == [rr_id]  # list with one rr_id should come back

        # Exercise list view, least to most specific
        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert len(r.json()) == len(rr_ids_up)
        print(
            '\n\n== 12 == All listing views at server come back OK with {} uploaded files'
            .format(len(rr_ids_up)))

        # Prover sync script downloads tails files from the tails server
        rv = pexpect.run('python ../src/sync/sync.py {}'.format(
            path_cli_ini['prover']))
        print('\n\n== 13 == Prover sync downloaded remote tails files')

        rr_ids_down = {
            basename(link)
            for link in Tails.links(
                config['prover']['Tails Client']['tails.dir'], ian.did)
        }
        assert rr_ids_down == rr_ids_up

        # Exercise admin-delete
        rv = pexpect.run('python ../src/admin/delete.py {} all'.format(
            path_cli_ini['admin']))
        print('\n\n== 14 == Admin called for deletion at tails server')

        # Check tails server deletion
        url = url_for(tsrv.port, 'tails/list/all')
        r = requests.get(url)
        assert r.status_code == 200
        assert not r.json()
        print(
            '\n\n== 15 == All listing views at server come back OK and empty as expected'
        )

        # multisync with 1 iteration re-uploads the (still local) tails files
        rv = pexpect.run('python ../src/sync/multisync.py 1 {}'.format(
            path_cli_ini['issuer']))
        print(
            '\n\n== 16 == Issuer multisync on 1 sync iteration uploaded local tails files'
        )

        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert {rr for rr in r.json()} == rr_ids_up
        for rr_id in rr_ids_up:
            url = url_for(tsrv.port, 'tails/list/{}'.format(rr_id))
            r = requests.get(url)
            assert r.status_code == 200
            assert r.json() == [rr_id]  # list with one rr_id should come back

        # Exercise list view, least to most specific
        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert len(r.json()) == len(rr_ids_up)
        print(
            '\n\n== 17 == All listing views at server come back OK with {} uploaded files'
            .format(len(rr_ids_up)))

        # Remove tails server anchor wallet
        await wallets['admin'].remove()
        print('\n\n== 18 == Removed admin (tails server anchor {}) wallet'.
              format(wallets['admin'].name))
Example #3
0
async def test_anchors_tails_load(
        pool_name,
        pool_genesis_txn_data,
        seed_trustee1):
    """
    Load-test tails file generation: issue enough revocable credentials to
    roll over several revocation registries, timing credential creation per
    registry, with the revocation registry builder running in an external
    process (rrbx=True).

    :param pool_name: name of node pool ledger configuration (fixture)
    :param pool_genesis_txn_data: genesis transaction data (fixture)
    :param seed_trustee1: seed for trustee anchor wallet (fixture)
    """

    rrbx = True  # True: rev reg builder runs in its own (external) process
    print(Ink.YELLOW('\n\n== Load-testing tails on {}ternal rev reg builder ==').format("ex" if rrbx else "in"))

    await RevRegBuilder.stop(WALLET_NAME)  # in case of re-run

    # Set up node pool ledger config and wallets, open pool, init anchors
    p_mgr = NodePoolManager()
    if pool_name not in await p_mgr.list():
        await p_mgr.add_config(pool_name, pool_genesis_txn_data)
    pool = p_mgr.get(pool_name)
    await pool.open()

    w_mgr = WalletManager()
    wallets = {
        'trustee-anchor': {
            'seed': seed_trustee1,
            'storage_type': None,
            'config': None,
            'access_creds': None
        },
        WALLET_NAME: {
            'seed': 'Superstar-Anchor-000000000000000',
            'storage_type': None,
            'config': None,
            'access_creds': {
                'key': 'rrbx-test'
            }
        }
    }
    # Create wallets if absent, re-use if extant; open them either way
    for (name, wdata) in wallets.items():
        try:
            wdata['wallet'] = await w_mgr.create({
                'id': name,
                'seed': wdata['seed']
            })
        except ExtantWallet:
            wdata['wallet'] = w_mgr.get({'id': name})
        finally:
            await wdata['wallet'].open()

    tan = TrusteeAnchor(wallets['trustee-anchor']['wallet'], pool)
    # NOTE(review): rrbx_prox() presumably counts running rev reg builder
    # processes — baseline taken before starting the org hub anchor; confirm
    no_prox = rrbx_prox()
    san = OrgHubAnchor(wallets[WALLET_NAME]['wallet'], pool, rrbx=rrbx)
    if rrbx:
        # beep() appears to wait/poll for the external builder to come up — TODO confirm
        await beep('external rev reg builder process on {}'.format(WALLET_NAME), 15)
        if rrbx_prox() != no_prox + 1:
            await RevRegBuilder.stop(WALLET_NAME)
            assert False, "External rev reg builder process did not start"
        async with OrgHubAnchor(
                wallets[WALLET_NAME]['wallet'],
                pool,
                rrbx=rrbx):  # check for exactly 1 external rev reg builder process
            await beep('external rev reg builder process uniqueness test on {}'.format(WALLET_NAME), 5)
            if rrbx_prox() != no_prox + 1:
                await RevRegBuilder.stop(WALLET_NAME)
                assert False, "External rev reg builder process was not unique"

    assert pool.handle

    await tan.open()
    await san.open()

    # Publish anchor particulars to ledger if not yet present
    for an in (tan, san):
        if not json.loads(await tan.get_nym(an.did)):
            await tan.send_nym(an.did, an.verkey, an.wallet.name, an.least_role())

    nyms = {
        'tan': json.loads(await tan.get_nym(tan.did)),
        'san': json.loads(await tan.get_nym(san.did))
    }
    print('\n\n== 1 == nyms: {}'.format(ppjson(nyms)))

    for k in nyms:
        assert 'dest' in nyms[k]

    # Publish schema to ledger if not yet present; get from ledger
    S_ID = schema_id(san.did, 'tails_load', '{}.0'.format(int(time.time())))
    S_KEY = schema_key(S_ID)

    schema_data = {
        'name': schema_key(S_ID).name,
        'version': schema_key(S_ID).version,
        'attr_names': [
            'number',
            'remainder'
        ]
    }

    try:
        await san.get_schema(S_KEY)  # may exist (almost certainly not)
    except AbsentSchema:
        await san.send_schema(json.dumps(schema_data))
    schema_json = await san.get_schema(S_KEY)
    schema = json.loads(schema_json)
    assert schema  # should exist now
    print('\n\n== 2 == SCHEMA [{} v{}]: {}'.format(S_KEY.name, S_KEY.version, ppjson(schema)))

    # Setup link secret for creation of cred req or proof
    await san.create_link_secret('LinkSecret')

    # SRI anchor create, store, publish cred definitions to ledger; create cred offers
    await san.send_cred_def(S_ID, revo=True)
    cd_id = cred_def_id(S_KEY.origin_did, schema['seqNo'], pool.protocol)

    # revocable cred def creation must have produced and linked a tails file
    assert ((not Tails.unlinked(san.dir_tails)) and
        [f for f in Tails.links(san.dir_tails, san.did) if cd_id in f])

    cred_def_json = await san.get_cred_def(cd_id)  # ought to exist now
    cred_def = json.loads(cred_def_json)
    print('\n\n== 3.0 == Cred def [{} v{}]: {}'.format(
        S_KEY.name,
        S_KEY.version,
        ppjson(json.loads(cred_def_json))))
    assert cred_def.get('schemaId', None) == str(schema['seqNo'])

    cred_offer_json = await san.create_cred_offer(schema['seqNo'])
    print('\n\n== 3.1 == Credential offer [{} v{}]: {}'.format(
        S_KEY.name,
        S_KEY.version,
        ppjson(cred_offer_json)))

    (cred_req_json, cred_req_metadata_json) = await san.create_cred_req(cred_offer_json, cd_id)
    print('\n\n== 4 == Credential request [{} v{}]: metadata {}, cred-req {}'.format(
        S_KEY.name,
        S_KEY.version,
        ppjson(cred_req_metadata_json),
        ppjson(cred_req_json)))
    assert json.loads(cred_req_json)

    # BC Reg anchor (as Issuer) issues creds and stores at HolderProver: get cred req, create cred, store cred
    CREDS = 4034  # enough to kick off rev reg on size 4096 and issue two creds in it: 1 needing set-rev-reg, 1 not
    print('\n\n== 5 == creating {} credentials'.format(CREDS))
    swatch = Stopwatch(2)
    optima = {}  # per rev-reg, fastest/slowest pairs
    for number in range(CREDS):
        swatch.mark()
        (cred_json, _) = await san.create_cred(
            cred_offer_json,
            cred_req_json,
            {
                'number': str(number),
                'remainder': str(number % 100)
            })
        elapsed = swatch.mark()
        # tag identifies the rev reg currently accepting issuance for this cred def
        tag = rev_reg_id2tag(Tails.current_rev_reg_id(san.dir_tails, cd_id))
        if tag not in optima:
            optima[tag] = (elapsed, elapsed)
        else:
            optima[tag] = (min(optima[tag][0], elapsed), max(optima[tag][1], elapsed))
        print('.', end='', flush=True)
        if ((number + 1) % 100) == 0:
            print('{}: #{}: {:.2f}-{:.2f}s'.format(number + 1, tag, *optima[tag]), flush=True)

        assert json.loads(cred_json)
    # final (possibly partial-hundred) timing line for the last rev reg
    print('{}: #{}: {:.2f}-{:.2f}s'.format(number + 1, tag, *optima[tag]), flush=True)

    print('\n\n== 6 == best, worst times by revocation registry: {}'.format(ppjson(optima)))
    assert (not rrbx) or (max(optima[tag][1] for tag in optima) <
        4 * min(optima[tag][1] for tag in optima if int(tag) > 0))  # if waiting on rr beyond #0, sizes increase as 2^n

    # Tear down: close anchors, stop external builder, close wallets and pool
    await san.close()
    if rrbx:
        await RevRegBuilder.stop(WALLET_NAME)
    await tan.close()
    for (name, wdata) in wallets.items():
        await wdata['wallet'].close()
    await pool.close()
Example #4
0
    async def create_rev_reg(self, rr_id: str, rr_size: int = None) -> None:
        """
        Create revocation registry artifacts and new tails file (with association to
        corresponding revocation registry identifier via symbolic link name)
        for input revocation registry identifier. Symbolic link presence signals completion.
        If revocation registry builder operates in a process external to its Issuer's,
        target directory is hopper directory.

        Raise WalletState for closed wallet.

        :param rr_id: revocation registry identifier
        :param rr_size: revocation registry size (defaults to 64)
        """

        LOGGER.debug('RevRegBuilder.create_rev_reg >>> rr_id: %s, rr_size: %s',
                     rr_id, rr_size)

        if not self.wallet.handle:
            LOGGER.debug(
                'RevRegBuilder.create_rev_reg <!< Wallet %s is closed',
                self.name)
            raise WalletState('Wallet {} is closed'.format(self.name))

        if not ok_rev_reg_id(rr_id):
            LOGGER.debug('RevRegBuilder.create_rev_reg <!< Bad rev reg id %s',
                         rr_id)
            raise BadIdentifier('Bad rev reg id {}'.format(rr_id))

        rr_size = rr_size or 64

        (cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id)

        dir_tails = self.dir_tails_top(rr_id)
        dir_target = self.dir_tails_target(rr_id)
        if self.external:
            # external builder: the target (hopper) dir must not pre-exist;
            # a leftover dir with no task in progress means a prior build
            # died part-way, so start over from a clean directory
            try:
                makedirs(dir_target, exist_ok=False)
            except FileExistsError:
                LOGGER.warning(
                    'RevRegBuilder.create_rev_reg found dir %s, but task not in progress: rebuilding rev reg %s',
                    dir_target, rr_id)
                rmtree(dir_target)
                makedirs(dir_target, exist_ok=False)

        LOGGER.info(
            'Creating revocation registry (capacity %s) for rev reg id %s',
            rr_size, rr_id)
        tails_writer_handle = await blob_storage.open_writer(
            'default', json.dumps({
                'base_dir': dir_target,
                'uri_pattern': ''
            }))

        # ISSUANCE_BY_DEFAULT: credentials count as issued without individual
        # accumulator updates (contrast ISSUANCE_ON_DEMAND)
        (created_rr_id, rr_def_json,
         rr_ent_json) = await anoncreds.issuer_create_and_store_revoc_reg(
             self.wallet.handle, self.did, 'CL_ACCUM', tag, cd_id,
             json.dumps({
                 'max_cred_num': rr_size,
                 'issuance_type': 'ISSUANCE_BY_DEFAULT'
             }), tails_writer_handle)

        # the sole unlinked file in dir_target is the tails file just written
        tails_hash = basename(Tails.unlinked(dir_target).pop())
        # persist rev reg def and initial entry for the consumer to pick up
        with open(join(dir_target, 'rr_def.json'), 'w') as rr_def_fh:
            print(rr_def_json, file=rr_def_fh)
        with open(join(dir_target, 'rr_ent.json'), 'w') as rr_ent_fh:
            print(rr_ent_json, file=rr_ent_fh)
        Tails.associate(
            dir_tails, created_rr_id,
            tails_hash)  # associate last: symlink signals completion