Example 1
async def list_tails(request, ident):
    """
    List tails files by corresponding rev reg ids: all, by rev reg id, by cred def id, or by issuer DID.

    :param request: Sanic request structure
    :param ident: 'all' for no filter; rev reg id, cred def id, or issuer DID to filter by any such identifier
    :return: JSON array of rev reg ids corresponding to tails files available
    """

    rv = []
    dir_tails = pjoin(dirname(dirname(abspath(__file__))), 'tails')

    if ident == 'all':  # list everything: 'all' is not valid base58 so it can't be any case below
        rv = [basename(link) for link in Tails.links(dir_tails)]
    elif ok_rev_reg_id(ident) and Tails.linked(dir_tails, ident):  # it's a rev reg id
        rv = [ident]
    elif ok_cred_def_id(ident):  # it's a cred def id (starts with issuer DID)
        rv = [basename(link) for link in Tails.links(dir_tails, ident.split(':')[0])
            if rev_reg_id2cred_def_id(basename(link)) == ident]
    elif ok_did(ident):  # it's an issuer DID
        rv = [basename(link) for link in Tails.links(dir_tails, ident)]
    else:
        LOGGER.error("Token %s must be 'all', rev reg id, cred def id, or issuer DID", ident)
        raise InvalidUsage("Token {} must be 'all', rev reg id, cred def id, or issuer DID".format(ident))

    LOGGER.info('Fulfilling GET request listing tails files on filter %s', ident)
    return response.json(rv)
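A minimal client-side sketch of exercising this endpoint, assuming it is routed at GET /tails/list/<ident> (the route decorator is not shown in the excerpt) and that host and port point at a running tails server:

import requests

def get_tails_list(host: str, port: int, ident: str = 'all') -> list:
    # ident mirrors the handler above: 'all', a rev reg id, a cred def id, or an issuer DID
    resp = requests.get('http://{}:{}/tails/list/{}'.format(host, port, ident))
    resp.raise_for_status()  # InvalidUsage surfaces as an HTTP error status
    return resp.json()  # JSON array of rev reg ids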
Example 2
async def list_tails(request: Request, ident: str) -> HTTPResponse:
    """
    List tails files by corresponding rev reg ids: all, by rev reg id, by cred def id, or by issuer DID.

    :param request: Sanic request structure
    :param ident: 'all' for no filter; rev reg id, cred def id, or issuer DID to filter by any such identifier
    :return: HTTP response with JSON array of rev reg ids corresponding to available tails files
    """

    rv = []
    dir_tails = join(dirname(dirname(realpath(__file__))), 'tails')

    if ident == 'all':  # list everything: 'all' is not valid base58 so it can't be any case below
        rv = [basename(link) for link in Tails.links(dir_tails)]
    elif ok_rev_reg_id(ident):  # it's a rev reg id
        if Tails.linked(dir_tails, ident):
            rv = [ident]
    elif ok_cred_def_id(ident):  # it's a cred def id (starts with issuer DID)
        rv = [basename(link) for link in Tails.links(dir_tails, ident.split(':')[0])
            if rev_reg_id2cred_def_id(basename(link)) == ident]
    elif ok_did(ident):  # it's an issuer DID
        rv = [basename(link) for link in Tails.links(dir_tails, ident)]
    else:
        LOGGER.error('Token %s is not a valid specifier for tails files', ident)
        return response.text('Token {} is not a valid specifier for tails files'.format(ident), status=400)

    LOGGER.info('Fulfilling GET request listing tails files on filter %s', ident)
    return response.json(rv)
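This variant reports a bad token with an HTTP 400 response rather than raising InvalidUsage, so a client can branch on the status code directly; a sketch, with host, port, and the bad token all illustrative:

import requests

resp = requests.get('http://localhost:6543/tails/list/not!base58')  # port and token illustrative
if resp.status_code == 400:
    print('Bad specifier: {}'.format(resp.text))
else:
    print('Rev reg ids: {}'.format(resp.json()))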
Example 3
    async def close(self) -> None:
        """
        Explicit exit. If so configured, populate the cache for proof of any creds on
        schemata, cred defs, and rev regs marked of interest in configuration at
        initialization; then archive the cache and purge prior cache archives.
        """

        LOGGER.debug('OrgHubAnchor.close >>>')

        archive_caches = False
        if self.cfg.get('archive-holder-prover-caches-on-close', False):
            archive_caches = True
            await self.load_cache_for_proof(False)
        if self.cfg.get('archive-verifier-caches-on-close', {}):
            archive_caches = True
            await self.load_cache_for_verification(False)
        if archive_caches:
            Caches.archive(self.dir_cache)
            Caches.purge_archives(self.dir_cache, True)

        await self.wallet.close()
        # Do not close pool independently: let relying party decide when to go on-line and off-line

        for path_rr_id in Tails.links(self._dir_tails):
            rr_id = basename(path_rr_id)
            try:
                await self._sync_revoc(rr_id)
            except ClosedPool:
                LOGGER.warning('OrgHubAnchor sync-revoc on close required ledger for %s but pool was closed', rr_id)

        LOGGER.debug('OrgHubAnchor.close <<<')
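Example 11 below drives anchors as async context managers, which is the natural way to guarantee this close() runs; a sketch, assuming wallet and pool are constructed as in that example (the 'config' kwarg name is an assumption, not confirmed by the excerpt):

# Sketch only: wallet and pool as in Example 11; 'config' kwarg name is an assumption
async with OrgHubAnchor(
        wallet,
        pool,
        config={'archive-holder-prover-caches-on-close': True}) as orghub:
    ...  # prove, verify, issue as needed
# exiting the block invokes close(), archiving caches and syncing rev regs as above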
Example 4
def survey(dir_tails: str,
           host: str,
           port: int,
           issuer_did: str = None) -> tuple:
    """
    Return tuple with paths to local tails symbolic links (revocation registry identifiers) and
    revocation registry identifiers of interest on tails server.

    Raise ConnectionError on connection failure.

    :param dir_tails: local tails directory
    :param host: tails server host
    :param port: tails server port
    :param issuer_did: issuer DID of interest for local and remote tails file survey (default all)
    :return: pair (paths to local tails links, rev reg ids on the tails server)
    """

    loc = Tails.links(dir_tails, issuer_did)
    url = 'http://{}:{}/tails/list/{}'.format(
        host, port, issuer_did if issuer_did else 'all')
    resp = requests.get(url)
    rem = set(resp.json())

    logging.debug('Survey: local=%s, remote=%s', ppjson(loc), ppjson(rem))
    return (loc, rem)
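A sync routine would typically diff the two sides of the returned pair to decide what to move where; a minimal sketch, assuming dir_tails, host, and port come from configuration:

from os.path import basename

(loc_paths, rem) = survey(dir_tails, host, port)
loc = {basename(p) for p in loc_paths}  # local rev reg ids, from tails link paths
to_upload = loc - rem    # tails files present locally but absent on the server
to_download = rem - loc  # tails files on the server but absent locally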
Example 5
    async def close(self) -> None:
        """
        Explicit exit. If so configured, populate the cache for proof of any creds on
        schemata, cred defs, and rev regs marked of interest in configuration at
        initialization; then archive the cache and purge prior cache archives.
        """

        LOGGER.debug('OrgHubAnchor.close >>>')

        archive_caches = False
        if self.config.get('archive-holder-prover-caches-on-close', False):
            archive_caches = True
            try:
                await self.load_cache_for_proof(False)
            except WalletState:
                LOGGER.warning(
                    'OrgHubAnchor load cache for proof on close required open wallet %s but it was closed',
                    self.name)
        if self.config.get('archive-verifier-caches-on-close', {}):
            archive_caches = True
            await self.load_cache_for_verification(False)
        if archive_caches:
            ArchivableCaches.archive(self.dir_cache)
            ArchivableCaches.purge_archives(self.dir_cache, True)

        # Do not close wallet independently: allow for sharing open wallet over many anchor lifetimes
        # Do not close pool independently: let relying party decide when to go on-line and off-line

        for path_rr_id in Tails.links(self._dir_tails):
            rr_id = basename(path_rr_id)
            await HolderProver._sync_revoc_for_proof(self, rr_id)  # warns for closed pool

        LOGGER.debug('OrgHubAnchor.close <<<')
Example 6
    async def get_box_ids_issued(self) -> str:
        """
        Return json object with lists of all unique box identifiers (schema identifiers,
        credential definition identifiers, and revocation registry identifiers) for
        all credential definitions and credentials issued; e.g.,

        ::

            {
                "schema_id": [
                    "R17v42T4pk...:2:tombstone:1.2",
                    ...
                ],
                "cred_def_id": [
                    "R17v42T4pk...:3:CL:19:tag",
                    ...
                ],
                "rev_reg_id": [
                    "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:0",
                    "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:1",
                    ...
                ]
            }

        An issuer must issue a credential definition to include its schema identifier
        in the returned values; the schema identifier in isolation belongs properly
        to an Origin, not necessarily to an Issuer.

        The operation may be useful for a Verifier anchor going off-line to seed its
        cache before doing so.

        :return: json object with lists of schema ids, cred def ids, and rev reg ids
        """

        LOGGER.debug('Issuer.get_box_ids_issued >>>')

        cd_ids = [d for d in listdir(self._dir_tails)
            if isdir(join(self._dir_tails, d)) and ok_cred_def_id(d, self.did)]
        s_ids = []
        for cd_id in cd_ids:
            try:
                s_ids.append(json.loads(await self.get_schema(cred_def_id2seq_no(cd_id)))['id'])
            except AbsentSchema:
                LOGGER.error(
                    'Issuer %s has issued cred def %s but no corresponding schema on ledger',
                    self.wallet.name,
                    cd_id)
        rr_ids = [basename(link) for link in Tails.links(self._dir_tails, self.did)]

        rv = json.dumps({
            'schema_id': s_ids,
            'cred_def_id': cd_ids,
            'rev_reg_id': rr_ids
        })
        LOGGER.debug('Issuer.get_box_ids_issued <<< %s', rv)
        return rv
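Per the docstring, a Verifier anchor might use these box ids to seed its cache before going off-line; a minimal sketch of consuming the return value (the issuer parameter name is illustrative):

import json

async def issued_rev_reg_ids(issuer) -> list:
    # issuer: an open Issuer anchor (illustrative)
    box_ids = json.loads(await issuer.get_box_ids_issued())
    return box_ids['rev_reg_id']  # also available: 'schema_id', 'cred_def_id'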
Example 7
async def delete_tails(request, ident):
    """
    Delete tails files by corresponding rev reg ids: all, by rev reg id, by cred def id, or by issuer DID.

    :param request: Sanic request structure
    :param ident: 'all' for no filter; rev reg id, cred def id, or issuer DID to filter by any such identifier
    :return: empty text string
    """

    dir_tails = pjoin(dirname(dirname(abspath(__file__))), 'tails')

    if ident == 'all':  # delete everything: 'all' is not valid base58 so it can't be any case below
        rmtree(dir_tails)
        makedirs(dir_tails, exist_ok=True)

    elif ok_rev_reg_id(ident):  # it's a rev reg id
        path_tails = Tails.linked(dir_tails, ident)
        if path_tails and isfile(path_tails):
            unlink(path_tails)
            LOGGER.info('Deleted %s', path_tails)
        path_link = pjoin(Tails.dir(dir_tails, ident), ident)
        if path_link and islink(path_link):
            unlink(path_link)
            LOGGER.info('Deleted %s', path_link)

    elif ok_cred_def_id(ident):  # it's a cred def id (starts with issuer DID)
        dir_cd_id = pjoin(dir_tails, ident)
        if isdir(dir_cd_id):
            rmtree(dir_cd_id)
            LOGGER.info('Deleted %s', dir_cd_id)
        elif exists(dir_cd_id):  # non-dir is squatting on name reserved for dir: it's corrupt; remove it
            unlink(dir_cd_id)
            LOGGER.info('Deleted spurious non-directory %s', dir_cd_id)

    elif ok_did(ident):  # it's an issuer DID
        dirs_cd_id = {dirname(link) for link in Tails.links(dir_tails, ident)}
        for dir_cd_id in dirs_cd_id:
            if ok_cred_def_id(basename(dir_cd_id)):
                if isdir(dir_cd_id):
                    rmtree(dir_cd_id)
                    LOGGER.info('Deleted %s', dir_cd_id)
                elif exists(dir_cd_id):  # non-dir is squatting on name reserved for dir: it's corrupt; remove it
                    unlink(dir_cd_id)
                    LOGGER.info('Deleted spurious non-directory %s', dir_cd_id)

    else:
        LOGGER.error("Token %s must be 'all', rev reg id, cred def id, or issuer DID", ident)
        raise InvalidUsage("Token {} must be 'all', rev reg id, cred def id, or issuer DID".format(ident))

    LOGGER.info('Fulfilled DELETE request deleting tails files on filter %s', ident)
    return response.text('')
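Assuming this handler is routed at DELETE /tails/delete/<ident> (the route decorator is not shown), a client call might look like the following; host and port are illustrative:

import requests

resp = requests.delete('http://localhost:6543/tails/delete/all')  # path is an assumption
resp.raise_for_status()  # InvalidUsage surfaces as an HTTP error status
assert resp.text == ''   # handler returns an empty text response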
Example 8
    async def open(self) -> 'Issuer':
        """
        Explicit entry. Perform ancestor opening operations,
        then synchronize revocation registry to tails tree content.

        :return: current object
        """

        LOGGER.debug('Issuer.open >>>')

        await super().open()
        for path_rr_id in Tails.links(self._dir_tails, self.did):
            await self._sync_revoc(basename(path_rr_id))

        LOGGER.debug('Issuer.open <<<')
        return self
Example 9
def survey(dir_tails, host, port):
    """
    Return tuple with paths to local tails symbolic links (revocation registry identifiers) and
    revocation registry identifiers that remote server holds.

    Raise ConnectionError on connection failure.

    :param dir_tails: local root tails directory
    :param host: tails server host
    :param port: tails server port
    :return: pair (paths to local tails links, rev reg ids on the tails server)
    """

    loc = Tails.links(dir_tails)
    url = 'http://{}:{}/tails/list/all'.format(host, port)
    resp = requests.get(url)
    rem = set(resp.json())

    logging.debug('Survey: local=%s, remote=%s', loc, rem)
    return (loc, rem)
Example 10
async def test_von_tails(pool_ip, genesis_txn_file, path_cli_ini, cli_ini,
                         path_setnym_ini, setnym_ini):

    print(
        Ink.YELLOW('\n\n== Testing tails server vs. IP {} =='.format(pool_ip)))

    # Set config for tails clients
    config = {}
    i = 0
    for profile in path_cli_ini:
        cli_config = inis2dict(str(path_cli_ini[profile]))
        config[profile] = cli_config
        with open(path_cli_ini[profile], 'r') as fh_cfg:
            print('\n\n== 0.{} == {} tails sync configuration:\n{}'.format(
                i, profile, fh_cfg.read()))
        i += 1

    # Start tails server
    print('\n\n== 1 == Starting tails server on port {}'.format(
        config['issuer']['Tails Server']['port']))
    tsrv = TailsServer(config['issuer']['Tails Server']['port'])
    started = tsrv.start()
    if not started:
        print(
            '\n\n== X == Server already running - stop it to run test from scratch'
        )
        assert False

    assert tsrv.is_up()
    print(
        '\n\n== 2 == Started tails server, docker-compose port-forwarded via localhost:{}'
        .format(tsrv.port))
    atexit.register(shutdown)

    # Set nyms (operation creates pool if need be)
    i = 0
    setnym_config = {}
    for profile in path_setnym_ini:
        cli_config = inis2dict(str(path_setnym_ini[profile]))
        if profile == 'admin':  # tails server anchor on ledger a priori
            continue
        setnym_config[profile] = cli_config
        with open(path_setnym_ini[profile], 'r') as fh_cfg:
            print('\n\n== 3.{} == {} setnym configuration:\n{}'.format(
                i, profile, fh_cfg.read()))
        sub_proc = subprocess.run(
            ['von_anchor_setnym',
             str(path_setnym_ini[profile])],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL)
        assert not sub_proc.returncode
        i += 1
    print('\n\n== 4 == Setnym ops completed OK')

    # wallets = {profile: Wallet(setnym_config[profile]['VON Anchor']['name']) for profile in setnym_config}
    # wallets['admin'] = Wallet(config['admin']['VON Anchor']['name'])
    wallets = await get_wallets(
        {
            **{
                profile: setnym_config[profile]['VON Anchor']
                for profile in setnym_config
            }, 'admin': config['admin']['VON Anchor']
        },
        open_all=False)

    # Open pool and anchors, issue creds to create tails files
    async with wallets['issuer'] as w_issuer, (
        wallets['prover']) as w_prover, (NodePool(
            config['issuer']['Node Pool']['name'])) as pool, (RegistrarAnchor(
                w_issuer, pool)) as ian, (OrgBookAnchor(w_prover,
                                                        pool)) as pan:

        # Get nyms from ledger for display
        i = 0
        for an in (ian, pan):
            print('\n\n== 5.{} == {} nym on ledger: {}'.format(
                i, an.wallet.name, ppjson(await an.get_nym())))
            i += 1

        # Publish schema to ledger
        S_ID = schema_id(ian.did, 'rainbow', '{}.0'.format(int(time())))
        schema_data = {
            'name': schema_key(S_ID).name,
            'version': schema_key(S_ID).version,
            'attr_names': ['numeric', 'sha256']
        }

        S_KEY = schema_key(S_ID)
        try:
            await ian.get_schema(S_KEY)  # may exist (almost certainly not)
        except AbsentSchema:
            await ian.send_schema(json.dumps(schema_data))
        schema_json = await ian.get_schema(S_KEY)
        schema = json.loads(schema_json)
        print('\n\n== 6 == SCHEMA [{} v{}]: {}'.format(S_KEY.name,
                                                       S_KEY.version,
                                                       ppjson(schema)))
        assert schema  # should exist now

        # Setup link secret for creation of cred req or proof
        await pan.create_link_secret('LinkSecret')

        # Issuer anchor create, store, publish cred definitions to ledger; create cred offers
        await ian.send_cred_def(S_ID, revo=True)

        cd_id = cred_def_id(S_KEY.origin_did, schema['seqNo'], pool.protocol)

        assert ((not Tails.unlinked(ian.dir_tails)) and
                [f for f in Tails.links(ian.dir_tails, ian.did) if cd_id in f])

        cred_def_json = await ian.get_cred_def(cd_id)  # ought to exist now
        cred_def = json.loads(cred_def_json)
        print('\n\n== 7.0 == Cred def [{} v{}]: {}'.format(
            S_KEY.name, S_KEY.version, ppjson(json.loads(cred_def_json))))
        assert cred_def.get('schemaId', None) == str(schema['seqNo'])

        cred_offer_json = await ian.create_cred_offer(schema['seqNo'])
        cred_offer = json.loads(cred_offer_json)
        print('\n\n== 7.1 == Credential offer [{} v{}]: {}'.format(
            S_KEY.name, S_KEY.version, ppjson(cred_offer_json)))

        (cred_req_json, cred_req_metadata_json) = await pan.create_cred_req(
            cred_offer_json, cd_id)
        cred_req = json.loads(cred_req_json)
        print('\n\n== 8 == Credential request [{} v{}]: metadata {}, cred {}'.
              format(S_KEY.name, S_KEY.version, ppjson(cred_req_metadata_json),
                     ppjson(cred_req_json)))
        assert json.loads(cred_req_json)

        # Issuer anchor issues creds and stores at HolderProver: get cred req, create cred, store cred
        cred_data = []

        CREDS = 450  # enough to build 4 rev regs
        print('\n\n== 9 == creating and storing {} credentials:'.format(CREDS))
        for number in range(CREDS):
            (cred_json, _) = await ian.create_cred(
                cred_offer_json, cred_req_json, {
                    'numeric': str(number),
                    'sha256': sha256(str(number).encode()).hexdigest(),
                })

            cred_id = await pan.store_cred(cred_json, cred_req_metadata_json)
            print('.',
                  end='' if (number + 1) % 100 else '{}\n'.format(number + 1),
                  flush=True)

        # Exercise list view, least to most specific
        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert not r.json()
        rr_ids_up = {
            basename(link)
            for link in Tails.links(ian.dir_tails, ian.did)
        }
        for rr_id in rr_ids_up:
            url = url_for(tsrv.port, 'tails/list/{}'.format(rr_id))
            r = requests.get(url)
            assert r.status_code == 200
            assert not r.json()
        print(
            '\n\n== 10 == All listing views at server come back OK and empty as expected'
        )

        rv = pexpect.run('python ../src/sync/sync.py {}'.format(
            path_cli_ini['issuer']))
        print('\n\n== 11 == Issuer sync uploaded local tails files')

        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert {rr for rr in r.json()} == rr_ids_up
        for rr_id in rr_ids_up:
            url = url_for(tsrv.port, 'tails/list/{}'.format(rr_id))
            r = requests.get(url)
            assert r.status_code == 200
            assert r.json() == [rr_id]  # list with one rr_id should come back

        # Exercise list view, least to most specific
        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert len(r.json()) == len(rr_ids_up)
        print(
            '\n\n== 12 == All listing views at server come back OK with {} uploaded files'
            .format(len(rr_ids_up)))

        rv = pexpect.run('python ../src/sync/sync.py {}'.format(
            path_cli_ini['prover']))
        print('\n\n== 13 == Prover sync downloaded remote tails files')

        rr_ids_down = {
            basename(link)
            for link in Tails.links(
                config['prover']['Tails Client']['tails.dir'], ian.did)
        }
        assert rr_ids_down == rr_ids_up

        # Exercise admin-delete
        rv = pexpect.run('python ../src/admin/delete.py {} all'.format(
            path_cli_ini['admin']))
        print('\n\n== 14 == Admin called for deletion at tails server')

        # Check tails server deletion
        url = url_for(tsrv.port, 'tails/list/all')
        r = requests.get(url)
        assert r.status_code == 200
        assert not r.json()
        print(
            '\n\n== 15 == All listing views at server come back OK and empty as expected'
        )

        rv = pexpect.run('python ../src/sync/multisync.py 1 {}'.format(
            path_cli_ini['issuer']))
        print(
            '\n\n== 16 == Issuer multisync on 1 sync iteration uploaded local tails files'
        )

        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert {rr for rr in r.json()} == rr_ids_up
        for rr_id in rr_ids_up:
            url = url_for(tsrv.port, 'tails/list/{}'.format(rr_id))
            r = requests.get(url)
            assert r.status_code == 200
            assert r.json() == [rr_id]  # list with one rr_id should come back

        # Exercise list view, least to most specific
        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert len(r.json()) == len(rr_ids_up)
        print(
            '\n\n== 17 == All listing views at server come back OK with {} uploaded files'
            .format(len(rr_ids_up)))

        # Remove tails server anchor wallet
        await wallets['admin'].remove()
        print('\n\n== 18 == Removed admin (tails server anchor {}) wallet'.
              format(wallets['admin'].name))
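The test relies on a url_for() helper that the excerpt does not define; a plausible minimal reconstruction, assuming it simply targets the docker-compose port forward on localhost noted at step 2:

def url_for(port: int, path: str) -> str:
    # Hypothetical reconstruction; the real helper is not shown in the excerpt
    return 'http://localhost:{}/{}'.format(port, path)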
Example 11
async def test_anchors_tails_load(
        pool_name,
        pool_genesis_txn_data,
        seed_trustee1):

    rrbx = True
    print(Ink.YELLOW('\n\n== Load-testing tails on {}ternal rev reg builder ==').format("ex" if rrbx else "in"))

    await RevRegBuilder.stop(WALLET_NAME)  # in case of re-run

    # Set up node pool ledger config and wallets, open pool, init anchors
    p_mgr = NodePoolManager()
    if pool_name not in await p_mgr.list():
        await p_mgr.add_config(pool_name, pool_genesis_txn_data)
    pool = p_mgr.get(pool_name)
    await pool.open()

    w_mgr = WalletManager()
    wallets = {
        'trustee-anchor': {
            'seed': seed_trustee1,
            'storage_type': None,
            'config': None,
            'access_creds': None
        },
        WALLET_NAME: {
            'seed': 'Superstar-Anchor-000000000000000',
            'storage_type': None,
            'config': None,
            'access_creds': {
                'key': 'rrbx-test'
            }
        }
    }
    for (name, wdata) in wallets.items():
        try:
            wdata['wallet'] = await w_mgr.create({
                'id': name,
                'seed': wdata['seed']
            })
        except ExtantWallet:
            wdata['wallet'] = w_mgr.get({'id': name})
        finally:
            await wdata['wallet'].open()

    tan = TrusteeAnchor(wallets['trustee-anchor']['wallet'], pool)
    no_prox = rrbx_prox()
    san = OrgHubAnchor(wallets[WALLET_NAME]['wallet'], pool, rrbx=rrbx)
    if rrbx:
        await beep('external rev reg builder process on {}'.format(WALLET_NAME), 15)
        if rrbx_prox() != no_prox + 1:
            await RevRegBuilder.stop(WALLET_NAME)
            assert False, "External rev reg builder process did not start"
        async with OrgHubAnchor(
                wallets[WALLET_NAME]['wallet'],
                pool,
                rrbx=rrbx):  # check for exactly 1 external rev reg builder process
            await beep('external rev reg builder process uniqueness test on {}'.format(WALLET_NAME), 5)
            if rrbx_prox() != no_prox + 1:
                await RevRegBuilder.stop(WALLET_NAME)
                assert False, "External rev reg builder process was not unique"

    assert pool.handle

    await tan.open()
    await san.open()

    # Publish anchor particulars to ledger if not yet present
    for an in (tan, san):
        if not json.loads(await tan.get_nym(an.did)):
            await tan.send_nym(an.did, an.verkey, an.wallet.name, an.least_role())

    nyms = {
        'tan': json.loads(await tan.get_nym(tan.did)),
        'san': json.loads(await tan.get_nym(san.did))
    }
    print('\n\n== 1 == nyms: {}'.format(ppjson(nyms)))

    for k in nyms:
        assert 'dest' in nyms[k]

    # Publish schema to ledger if not yet present; get from ledger
    S_ID = schema_id(san.did, 'tails_load', '{}.0'.format(int(time.time())))
    S_KEY = schema_key(S_ID)

    schema_data = {
        'name': schema_key(S_ID).name,
        'version': schema_key(S_ID).version,
        'attr_names': [
            'number',
            'remainder'
        ]
    }

    try:
        await san.get_schema(S_KEY)  # may exist (almost certainly not)
    except AbsentSchema:
        await san.send_schema(json.dumps(schema_data))
    schema_json = await san.get_schema(S_KEY)
    schema = json.loads(schema_json)
    assert schema  # should exist now
    print('\n\n== 2 == SCHEMA [{} v{}]: {}'.format(S_KEY.name, S_KEY.version, ppjson(schema)))

    # Setup link secret for creation of cred req or proof
    await san.create_link_secret('LinkSecret')

    # SRI anchor create, store, publish cred definitions to ledger; create cred offers
    await san.send_cred_def(S_ID, revo=True)
    cd_id = cred_def_id(S_KEY.origin_did, schema['seqNo'], pool.protocol)

    assert ((not Tails.unlinked(san.dir_tails)) and
        [f for f in Tails.links(san.dir_tails, san.did) if cd_id in f])

    cred_def_json = await san.get_cred_def(cd_id)  # ought to exist now
    cred_def = json.loads(cred_def_json)
    print('\n\n== 3.0 == Cred def [{} v{}]: {}'.format(
        S_KEY.name,
        S_KEY.version,
        ppjson(json.loads(cred_def_json))))
    assert cred_def.get('schemaId', None) == str(schema['seqNo'])

    cred_offer_json = await san.create_cred_offer(schema['seqNo'])
    print('\n\n== 3.1 == Credential offer [{} v{}]: {}'.format(
        S_KEY.name,
        S_KEY.version,
        ppjson(cred_offer_json)))

    (cred_req_json, cred_req_metadata_json) = await san.create_cred_req(cred_offer_json, cd_id)
    print('\n\n== 4 == Credential request [{} v{}]: metadata {}, cred-req {}'.format(
        S_KEY.name,
        S_KEY.version,
        ppjson(cred_req_metadata_json),
        ppjson(cred_req_json)))
    assert json.loads(cred_req_json)

    # BC Reg anchor (as Issuer) issues creds and stores at HolderProver: get cred req, create cred, store cred
    CREDS = 4034  # enough to kick off rev reg on size 4096 and issue two creds in it: 1 needing set-rev-reg, 1 not
    print('\n\n== 5 == creating {} credentials'.format(CREDS))
    swatch = Stopwatch(2)
    optima = {}  # per rev-reg, fastest/slowest pairs
    for number in range(CREDS):
        swatch.mark()
        (cred_json, _) = await san.create_cred(
            cred_offer_json,
            cred_req_json,
            {
                'number': str(number),
                'remainder': str(number % 100)
            })
        elapsed = swatch.mark()
        tag = rev_reg_id2tag(Tails.current_rev_reg_id(san.dir_tails, cd_id))
        if tag not in optima:
            optima[tag] = (elapsed, elapsed)
        else:
            optima[tag] = (min(optima[tag][0], elapsed), max(optima[tag][1], elapsed))
        print('.', end='', flush=True)
        if ((number + 1) % 100) == 0:
            print('{}: #{}: {:.2f}-{:.2f}s'.format(number + 1, tag, *optima[tag]), flush=True)

        assert json.loads(cred_json)
    print('{}: #{}: {:.2f}-{:.2f}s'.format(number + 1, tag, *optima[tag]), flush=True)

    print('\n\n== 6 == best, worst times by revocation registry: {}'.format(ppjson(optima)))
    assert (not rrbx) or (max(optima[tag][1] for tag in optima) <
        4 * min(optima[tag][1] for tag in optima if int(tag) > 0))  # if waiting on rr beyond #0, sizes increase as 2^n

    await san.close()
    if rrbx:
        await RevRegBuilder.stop(WALLET_NAME)
    await tan.close()
    for (name, wdata) in wallets.items():
        await wdata['wallet'].close()
    await pool.close()
Example 12
async def delete_tails(request: Request, ident: str, epoch: int) -> HTTPResponse:
    """
    Delete tails files by corresponding rev reg ids: all, by rev reg id, by cred def id, or by issuer DID.

    :param request: Sanic request structure
    :param ident: 'all' for no filter; rev reg id, cred def id, or issuer DID to filter by any such identifier
    :param epoch: current EPOCH time, must be within 5 minutes of current server time
    :return: empty text response
    """

    if not await is_current(int(epoch)):
        LOGGER.error('DELETE epoch %s is too far from current server time', epoch)
        return response.text('DELETE epoch {} is too far from current server time'.format(epoch), status=400)

    signature = request.body
    plain = '{}||{}'.format(epoch, ident)

    tsan = await MEM_CACHE.get('tsan')
    if not tsan.verify(plain, signature, tsan.did):
        LOGGER.error('DELETE signature failed to verify')
        return response.text('DELETE signature failed to verify', status=400)

    dir_tails = join(dirname(dirname(realpath(__file__))), 'tails')

    if ident == 'all':  # delete everything -- note that 'all' is not valid base58 so no case below can apply
        if isdir(dir_tails):
            rmtree(dir_tails)
        makedirs(dir_tails, exist_ok=True)

    elif ok_rev_reg_id(ident):  # it's a rev reg id
        path_tails = Tails.linked(dir_tails, ident)
        if path_tails and isfile(path_tails):
            unlink(path_tails)
            LOGGER.info('Deleted %s', path_tails)
        path_link = join(Tails.dir(dir_tails, ident), ident)
        if path_link and islink(path_link):
            unlink(path_link)
            LOGGER.info('Deleted %s', path_link)

    elif ok_cred_def_id(ident):  # it's a cred def id (starts with issuer DID)
        dir_cd_id = join(dir_tails, ident)
        if isdir(dir_cd_id):
            rmtree(dir_cd_id)
            LOGGER.info('Deleted %s', dir_cd_id)
        elif exists(dir_cd_id):  # non-dir is squatting on name reserved for dir: it's corrupt; remove it
            unlink(dir_cd_id)
            LOGGER.info('Deleted spurious non-directory %s', dir_cd_id)

    elif ok_did(ident):  # it's an issuer DID
        dirs_cd_id = {dirname(link) for link in Tails.links(dir_tails, ident)}
        for dir_cd_id in dirs_cd_id:
            if ok_cred_def_id(basename(dir_cd_id)):
                if isdir(dir_cd_id):
                    rmtree(dir_cd_id)
                    LOGGER.info('Deleted %s', dir_cd_id)
                elif exists(dir_cd_id):  # non-dir is squatting on name reserved for dir: it's corrupt; remove it
                    unlink(dir_cd_id)
                    LOGGER.info('Deleted spurious non-directory %s', dir_cd_id)

    else:
        LOGGER.error('Token %s is not a valid specifier for tails files', ident)
        return response.text('Token {} is not a valid specifier for tails files'.format(ident), status=400)

    LOGGER.info('Fulfilled DELETE request deleting tails files on filter %s', ident)
    return response.text('')
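A client of this variant must put the epoch in the URL and send a signature over '<epoch>||<ident>' as the request body; a sketch, assuming the caller holds an anchor whose sign() call matches the verify() above (the sign() method name and the route are assumptions):

import time

import requests

async def call_delete(san, host: str, port: int, ident: str = 'all') -> None:
    # san: anchor sharing the tails server anchor's signing key (assumption)
    epoch = int(time.time())
    signature = await san.sign('{}||{}'.format(epoch, ident))  # sign() is an assumption
    url = 'http://{}:{}/tails/delete/{}/{}'.format(host, port, ident, epoch)  # route assumed
    resp = requests.delete(url, data=signature)
    resp.raise_for_status()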