async def create_cred_offer(self, schema_seq_no: int) -> str:
    """
    Create a credential offer, as Issuer, for the schema with the given sequence number.

    Raise CorruptWallet if the wallet has no private key for the corresponding
    credential definition.

    :param schema_seq_no: schema sequence number
    :return: credential offer json for use in storing credentials at HolderProver.
    """

    LOGGER.debug('Issuer.create_cred_offer >>> schema_seq_no: %s', schema_seq_no)

    offer_json = None
    cd_id = cred_def_id(self.did, schema_seq_no, self.pool.protocol)

    try:
        offer_json = await anoncreds.issuer_create_credential_offer(self.wallet.handle, cd_id)
    except IndyError as x_indy:
        # Any indy failure other than a missing wallet record propagates as-is.
        if x_indy.error_code != ErrorCode.WalletNotFoundError:
            LOGGER.debug(
                'Issuer.create_cred_offer: <!< cannot create cred offer, indy error code %s',
                x_indy.error_code)
            raise
        # Missing wallet record: this wallet never issued the cred def.
        LOGGER.debug(
            'Issuer.create_cred_offer: <!< did not issue cred definition from wallet %s',
            self.wallet.name)
        raise CorruptWallet(
            'Cannot create cred offer: did not issue cred definition from wallet {}'.format(
                self.wallet.name))

    LOGGER.debug('Issuer.create_cred_offer <<< %s', offer_json)
    return offer_json
async def _create_cred_def(self, schema: dict, ledger_cred_def: dict, revo: bool) -> (str, bool):
    """
    Create credential definition in wallet as part of the send_cred_def() sequence.

    Return the cred def json along with a flag indicating whether its private key is
    usable, hence whether it is OK to continue the sequence and propagate the cred def
    and revocation registry info to the ledger.

    :param schema: schema on which to create cred def
    :param ledger_cred_def: credential definition as ledger has it (typically, None)
    :param revo: whether cred def supports revocation
    :return: cred def json and whether local cred def private key is OK, hence cred def is OK to send to the ledger
    """
    LOGGER.debug(
        'Issuer._create_cred_def >>> schema: %s, ledger_cred_def: %s, revo: %s',
        schema, ledger_cred_def, revo)

    cd_json = '{}'
    key_ok = True
    try:
        (_, cd_json) = await anoncreds.issuer_create_and_store_credential_def(
            self.wallet.handle,
            self.did,  # issuer DID
            json.dumps(schema),
            self.pool.protocol.cd_id_tag(False),  # expect only one cred def per schema and issuer
            'CL',
            json.dumps({'support_revocation': revo}))
        if ledger_cred_def:
            # Wallet creation succeeded while the ledger already has a cred def:
            # the fresh local private key cannot match the ledger's public key.
            key_ok = False
            LOGGER.warning(
                'New cred def on %s in wallet shadows existing one on ledger: private key not usable',
                cred_def_id(self.did, schema['seqNo'], self.pool.protocol))
            # carry on though, this anchor may have other capacities so public key may be good enough
    except IndyError as x_indy:
        if x_indy.error_code != ErrorCode.AnoncredsCredDefAlreadyExistsError:
            LOGGER.debug(
                'Issuer._create_cred_def <!< cannot store cred def in wallet %s: indy error code %s',
                self.name, x_indy.error_code)
            raise
        # Wallet already holds this cred def: fine only if the ledger has it too.
        if not ledger_cred_def:
            LOGGER.debug('Issuer._create_cred_def <!< corrupt wallet %s', self.name)
            raise CorruptWallet(
                'Corrupt Issuer wallet {} has cred def on schema {} not on ledger'.format(
                    self.name, schema['id']))
        LOGGER.info(
            'Issuer wallet %s reusing existing cred def on schema %s version %s',
            self.name, schema['name'], schema['version'])

    rv = (cd_json, key_ok)
    LOGGER.debug('Issuer._create_cred_def <<< %s', rv)
    return rv
async def simulate_get(ser_no, did):
    """Return a (simulated) cred def for the id built from did and ser_no, caching it on first access."""
    result = None
    # REVO_CACHE builds on same lock mechanism - this unit test suffices for both caches
    with CRED_DEF_CACHE.lock:
        cd_id = cred_def_id(did, ser_no)
        if cd_id not in CRED_DEF_CACHE:
            # Cache miss: fabricate a value, simulate slow retrieval, then cache it.
            result = hash(cd_id)
            await asyncio.sleep(DELAY)
            CRED_DEF_CACHE[cd_id] = result
        else:
            result = CRED_DEF_CACHE[cd_id]
    return result
async def test_von_tails(pool_ip, genesis_txn_file, path_cli_ini, cli_ini, path_setnym_ini, setnym_ini):
    """
    End-to-end integration test of the tails server: start the server, set nyms,
    issue revocable credentials to generate tails files, then exercise the server's
    list/sync/delete operations via the sync, multisync, and delete scripts.
    """
    print(Ink.YELLOW('\n\n== Testing tails server vs. IP {} =='.format(pool_ip)))

    # Set config for tails clients
    config = {}
    i = 0
    for profile in path_cli_ini:
        cli_config = inis2dict(str(path_cli_ini[profile]))
        config[profile] = cli_config
        with open(path_cli_ini[profile], 'r') as fh_cfg:
            print('\n\n== 0.{} == {} tails sync configuration:\n{}'.format(i, profile, fh_cfg.read()))
        i += 1

    # Start tails server
    print('\n\n== 1 == Starting tails server on port {}'.format(
        config['issuer']['Tails Server']['port']))
    tsrv = TailsServer(config['issuer']['Tails Server']['port'])
    started = tsrv.start()
    if not started:
        # A pre-existing server would hold stale state; require a fresh start.
        print('\n\n== X == Server already running - stop it to run test from scratch')
        assert False

    assert tsrv.is_up()
    print('\n\n== 2 == Started tails server, docker-compose port-forwarded via localhost:{}'.format(
        tsrv.port))

    atexit.register(shutdown)

    # Set nyms (operation creates pool if need be)
    i = 0
    setnym_config = {}
    for profile in path_setnym_ini:
        cli_config = inis2dict(str(path_setnym_ini[profile]))
        if profile == 'admin':  # tails server anchor on ledger a priori
            continue
        setnym_config[profile] = cli_config
        with open(path_setnym_ini[profile], 'r') as fh_cfg:
            print('\n\n== 3.{} == {} setnym configuration:\n{}'.format(i, profile, fh_cfg.read()))
        sub_proc = subprocess.run(
            ['von_anchor_setnym', str(path_setnym_ini[profile])],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL)
        assert not sub_proc.returncode
        i += 1
    print('\n\n== 4 == Setnym ops completed OK')

    # wallets = {profile: Wallet(setnym_config[profile]['VON Anchor']['name']) for profile in setnym_config}
    # wallets['admin'] = Wallet(config['admin']['VON Anchor']['name'])
    wallets = await get_wallets(
        {
            **{profile: setnym_config[profile]['VON Anchor'] for profile in setnym_config},
            'admin': config['admin']['VON Anchor']
        },
        open_all=False)

    # Open pool and anchors, issue creds to create tails files
    async with wallets['issuer'] as w_issuer, (
            wallets['prover']) as w_prover, (NodePool(
            config['issuer']['Node Pool']['name'])) as pool, (RegistrarAnchor(
            w_issuer, pool)) as ian, (OrgBookAnchor(w_prover, pool)) as pan:

        # Get nyms from ledger for display
        i = 0
        for an in (ian, pan):
            print('\n\n== 5.{} == {} nym on ledger: {}'.format(
                i, an.wallet.name, ppjson(await an.get_nym())))
            i += 1

        # Publish schema to ledger
        S_ID = schema_id(ian.did, 'rainbow', '{}.0'.format(int(time())))
        schema_data = {
            'name': schema_key(S_ID).name,
            'version': schema_key(S_ID).version,
            'attr_names': ['numeric', 'sha256']
        }

        S_KEY = schema_key(S_ID)
        try:
            await ian.get_schema(S_KEY)  # may exist (almost certainly not)
        except AbsentSchema:
            await ian.send_schema(json.dumps(schema_data))
        schema_json = await ian.get_schema(S_KEY)
        schema = json.loads(schema_json)
        print('\n\n== 6 == SCHEMA [{} v{}]: {}'.format(S_KEY.name, S_KEY.version, ppjson(schema)))
        assert schema  # should exist now

        # Setup link secret for creation of cred req or proof
        await pan.create_link_secret('LinkSecret')

        # Issuer anchor create, store, publish cred definitions to ledger; create cred offers
        await ian.send_cred_def(S_ID, revo=True)
        cd_id = cred_def_id(S_KEY.origin_did, schema['seqNo'], pool.protocol)

        # Sending the cred def should have created and linked a tails file for it.
        assert ((not Tails.unlinked(ian.dir_tails)) and
            [f for f in Tails.links(ian.dir_tails, ian.did) if cd_id in f])

        cred_def_json = await ian.get_cred_def(cd_id)  # ought to exist now
        cred_def = json.loads(cred_def_json)
        print('\n\n== 7.0 == Cred def [{} v{}]: {}'.format(
            S_KEY.name, S_KEY.version, ppjson(json.loads(cred_def_json))))
        assert cred_def.get('schemaId', None) == str(schema['seqNo'])

        cred_offer_json = await ian.create_cred_offer(schema['seqNo'])
        cred_offer = json.loads(cred_offer_json)
        print('\n\n== 7.1 == Credential offer [{} v{}]: {}'.format(
            S_KEY.name, S_KEY.version, ppjson(cred_offer_json)))

        (cred_req_json, cred_req_metadata_json) = await pan.create_cred_req(cred_offer_json, cd_id)
        cred_req = json.loads(cred_req_json)
        print('\n\n== 8 == Credential request [{} v{}]: metadata {}, cred {}'.format(
            S_KEY.name, S_KEY.version, ppjson(cred_req_metadata_json), ppjson(cred_req_json)))
        assert json.loads(cred_req_json)

        # Issuer anchor issues creds and stores at HolderProver: get cred req, create cred, store cred
        cred_data = []  # NOTE(review): populated nowhere below; appears vestigial
        CREDS = 450  # enough to build 4 rev regs
        print('\n\n== 9 == creating and storing {} credentials:'.format(CREDS))
        for number in range(CREDS):
            (cred_json, _) = await ian.create_cred(
                cred_offer_json,
                cred_req_json,
                {
                    'numeric': str(number),
                    'sha256': sha256(str(number).encode()).hexdigest(),
                })
            cred_id = await pan.store_cred(cred_json, cred_req_metadata_json)
            # Progress dots; every 100th cred also prints the running count.
            print('.', end='' if (number + 1) % 100 else '{}\n'.format(number + 1), flush=True)

        # Exercise list view, least to most specific
        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert not r.json()
        rr_ids_up = {basename(link) for link in Tails.links(ian.dir_tails, ian.did)}
        for rr_id in rr_ids_up:
            url = url_for(tsrv.port, 'tails/list/{}'.format(rr_id))
            r = requests.get(url)
            assert r.status_code == 200
            assert not r.json()
        print('\n\n== 10 == All listing views at server come back OK and empty as expected')

        # Upload local tails files via the issuer sync script
        rv = pexpect.run('python ../src/sync/sync.py {}'.format(path_cli_ini['issuer']))
        print('\n\n== 11 == Issuer sync uploaded local tails files')

        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert {rr for rr in r.json()} == rr_ids_up
        for rr_id in rr_ids_up:
            url = url_for(tsrv.port, 'tails/list/{}'.format(rr_id))
            r = requests.get(url)
            assert r.status_code == 200
            assert r.json() == [rr_id]  # list with one rr_id should come back

        # Exercise list view, least to most specific
        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert len(r.json()) == len(rr_ids_up)
        print('\n\n== 12 == All listing views at server come back OK with {} uploaded files'.format(
            len(rr_ids_up)))

        # Download remote tails files via the prover sync script and compare
        rv = pexpect.run('python ../src/sync/sync.py {}'.format(path_cli_ini['prover']))
        print('\n\n== 13 == Prover sync downloaded remote tails files')

        rr_ids_down = {
            basename(link)
            for link in Tails.links(config['prover']['Tails Client']['tails.dir'], ian.did)
        }
        assert rr_ids_down == rr_ids_up

        # Exercise admin-delete
        rv = pexpect.run('python ../src/admin/delete.py {} all'.format(path_cli_ini['admin']))
        print('\n\n== 14 == Admin called for deletion at tails server')

        # Check tails server deletion
        url = url_for(tsrv.port, 'tails/list/all')
        r = requests.get(url)
        assert r.status_code == 200
        assert not r.json()
        print('\n\n== 15 == All listing views at server come back OK and empty as expected')

        # Re-upload via multisync (single iteration) and verify the listings again
        rv = pexpect.run('python ../src/sync/multisync.py 1 {}'.format(path_cli_ini['issuer']))
        print('\n\n== 16 == Issuer multisync on 1 sync iteration uploaded local tails files')

        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert {rr for rr in r.json()} == rr_ids_up
        for rr_id in rr_ids_up:
            url = url_for(tsrv.port, 'tails/list/{}'.format(rr_id))
            r = requests.get(url)
            assert r.status_code == 200
            assert r.json() == [rr_id]  # list with one rr_id should come back

        # Exercise list view, least to most specific
        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            r = requests.get(url)
            assert r.status_code == 200
            assert len(r.json()) == len(rr_ids_up)
        print('\n\n== 17 == All listing views at server come back OK with {} uploaded files'.format(
            len(rr_ids_up)))

        # Remove tails server anchor wallet
        await wallets['admin'].remove()
        print('\n\n== 18 == Removed admin (tails server anchor {}) wallet'.format(
            wallets['admin'].name))
async def test_anchors_tails_load(pool_name, pool_genesis_txn_data, seed_trustee1):
    """
    Load-test tails file management: issue enough revocable credentials to roll over
    several revocation registries, timing credential creation per registry, with the
    revocation registry builder running as an external process (rrbx=True).
    """
    rrbx = True
    print(Ink.YELLOW('\n\n== Load-testing tails on {}ternal rev reg builder ==').format(
        "ex" if rrbx else "in"))

    await RevRegBuilder.stop(WALLET_NAME)  # in case of re-run

    # Set up node pool ledger config and wallets, open pool, init anchors
    p_mgr = NodePoolManager()
    if pool_name not in await p_mgr.list():
        await p_mgr.add_config(pool_name, pool_genesis_txn_data)
    pool = p_mgr.get(pool_name)
    await pool.open()

    w_mgr = WalletManager()
    wallets = {
        'trustee-anchor': {
            'seed': seed_trustee1,
            'storage_type': None,
            'config': None,
            'access_creds': None
        },
        WALLET_NAME: {
            'seed': 'Superstar-Anchor-000000000000000',
            'storage_type': None,
            'config': None,
            'access_creds': {'key': 'rrbx-test'}
        }
    }
    for (name, wdata) in wallets.items():
        try:
            wdata['wallet'] = await w_mgr.create({'id': name, 'seed': wdata['seed']})
        except ExtantWallet:
            wdata['wallet'] = w_mgr.get({'id': name})
        finally:
            await wdata['wallet'].open()

    tan = TrusteeAnchor(wallets['trustee-anchor']['wallet'], pool)
    no_prox = rrbx_prox()  # process count before starting the anchor
    san = OrgHubAnchor(wallets[WALLET_NAME]['wallet'], pool, rrbx=rrbx)
    if rrbx:
        await beep('external rev reg builder process on {}'.format(WALLET_NAME), 15)
        if rrbx_prox() != no_prox + 1:
            await RevRegBuilder.stop(WALLET_NAME)
            assert False, "External rev reg builder process did not start"
        # check for exactly 1 external rev reg builder process
        async with OrgHubAnchor(wallets[WALLET_NAME]['wallet'], pool, rrbx=rrbx):
            await beep('external rev reg builder process uniqueness test on {}'.format(
                WALLET_NAME), 5)
            if rrbx_prox() != no_prox + 1:
                await RevRegBuilder.stop(WALLET_NAME)
                assert False, "External rev reg builder process was not unique"

    assert pool.handle

    await tan.open()
    await san.open()

    # Publish anchor particulars to ledger if not yet present
    for an in (tan, san):
        if not json.loads(await tan.get_nym(an.did)):
            await tan.send_nym(an.did, an.verkey, an.wallet.name, an.least_role())

    nyms = {
        'tan': json.loads(await tan.get_nym(tan.did)),
        'san': json.loads(await tan.get_nym(san.did))
    }
    print('\n\n== 1 == nyms: {}'.format(ppjson(nyms)))

    for k in nyms:
        assert 'dest' in nyms[k]

    # Publish schema to ledger if not yet present; get from ledger
    S_ID = schema_id(san.did, 'tails_load', '{}.0'.format(int(time.time())))
    S_KEY = schema_key(S_ID)

    schema_data = {
        'name': schema_key(S_ID).name,
        'version': schema_key(S_ID).version,
        'attr_names': ['number', 'remainder']
    }

    try:
        await san.get_schema(S_KEY)  # may exist (almost certainly not)
    except AbsentSchema:
        await san.send_schema(json.dumps(schema_data))
    schema_json = await san.get_schema(S_KEY)
    schema = json.loads(schema_json)
    assert schema  # should exist now
    print('\n\n== 2 == SCHEMA [{} v{}]: {}'.format(S_KEY.name, S_KEY.version, ppjson(schema)))

    # Setup link secret for creation of cred req or proof
    await san.create_link_secret('LinkSecret')

    # SRI anchor create, store, publish cred definitions to ledger; create cred offers
    await san.send_cred_def(S_ID, revo=True)
    cd_id = cred_def_id(S_KEY.origin_did, schema['seqNo'], pool.protocol)

    # Sending the cred def should have created and linked a tails file for it.
    assert ((not Tails.unlinked(san.dir_tails)) and
        [f for f in Tails.links(san.dir_tails, san.did) if cd_id in f])

    cred_def_json = await san.get_cred_def(cd_id)  # ought to exist now
    cred_def = json.loads(cred_def_json)
    print('\n\n== 3.0 == Cred def [{} v{}]: {}'.format(
        S_KEY.name, S_KEY.version, ppjson(json.loads(cred_def_json))))
    assert cred_def.get('schemaId', None) == str(schema['seqNo'])

    cred_offer_json = await san.create_cred_offer(schema['seqNo'])
    print('\n\n== 3.1 == Credential offer [{} v{}]: {}'.format(
        S_KEY.name, S_KEY.version, ppjson(cred_offer_json)))

    (cred_req_json, cred_req_metadata_json) = await san.create_cred_req(cred_offer_json, cd_id)
    print('\n\n== 4 == Credential request [{} v{}]: metadata {}, cred-req {}'.format(
        S_KEY.name, S_KEY.version, ppjson(cred_req_metadata_json), ppjson(cred_req_json)))
    assert json.loads(cred_req_json)

    # BC Reg anchor (as Issuer) issues creds and stores at HolderProver: get cred req, create cred, store cred
    CREDS = 4034  # enough to kick off rev reg on size 4096 and issue two creds in it: 1 needing set-rev-reg, 1 not
    print('\n\n== 5 == creating {} credentials'.format(CREDS))
    swatch = Stopwatch(2)
    optima = {}  # per rev-reg, fastest/slowest pairs
    for number in range(CREDS):
        swatch.mark()
        (cred_json, _) = await san.create_cred(
            cred_offer_json,
            cred_req_json,
            {
                'number': str(number),
                'remainder': str(number % 100)
            })
        elapsed = swatch.mark()
        # Track best/worst issue times per revocation registry tag.
        tag = rev_reg_id2tag(Tails.current_rev_reg_id(san.dir_tails, cd_id))
        if tag not in optima:
            optima[tag] = (elapsed, elapsed)
        else:
            optima[tag] = (min(optima[tag][0], elapsed), max(optima[tag][1], elapsed))
        print('.', end='', flush=True)
        if ((number + 1) % 100) == 0:
            print('{}: #{}: {:.2f}-{:.2f}s'.format(number + 1, tag, *optima[tag]), flush=True)

        assert json.loads(cred_json)
    print('{}: #{}: {:.2f}-{:.2f}s'.format(number + 1, tag, *optima[tag]), flush=True)

    print('\n\n== 6 == best, worst times by revocation registry: {}'.format(ppjson(optima)))
    # if waiting on rr beyond #0, sizes increase as 2^n
    assert (not rrbx) or (max(optima[tag][1] for tag in optima) <
        4 * min(optima[tag][1] for tag in optima if int(tag) > 0))

    # Tear down: close anchors, stop external builder, close wallets and pool.
    await san.close()
    if rrbx:
        await RevRegBuilder.stop(WALLET_NAME)
    await tan.close()
    for (name, wdata) in wallets.items():
        await wdata['wallet'].close()
    await pool.close()
async def send_cred_def(self, s_id: str, revocation: bool = True, rr_size: int = None) -> str:
    """
    Create a credential definition as Issuer, store it in its wallet, and send it to the ledger.

    Raise CorruptWallet for wallet not pertaining to current ledger, BadLedgerTxn on failure
    to send credential definition to ledger if need be, or IndyError for any other failure
    to create and store credential definition in wallet.

    :param s_id: schema identifier
    :param revocation: whether to support revocation for cred def
    :param rr_size: size of initial revocation registry (default as per _create_rev_reg()),
        if revocation supported
    :return: json credential definition as it appears on ledger
    """

    LOGGER.debug(
        'Issuer.send_cred_def >>> s_id: %s, revocation: %s, rr_size: %s',
        s_id, revocation, rr_size)

    if not ok_schema_id(s_id):
        LOGGER.debug('Issuer.send_cred_def <!< Bad schema id %s', s_id)
        raise BadIdentifier('Bad schema id {}'.format(s_id))

    rv_json = json.dumps({})
    schema_json = await self.get_schema(schema_key(s_id))
    schema = json.loads(schema_json)

    cd_id = cred_def_id(self.did, schema['seqNo'], self.pool.protocol)
    private_key_ok = True
    with CRED_DEF_CACHE.lock:
        try:
            rv_json = await self.get_cred_def(cd_id)
            LOGGER.info(
                'Cred def on schema %s version %s already exists on ledger; Issuer %s not sending another',
                schema['name'], schema['version'], self.wallet.name)
        except AbsentCredDef:
            pass  # OK - about to create, store, and send it

        try:
            (_, cred_def_json) = await anoncreds.issuer_create_and_store_credential_def(
                self.wallet.handle,
                self.did,  # issuer DID
                schema_json,
                self.pool.protocol.cd_id_tag(False),  # expect only one cred def per schema and issuer
                'CL',
                json.dumps({'support_revocation': revocation}))
            if json.loads(rv_json):
                # Wallet creation succeeded but ledger already has this cred def:
                # the fresh local private key does not match the ledger's public key.
                private_key_ok = False
                LOGGER.warning(
                    'New cred def on %s in wallet shadows existing one on ledger: private key not usable',
                    cd_id)
                # carry on though, this anchor may have other roles so public key may be good enough
        except IndyError as x_indy:
            if x_indy.error_code == ErrorCode.AnoncredsCredDefAlreadyExistsError:
                if json.loads(rv_json):
                    LOGGER.info(
                        'Issuer wallet %s reusing existing cred def on schema %s version %s',
                        self.wallet.name, schema['name'], schema['version'])
                else:
                    LOGGER.debug('Issuer.send_cred_def: <!< corrupt wallet %s', self.wallet.name)
                    raise CorruptWallet(
                        'Corrupt Issuer wallet {} has cred def on schema {} version {} not on ledger'.format(
                            self.wallet.name, schema['name'], schema['version']))
            else:
                LOGGER.debug(
                    'Issuer.send_cred_def: <!< cannot store cred def in wallet %s: indy error code %s',
                    self.wallet.name, x_indy.error_code)
                raise

        if not json.loads(rv_json):  # checking the ledger returned no cred def: send it
            req_json = await ledger.build_cred_def_request(self.did, cred_def_json)
            await self._sign_submit(req_json)

            for _ in range(16):  # reasonable timeout
                try:
                    rv_json = await self.get_cred_def(cd_id)  # adds to cache
                    break
                except AbsentCredDef:
                    await sleep(1)
                    LOGGER.info('Sent cred def %s to ledger, waiting 1s for its appearance', cd_id)

            if not rv_json:
                LOGGER.debug('Issuer.send_cred_def <!< timed out waiting on sent cred_def %s', cd_id)
                raise BadLedgerTxn('Timed out waiting on sent cred_def {}'.format(cd_id))

        if revocation:
            # create new rev reg, tails file for tag 0
            await self._sync_revoc(rev_reg_id(cd_id, 0), rr_size)

        if revocation and private_key_ok:
            # Sync every tag known locally, '0' to str(next-1).
            # FIX: tag is a str, so the original comparison (tag == 0) was always
            # False and rr_size never applied here; compare against '0' instead.
            # The tag-0 sync above is presumed idempotent with this one - TODO confirm.
            for tag in [str(t) for t in range(int(Tails.next_tag(self._dir_tails, cd_id)[0]))]:
                await self._sync_revoc(rev_reg_id(cd_id, tag), rr_size if tag == '0' else None)

        makedirs(join(self._dir_tails, cd_id), exist_ok=True)  # make sure dir exists for box id collection, revo or not

    LOGGER.debug('Issuer.send_cred_def <<< %s', rv_json)
    return rv_json
async def send_cred_def(self, s_id: str, revo: bool = True, rr_size: int = None) -> str:
    """
    Create a credential definition as Issuer, store it in its wallet, and send it to the ledger.

    Raise CorruptWallet for wallet not pertaining to current ledger, BadLedgerTxn on failure
    to send credential definition to ledger if need be, WalletState for closed wallet,
    or IndyError for any other failure to create and store credential definition in wallet.

    :param s_id: schema identifier
    :param revo: whether to support revocation for cred def
    :param rr_size: size of initial revocation registry (default as per
        RevRegBuilder.create_rev_reg()), if revocation supported
    :return: json credential definition as it appears on ledger
    """

    LOGGER.debug(
        'Issuer.send_cred_def >>> s_id: %s, revo: %s, rr_size: %s',
        s_id, revo, rr_size)

    if not ok_schema_id(s_id):
        LOGGER.debug('Issuer.send_cred_def <!< Bad schema id %s', s_id)
        raise BadIdentifier('Bad schema id {}'.format(s_id))

    if not self.wallet.handle:
        LOGGER.debug('Issuer.send_cred_def <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    if not self.pool:
        LOGGER.debug('Issuer.send_cred_def <!< issuer %s has no pool', self.name)
        raise AbsentPool('Issuer {} has no pool: cannot send cred def'.format(self.name))

    rv_json = json.dumps({})
    schema_json = await self.get_schema(schema_key(s_id))
    schema = json.loads(schema_json)

    cd_id = cred_def_id(self.did, schema['seqNo'], self.pool.protocol)
    private_key_ok = True
    with CRED_DEF_CACHE.lock:
        try:
            rv_json = await self.get_cred_def(cd_id)
            LOGGER.info(
                'Cred def on schema %s version %s already exists on ledger; Issuer %s not sending another',
                schema['name'], schema['version'], self.name)
        except AbsentCredDef:
            pass  # OK - about to create, store, and send it

        (cred_def_json, private_key_ok) = await self._create_cred_def(
            schema, json.loads(rv_json), revo)

        if not json.loads(rv_json):  # checking the ledger returned no cred def: send it
            req_json = await ledger.build_cred_def_request(self.did, cred_def_json)
            await self._sign_submit(req_json)

            for _ in range(16):  # reasonable timeout
                try:
                    rv_json = await self.get_cred_def(cd_id)  # adds to cache
                    break
                except AbsentCredDef:
                    await asyncio.sleep(1)
                    LOGGER.info('Sent cred def %s to ledger, waiting 1s for its appearance', cd_id)

            if not rv_json:
                LOGGER.debug('Issuer.send_cred_def <!< timed out waiting on sent cred_def %s', cd_id)
                raise BadLedgerTxn('Timed out waiting on sent cred_def {}'.format(cd_id))

            if revo:  # create new rev reg for tag '0'
                if self.rrbx:
                    # Hand the external rev reg builder a size hint before syncing.
                    (_, rr_size_suggested) = Tails.next_tag(self.dir_tails, cd_id)
                    self.rrb.mark_in_progress(rev_reg_id(cd_id, '0'), rr_size or rr_size_suggested)

                await self._sync_revoc_for_issue(rev_reg_id(cd_id, '0'), rr_size)  # sync rev reg on tag '0'

        if revo and private_key_ok:
            for tag in [str(t) for t in range(1, int(Tails.next_tag(self.dir_tails, cd_id)[0]))]:  # '1' to next-1
                # FIX: the original passed (rr_size if tag == '0' else None), but the
                # range starts at 1 so tag is never '0' - the condition was dead code.
                # Pass None outright: rr_size applies only to tag '0', synced above.
                await self._sync_revoc_for_issue(rev_reg_id(cd_id, tag), None)

        makedirs(join(self.dir_tails, cd_id), exist_ok=True)  # dir required for box id collection, revo or not

    LOGGER.debug('Issuer.send_cred_def <<< %s', rv_json)
    return rv_json
async def _publish_schema(self, issuer: AgentCfg, cred_type: dict) -> None:
    """
    Check the ledger for a specific schema and version, and publish it if not found.
    Also publish the related credential definition if not found.

    Args:
        issuer: the initialized and opened issuer instance publishing the schema
        cred_type: a dict which will be updated with the published schema and credential def

    Raises:
        IndyConfigError: if the schema definition is missing or the ledger schema's
            attributes do not match the definition
        ServiceSyncError: if the schema could not be published to the ledger
    """
    if not cred_type or "definition" not in cred_type:
        raise IndyConfigError("Missing schema definition")
    definition = cred_type["definition"]
    s_id = schema_id(issuer.did, definition.name, definition.version)

    if not cred_type.get("ledger_schema"):
        LOGGER.info(
            "Checking for schema: %s (%s)",
            definition.name,
            definition.version,
        )
        # Check if schema exists on ledger
        try:
            s_key = schema_key(s_id)
            schema_json = await issuer.instance.get_schema(s_key)
            ledger_schema = json.loads(schema_json)
            log_json("Schema found on ledger:", ledger_schema, LOGGER)
            # Attribute order may differ; compare as sorted lists.
            if sorted(ledger_schema["attrNames"]) != sorted(definition.attr_names):
                # FIX: the original passed logging-style lazy args to the exception
                # constructor, so the %s placeholder was never interpolated;
                # format the message explicitly instead.
                raise IndyConfigError(
                    "Ledger schema attributes do not match definition, found: %s"
                    % ledger_schema["attrNames"])
        except AbsentSchema:
            # If not found, send the schema to the ledger
            LOGGER.info(
                "Publishing schema: %s (%s)",
                definition.name,
                definition.version,
            )
            schema_json = await issuer.instance.send_schema(
                json.dumps({
                    "name": definition.name,
                    "version": definition.version,
                    "attr_names": definition.attr_names,
                }))
            ledger_schema = json.loads(schema_json)
            # A schema accepted by the ledger carries a sequence number.
            if not ledger_schema or not ledger_schema.get("seqNo"):
                raise ServiceSyncError("Schema was not published to ledger")
            log_json("Published schema:", ledger_schema, LOGGER)
        cred_type["ledger_schema"] = ledger_schema

    if not cred_type.get("cred_def"):
        # Check if credential definition has been published
        LOGGER.info(
            "Checking for credential def: %s (%s)",
            definition.name,
            definition.version,
        )
        try:
            cred_def_json = await issuer.instance.get_cred_def(
                cred_def_id(issuer.did, cred_type["ledger_schema"]["seqNo"],
                            self._pool.protocol))
            cred_def = json.loads(cred_def_json)
            log_json("Credential def found on ledger:", cred_def, LOGGER)
        except AbsentCredDef:
            # If credential definition is not found then publish it
            LOGGER.info(
                "Publishing credential def: %s (%s)",
                definition.name,
                definition.version,
            )
            cred_def_json = await issuer.instance.send_cred_def(s_id, revocation=False)
            cred_def = json.loads(cred_def_json)
            log_json("Published credential def:", cred_def, LOGGER)
        cred_type["cred_def"] = cred_def