async def get_schema(self, index: Union[SchemaKey, int, str]) -> str:
    """
    Get schema from ledger by SchemaKey namedtuple (origin DID, name, version),
    sequence number, or schema identifier.

    Raise AbsentSchema for no such schema, logging any error condition and
    raising BadLedgerTxn on bad request.

    Retrieve the schema from the anchor's schema cache if it has it; cache it
    en passant if it does not (and there is a corresponding schema on the ledger).

    :param index: schema key (origin DID, name, version), sequence number, or schema identifier
    :return: schema json, parsed from ledger
    """

    LOGGER.debug('BaseAnchor.get_schema >>> index: %s', index)

    rv_json = json.dumps({})
    with SCHEMA_CACHE.lock:
        if SCHEMA_CACHE.contains(index):
            LOGGER.info('BaseAnchor.get_schema: got schema %s from cache', index)
            rv_json = SCHEMA_CACHE[index]  # cache holds parsed dicts; serialize on return
            LOGGER.debug('BaseAnchor.get_schema <<< %s', rv_json)
            return json.dumps(rv_json)

        if isinstance(index, SchemaKey) or (isinstance(index, str) and ok_schema_id(index)):
            s_id = schema_id(*index) if isinstance(index, SchemaKey) else index
            s_key = schema_key(s_id)
            req_json = await ledger.build_get_schema_request(self.did, s_id)
            resp_json = await self._submit(req_json)
            resp = json.loads(resp_json)

            # Fix: a ledger reply may carry "data": null for an absent schema;
            # (… or {}) guards the chained .get() against AttributeError on None.
            if not ('result' in resp and (resp['result'].get('data') or {}).get('attr_names', None)):
                LOGGER.debug('BaseAnchor.get_schema <!< no schema exists on %s', index)
                raise AbsentSchema('No schema exists on {}'.format(index))
            try:
                (_, rv_json) = await ledger.parse_get_schema_response(resp_json)
            except IndyError:  # ledger replied, but there is no such schema
                LOGGER.debug('BaseAnchor.get_schema <!< no schema exists on %s', index)
                raise AbsentSchema('No schema exists on {}'.format(index))
            SCHEMA_CACHE[s_key] = json.loads(rv_json)  # cache indexes by both txn# and schema key en passant
            LOGGER.info('BaseAnchor.get_schema: got schema %s from ledger', index)

        elif isinstance(index, (int, str)):  # index is not a schema id: it's a stringified int txn# if it's a str
            txn_json = await self.get_txn(int(index))  # raises AbsentPool if anchor has no pool
            txn = json.loads(txn_json)
            if txn.get('type', None) == '101':  # {} for no such txn; 101 marks indy-sdk schema txn type
                rv_json = await self.get_schema(self.pool.protocol.txn_data2schema_key(txn))
            else:
                LOGGER.debug('BaseAnchor.get_schema <!< no schema at seq #%s on ledger', index)
                raise AbsentSchema('No schema at seq #{} on ledger'.format(index))

        else:
            LOGGER.debug('BaseAnchor.get_schema <!< bad schema index type')
            raise AbsentSchema('Attempt to get schema on ({}) {} , must use schema key or an int'.format(
                type(index),
                index))

    LOGGER.debug('BaseAnchor.get_schema <<< %s', rv_json)
    return rv_json
async def send_schema(self, schema_data_json: str) -> str:
    """
    Send schema to ledger, then retrieve it as written to the ledger and return it.
    Raise BadLedgerTxn on failure. If schema already exists on ledger, log error and return schema.

    :param schema_data_json: schema data json with name, version, attribute names; e.g.,

    ::

        {
            'name': 'my-schema',
            'version': '1.234',
            'attr_names': ['favourite_drink', 'height', 'last_visit_date']
        }

    :return: schema json as written to ledger (or existed a priori)
    """

    LOGGER.debug('Origin.send_schema >>> schema_data_json: %s', schema_data_json)

    schema_data = json.loads(schema_data_json)
    s_id = schema_id(self.did, schema_data['name'], schema_data['version'])
    s_key = schema_key(s_id)

    rv_json = None
    with SCHEMA_CACHE.lock:
        try:
            # Existing schema on ledger short-circuits the send
            rv_json = await self.get_schema(s_key)
            LOGGER.error(
                'Schema %s version %s already exists on ledger for origin-did %s: not sending',
                schema_data['name'],
                schema_data['version'],
                self.did)
        except AbsentSchema:  # OK - about to create and send it
            (_, schema_json) = await anoncreds.issuer_create_schema(
                self.did,
                schema_data['name'],
                schema_data['version'],
                json.dumps(schema_data['attr_names']))
            req_json = await ledger.build_schema_request(self.did, schema_json)
            await self._sign_submit(req_json)

            # Poll until the freshly sent schema appears on the ledger
            for _ in range(16):  # reasonable timeout
                try:
                    rv_json = await self.get_schema(s_key)  # adds to cache
                    break
                except AbsentSchema:
                    await sleep(1)
                    LOGGER.info('Sent schema %s to ledger, waiting 1s for its appearance', s_id)

            if not rv_json:
                LOGGER.debug('Origin.send_schema <!< timed out waiting on sent schema %s', s_id)
                raise BadLedgerTxn('Timed out waiting on sent schema {}'.format(s_id))

    LOGGER.debug('Origin.send_schema <<< %s', rv_json)
    return rv_json
async def _resolve_schema(self, schema_name: str, schema_version: str, origin_did: str) -> ResolvedSchema:
    """
    Resolve a schema defined by one of our issuers
    """
    fallback_agent = None
    for agent_id, agent in self._agents.items():
        if not agent.synced:
            continue
        found = agent.find_credential_type(schema_name, schema_version, origin_did)
        if found:
            defn = found["definition"]
            did = defn.origin_did or agent.did
            return ResolvedSchema(
                agent_id,
                schema_id(did, defn.name, defn.version),
                defn.name,
                defn.version,
                did,
                defn.attr_names,
            )
        fallback_agent = agent  # remember a synced agent for a ledger lookup

    # Nothing local: try the ledger through any synced agent
    if schema_name and schema_version and origin_did and fallback_agent:
        s_id = schema_id(origin_did, schema_name, schema_version)
        try:
            schema_json = await fallback_agent.instance.get_schema(schema_key(s_id))
            ledger_schema = json.loads(schema_json)
            log_json("Schema found on ledger:", ledger_schema, LOGGER)
            return ResolvedSchema(
                None,
                s_id,
                schema_name,
                schema_version,
                origin_did,
                ledger_schema["attrNames"],
            )
        except AbsentSchema:
            pass

    raise IndyConfigError("Issuer schema not found: {}/{}".format(
        schema_name, schema_version))
async def test_schema_cache():
    """Exercise schema cache: dual indexing (key and seq no), miss behavior, clear/feed round-trip."""
    print(Ink.YELLOW('\n\n== Testing Schema Cache =='))
    N = 32
    s_key = []
    schema = []
    for i in range(N):
        s_key.append(SchemaKey(
            'Q4zqM7aXqm7gDQkUVLng{:02d}'.format(i).replace('0', 'Q'),
            'schema-{}'.format(i // 5),
            '{}'.format(i % 5)))
        schema.append({
            'id': schema_id(*s_key[i]),
            'name': s_key[i].name,  # fix: was s_key[i].version, clobbering the name field
            'version': s_key[i].version,
            'seqNo': i,
            'attrNames': ['attr-{}-{}'.format(i, j) for j in range(N)],
            'ver': '1.0'
        })

    # Populate alternately by schema key and by sequence number
    for i in range(N):
        if i % 2:
            SCHEMA_CACHE[s_key[i]] = schema[i]
        else:
            SCHEMA_CACHE[schema[i]['seqNo']] = schema[i]

    # Both index flavours must resolve to the same entry
    for i in range(N):
        assert SCHEMA_CACHE.contains(s_key[i])
        assert SCHEMA_CACHE.contains(schema[i]['seqNo'])
        assert SCHEMA_CACHE[s_key[i]] == SCHEMA_CACHE[schema[i]['seqNo']]

    assert len(SCHEMA_CACHE.index()) == N
    assert not SCHEMA_CACHE.contains(-1)

    # Absent index must raise CacheIndex
    try:
        SCHEMA_CACHE[-1]
        assert False, 'CacheIndex exception expected on absent index'  # fix: no-raise formerly passed silently
    except CacheIndex:
        pass

    # Exercise cache clearing and feeding
    cached = SCHEMA_CACHE.schemata()
    assert SCHEMA_CACHE.schemata()
    cached_json = json.dumps(cached)
    SCHEMA_CACHE.clear()
    assert not SCHEMA_CACHE.schemata()
    SCHEMA_CACHE.feed(json.loads(cached_json))
    assert len(SCHEMA_CACHE.schemata()) == len(cached)
async def test_von_tails(pool_ip, genesis_txn_file, path_cli_ini, cli_ini, path_setnym_ini, setnym_ini):
    """End-to-end tails server exercise: start server, issue creds, sync up/down, delete, multisync."""
    print(Ink.YELLOW('\n\n== Testing tails server vs. IP {} =='.format(pool_ip)))

    # Set config for tails clients
    config = {}
    idx = 0
    for profile in path_cli_ini:
        config[profile] = inis2dict(str(path_cli_ini[profile]))
        with open(path_cli_ini[profile], 'r') as ini_fh:
            print('\n\n== 0.{} == {} tails sync configuration:\n{}'.format(idx, profile, ini_fh.read()))
        idx += 1

    # Start tails server
    print('\n\n== 1 == Starting tails server on port {}'.format(config['issuer']['Tails Server']['port']))
    tsrv = TailsServer(config['issuer']['Tails Server']['port'])
    if not tsrv.start():
        print('\n\n== X == Server already running - stop it to run test from scratch')
        assert False
    assert tsrv.is_up()
    print('\n\n== 2 == Started tails server, docker-compose port-forwarded via localhost:{}'.format(tsrv.port))
    atexit.register(shutdown)

    # Set nyms (operation creates pool if need be)
    idx = 0
    setnym_config = {}
    for profile in path_setnym_ini:
        parsed = inis2dict(str(path_setnym_ini[profile]))
        if profile == 'admin':  # tails server anchor on ledger a priori
            continue
        setnym_config[profile] = parsed
        with open(path_setnym_ini[profile], 'r') as ini_fh:
            print('\n\n== 3.{} == {} setnym configuration:\n{}'.format(idx, profile, ini_fh.read()))
        completed = subprocess.run(
            ['von_anchor_setnym', str(path_setnym_ini[profile])],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL)
        assert not completed.returncode
        idx += 1
    print('\n\n== 4 == Setnym ops completed OK')

    wallets = await get_wallets(
        {
            **{profile: setnym_config[profile]['VON Anchor'] for profile in setnym_config},
            'admin': config['admin']['VON Anchor']
        },
        open_all=False)

    # Open pool and anchors, issue creds to create tails files
    async with wallets['issuer'] as w_issuer, (
            wallets['prover']) as w_prover, (
            NodePool(config['issuer']['Node Pool']['name'])) as pool, (
            RegistrarAnchor(w_issuer, pool)) as ian, (
            OrgBookAnchor(w_prover, pool)) as pan:

        # Get nyms from ledger for display
        idx = 0
        for anchor in (ian, pan):
            print('\n\n== 5.{} == {} nym on ledger: {}'.format(
                idx, anchor.wallet.name, ppjson(await anchor.get_nym())))
            idx += 1

        # Publish schema to ledger
        S_ID = schema_id(ian.did, 'rainbow', '{}.0'.format(int(time())))
        schema_data = {
            'name': schema_key(S_ID).name,
            'version': schema_key(S_ID).version,
            'attr_names': ['numeric', 'sha256']
        }

        S_KEY = schema_key(S_ID)
        try:
            await ian.get_schema(S_KEY)  # may exist (almost certainly not)
        except AbsentSchema:
            await ian.send_schema(json.dumps(schema_data))
        schema_json = await ian.get_schema(S_KEY)
        schema = json.loads(schema_json)
        print('\n\n== 6 == SCHEMA [{} v{}]: {}'.format(S_KEY.name, S_KEY.version, ppjson(schema)))
        assert schema  # should exist now

        # Setup link secret for creation of cred req or proof
        await pan.create_link_secret('LinkSecret')

        # Issuer anchor create, store, publish cred definitions to ledger; create cred offers
        await ian.send_cred_def(S_ID, revo=True)
        cd_id = cred_def_id(S_KEY.origin_did, schema['seqNo'], pool.protocol)

        assert ((not Tails.unlinked(ian.dir_tails)) and
            [f for f in Tails.links(ian.dir_tails, ian.did) if cd_id in f])

        cred_def_json = await ian.get_cred_def(cd_id)  # ought to exist now
        cred_def = json.loads(cred_def_json)
        print('\n\n== 7.0 == Cred def [{} v{}]: {}'.format(
            S_KEY.name, S_KEY.version, ppjson(json.loads(cred_def_json))))
        assert cred_def.get('schemaId', None) == str(schema['seqNo'])

        cred_offer_json = await ian.create_cred_offer(schema['seqNo'])
        cred_offer = json.loads(cred_offer_json)
        print('\n\n== 7.1 == Credential offer [{} v{}]: {}'.format(
            S_KEY.name, S_KEY.version, ppjson(cred_offer_json)))

        (cred_req_json, cred_req_metadata_json) = await pan.create_cred_req(cred_offer_json, cd_id)
        cred_req = json.loads(cred_req_json)
        print('\n\n== 8 == Credential request [{} v{}]: metadata {}, cred {}'.format(
            S_KEY.name, S_KEY.version, ppjson(cred_req_metadata_json), ppjson(cred_req_json)))
        assert json.loads(cred_req_json)

        # Issuer anchor issues creds and stores at HolderProver: get cred req, create cred, store cred
        cred_data = []

        CREDS = 450  # enough to build 4 rev regs
        print('\n\n== 9 == creating and storing {} credentials:'.format(CREDS))
        for number in range(CREDS):
            (cred_json, _) = await ian.create_cred(
                cred_offer_json,
                cred_req_json,
                {
                    'numeric': str(number),
                    'sha256': sha256(str(number).encode()).hexdigest(),
                })
            cred_id = await pan.store_cred(cred_json, cred_req_metadata_json)
            print('.', end='' if (number + 1) % 100 else '{}\n'.format(number + 1), flush=True)

        # Exercise list view, least to most specific
        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            resp = requests.get(url)
            assert resp.status_code == 200
            assert not resp.json()
        rr_ids_up = {basename(link) for link in Tails.links(ian.dir_tails, ian.did)}
        for rr_id in rr_ids_up:
            url = url_for(tsrv.port, 'tails/list/{}'.format(rr_id))
            resp = requests.get(url)
            assert resp.status_code == 200
            assert not resp.json()
        print('\n\n== 10 == All listing views at server come back OK and empty as expected')

        out = pexpect.run('python ../src/sync/sync.py {}'.format(path_cli_ini['issuer']))
        print('\n\n== 11 == Issuer sync uploaded local tails files')

        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            resp = requests.get(url)
            assert resp.status_code == 200
            assert {rr for rr in resp.json()} == rr_ids_up
        for rr_id in rr_ids_up:
            url = url_for(tsrv.port, 'tails/list/{}'.format(rr_id))
            resp = requests.get(url)
            assert resp.status_code == 200
            assert resp.json() == [rr_id]  # list with one rr_id should come back

        # Exercise list view, least to most specific
        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            resp = requests.get(url)
            assert resp.status_code == 200
            assert len(resp.json()) == len(rr_ids_up)
        print('\n\n== 12 == All listing views at server come back OK with {} uploaded files'.format(len(rr_ids_up)))

        out = pexpect.run('python ../src/sync/sync.py {}'.format(path_cli_ini['prover']))
        print('\n\n== 13 == Prover sync downloaded remote tails files')
        rr_ids_down = {
            basename(link)
            for link in Tails.links(config['prover']['Tails Client']['tails.dir'], ian.did)
        }
        assert rr_ids_down == rr_ids_up

        # Exercise admin-delete
        out = pexpect.run('python ../src/admin/delete.py {} all'.format(path_cli_ini['admin']))
        print('\n\n== 14 == Admin called for deletion at tails server')

        # Check tails server deletion
        url = url_for(tsrv.port, 'tails/list/all')
        resp = requests.get(url)
        assert resp.status_code == 200
        assert not resp.json()
        print('\n\n== 15 == All listing views at server come back OK and empty as expected')

        out = pexpect.run('python ../src/sync/multisync.py 1 {}'.format(path_cli_ini['issuer']))
        print('\n\n== 16 == Issuer multisync on 1 sync iteration uploaded local tails files')

        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            resp = requests.get(url)
            assert resp.status_code == 200
            assert {rr for rr in resp.json()} == rr_ids_up
        for rr_id in rr_ids_up:
            url = url_for(tsrv.port, 'tails/list/{}'.format(rr_id))
            resp = requests.get(url)
            assert resp.status_code == 200
            assert resp.json() == [rr_id]  # list with one rr_id should come back

        # Exercise list view, least to most specific
        for tails_list_path in ('all', ian.did, cd_id):
            url = url_for(tsrv.port, 'tails/list/{}'.format(tails_list_path))
            resp = requests.get(url)
            assert resp.status_code == 200
            assert len(resp.json()) == len(rr_ids_up)
        print('\n\n== 17 == All listing views at server come back OK with {} uploaded files'.format(len(rr_ids_up)))

        # Remove tails server anchor wallet
        await wallets['admin'].remove()
        print('\n\n== 18 == Removed admin (tails server anchor {}) wallet'.format(wallets['admin'].name))
async def test_anchors_tails_load(pool_name, pool_genesis_txn_data, seed_trustee1):
    """Load-test tails handling: issue thousands of revocable creds, tracking per-rev-reg timings."""
    rrbx = True
    print(Ink.YELLOW('\n\n== Load-testing tails on {}ternal rev reg builder ==').format("ex" if rrbx else "in"))

    await RevRegBuilder.stop(WALLET_NAME)  # in case of re-run

    # Set up node pool ledger config and wallets, open pool, init anchors
    pool_mgr = NodePoolManager()
    if pool_name not in await pool_mgr.list():
        await pool_mgr.add_config(pool_name, pool_genesis_txn_data)
    pool = pool_mgr.get(pool_name)
    await pool.open()

    wallet_mgr = WalletManager()
    wallets = {
        'trustee-anchor': {
            'seed': seed_trustee1,
            'storage_type': None,
            'config': None,
            'access_creds': None
        },
        WALLET_NAME: {
            'seed': 'Superstar-Anchor-000000000000000',
            'storage_type': None,
            'config': None,
            'access_creds': {'key': 'rrbx-test'}
        }
    }
    for (name, entry) in wallets.items():
        try:
            entry['wallet'] = await wallet_mgr.create({'id': name, 'seed': entry['seed']})
        except ExtantWallet:
            entry['wallet'] = wallet_mgr.get({'id': name})
        finally:
            await entry['wallet'].open()

    tan = TrusteeAnchor(wallets['trustee-anchor']['wallet'], pool)
    no_prox = rrbx_prox()
    san = OrgHubAnchor(wallets[WALLET_NAME]['wallet'], pool, rrbx=rrbx)
    if rrbx:
        await beep('external rev reg builder process on {}'.format(WALLET_NAME), 15)
        if rrbx_prox() != no_prox + 1:
            await RevRegBuilder.stop(WALLET_NAME)
            assert False, "External rev reg builder process did not start"
        async with OrgHubAnchor(wallets[WALLET_NAME]['wallet'], pool, rrbx=rrbx):
            # check for exactly 1 external rev reg builder process
            await beep('external rev reg builder process uniqueness test on {}'.format(WALLET_NAME), 5)
            if rrbx_prox() != no_prox + 1:
                await RevRegBuilder.stop(WALLET_NAME)
                assert False, "External rev reg builder process was not unique"

    assert pool.handle

    await tan.open()
    await san.open()

    # Publish anchor particulars to ledger if not yet present
    for anchor in (tan, san):
        if not json.loads(await tan.get_nym(anchor.did)):
            await tan.send_nym(anchor.did, anchor.verkey, anchor.wallet.name, anchor.least_role())

    nyms = {
        'tan': json.loads(await tan.get_nym(tan.did)),
        'san': json.loads(await tan.get_nym(san.did))
    }
    print('\n\n== 1 == nyms: {}'.format(ppjson(nyms)))
    for moniker in nyms:
        assert 'dest' in nyms[moniker]

    # Publish schema to ledger if not yet present; get from ledger
    S_ID = schema_id(san.did, 'tails_load', '{}.0'.format(int(time.time())))
    S_KEY = schema_key(S_ID)
    schema_data = {
        'name': schema_key(S_ID).name,
        'version': schema_key(S_ID).version,
        'attr_names': ['number', 'remainder']
    }

    try:
        await san.get_schema(S_KEY)  # may exist (almost certainly not)
    except AbsentSchema:
        await san.send_schema(json.dumps(schema_data))
    schema_json = await san.get_schema(S_KEY)
    schema = json.loads(schema_json)
    assert schema  # should exist now
    print('\n\n== 2 == SCHEMA [{} v{}]: {}'.format(S_KEY.name, S_KEY.version, ppjson(schema)))

    # Setup link secret for creation of cred req or proof
    await san.create_link_secret('LinkSecret')

    # SRI anchor create, store, publish cred definitions to ledger; create cred offers
    await san.send_cred_def(S_ID, revo=True)
    cd_id = cred_def_id(S_KEY.origin_did, schema['seqNo'], pool.protocol)

    assert ((not Tails.unlinked(san.dir_tails)) and
        [f for f in Tails.links(san.dir_tails, san.did) if cd_id in f])

    cred_def_json = await san.get_cred_def(cd_id)  # ought to exist now
    cred_def = json.loads(cred_def_json)
    print('\n\n== 3.0 == Cred def [{} v{}]: {}'.format(
        S_KEY.name, S_KEY.version, ppjson(json.loads(cred_def_json))))
    assert cred_def.get('schemaId', None) == str(schema['seqNo'])

    cred_offer_json = await san.create_cred_offer(schema['seqNo'])
    print('\n\n== 3.1 == Credential offer [{} v{}]: {}'.format(
        S_KEY.name, S_KEY.version, ppjson(cred_offer_json)))
    (cred_req_json, cred_req_metadata_json) = await san.create_cred_req(cred_offer_json, cd_id)
    print('\n\n== 4 == Credential request [{} v{}]: metadata {}, cred-req {}'.format(
        S_KEY.name, S_KEY.version, ppjson(cred_req_metadata_json), ppjson(cred_req_json)))
    assert json.loads(cred_req_json)

    # BC Reg anchor (as Issuer) issues creds and stores at HolderProver: get cred req, create cred, store cred
    CREDS = 4034  # enough to kick off rev reg on size 4096 and issue two creds in it: 1 needing set-rev-reg, 1 not
    print('\n\n== 5 == creating {} credentials'.format(CREDS))
    stopwatch = Stopwatch(2)
    optima = {}  # per rev-reg, fastest/slowest pairs
    for number in range(CREDS):
        stopwatch.mark()
        (cred_json, _) = await san.create_cred(
            cred_offer_json,
            cred_req_json,
            {
                'number': str(number),
                'remainder': str(number % 100)
            })
        elapsed = stopwatch.mark()
        tag = rev_reg_id2tag(Tails.current_rev_reg_id(san.dir_tails, cd_id))
        if tag not in optima:
            optima[tag] = (elapsed, elapsed)
        else:
            optima[tag] = (min(optima[tag][0], elapsed), max(optima[tag][1], elapsed))
        print('.', end='', flush=True)
        if ((number + 1) % 100) == 0:
            print('{}: #{}: {:.2f}-{:.2f}s'.format(number + 1, tag, *optima[tag]), flush=True)
        assert json.loads(cred_json)
    print('{}: #{}: {:.2f}-{:.2f}s'.format(number + 1, tag, *optima[tag]), flush=True)

    print('\n\n== 6 == best, worst times by revocation registry: {}'.format(ppjson(optima)))
    assert (not rrbx) or (max(optima[tag][1] for tag in optima) <
        4 * min(optima[tag][1] for tag in optima if int(tag) > 0))  # if waiting on rr beyond #0, sizes increase as 2^n

    await san.close()
    if rrbx:
        await RevRegBuilder.stop(WALLET_NAME)
    await tan.close()
    for (name, entry) in wallets.items():
        await entry['wallet'].close()
    await pool.close()
def schema_id(self) -> str:
    """
    Accessor for the schema_id of this schema
    """
    # Delegate to the module-level schema_id() builder over this schema's identifying triple
    triple = (self.origin_did, self.name, self.version)
    return schema_id(*triple)
async def _publish_schema(self, issuer: AgentCfg, cred_type: dict) -> None:
    """
    Check the ledger for a specific schema and version, and publish it if not found.
    Also publish the related credential definition if not found

    Args:
        issuer: the initialized and opened issuer instance publishing the schema
        cred_type: a dict which will be updated with the published schema and credential def
    """
    if not cred_type or "definition" not in cred_type:
        raise IndyConfigError("Missing schema definition")
    definition = cred_type["definition"]
    s_id = schema_id(issuer.did, definition.name, definition.version)

    if not cred_type.get("ledger_schema"):
        LOGGER.info(
            "Checking for schema: %s (%s)",
            definition.name,
            definition.version,
        )
        # Check if schema exists on ledger
        try:
            s_key = schema_key(s_id)
            schema_json = await issuer.instance.get_schema(s_key)
            ledger_schema = json.loads(schema_json)
            log_json("Schema found on ledger:", ledger_schema, LOGGER)
            if sorted(ledger_schema["attrNames"]) != sorted(definition.attr_names):
                # Fix: lazy %-style args belong to logging calls only; an exception
                # constructor needs a pre-formatted message, not a (fmt, arg) pair.
                raise IndyConfigError(
                    "Ledger schema attributes do not match definition, found: {}".format(
                        ledger_schema["attrNames"]))
        except AbsentSchema:
            # If not found, send the schema to the ledger
            LOGGER.info(
                "Publishing schema: %s (%s)",
                definition.name,
                definition.version,
            )
            schema_json = await issuer.instance.send_schema(
                json.dumps({
                    "name": definition.name,
                    "version": definition.version,
                    "attr_names": definition.attr_names,
                }))
            ledger_schema = json.loads(schema_json)
            if not ledger_schema or not ledger_schema.get("seqNo"):
                raise ServiceSyncError("Schema was not published to ledger")
            log_json("Published schema:", ledger_schema, LOGGER)
        cred_type["ledger_schema"] = ledger_schema

    if not cred_type.get("cred_def"):
        # Check if credential definition has been published
        LOGGER.info(
            "Checking for credential def: %s (%s)",
            definition.name,
            definition.version,
        )
        try:
            cred_def_json = await issuer.instance.get_cred_def(
                cred_def_id(issuer.did, cred_type["ledger_schema"]["seqNo"], self._pool.protocol))
            cred_def = json.loads(cred_def_json)
            log_json("Credential def found on ledger:", cred_def, LOGGER)
        except AbsentCredDef:
            # If credential definition is not found then publish it
            LOGGER.info(
                "Publishing credential def: %s (%s)",
                definition.name,
                definition.version,
            )
            cred_def_json = await issuer.instance.send_cred_def(s_id, revocation=False)
            cred_def = json.loads(cred_def_json)
            log_json("Published credential def:", cred_def, LOGGER)
        cred_type["cred_def"] = cred_def