def main():
    """CLI entry point: convert a WebVTT file to RDF (JSON-LD or N-Quads).

    Reads WebVTT from --input (default stdin), builds a JSON-LD graph for
    the given --video URL via webvtt2jsonld, and writes it to stdout either
    as indented JSON-LD or as N-Quads.
    """
    from argparse import ArgumentParser, FileType
    from json import dump
    from os import getcwd, path
    from sys import stdin, stdout
    from urllib.request import pathname2url  # moved here from `urllib` in Python 3

    from webvtt import parse
    from pyld.jsonld import JsonLdProcessor

    aparser = ArgumentParser(description="webvtt2rdf converter")
    # Python 2's `type=file` no longer exists; argparse.FileType is the
    # supported way to get an open file handle from a path argument.
    aparser.add_argument("-i", "--input", type=FileType("r"), default=stdin)
    aparser.add_argument("-f", "--flatten", action="store_true")
    aparser.add_argument("-v", "--video", type=str,
                         default="http://example.org/video.mp4")
    aparser.add_argument("-F", "--format", choices=["json-ld", "nquads"],
                         default="nquads")
    args = aparser.parse_args()

    # Base IRI for relative identifiers: a pseudo-scheme for stdin, a
    # file:// URL for a real input file.
    if args.input is stdin:
        base = "stdin:"
    else:
        base = "file://" + pathname2url(path.join(getcwd(), args.input.name))

    jsonld = webvtt2jsonld(parse(args.input), base, args.video,
                           flatten=args.flatten)

    if args.format == "json-ld":
        dump(jsonld, stdout, indent=4)
        print("\n")
    else:
        proc = JsonLdProcessor()
        print(proc.to_nquads(proc.to_rdf(jsonld, None)))
def _assert_verification_method(self, verification_method: dict):
    """Raise if the verification method is not of the required key type."""
    has_required_type = JsonLdProcessor.has_value(
        verification_method, "type", self.required_key_type
    )
    if not has_required_type:
        raise LinkedDataProofException(
            f"Invalid key type. The key type must be {self.required_key_type}"
        )
def create_vcrecord(self, cred_dict: dict) -> VCRecord:
    """Return VCRecord from a credential dict."""
    # Proof(s): normalize a lone dict into a one-element list before
    # collecting the proof types; no proofs at all leaves proof_types None.
    raw_proofs = cred_dict.get("proof") or []
    if type(raw_proofs) is dict:
        raw_proofs = [raw_proofs]
    proof_types = [p.get("type") for p in raw_proofs] if raw_proofs else None

    # Only string-valued @context entries (context URLs) are recorded.
    contexts = [c for c in cred_dict.get("@context") if type(c) is str]

    # If an enclosed @graph node carries the credential subject under a
    # urn: id, fold it into the top level (mutates cred_dict in place).
    if "@graph" in cred_dict:
        for node in cred_dict.get("@graph"):
            if node["id"].startswith("urn:") and "credentialSubject" in node:
                cred_dict.update(node)
                del cred_dict["@graph"]
                break

    # Derived credential ids get replaced with a fresh UUID.
    given_id = cred_dict.get("id")
    if given_id and self.check_if_cred_id_derived(given_id):
        given_id = str(uuid4())

    # Issuer may be a plain id string or an object with an "id" member.
    issuer = cred_dict.get("issuer")
    if type(issuer) is dict:
        issuer = issuer.get("id")

    # Subject ids; an absent credentialSubject is backfilled with an empty
    # dict (mutates cred_dict in place).
    subject_ids = None
    subjects = cred_dict.get("credentialSubject")
    if subjects:
        if type(subjects) is dict:
            subjects = [subjects]
        subject_ids = [s.get("id") for s in subjects if "id" in s]
    else:
        cred_dict["credentialSubject"] = {}

    # Schemas: same lone-dict normalization as proofs.
    schemas = cred_dict.get("credentialSchema", [])
    if type(schemas) is dict:
        schemas = [schemas]
    schema_ids = [s.get("id") for s in schemas]

    # Expanded @type values come from the JSON-LD expansion of the document.
    expanded = jsonld.expand(cred_dict)
    types = JsonLdProcessor.get_values(expanded[0], "@type")

    return VCRecord(
        contexts=contexts,
        expanded_types=types,
        issuer_id=issuer,
        subject_ids=subject_ids,
        proof_types=proof_types,
        given_id=given_id,
        cred_value=cred_dict,
        schema_ids=schema_ids,
    )
def normalize_jsonld(json_ld_to_normalize, document_loader=preloaded_context_document_loader,
                     detect_unmapped_fields=False):
    """
    Canonicalize the JSON-LD certificate.

    The detect_unmapped_fields parameter is a temporary, incomplete, workaround to detecting
    fields that do not correspond to items in the JSON-LD schemas. It works in the Blockcerts
    context because:
    - Blockcerts doesn't use a default vocab
    - fallback.org is not expected to occur

    Because unmapped fields get dropped during canonicalization, this uses a trick of adding
    {"@vocab": "http://fallback.org/"} to the json ld, which will cause any unmapped fields
    to be prefixed with http://fallback.org/.

    If a @vocab is already there (i.e. an issuer adds this in their extensions), then
    tampering will change the normalized form, hence the hash of the certificate, so we will
    still detect this during verification.

    This issue will be addressed in a first-class manner in the future by the pyld library.

    :param json_ld_to_normalize:
    :param document_loader
    :param detect_unmapped_fields:
    :return:
    """
    json_ld = json_ld_to_normalize
    options = deepcopy(JSONLD_OPTIONS)
    if document_loader:
        options['documentLoader'] = document_loader

    if detect_unmapped_fields:
        # Work on a copy so the caller's document is not mutated.
        json_ld = deepcopy(json_ld_to_normalize)
        prev_context = JsonLdProcessor.get_values(json_ld_to_normalize, '@context')
        # Only append the fallback vocab if no existing context entry
        # already declares a @vocab (an issuer-supplied vocab means
        # tampering will change the hash anyway).
        add_fallback = not any(
            type(pc) is dict and '@vocab' in pc for pc in prev_context
        )
        if add_fallback:
            prev_context.append(FALLBACK_CONTEXT)
        json_ld['@context'] = prev_context

    normalized = jsonld.normalize(json_ld, options=options)

    if detect_unmapped_fields and FALLBACK_VOCAB in normalized:
        # Raw string: '\.' in a plain literal is an invalid escape sequence
        # (SyntaxWarning on modern Python).
        unmapped_fields = [
            m.group(0)
            for m in re.finditer(r'<http://fallback\.org/(.*)>', normalized)
        ]
        error_string = ', '.join(unmapped_fields)
        raise BlockcertValidationError(
            'There are some fields in the certificate that do not correspond to the '
            'expected schema. This has likely been tampered with. Unmapped fields are: '
            + error_string)
    return normalized
async def _verify_presentation(
    *,
    presentation: dict,
    suites: List[LinkedDataProof],
    document_loader: DocumentLoaderMethod,
    challenge: str = None,
    domain: str = None,
    purpose: ProofPurpose = None,
):
    """Verify presentation structure, credentials, proof purpose and signature.

    Args:
        presentation (dict): JSON-LD verifiable presentation to verify.
        suites (List[LinkedDataProof]): Signature suites to verify with.
        document_loader (DocumentLoaderMethod): Document loader to use.
        challenge (str, optional): Required when no purpose is supplied.
        domain (str, optional): Domain for the default authentication purpose.
        purpose (ProofPurpose, optional): Proof purpose; defaults to
            AuthenticationProofPurpose built from challenge/domain.

    Returns:
        PresentationVerificationResult: aggregate result for the presentation
        proof and every enclosed credential.
    """
    # A default AuthenticationProofPurpose needs a challenge.
    if not purpose and not challenge:
        raise LinkedDataProofException(
            'A "challenge" param is required for AuthenticationProofPurpose.')
    elif not purpose:
        purpose = AuthenticationProofPurpose(challenge=challenge, domain=domain)

    # TODO validate presentation structure here
    if "proof" not in presentation:
        raise LinkedDataProofException('presentation must contain "proof"')

    presentation_result = await ld_proofs_verify(
        document=presentation,
        suites=suites,
        purpose=purpose,
        document_loader=document_loader,
    )

    # Verify every enclosed credential concurrently.
    credentials = JsonLdProcessor.get_values(presentation, "verifiableCredential")
    credential_results = await asyncio.gather(*[
        verify_credential(
            credential=credential,
            suites=suites,
            document_loader=document_loader,
            # FIXME: we don't want to inherit the authentication purpose
            # from the presentation. However we do want to have subject
            # authentication I guess
            # purpose=purpose,
        )
        for credential in credentials
    ])

    verified = all([result.verified for result in credential_results])

    return PresentationVerificationResult(
        verified=verified,
        presentation_result=presentation_result,
        credential_results=credential_results,
        errors=presentation_result.errors,
    )
async def add(
    *,
    document: dict,
    suite: LinkedDataProof,
    purpose: ProofPurpose,
    document_loader: DocumentLoaderMethod,
) -> dict:
    """Add a Linked Data proof to the document.

    If the document contains other proofs, the proof will be appended
    to the existing set of proofs.

    Important note: This method assumes that the term `proof` in the given
    document has the same definition as the `https://w3id.org/security/v2`
    JSON-LD @context.

    Args:
        document (dict): JSON-LD document to be signed.
        suite (LinkedDataProof): A signature suite instance that will create the proof
        purpose (ProofPurpose): A proof purpose instance that will augment the proof
            with information describing its intended purpose.
        document_loader (DocumentLoader): Document loader to use.

    Returns:
        dict: The signed document, with the signature in the top-level
            `proof` property.

    """
    # Shallow copy document to allow removal of existing proofs.
    # (Renamed from `input`, which shadowed the builtin.)
    unsigned = document.copy()
    unsigned.pop("proof", None)

    # create the new proof, suites MUST output a proof using security-v2 `@context`
    proof = await suite.create_proof(
        document=unsigned, purpose=purpose, document_loader=document_loader
    )

    JsonLdProcessor.add_value(document, "proof", proof)
    return document
def validate(
    self,
    *,
    proof: dict,
    document: dict,
    suite: LinkedDataProof,
    verification_method: dict,
    document_loader: DocumentLoaderMethod,
) -> PurposeResult:
    """Validate if the issuer matches the controller of the verification method."""
    try:
        result = super().validate(
            proof=proof,
            document=document,
            suite=suite,
            verification_method=verification_method,
            document_loader=document_loader,
        )

        # Nothing more to check when the base validation already failed.
        if not result.valid:
            return result

        # FIXME: Other implementations don't expand, but
        # if we don't expand we can't get the property using
        # the full CREDENTIALS_ISSUER_URL.
        [expanded] = jsonld.expand(document, {"documentLoader": document_loader})
        issuer: List[dict] = JsonLdProcessor.get_values(
            expanded, CREDENTIALS_ISSUER_URL
        )

        if not issuer:
            raise LinkedDataProofException("Credential issuer is required.")

        if result.controller.get("id") != issuer[0].get("@id"):
            raise LinkedDataProofException(
                "Credential issuer must match the verification method controller."
            )

        return result
    except Exception as e:
        return PurposeResult(valid=False, error=e)
async def _get_proofs(document: dict, proof_types: Union[List[str], None] = None) -> list:
    """Get proof set from document, optionally filtered by proof_types."""
    proofs = JsonLdProcessor.get_values(document, "proof")

    # Keep only the requested proof types when a filter is given.
    if proof_types:
        proofs = [p for p in proofs if p["type"] in proof_types]

    if not proofs:
        raise LinkedDataProofException(
            "No matching proofs found in the given document")

    # Shallow copy each proof, injecting the document context
    # (or SECURITY_CONTEXT_URL when the document has none).
    context = document.get("@context") or SECURITY_CONTEXT_URL
    return [{"@context": context, **p} for p in proofs]
def test_resolve_context_imports():
    """Resolve an @import context through ContextResolver and through pyld directly."""
    ctx = {
        '@import': 'https://prefix.cc/context',
    }

    # Resolution via the shared-cache ContextResolver.
    via_resolver = ContextResolver(
        shared_cache={},
        document_loader=_default_document_loader,
    ).resolve(
        active_ctx={},
        context=ctx,
        base='test:',
    )

    # Resolution via JsonLdProcessor's context processing.
    via_processor = JsonLdProcessor().process_context(
        active_ctx={
            'mappings': {},
        },
        local_ctx=ctx,
        options={},
    )

    # NOTE(review): raising here looks like a debugging aid to surface both
    # resolved contexts in the test output — confirm before relying on it.
    raise ValueError([via_resolver, via_processor])
def sign(to_sign, private_key, options, chain_name='mainnet'):
    """Sign a JSON-LD document with a Bitcoin message signature.

    Args:
        to_sign: JSON-LD document to sign.
        private_key: WIF-encoded Bitcoin private key.
        options: signing options carrying `creator` and `created`.
        chain_name: Bitcoin chain to select (default 'mainnet').

    Returns:
        A deep copy of the document with '@context' extended by the security
        context and a 'signature' member attached.
    """
    from copy import deepcopy

    # Use a distinct name for the working copy; the original code did
    # `import copy; copy = copy.deepcopy(...)`, shadowing the module.
    signed = deepcopy(to_sign)
    if 'signature' in signed:
        del signed['signature']

    # normalize and get data to hash
    # NOTE(review): this normalizes `to_sign` (which may still contain a
    # 'signature' member), not the stripped copy — confirm this is intended.
    normalized = normalize_jsonld(to_sign)
    to_hash = _getDataToHash(normalized, options=options)

    # TODO: obtain lock while modifying global state
    bitcoin.SelectParams(chain_name)
    message = BitcoinMessage(to_hash)
    secret_key = CBitcoinSecret(private_key)
    signature = SignMessage(secret_key, message)

    # compact just signature part against all contexts
    signature_payload = {
        '@context': SECURITY_CONTEXT_URL,
        'type': algorithm,
        'creator': options.creator,
        'created': options.created,
        'signatureValue': signature.decode('utf-8'),
    }
    tmp = {'https://w3id.org/security#signature': signature_payload}

    prev_contexts = JsonLdProcessor.get_values(to_sign, '@context')
    if SECURITY_CONTEXT_URL not in prev_contexts:
        prev_contexts.append(SECURITY_CONTEXT_URL)
    c = {'@context': prev_contexts}
    res = jsonld.compact(tmp, c, options={'documentLoader': cached_document_loader})

    signed['@context'] = prev_contexts
    signed['signature'] = res['signature']
    return signed
def validate(
    self,
    *,
    proof: dict,
    document: dict,
    suite: "LinkedDataProof",
    verification_method: dict,
    document_loader: DocumentLoaderMethod,
) -> PurposeResult:
    """Validate whether verification method of proof is authorized by controller."""
    try:
        # Base-class validation first (e.g. purpose/term matching).
        result = super().validate(
            proof=proof,
            document=document,
            suite=suite,
            verification_method=verification_method,
            document_loader=document_loader,
        )

        # Return early if super check was invalid
        if not result.valid:
            return result

        verification_id = verification_method.get("id")
        controller = verification_method.get("controller")

        # `controller` may be embedded as an object or referenced by id string.
        if isinstance(controller, dict):
            controller_id = controller.get("id")
        elif isinstance(controller, str):
            controller_id = controller
        else:
            raise LinkedDataProofException(
                '"controller" must be a string or dict')

        # Get the controller document, framed so that the proof-purpose term
        # (self.term) lists verification method ids without embedding them.
        result.controller = jsonld.frame(
            controller_id,
            frame={
                "@context": SECURITY_CONTEXT_URL,
                "id": controller_id,
                self.term: {
                    "@embed": "@never",
                    "id": verification_id
                },
            },
            options={
                "documentLoader": document_loader,
                "expandContext": SECURITY_CONTEXT_URL,
                # if we don't set base explicitly it will remove the base in returned
                # document (e.g. use key:z... instead of did:key:z...)
                # same as compactToRelative in jsonld.js
                "base": None,
            },
        )

        # Retrieve all verification methods on controller associated with term
        verification_methods = JsonLdProcessor.get_values(
            result.controller, self.term)

        # Check if any of the verification methods matches with the verification id
        result.valid = any(method == verification_id
                           for method in verification_methods)

        if not result.valid:
            raise LinkedDataProofException(
                f"Verification method {verification_id} not authorized"
                f" by controller for proof purpose {self.term}")

        return result
    except Exception as e:
        # Any failure is folded into an invalid PurposeResult rather than raised.
        return PurposeResult(valid=False, error=e)
def get_values(self, property):
    """Return all values of *property* on this document (delegates to pyld)."""
    values = JsonLdProcessor.get_values(self, property)
    return values
def has_property(self, property):
    """Return True if this document has *property* (delegates to pyld)."""
    present = JsonLdProcessor.has_property(self, property)
    return present
def has_value(self, property, value):
    """Return True if *property* on this document contains *value* (delegates to pyld)."""
    present = JsonLdProcessor.has_value(self, property, value)
    return present
async def derive(
    *,
    document: dict,
    reveal_document: dict,
    # TODO: I think this could support multiple suites?
    # But then, why do multiple proofs?
    suite: LinkedDataProof,
    document_loader: DocumentLoaderMethod,
    nonce: bytes = None,
) -> dict:
    """Create new derived Linked Data proof(s) on document using the reveal document.

    Important note: This method assumes that the term `proof` in the given
    document has the same definition as the `https://w3id.org/security/v2`
    JSON-LD @context. (v3 because BBS?)

    Args:
        document (dict): JSON-LD document with one or more proofs to be derived.
        reveal_document (dict): JSON-LD frame specifying the attributes to reveal.
        suite (LinkedDataProof): A signature suite instance to derive the proof.
        document_loader (DocumentLoader): Document loader to use.
        nonce (bytes, optional): Nonce to use for the proof. Defaults to None.

    Returns:
        dict: The derived document with the derived proof(s) in the top-level
            `proof` property.

    """
    # Shallow copy document to allow removal of existing proofs.
    # (Renamed from `input`, which shadowed the builtin.)
    working = document.copy()

    # Check if suite supports derivation
    if not suite.supported_derive_proof_types:
        raise LinkedDataProofException(
            f"{suite.signature_type} does not support derivation"
        )

    # Get proofs, remove proof from document
    proof_set = await ProofSet._get_proofs(
        document=working, proof_types=suite.supported_derive_proof_types
    )
    working.pop("proof", None)

    # Derive proof, remove context
    derived_proof = await suite.derive_proof(
        proof=proof_set[0],
        document=working,
        reveal_document=reveal_document,
        document_loader=document_loader,
        nonce=nonce,
    )

    # With more than one matching proof, derive from each and collect the
    # results in a proof list.
    if len(proof_set) > 1:
        derived_proof["proof"] = [derived_proof["proof"]]

        proof_set.pop(0)

        for proof in proof_set:
            additional_derived_proof = await suite.derive_proof(
                proof=proof,
                document=working,
                reveal_document=reveal_document,
                document_loader=document_loader,
            )
            derived_proof["proof"].append(additional_derived_proof["proof"])

    JsonLdProcessor.add_value(
        derived_proof["document"], "proof", derived_proof["proof"]
    )

    return derived_proof["document"]
async def store_credential(self, cred_ex_record: V20CredExRecord, cred_id: str = None) -> None: """Store linked data proof credential.""" # Get attachment data cred_dict: dict = cred_ex_record.cred_issue.attachment( LDProofCredFormatHandler.format) # Deserialize objects credential = VerifiableCredential.deserialize(cred_dict, unknown=INCLUDE) # Get signature suite, proof purpose and document loader suite = await self._get_suite(proof_type=credential.proof.type) purpose = self._get_proof_purpose( proof_purpose=credential.proof.proof_purpose, challenge=credential.proof.challenge, domain=credential.proof.domain, ) document_loader = self.profile.inject(DocumentLoader) # Verify the credential result = await verify_credential( credential=cred_dict, suites=[suite], document_loader=document_loader, purpose=purpose, ) if not result.verified: raise V20CredFormatError(f"Received invalid credential: {result}") # Saving expanded type as a cred_tag expanded = jsonld.expand(cred_dict) types = JsonLdProcessor.get_values( expanded[0], "@type", ) # create VC record for storage vc_record = VCRecord( contexts=credential.context_urls, expanded_types=types, issuer_id=credential.issuer_id, subject_ids=credential.credential_subject_ids, schema_ids=[], # Schemas not supported yet proof_types=[credential.proof.type], cred_value=credential.serialize(), given_id=credential.id, record_id=cred_id, cred_tags=None, # Tags should be derived from credential values ) # Create detail record with cred_id_stored detail_record = V20CredExRecordLDProof( cred_ex_id=cred_ex_record.cred_ex_id, cred_id_stored=vc_record.record_id) # save credential and detail record async with self.profile.session() as session: vc_holder = session.inject(VCHolder) await vc_holder.store_credential(vc_record) # Store detail record, emit event await detail_record.save(session, reason="store credential v2.0", event=True)