def verify_w3c_vc(self, vc=None, signing_key=None, filter=None,
                  documentloader=None):
    signed_credential = json.loads(vc)
    jws_header = b'{"alg":"EdDSA","b64":false,"crit":["b64"]}'
    proof = signed_credential['proof']
    proof['@context'] = 'https://w3id.org/security/v2'
    encodedSignature = proof['jws'].split("..", 1)[1] + "=="
    del signed_credential['proof']
    del proof['jws']
    normalized_doc = jsonld.normalize(
        signed_credential,
        {'algorithm': 'URDNA2015', 'format': 'application/n-quads'})
    normalized_proof = jsonld.normalize(
        proof,
        {'algorithm': 'URDNA2015', 'format': 'application/n-quads'})
    doc_hash = hashlib.sha256()
    proof_hash = hashlib.sha256()
    doc_hash.update(normalized_doc.encode('utf-8'))
    proof_hash.update(normalized_proof.encode('utf-8'))
    ver_key = nacl.signing.VerifyKey(signing_key, nacl.encoding.HexEncoder)
    signature = nacl.encoding.URLSafeBase64Encoder.decode(encodedSignature)
    encodedHeader = nacl.encoding.URLSafeBase64Encoder.encode(jws_header)
    to_verify = encodedHeader + b'.' + proof_hash.digest() + doc_hash.digest()
    try:
        ver_key.verify(to_verify, signature)
        if filter:
            if self._filter(signed_credential, filter):
                return True, "0"
            else:
                return False, "101 Filter failed"
        return True, "0"
    except Exception:
        return False, "100 VC signature verification failed"
def validate_data(data, shape_file_path):
    """Validate an expanded jsonld document against a shape.

    Parameters
    ----------
    data : dict
        Python dictionary containing JSONLD object
    shape_file_path : str
        SHACL file for the document

    Returns
    -------
    conforms: bool
        Whether the document is conformant with the shape
    v_text: str
        Validation information returned by PySHACL
    """
    kwargs = {"algorithm": "URDNA2015", "format": "application/n-quads"}
    normalized = jsonld.normalize(data, kwargs)
    data_file_format = "nquads"
    shape_file_format = "turtle"
    conforms, v_graph, v_text = shacl_validate(
        normalized,
        shacl_graph=shape_file_path,
        data_graph_format=data_file_format,
        shacl_graph_format=shape_file_format,
        inference="rdfs",
        debug=False,
        serialize_report_graph=True,
    )
    return conforms, v_text
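# A minimal usage sketch for validate_data above. The document layout and
# "shape.ttl" are illustrative assumptions, not taken from the original
# source; the inline context keeps pyld from fetching anything remote.
example_doc = {
    "@context": {"name": "http://schema.org/name"},
    "@id": "http://example.org/items/1",
    "name": "Alice",
}
conforms, report_text = validate_data(example_doc, "shape.ttl")
print(conforms)
print(report_text)  # PySHACL's human-readable validation report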
async def retrieve(request):
    """Retrieve data specific to an acronym, URL should be persistent."""
    db_pool = request.app['pool']
    term = request.match_info['acronymid']
    term_type = request.match_info['acronymtype']
    LOG.info('retrieve')
    data = await fetch_acronym(db_pool, term, term_type)
    doc = {
        "@id": f'acr:{data[0]["index"]}',
        "dc:title": data[0]["title"],
        "skos:label": data[0]["title"],
        "dc:type": {
            "@id": f'acr:{data[0]["acronymtype"]}',
            "@type": 'skos:Concept',
            "skos:label": data[0]["acronymtype"]
        },
        "dc:language": data[0]["language"],
        "dc:description": data[0]["description"]
    }
    if request.content_type == 'application/ld+json':
        response = jsonld.flatten(doc, context)
        return web.json_response(response, content_type='application/ld+json',
                                 dumps=json.dumps)
    elif request.content_type == 'application/n-quads':
        response = jsonld.normalize(doc, {
            'algorithm': 'URDNA2015',
            'format': 'application/n-quads'
        })
        return web.Response(text=response, content_type='application/n-quads')
    else:
        response = {"data": data}
        return web.json_response(response, content_type='application/json',
                                 dumps=json.dumps)
def append(self, data, graph=None):
    # `query` is assumed to be a SPARQL Update builder created in the
    # enclosing scope of the original source (not shown in this excerpt).
    content = jsonld.normalize(data, {
        'algorithm': 'URDNA2015',
        'format': 'application/nquads'
    }).encode('utf-8')
    query.insert(
        graph_expr(graph, content) if graph is not None else content)
    params = {'query': str(query)}
    uri = self.url
    req = requests.post(
        uri,
        params=params,
        headers={
            'accept': 'application/json',
            'content-type': 'application/x-www-form-urlencoded; charset=utf-8'
        },
        auth=self.get_authentication())
    if req.status_code < 200 or req.status_code >= 300:
        raise RESTIOError(
            'Cannot append to graph {}, status={}'.format(
                graph, req.status_code), uri, req.status_code, req.text)
def update(self, data, graph=None, subjects=[]):
    # `query` is assumed to be a SPARQL Update builder created in the
    # enclosing scope of the original source (not shown in this excerpt).
    tuples = tuple_expr('s', 'p', 'o')
    if len(subjects) > 0:
        tuples.filter(in_expr(var('s'), *subjects))
    if graph is None:
        query.delete(tuples)
    else:
        query.delete(graph_expr(graph, tuples))
    content = jsonld.normalize(data, {
        'algorithm': 'URDNA2015',
        'format': 'application/nquads'
    }).encode('utf-8')
    query.insert(
        graph_expr(graph, content) if graph is not None else content)
    params = {'query': str(query)}
    uri = self.url
    req = requests.post(
        uri,
        params=params,
        headers={
            'accept': 'application/json',
            'content-type': 'application/x-www-form-urlencoded; charset=utf-8'
        },
        auth=self.get_authentication())
    if req.status_code < 200 or req.status_code >= 300:
        raise RESTIOError(
            'Cannot update graph {}, status={}'.format(
                graph, req.status_code), uri, req.status_code, req.text)
def task_excel2jsonld(args):
    logging.info("called task_excel2jsonld")
    cnsExcel = CnsExcel()
    filename = args["input_file"]
    cnsExcel.loadExcelSchema(filename)
    if len(cnsExcel.report["warn"]) > 0:
        logging.info(json4debug(cnsExcel.report["warn"]))
        assert False
    filename_output = args["output_file"]
    cnsExcel.schema.exportJsonLd(filename_output)

    jsondata = file2json(filename_output)
    report = cnsExcel.schema.initReport()
    cnsExcel.schema.cnsValidateRecursive(jsondata, report)
    if len(report["bugs"]) > 2:
        logging.info(json4debug(report))
        assert False

    xdebug_file = os.path.join(args["debug_dir"],
                               os.path.basename(args["output_file"]))
    filename_debug = xdebug_file + u".debug"
    cnsExcel.schema.exportDebug(filename_debug)

    from pyld import jsonld
    doc = file2json(filename_output)
    normalized = jsonld.normalize(
        doc, {'algorithm': 'URDNA2015', 'format': 'application/n-quads'})
    filename_ntriples = args["output_file"].replace("jsonld", "nq")
    lines2file([normalized], filename_ntriples)
def ld_args(graph):
    # Closure helper: `self.context` comes from the enclosing method in the
    # original source (not shown in this excerpt).
    norm = jsonld.normalize(
        {
            "@context": self.context,
            "@graph": json.loads(graph)
        },
        {"format": "application/nquads"})
    return {"data": norm, "format": "n3"}
def show_graph(doc, size=10):
    rdf = jsonld.normalize(doc)['@default']
    graph = graphviz.Digraph(strict=False, graph_attr={'rankdir': 'LR'})
    # graph = graphviz.Digraph(
    #     strict=False, graph_attr={'size': str(size), 'rankdir': 'LR'}
    # )
    for edge in rdf:
        subj = short_name(edge['subject'])
        obj = short_name(edge['object'])
        pred = short_name(edge['predicate'])
        if subj and obj and pred:
            # Apply different styles to the nodes based on whether they're
            # literals, ConceptNet URLs, or other URLs
            if obj.startswith('"'):
                # Literal values
                graph.node(obj, penwidth='0')
            elif obj.startswith('/'):
                # ConceptNet nodes
                graph.node(obj, style='filled', fillcolor="#ddeeff")
            else:
                # Other URLs
                graph.node(obj, color="#558855")
            graph.edge(subj, obj, label=pred)
    return graph
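# Usage sketch for show_graph (the document is illustrative). Two caveats:
# indexing the result of jsonld.normalize with '@default' relies on the
# older pyld behaviour of returning a parsed dataset when no 'format'
# option is given, and the module's short_name helper plus a system
# Graphviz install are assumed to be available.
sketch_doc = {
    "@context": {"label": "http://www.w3.org/2000/01/rdf-schema#label"},
    "@id": "http://example.org/thing",
    "label": "a thing",
}
g = show_graph(sketch_doc)
g.format = 'png'
g.render('thing')  # writes thing and thing.png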
def do_hash_certificate(self, certificate):
    cert_utf8 = certificate.decode('utf-8')
    cert_json = json.loads(cert_utf8)
    normalized = jsonld.normalize(
        cert_json, {'algorithm': 'URDNA2015', 'format': 'application/nquads'})
    hashed = sha256(normalized)
    self.tree.add_leaf(hashed, False)
    return hashed
def issue(credential, signing_key, documentloader=None):
    """
    It signs a credential using an Ed25519Signature2018 JSON-LD signature
    :param credential: a python dict representing the credential
    :param [signing_key]: the signing key
        [id] the key id
        [privateKeyHex] a Hex encoded Ed25519 private key
    :param documentloader: a custom documentloader
    :return: the credential with the signature appended
    """
    credential = credential.copy()
    jws_header = b'{"alg":"EdDSA","b64":false,"crit":["b64"]}'
    proof = {
        '@context': 'https://w3id.org/security/v2',
        'type': 'Ed25519Signature2018',
        'created': datetime.datetime.utcnow().replace(
            microsecond=0).isoformat() + 'Z',  # e.g. '2020-06-17T17:51:12Z'
        'verificationMethod': signing_key['id'],
        'proofPurpose': 'assertionMethod'
    }
    normalized_doc = jsonld.normalize(credential, {
        'algorithm': 'URDNA2015',
        'format': 'application/n-quads'
    })
    normalized_proof = jsonld.normalize(proof, {
        'algorithm': 'URDNA2015',
        'format': 'application/n-quads'
    })
    doc_hash = hashlib.sha256()
    proof_hash = hashlib.sha256()
    doc_hash.update(normalized_doc.encode('utf-8'))
    proof_hash.update(normalized_proof.encode('utf-8'))
    signing_key = nacl.signing.SigningKey(signing_key['privateKeyHex'],
                                          nacl.encoding.HexEncoder)
    encodedHeader = nacl.encoding.URLSafeBase64Encoder.encode(jws_header)
    to_sign = encodedHeader + b'.' + proof_hash.digest() + doc_hash.digest()
    signed_data = signing_key.sign(to_sign)
    jws = encodedHeader + b'..' + nacl.encoding.URLSafeBase64Encoder.encode(
        signed_data.signature)
    proof['jws'] = jws.decode()[:-2]  # strip the '==' base64 padding
    del proof['@context']
    credential['proof'] = proof
    return credential
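# A minimal usage sketch for issue() above. The DID, key id, and credential
# body are hypothetical, and network access is assumed so pyld can resolve
# the credentials context during normalization.
import nacl.signing
import nacl.encoding

sk = nacl.signing.SigningKey.generate()
keypair = {
    'id': 'did:example:issuer#key-1',  # hypothetical key id
    'privateKeyHex': sk.encode(nacl.encoding.HexEncoder),
}
credential = {
    '@context': ['https://www.w3.org/2018/credentials/v1'],
    'type': ['VerifiableCredential'],
    'issuer': 'did:example:issuer',
    'issuanceDate': '2020-06-17T17:51:12Z',
    'credentialSubject': {'id': 'did:example:subject'},
}
signed = issue(credential, keypair)
print(signed['proof']['jws'])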
def ld_triples(ld, g=None):
    bid_map = {}

    def parse_term(term):
        try:
            term['value'] = term['value'].decode('unicode-escape')
        except UnicodeEncodeError:
            pass
        if term['type'] == 'IRI':
            return URIRef(u'{}'.format(term['value']))
        elif term['type'] == 'literal':
            datatype = URIRef(term.get('datatype', None))
            if datatype == XSD.dateTime:
                try:
                    term['value'] = float(term['value'])
                    term['value'] = datetime.utcfromtimestamp(term['value'])
                except:
                    try:
                        term['value'] = isodate.parse_datetime(term['value'])
                    except:
                        timestamp = mktime_tz(parsedate_tz(term['value']))
                        term['value'] = datetime.fromtimestamp(timestamp)
            if datatype == RDFS.Literal:
                datatype = None
                try:
                    term['value'] = float(term['value'])
                except:
                    pass
            return Literal(term['value'], datatype=datatype)
        else:
            bid = term['value'].split(':')[1]
            if bid not in bid_map:
                bid_map[bid] = shortuuid.uuid()
            return BNode(bid_map[bid])

    if g is None:
        g = Graph()
    if '@context' in ld:
        for ns in filter(
                lambda k: ':' not in k and isinstance(
                    ld['@context'][k], basestring) and
                ld['@context'][k].startswith('http'), ld['@context']):
            g.bind(ns, URIRef(ld['@context'].get(ns)))

    if ld:
        norm = jsonld.normalize(ld)
        def_graph = norm.get('@default', [])
        for triple in def_graph:
            predicate = parse_term(triple['predicate'])
            if not predicate.startswith('http'):
                continue
            subject = parse_term(triple['subject'])
            object = parse_term(triple['object'])
            g.add((subject, predicate, object))
    else:
        print(ld)
    return g
def _get_cert_generator(self) -> Generator:
    """Return a generator of jsonld-normalized unsigned certs."""
    for uid, cert in self.unsigned_certs.items():
        normalized = jsonld.normalize(cert.to_dict(), {
            'algorithm': 'URDNA2015',
            'format': 'application/n-quads'
        })
        yield normalized.encode('utf-8')
def doc_hash(doc):
    doc = dict(doc)
    if 'signature' in doc:
        del doc['signature']
    normalized = jsonld.normalize(
        doc, {'algorithm': 'URDNA2015', 'format': 'application/nquads'})
    h = hashlib.new('sha256')
    h.update(normalized.encode('utf-8'))
    return h.hexdigest()
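# Usage sketch for doc_hash: the detached 'signature' block is stripped
# before normalization, so the digest is the same before and after signing.
# The document and signature value below are made up for illustration; the
# inline context keeps pyld offline.
hash_doc = {
    "@context": {"name": "http://schema.org/name"},
    "name": "Bob",
}
unsigned_digest = doc_hash(hash_doc)
hash_doc["signature"] = {"signatureValue": "base64..."}
assert doc_hash(hash_doc) == unsigned_digest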
def do_execute(self, state):
    normalized = jsonld.normalize(state.certificate_json, {
        'algorithm': 'URDNA2015',
        'format': 'application/nquads'
    })
    hashed = sha256(normalized)
    state.local_hash = hashed
    return True
def normalize_jsonld(jld_document):
    """Normalize and hash the json-ld document"""
    options = {'algorithm': 'URDNA2015', 'format': 'application/nquads'}
    normalized = jsonld.normalize(jld_document, options=options)
    normalized_hash = SHA256.new(data=normalized.encode('utf-8')).digest()
    return normalized_hash
def normalize_jsonld(json_ld_to_normalize,
                     document_loader=preloaded_context_document_loader,
                     detect_unmapped_fields=False):
    """
    Canonicalize the JSON-LD certificate.

    The detect_unmapped_fields parameter is a temporary, incomplete,
    workaround to detecting fields that do not correspond to items in the
    JSON-LD schemas. It works in the Blockcerts context because:
    - Blockcerts doesn't use a default vocab
    - fallback.org is not expected to occur

    Because unmapped fields get dropped during canonicalization, this uses a
    trick of adding {"@vocab": "http://fallback.org/"} to the json ld, which
    will cause any unmapped fields to be prefixed with http://fallback.org/.

    If a @vocab is already there (i.e. an issuer adds this in their
    extensions), then tampering will change the normalized form, hence the
    hash of the certificate, so we will still detect this during
    verification.

    This issue will be addressed in a first-class manner in the future by
    the pyld library.

    :param json_ld_to_normalize:
    :param document_loader:
    :param detect_unmapped_fields:
    :return:
    """
    json_ld = json_ld_to_normalize
    options = deepcopy(JSONLD_OPTIONS)
    if document_loader:
        options['documentLoader'] = document_loader

    if detect_unmapped_fields:
        json_ld = deepcopy(json_ld_to_normalize)
        prev_context = JsonLdProcessor.get_values(json_ld_to_normalize,
                                                  '@context')
        add_fallback = True
        for pc in prev_context:
            if type(pc) is dict:
                for key, value in pc.items():
                    if key == '@vocab':
                        # this already has a vocab; unmapped fields will be
                        # detected in the hash
                        add_fallback = False
                        break
        if add_fallback:
            prev_context.append(FALLBACK_CONTEXT)
            json_ld['@context'] = prev_context

    normalized = jsonld.normalize(json_ld, options=options)

    if detect_unmapped_fields and FALLBACK_VOCAB in normalized:
        unmapped_fields = []
        for m in re.finditer(r'<http://fallback\.org/(.*)>', normalized):
            unmapped_fields.append(m.group(0))
        error_string = ', '.join(unmapped_fields)
        raise BlockcertValidationError(
            'There are some fields in the certificate that do not correspond '
            'to the expected schema. This has likely been tampered with. '
            'Unmapped fields are: ' + error_string)
    return normalized
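# A minimal, self-contained sketch of the @vocab fallback trick described
# above (the document and field names are hypothetical). Unmapped fields
# normally vanish during canonicalization; the fallback vocab keeps them
# visible in the N-Quads so tampering can be detected by string search.
from pyld import jsonld

tampered_doc = {
    "@context": [{"name": "http://schema.org/name"},
                 {"@vocab": "http://fallback.org/"}],
    "name": "Alice",
    "unexpectedField": "tampered",  # not mapped by the first context
}
nquads = jsonld.normalize(tampered_doc, {"algorithm": "URDNA2015",
                                         "format": "application/n-quads"})
# The unmapped field survives canonicalization under the fallback vocab:
assert "http://fallback.org/unexpectedField" in nquads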
def _do_normalize(doc):
    # normalize a document using the RDF Dataset Normalization Algorithm
    # (URDNA2015), see: https://json-ld.github.io/normalization/spec/
    normalized = jsonld.normalize(
        doc, {'algorithm': 'URDNA2015', 'format': 'application/n-quads'})
    # print("NORMALIZED")
    # print(json.dumps(normalized, indent=2))
    return normalized
def normalize_jsonld(jld_document: str) -> bytes:
    """Normalize and hash the json-ld document"""
    options = {'algorithm': 'URDNA2015', 'format': 'application/nquads'}
    normalized = jsonld.normalize(jld_document, options=options)
    normalized_hash = eidas_crypto_hash_byte(b_data=normalized.encode('utf-8'))
    return normalized_hash
def _doc_hash(doc):
    doc = dict(doc)
    if "signature" in doc:
        del doc["signature"]
    normalized = jsonld.normalize(
        doc, {"algorithm": "URDNA2015", "format": "application/nquads"}
    )
    h = hashlib.new("sha256")
    h.update(normalized.encode("utf-8"))
    return h.hexdigest()
def do_execute(self, state):
    options = {
        'algorithm': 'URDNA2015',
        'format': 'application/nquads',
        'documentLoader': cached_document_loader
    }
    normalized = jsonld.normalize(state.certificate_json, options=options)
    hashed = sha256(normalized)
    state.local_hash = hashed
    return True
def options_hash(doc):
    doc = dict(doc['signature'])
    for k in ['type', 'id', 'signatureValue']:
        if k in doc:
            del doc[k]
    doc['@context'] = 'https://w3id.org/identity/v1'
    normalized = jsonld.normalize(
        doc, {'algorithm': 'URDNA2015', 'format': 'application/nquads'})
    h = hashlib.new('sha256')
    h.update(normalized.encode('utf-8'))
    return h.hexdigest()
def _parse_json_ld(filename):
    # just some experiments
    with open(filename) as data_f:
        data = json.load(data_f)
        compacted = compact_with_json_ld_context(data)
        expanded = jsonld.expand(compacted)
        normalized = jsonld.normalize(
            data, {'algorithm': 'URDNA2015', 'format': 'application/nquads'})
        print(json.dumps(expanded, indent=2))
def verify(signed_credential, verification_key, documentloader=None):
    """
    It verifies a credential signed using an Ed25519Signature2018 JSON-LD
    signature
    :param signed_credential: a python dict representing the credential
    :param [verification_key]: the verification key
        [id] the key id
        [publicKeyHex] a Hex encoded Ed25519 public key
    :param documentloader: a custom documentloader
    :return: True or False
    """
    signed_credential = signed_credential.copy()
    jws_header = b'{"alg":"EdDSA","b64":false,"crit":["b64"]}'
    # copy the proof as well, so the caller's credential is not mutated
    # when '@context' is added and 'jws' removed below
    proof = dict(signed_credential['proof'])
    proof['@context'] = 'https://w3id.org/security/v2'
    encodedSignature = proof['jws'].split("..", 1)[1] + "=="
    del signed_credential['proof']
    del proof['jws']
    normalized_doc = jsonld.normalize(signed_credential, {
        'algorithm': 'URDNA2015',
        'format': 'application/n-quads'
    })
    normalized_proof = jsonld.normalize(proof, {
        'algorithm': 'URDNA2015',
        'format': 'application/n-quads'
    })
    doc_hash = hashlib.sha256()
    proof_hash = hashlib.sha256()
    doc_hash.update(normalized_doc.encode('utf-8'))
    proof_hash.update(normalized_proof.encode('utf-8'))
    ver_key = nacl.signing.VerifyKey(verification_key['publicKeyHex'],
                                     nacl.encoding.HexEncoder)
    signature = nacl.encoding.URLSafeBase64Encoder.decode(encodedSignature)
    encodedHeader = nacl.encoding.URLSafeBase64Encoder.encode(jws_header)
    to_verify = encodedHeader + b'.' + proof_hash.digest() + doc_hash.digest()
    try:
        ver_key.verify(to_verify, signature)
        return True
    except Exception:
        return False
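# Sign/verify roundtrip sketch using issue() and verify() above. The DID
# and key id are hypothetical, and network access is assumed so pyld can
# resolve the credentials and security contexts.
import nacl.signing
import nacl.encoding

sk = nacl.signing.SigningKey.generate()
credential = {
    '@context': ['https://www.w3.org/2018/credentials/v1'],
    'type': ['VerifiableCredential'],
    'issuer': 'did:example:issuer',
    'issuanceDate': '2020-06-17T17:51:12Z',
    'credentialSubject': {'id': 'did:example:subject'},
}
signed = issue(credential, {
    'id': 'did:example:issuer#key-1',
    'privateKeyHex': sk.encode(nacl.encoding.HexEncoder),
})
assert verify(signed, {
    'id': 'did:example:issuer#key-1',
    'publicKeyHex': sk.verify_key.encode(nacl.encoding.HexEncoder),
})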
def _canonize(data, document_loader=None):
    # The inner dict comprehension adds the 'documentLoader' key only when
    # a loader was actually supplied, so pyld's default loader is used
    # otherwise.
    return jsonld.normalize(
        data,
        {
            "algorithm": "URDNA2015",
            "format": "application/n-quads",
            **{
                opt: document_loader
                for opt in ["documentLoader"]
                if document_loader
            },
        },
    )
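# Sketch of a custom document loader passed to _canonize: it serves one
# pinned context from memory so normalization works offline. The URL and
# context body are illustrative assumptions; the returned dict follows
# pyld's loader contract (contextUrl/documentUrl/document).
PINNED_CONTEXTS = {
    "https://example.org/ctx": {
        "@context": {"name": "http://schema.org/name"}
    },
}

def pinned_loader(url, options=None):
    # raises KeyError for anything that is not pinned
    return {"contextUrl": None, "documentUrl": url,
            "document": PINNED_CONTEXTS[url]}

nq = _canonize({"@context": "https://example.org/ctx", "name": "Eve"},
               document_loader=pinned_loader)
print(nq)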
def normalize(self, document):
    """The normalisation operation will produce a canonical representation
    of the document according to the URDNA2015 canonicalisation method.

    Returns: string containing the N-Quad representation of the normalised
    document"""
    return jsonld.normalize(document, options={
        'algorithm': 'URDNA2015',
        'format': 'application/n-quads'
    })
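# Usage sketch: URDNA2015 output is deterministic, so semantically equal
# documents normalize to byte-identical N-Quads. `canonizer` stands in for
# an instance of the class defining normalize() above; the documents are
# made up and use an inline context to avoid network access.
doc_a = {"@context": {"name": "http://schema.org/name"}, "name": "Ann"}
doc_b = {"name": "Ann", "@context": {"name": "http://schema.org/name"}}
assert canonizer.normalize(doc_a) == canonizer.normalize(doc_b)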
def _options_hash(doc):
    doc = dict(doc["signature"])
    for k in ["type", "id", "signatureValue"]:
        if k in doc:
            del doc[k]
    doc["@context"] = "https://w3id.org/identity/v1"
    normalized = jsonld.normalize(
        doc, {"algorithm": "URDNA2015", "format": "application/nquads"}
    )
    h = hashlib.new("sha256")
    h.update(normalized.encode("utf-8"))
    return h.hexdigest()
def json_to_rdf(json_in: jsonasobj.JsonObj, schema_uri: str) -> Graph:
    """
    Use the jsonld processor to convert the json into an RDF graph
    :param json_in:
    :param schema_uri: the base URI of the schema file
    :return: RDF graph
    """
    json_in['@context'] = schema_uri + \
        ('/' if schema_uri[-1] not in ['/', '#'] else '') + 'context.json'
    normalized = jsonld.normalize(
        json_in._as_json_obj(),
        {'format': 'application/nquads', 'algorithm': 'URDNA2015'})
    g = Graph()
    g.parse(data=prefixes, format="turtle")
    # The normalized output contains no named graphs here, so it is plain
    # N-Triples, which rdflib can parse as Turtle.
    g.parse(data=normalized, format="turtle")
    return g
def to_newformat(path, format, prefixfile=None, contextfile=None):
    """Convert a JSONLD document to n-triples format

    Since PyLD requires an http url, a local server is started to serve the
    document.

    Parameters
    ----------
    path : str
        A local path or remote url to convert to n-triples
    format: str of enum
        Returned format jsonld, n-triples, turtle
    prefixfile: str
        Prefixes to use when converting to turtle (ignored otherwise)
    contextfile: str
        Context to use for compaction when returning jsonld. If not
        provided, a jsonld graph is returned.

    Returns
    -------
    normalized : str
        A normalized document
    """
    supported_formats = ["jsonld", "n-triples", "turtle"]
    if format not in supported_formats:
        raise ValueError(f"{format} not in {supported_formats}")
    data = load_file(path)
    if format == "jsonld":
        if contextfile is not None:
            with open(contextfile) as fp:
                context = json.load(fp)
            data = jsonld.compact(data, context)
        return json.dumps(data, indent=2)
    kwargs = {"algorithm": "URDNA2015", "format": "application/n-quads"}
    nt = jsonld.normalize(data, kwargs)
    if format == "n-triples":
        return nt
    import rdflib as rl
    g = rl.Graph()
    g.bind("rs", "http://schema.repronim.org/")
    g.bind("sdo", "http://schema.org/")
    g.bind("nidm", "http://purl.org/nidash/nidm#")
    g.bind("skos", "http://www.w3.org/2004/02/skos/core#")
    g.bind("prov", "http://www.w3.org/ns/prov#")
    if prefixfile is not None:
        with open(prefixfile) as fp:
            prefixes = json.load(fp)
        for key, value in prefixes.items():
            g.bind(key, value)
    g.parse(data=nt, format="nt")
    return g.serialize(format=format).decode()
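# Hypothetical invocation of to_newformat (the file name is illustrative;
# load_file comes from the enclosing module and may fetch the document
# over HTTP as described in the docstring).
turtle_text = to_newformat("protocol.jsonld", "turtle")
print(turtle_text)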
def _export_nquad(args, filename_output):
    from pyld import jsonld
    doc = file2json(filename_output)
    normalized = jsonld.normalize(doc, {
        'algorithm': 'URDNA2015',
        'format': 'application/n-quads'
    })
    xdebug_file = os.path.join(args["debug_dir"],
                               os.path.basename(filename_output))
    filename_nquad = xdebug_file + u".nq"
    # filename_nquad = args["output_file"].replace("jsonld", "nq")
    lines2file([normalized], filename_nquad)
def do_hash_certificate(certificate):
    """
    Hash the JSON-LD normalized certificate
    :param certificate:
    :return:
    """
    options = {'algorithm': 'URDNA2015', 'format': 'application/nquads'}
    cert_utf8 = certificate.decode('utf-8')
    cert_json = json.loads(cert_utf8)
    normalized = jsonld.normalize(cert_json, options=options)
    hashed = sha256(normalized)
    # self.tree.add_leaf(hashed, False)
    return hashed
def normalize_jsonld(json_ld_to_normalize):
    """
    Normalize the JSON-LD certificate
    :param json_ld_to_normalize:
    :return:
    """
    options = {
        'algorithm': 'URDNA2015',
        'format': 'application/nquads',
        'documentLoader': cached_document_loader
    }
    normalized = jsonld.normalize(json_ld_to_normalize, options=options)
    return normalized
def get_jsonld(self, context, new_context={}, format="full"):
    """Return the JSON-LD serialization.

    :param: context the context to use for raw publishing; each
        SmartJsonLD instance is expected to have a default context
        associated.
    :param: new_context the context to use for formatted publishing,
        usually supplied by the client; used by the 'compacted', 'framed',
        and 'normalized' formats.
    :param: format the publishing format; can be 'full', 'inline',
        'compacted', 'expanded', 'flattened', 'framed' or 'normalized'.
        Note that 'full' and 'inline' are synonyms, referring to the
        document form which includes the context; for more information
        see: [http://www.w3.org/TR/json-ld/]
    """
    from pyld import jsonld

    if isinstance(context, six.string_types):
        ctx = self.get_context(context)
    elif isinstance(context, dict):
        ctx = context
    else:
        raise TypeError('JSON-LD context must be a string or dictionary')

    try:
        doc = self.translate(context, ctx)
    except NotImplementedError:
        # model does not require translation
        doc = self.dumps(clean=True)

    doc["@context"] = ctx

    if format in ["full", "inline"]:
        return doc
    if format == "compacted":
        return jsonld.compact(doc, new_context)
    elif format == "expanded":
        return jsonld.expand(doc)
    elif format == "flattened":
        return jsonld.flatten(doc)
    elif format == "framed":
        return jsonld.frame(doc, new_context)
    elif format == "normalized":
        return jsonld.normalize(doc, new_context)
    raise ValueError('Invalid JSON-LD serialization format')
def _build_clause(query, clause_key):
    prepared_clause = quote_variables(query[clause_key])
    global_context = query.get('@context')
    if global_context:
        prepared_clause['@context'] = _context_concat(
            global_context, prepared_clause.get('@context'))
    LOG.debug("Prepared clause (%s): %s", clause_key, prepared_clause)
    sparql = "{\n"
    normalized = jsonld.normalize(prepared_clause)
    for graphid, triples in normalized.items():
        if graphid != '@default':
            if graphid.startswith(VAR_PREFIX):
                graphid = graphid[len(VAR_PREFIX):]
            else:
                graphid = "<{}>".format(graphid)
            sparql = '{} GRAPH {} {{\n'.format(sparql, graphid)
        for triple in triples:
            sparql = '{} '.format(sparql)
            for key in ('subject', 'predicate', 'object'):
                node = triple[key]
                ntype = node['type']
                value = node['value']
                if ntype == 'IRI':
                    if value.startswith(VAR_PREFIX):
                        value = value[len(VAR_PREFIX):]
                    else:
                        value = "<{}>".format(value)
                elif ntype == 'blank node':
                    pass  # value already has the correct form
                else:
                    assert ntype == 'literal', ntype
                    if 'datatype' in node:
                        value = "{}^^<{}>".format(dumps(value),
                                                  node['datatype'])
                    else:
                        value = "{}@{}".format(dumps(value),
                                               node['language'])
                sparql = '{}{} '.format(sparql, value)
            sparql = '{}.\n'.format(sparql)
        if graphid != '@default':
            sparql = '{} }}\n'.format(sparql)
    sparql = "{}}}\n".format(sparql)
    return sparql
def parse_jsonld(content, base_uri=None, encoding="utf-8", graph=None):
    """I parse RDF content from JSON-LD.

    This parses the JSON as is. For handling "simplified" kTBS JSON, see
    parse_json (and use the application/json content-type).

    See :func:`rdfrest.parse.parse_rdf_xml` for prototype documentation.
    """
    if graph is None:
        graph = Graph()
    if encoding.lower() != "utf-8":
        content = content.decode(encoding).encode("utf-8")
    try:
        json_data = loads(content)
        # ... then parse!
        normalized_json = normalize(json_data, pylod_options(base_uri))
        # Do not use "nt" as format as it works only with latin-1
        graph.parse(data=normalized_json, format="n3")
    except Exception, ex:
        raise ParseError(ex.message or str(ex))
def main(self):
    print('PyLD Unit Tests')
    print('Use -h or --help to view options.')

    # add program options
    self.parser.add_option('-f', '--file', dest='file',
                           help='The single test file to run', metavar='FILE')
    self.parser.add_option('-d', '--directory', dest='directory',
                           help='The directory full of test files',
                           metavar='DIR')
    self.parser.add_option('-e', '--earl', dest='earl',
                           help='The filename to write the EARL report to',
                           metavar='EARL')
    self.parser.add_option('-v', '--verbose', dest='verbose',
                           action='store_true', default=False,
                           help='Prints verbose test data')

    # parse options
    (self.options, args) = self.parser.parse_args()

    # check if file or directory were specified
    if self.options.file == None and self.options.directory == None:
        raise Exception('No test file or directory specified.')

    # check if file was specified, exists, and is file
    if self.options.file is not None:
        if (os.path.exists(self.options.file) and
                os.path.isfile(self.options.file)):
            # add manifest file to the file list
            self.manifest_files.append(os.path.abspath(self.options.file))
        else:
            raise Exception('Invalid test file: "%s"' % self.options.file)

    # check if directory was specified, exists and is dir
    if self.options.directory is not None:
        if (os.path.exists(self.options.directory) and
                os.path.isdir(self.options.directory)):
            # load manifest files from test directory
            for test_dir, dirs, files in os.walk(self.options.directory):
                for manifest in files:
                    # add all .jsonld manifest files to the file list
                    if (manifest.find('manifest') != -1 and
                            manifest.endswith('.jsonld')):
                        self.manifest_files.append(
                            join(test_dir, manifest))
        else:
            raise Exception('Invalid test directory: "%s"' %
                            self.options.directory)

    # see if any manifests have been specified
    if len(self.manifest_files) == 0:
        raise Exception('No manifest files found.')

    passed = 0
    failed = 0
    total = 0

    # run the tests from each manifest file
    for manifest_file in self.manifest_files:
        test_dir = os.path.dirname(manifest_file)
        manifest = json.load(open(manifest_file, 'r'))
        count = 1

        for test in manifest['sequence']:
            # skip unsupported types
            skip = True
            test_type = test['@type']
            for tt in test_type:
                if tt in SKIP_TEST_TYPES:
                    skip = True
                    break
                if tt in TEST_TYPES:
                    skip = False
            if skip:
                # print 'Skipping test: "%s" ...' % test['name']
                continue

            print('JSON-LD/%s %04d/%s...' % (
                manifest['name'], count, test['name']), end=' ')

            total += 1
            count += 1

            # read input file
            with open(join(test_dir, test['input'])) as f:
                if test['input'].endswith('.jsonld'):
                    input = json.load(f)
                else:
                    input = f.read().decode('utf8')
            # read expect file
            with open(join(test_dir, test['expect'])) as f:
                if test['expect'].endswith('.jsonld'):
                    expect = json.load(f)
                else:
                    expect = f.read().decode('utf8')
            result = None

            # JSON-LD options
            options = {
                'base': 'http://json-ld.org/test-suite/tests/' +
                        test['input'],
                'useNativeTypes': True
            }

            success = False
            try:
                if 'jld:ExpandTest' in test_type:
                    result = jsonld.expand(input, options)
                elif 'jld:CompactTest' in test_type:
                    ctx = json.load(open(join(test_dir, test['context'])))
                    result = jsonld.compact(input, ctx, options)
                elif 'jld:FlattenTest' in test_type:
                    result = jsonld.flatten(input, None, options)
                elif 'jld:FrameTest' in test_type:
                    frame = json.load(open(join(test_dir, test['frame'])))
                    result = jsonld.frame(input, frame, options)
                elif 'jld:FromRDFTest' in test_type:
                    result = jsonld.from_rdf(input, options)
                elif 'jld:ToRDFTest' in test_type:
                    options['format'] = 'application/nquads'
                    result = jsonld.to_rdf(input, options)
                elif 'jld:NormalizeTest' in test_type:
                    options['format'] = 'application/nquads'
                    result = jsonld.normalize(input, options)

                # check the expected value against the test result
                success = deep_compare(expect, result)

                if success:
                    passed += 1
                    print('PASS')
                else:
                    failed += 1
                    print('FAIL')

                if not success or self.options.verbose:
                    print('Expect:', json.dumps(expect, indent=2))
                    print('Result:', json.dumps(result, indent=2))
            except jsonld.JsonLdError as e:
                print('\nError: ', e)
                failed += 1
                print('FAIL')

            # add EARL report assertion
            EARL['subjectOf'].append({
                '@type': 'earl:Assertion',
                'earl:assertedBy': EARL['doap:developer']['@id'],
                'earl:mode': 'earl:automatic',
                'earl:test': ('http://json-ld.org/test-suite/tests/' +
                              os.path.basename(manifest_file) +
                              test.get('@id', '')),
                'earl:result': {
                    '@type': 'earl:TestResult',
                    'dc:date': datetime.datetime.utcnow().isoformat(),
                    'earl:outcome': 'earl:' + ('passed' if success
                                               else 'failed')
                }
            })

    if self.options.earl:
        f = open(self.options.earl, 'w')
        f.write(json.dumps(EARL, indent=2))
        f.close()

    print('Done. Total:%d Passed:%d Failed:%d' % (total, passed, failed))
def main(self): print "PyLD TestRunner" print "Use -h or --help to view options." # add program options self.parser.add_option("-f", "--file", dest="file", help="The single test file to run", metavar="FILE") self.parser.add_option("-d", "--directory", dest="directory", help="The directory full of test files", metavar="DIR") self.parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Prints verbose test data") # parse options (self.options, args) = self.parser.parse_args() # check if file or directory were specified if self.options.file == None and self.options.directory == None: print "No test file or directory specified." return # check if file was specified, exists and is file if self.options.file != None: if (os.path.exists(self.options.file) and os.path.isfile(self.options.file)): # add test file to the file list self.testfiles.append(os.path.abspath(self.options.file)) self.testdir = os.path.dirname(self.options.file) else: print "Invalid test file." return # check if directory was specified, exists and is dir if self.options.directory != None: if (os.path.exists(self.options.directory) and os.path.isdir(self.options.directory)): # load test files from test directory for self.testdir, dirs, files in os.walk(self.options.directory): for testfile in files: # add all .test files to the file list if testfile.endswith(".test"): self.testfiles.append(join(self.testdir, testfile)) else: print "Invalid test directory." return # see if any tests have been specified if len(self.testfiles) == 0: print "No tests found." return # FIXME: #self.testFiles.sort() run = 0 passed = 0 failed = 0 # run the tests from each test file for testfile in self.testfiles: # test group in test file testgroup = json.load(open(testfile, 'r')) count = 1 for test in testgroup['tests']: print 'Test: %s %04d/%s...' % ( testgroup['group'], count, test['name']), run += 1 count += 1 # open the input and expected result json files inputFd = open(join(self.testdir, test['input'])) expectFd = open(join(self.testdir, test['expect'])) inputJson = json.load(inputFd) expectJson = json.load(expectFd) resultJson = None testType = test['type'] if testType == 'normalize': resultJson = jsonld.normalize(inputJson) elif testType == 'expand': resultJson = jsonld.expand(inputJson) elif testType == 'compact': contextFd = open(join(self.testdir, test['context'])) contextJson = json.load(contextFd) resultJson = jsonld.compact(contextJson, inputJson) elif testType == 'frame': frameFd = open(join(self.testdir, test['frame'])) frameJson = json.load(frameFd) resultJson = jsonld.frame(inputJson, frameJson) else: print "Unknown test type." # check the expected value against the test result if expectJson == resultJson: passed += 1 print 'PASS' if self.options.verbose: print 'Expect:', json.dumps(expectJson, indent=4) print 'Result:', json.dumps(resultJson, indent=4) else: failed += 1 print 'FAIL' print 'Expect:', json.dumps(expectJson, indent=4) print 'Result:', json.dumps(resultJson, indent=4) print "Tests run: %d, Tests passed: %d, Tests Failed: %d" % (run, passed, failed)
def main(self):
    print 'PyLD Unit Tests'
    print 'Use -h or --help to view options.'

    # add program options
    self.parser.add_option('-f', '--file', dest='file',
                           help='The single test file to run', metavar='FILE')
    self.parser.add_option('-d', '--directory', dest='directory',
                           help='The directory full of test files',
                           metavar='DIR')
    self.parser.add_option('-v', '--verbose', dest='verbose',
                           action='store_true', default=False,
                           help='Prints verbose test data')

    # parse options
    (self.options, args) = self.parser.parse_args()

    # check if file or directory were specified
    if self.options.file == None and self.options.directory == None:
        raise Exception('No test file or directory specified.')

    # check if file was specified, exists, and is file
    if self.options.file is not None:
        if (os.path.exists(self.options.file) and
                os.path.isfile(self.options.file)):
            # add manifest file to the file list
            self.manifest_files.append(os.path.abspath(self.options.file))
        else:
            raise Exception('Invalid test file: "%s"' % self.options.file)

    # check if directory was specified, exists and is dir
    if self.options.directory is not None:
        if (os.path.exists(self.options.directory) and
                os.path.isdir(self.options.directory)):
            # load manifest files from test directory
            for test_dir, dirs, files in os.walk(self.options.directory):
                for manifest in files:
                    # add all .jsonld manifest files to the file list
                    if (manifest.find('manifest') != -1 and
                            manifest.endswith('.jsonld')):
                        self.manifest_files.append(
                            join(test_dir, manifest))
        else:
            raise Exception('Invalid test directory: "%s"' %
                            self.options.directory)

    # see if any manifests have been specified
    if len(self.manifest_files) == 0:
        raise Exception('No manifest files found.')

    passed = 0
    failed = 0
    total = 0

    # run the tests from each manifest file
    for manifest_file in self.manifest_files:
        test_dir = os.path.dirname(manifest_file)
        manifest = json.load(open(manifest_file, 'r'))
        count = 1

        for test in manifest['sequence']:
            # skip unsupported types
            skip = True
            test_type = test['@type']
            for tt in TEST_TYPES:
                if tt in test_type:
                    skip = False
                    break
            if skip:
                print 'Skipping test: "%s" ...' % test['name']
                continue

            print 'JSON-LD/%s %04d/%s...' % (
                manifest['name'], count, test['name']),

            total += 1
            count += 1

            # read input file
            with open(join(test_dir, test['input'])) as f:
                if test['input'].endswith('.jsonld'):
                    input = json.load(f)
                else:
                    input = f.read().decode('utf8')
            # read expect file
            with open(join(test_dir, test['expect'])) as f:
                if test['expect'].endswith('.jsonld'):
                    expect = json.load(f)
                else:
                    expect = f.read().decode('utf8')
            result = None

            # JSON-LD options
            options = {
                'base': 'http://json-ld.org/test-suite/tests/' +
                        test['input']}

            try:
                if 'jld:NormalizeTest' in test_type:
                    options['format'] = 'application/nquads'
                    result = jsonld.normalize(input, options)
                elif 'jld:ExpandTest' in test_type:
                    result = jsonld.expand(input, options)
                elif 'jld:CompactTest' in test_type:
                    ctx = json.load(open(join(test_dir, test['context'])))
                    result = jsonld.compact(input, ctx, options)
                elif 'jld:FrameTest' in test_type:
                    frame = json.load(open(join(test_dir, test['frame'])))
                    result = jsonld.frame(input, frame, options)
                elif 'jld:FromRDFTest' in test_type:
                    result = jsonld.from_rdf(input, options)
                elif 'jld:ToRDFTest' in test_type:
                    options['format'] = 'application/nquads'
                    result = jsonld.to_rdf(input, options)

                # check the expected value against the test result
                success = deep_compare(expect, result)

                if success:
                    passed += 1
                    print 'PASS'
                else:
                    failed += 1
                    print 'FAIL'

                if not success or self.options.verbose:
                    print 'Expect:', json.dumps(expect, indent=2)
                    print 'Result:', json.dumps(result, indent=2)
            except jsonld.JsonLdError as e:
                print '\nError: ', e
                failed += 1
                print 'FAIL'

    print 'Done. Total:%d Passed:%d Failed:%d' % (total, passed, failed)
def write_turtle(disease_lst, masterClass_lst, name):
    dct = {
        '@graph': [
            {
                '@id': "symdoc:Disease",
                'name': 'Disease'
            },
            {
                '@type': 'owl:ObjectProperty',
                '@id': 'symdoc-ontology:relatedAtonomyOf',
                'owl:inverseOf': {'@id': 'symdoc-ontology:hasRelatedAtonomy'}
            },
            {
                '@type': 'owl:ObjectProperty',
                '@id': 'symdoc-ontology:hasRelatedAtonomy'
            },
            {
                '@type': 'owl:ObjectProperty',
                '@id': 'symdoc-ontology:symptomOf',
                'owl:inverseOf': {'@id': 'symdoc-ontology:hasSymptom'}
            },
            {
                '@type': 'owl:ObjectProperty',
                '@id': 'symdoc-ontology:hasPossibleDrug'
            },
            {
                '@type': 'owl:ObjectProperty',
                '@id': 'symdoc-ontology:possibleDrugOf',
                'owl:inverseOf': {'@id': 'symdoc-ontology:hasPossibleDrug'}
            },
            {
                '@type': 'owl:ObjectProperty',
                '@id': 'symdoc-ontology:hasSymptom',
            },
            {
                '@type': ['owl:ObjectProperty', 'owl:SymmetricProperty'],
                '@id': "symdoc-ontology:familyDisease"
            },
            {
                '@type': ['owl:ObjectProperty', 'owl:SymmetricProperty'],
                '@id': "symdoc-ontology:relatedDisease"
            }
        ],
        '@context': {
            'alias': "symdoc-ontology:alias",
            'disease-ontology': {'@id': 'owl:sameAs'},
            'sameAs': "symdoc-ontology:sameAs",
            'familyDisease': "symdoc-ontology:familyDisease",
            'name': "rdfs:label",
            'possibleDrugOf': "symdoc-ontology:possibleDrugOf",
            'relatedDisease': "symdoc-ontology:relatedDisease",
            'rd-name': "rdfs:label",
            'rd-score': "symdoc-ontology:relatedDiseaseScore",
            'symptom': "symdoc-ontology:symptom",
            'symptomOf': "symdoc-ontology:symptomOf",
            'hasSymptom': "symdoc-ontology:hasSymptom",
            'relatedAtonomyOf': "symdoc-ontology:relatedAtonomyOf",
            'hasRelatedAtonomy': "symdoc-ontology:hasRelatedAtonomy",
            'symptom_frequency': "symdoc-ontology:symptomFrequency",
            'masterClass': "rdfs:subClassOf",
            'score': {'@id': "symdoc-ontology:score", '@type': 'xsd:integer'},
            'frequency': "symdoc-ontology:frequency",
            # Prefixes
            'xsd': "http://www.w3.org/2001/XMLSchema#",
            'rdf': "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
            'rdfs': "http://www.w3.org/2000/01/rdf-schema#",
            'owl': "http://www.w3.org/2002/07/owl#",
            'symdoc': "http://www.symptomdoctor.org#",
            'symdoc-ontology': "http://www.symptomdoctor.org/ontology#"
        }
    }

    # Add classes
    for c in masterClass_lst:
        dct['@graph'].append({
            '@id': "symdoc:%s" % c,
            'alias': convertCamelCase(c),
            'masterClass': {'@id': "symdoc:Disease"}
        })

    # Add diseases
    for d in disease_lst:
        dct['@graph'].append(d)

    sys.stdout.write('Writing %s now, please wait' % name)
    normalized = jsonld.normalize(dct, {'format': 'application/nquads'})
    # The normalized output has no named graphs, so the N-Quads written
    # here are plain N-Triples, which is valid Turtle.
    f = open('%s.ttl' % name, 'w')
    f.write(normalized.encode('utf8'))
    sys.stdout.write('\nFinished %s\n' % name)
    # (closing braces of a `context` dict whose body is truncated in this
    # excerpt)
    }
}

while True:
    os.system("clear")
    gene_id = raw_input("input gene id\n")
    url = url_mod % int(gene_id)
    try:
        r = requests.get(url)
        r.raise_for_status()
    except HTTPError:
        print "no id"

    data = r.json()
    print "\nraw json data"
    print json.dumps(data)

    data["@context"] = context["@context"]
    print "\ncompacted data"
    print json.dumps(data)

    doc = jsonld.expand(data)
    print "\nexpanded data"
    print json.dumps(doc)

    data_nor = jsonld.normalize(doc, {'format': 'application/nquads'})
    print "\n N-Quads data"
    print data_nor

    raw_input("input any key to continue")
def parse_json(content, base_uri=None, encoding="utf-8", graph=None):
    """I parse RDF content from kTBS-specific JSON.

    See :func:`rdfrest.parse.parse_rdf_xml` for prototype documentation.
    """
    if graph is None:
        graph = Graph()
    if encoding.lower() != "utf-8":
        content = content.decode(encoding).encode("utf-8")
    try:
        json_data = loads(content)

        obsel_context = False
        if isinstance(json_data, list):
            # this is a list of obsels; embed it in the correct structure
            json_data = {
                "@id": base_uri,
                "obsels": json_data,
            }
            obsel_context = True
        elif json_data.get("@type") == "Base":
            json_data.setdefault(u"inRoot", unicode(base_uri))
        elif json_data.get("@type") in ("StoredTrace", "ComputedTrace",
                                        "DataGraph", "TraceModel", "Method"):
            json_data.setdefault(u"inBase", unicode(base_uri))
        elif "@graph" in json_data:
            # this is a TraceModel
            # @graph must be a non-empty list,
            # with the first item representing the trace model
            json_data["@graph"][0].setdefault(u"inBase", unicode(base_uri))
        elif ((json_data.get("hasObselList") is None) and
              (json_data.get("hasTraceStatistics") is None) and
              (json_data.get("hasBuiltinMethod") is None)):
            # must be an obsel
            obsel_context = True
            json_data.setdefault(u"hasTrace", unicode(base_uri))

        # add context if needed
        if "@context" not in json_data:
            if not obsel_context:
                json_data["@context"] = CONTEXT_URI
            else:
                model_uri = factory(base_uri, [KTBS.AbstractTrace]).model_uri
                if model_uri[-1] not in {"/", "#"}:
                    model_uri += "#"
                json_data["@context"] = [
                    CONTEXT_URI,
                    {"m": unicode(model_uri)},
                ]

        # ... then parse!
        normalized_json = normalize(json_data, pylod_options(base_uri))
        # Do not use "nt" as format as it works only with latin-1
        graph.parse(data=normalized_json, format="n3")
    except Exception, ex:
        raise ParseError(ex.message or str(ex))