def detail(cls, annotation, with_context=True):
    item = dict()
    if with_context:
        item["@context"] = cls.JSONLD_CONTEXT
    item["id"] = url_for(
        "annotation_detail",
        annotation_id=annotation.id,
        library_short_name=annotation.patron.library.short_name,
        _external=True,
    )
    item["type"] = "Annotation"
    item["motivation"] = annotation.motivation
    item["body"] = annotation.content
    if annotation.target:
        target = json.loads(annotation.target)
        compacted = jsonld.compact(target, cls.JSONLD_CONTEXT)
        del compacted["@context"]
        item["target"] = compacted
    if annotation.content:
        body = json.loads(annotation.content)
        compacted = jsonld.compact(body, cls.JSONLD_CONTEXT)
        del compacted["@context"]
        item["body"] = compacted
    return item
def expand_compact_for_context(wa, context):
    """Normalize an annotation for a context; assumes the anno has '@context'."""
    context = wa['@context']
    try:
        compacted = jsonld.compact(wa, context,
                                   options={'compactArrays': False})
    except Exception as e:
        msg = 'compaction for context({}) of anno({}) failed: {}'.format(
            context, wa['id'], str(e))
        # re-raise with the annotation id and context attached
        raise type(e)(msg) from e
    try:
        expanded = jsonld.expand(compacted)
    except Exception as e:
        msg = 'expansion for context({}) of anno({}) failed: {}'.format(
            context, wa['id'], str(e))
        raise type(e)(msg) from e
    try:
        translated = jsonld.compact(expanded, context,
                                    options={'compactArrays': False})
    except Exception as e:
        msg = 'translation for context({}) of anno({}) failed: {}'.format(
            context, wa['id'], str(e))
        raise type(e)(msg) from e
    return translated
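# Usage sketch (illustrative, not from the original module): round-tripping a
# minimal annotation through expand_compact_for_context. An inline context is
# used so the example needs no network access; the function assumes the
# annotation carries '@context' and 'id'.
anno = {
    '@context': {'id': '@id', 'type': '@type',
                 'body': 'http://www.w3.org/ns/oa#hasBody'},
    'id': 'http://example.org/anno/1',
    'type': 'http://www.w3.org/ns/oa#Annotation',
    'body': {'id': 'http://example.org/body/1'},
}
normalized = expand_compact_for_context(anno, anno['@context'])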
def from_jsonld(cls, data):
    """Instantiate a JSON-LD class from data."""
    if isinstance(data, cls):
        return data
    if not isinstance(data, dict):
        raise ValueError(data)

    if '@type' in data:
        type_ = tuple(sorted(data['@type']))
        if type_ in cls.__type_registry__ and getattr(
                cls, '_jsonld_type', None) != type_:
            new_cls = cls.__type_registry__[type_]
            if cls != new_cls:
                return new_cls.from_jsonld(data)

    if cls._jsonld_translate:
        data = ld.compact(data, {'@context': cls._jsonld_translate})
        data.pop('@context', None)

    data.setdefault('@context', cls._jsonld_context)

    if data['@context'] != cls._jsonld_context:
        compacted = ld.compact(data, {'@context': cls._jsonld_context})
    else:
        compacted = data

    # assert compacted['@type'] == cls._jsonld_type, '@type must be equal'
    # TODO update self (not cls)._jsonld_context with data['@context']
    fields = cls._jsonld_fields
    return cls(
        **{k.lstrip('_'): v for k, v in compacted.items() if k in fields})
def from_jsonld(
    cls,
    data,
    client=None,
    __reference__=None,
    __source__=None,
):
    """Instantiate a JSON-LD class from data."""
    if isinstance(data, cls):
        return data
    if not isinstance(data, dict):
        raise ValueError(data)

    if '@type' in data:
        type_ = tuple(sorted(data['@type']))
        if type_ in cls.__type_registry__ and getattr(
                cls, '_jsonld_type', None) != type_:
            new_cls = cls.__type_registry__[type_]
            if cls != new_cls:
                return new_cls.from_jsonld(data, client=client)

    if cls._jsonld_translate:
        data = ld.compact(data, {'@context': cls._jsonld_translate})
        data.pop('@context', None)

    data.setdefault('@context', cls._jsonld_context)

    if data['@context'] != cls._jsonld_context:
        try:
            compacted = ld.compact(data, {'@context': cls._jsonld_context})
        except Exception:
            compacted = data
    else:
        compacted = data

    for migration in MIGRATIONS:
        data = migration(data)

    fields = cls._jsonld_fields
    data_ = {}
    if client:
        data_['client'] = client
    for k, v in compacted.items():
        if k in fields:
            data_[k.lstrip('_')] = v

    if __reference__:
        with with_reference(__reference__):
            self = cls(**data_)
    else:
        self = cls(**data_)

    if __source__:
        setattr(self, '__source__', __source__)
    return self
def compact_uri(uri_list, context):
    if isinstance(uri_list, str):
        return jsonld.compact({"@type": uri_list}, context)["@type"]
    try:
        return tuple(
            as_list(jsonld.compact({"@type": uri_list}, context)["@type"]))
    except Exception as err:
        raise Exception(
            f"{err} uri_list={uri_list} context={context}") from err
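# Usage sketch (illustrative): `as_list` is assumed to wrap a scalar into a
# list, as in the surrounding module.
ctx = {'@context': {'schema': 'http://schema.org/'}}
compact_uri('http://schema.org/Person', ctx)
# -> 'schema:Person'
compact_uri(['http://schema.org/Person', 'http://schema.org/Thing'], ctx)
# -> ('schema:Person', 'schema:Thing')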
def create_verify_data(data, signature_options):
    """Encapsulate the process of constructing the string used during sign and verify."""
    type_ = signature_options.get("type", "Ed25519Signature2018")
    if type_ and type_ != "Ed25519Signature2018":
        raise SignatureTypeError(f"invalid signature type {type_}.")

    signature_options["verificationMethod"] = signature_options.get(
        "creator", signature_options.get("verificationMethod"))
    if not signature_options.get("verificationMethod"):
        raise MissingVerificationMethodError(
            "signature_options.verificationMethod is required")

    signature_options["created"] = signature_options.get(
        "created", _created_at())
    [expanded] = jsonld.expand(data)
    framed = jsonld.compact(expanded, "https://w3id.org/security/v2",
                            {"skipExpansion": True})

    # Detect attributes dropped during the expand/compact round trip.
    # (A document longer than its framed form indicates dropped attributes;
    # the reverse would be a different error.)
    if len(data) > len(framed):
        # attempt to collect error report data
        for_diff = jsonld.compact(expanded, data.get("@context"))
        dropped = set(data.keys()) - set(for_diff.keys())
        raise DroppedAttributeError(
            f"{dropped} attributes dropped. "
            "Provide definitions in context to correct.")

    # Check nested structures (proof, credentialSubject) for dropped
    # attributes as well.
    attr = [
        ("proof", "proof"),
        ("credentialSubject",
         "https://www.w3.org/2018/credentials#credentialSubject"),
    ]
    data_context = data.get("@context")
    for mapping in attr:
        data_attribute = data.get(mapping[0], {})
        frame_attribute = framed.get(mapping[1], {})
        if len(data_attribute) > len(frame_attribute):
            for_diff = jsonld.compact(expanded, data_context)
            for_diff_attribute = for_diff.get(mapping[1], {})
            dropped = set(data_attribute.keys()) - set(
                for_diff_attribute.keys())
            raise DroppedAttributeError(
                f"in {mapping[0]}, {dropped} attributes dropped. "
                "Provide definitions in context to correct.")

    canonized_signature_options = _canonize_signature_options(
        signature_options)
    hash_of_canonized_signature_options = _sha256(canonized_signature_options)
    canonized_document = _canonize_document(framed)
    hash_of_canonized_document = _sha256(canonized_document)

    return (framed,
            hash_of_canonized_signature_options + hash_of_canonized_document)
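# Illustration (not from the source) of the dropped-attribute check above:
# terms that a context does not define vanish during expansion, so the
# recompacted document ends up with fewer keys than the input.
from pyld import jsonld

doc = {'@context': {'name': 'http://schema.org/name'},
       'name': 'Alice',
       'nickname': 'Al'}  # 'nickname' is not defined in the context
[expanded] = jsonld.expand(doc)  # 'nickname' is silently dropped here
recompacted = jsonld.compact(expanded, doc['@context'])
assert 'nickname' not in recompacted  # len(doc) > len(recompacted) signals the drop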
def jsonld(self, frame=None, options=None, context=None, removeContext=None):
    if removeContext is None:
        removeContext = Response._context  # Loop?
    if frame is None:
        frame = self._frame
    if context is None:
        context = self.context
    else:
        context = self.get_context(context)
    # For some reason, this causes errors with pyld
    # if options is None:
    #     options = {"expandContext": context.copy()}
    js = self
    if frame:
        logging.debug("Framing: %s", json.dumps(self, indent=4))
        logging.debug("Framing with %s", json.dumps(frame, indent=4))
        js = jsonld.frame(js, frame, options)
        logging.debug("Result: %s", json.dumps(js, indent=4))
    logging.debug("Compacting with %s", json.dumps(context, indent=4))
    js = jsonld.compact(js, context, options)
    logging.debug("Result: %s", json.dumps(js, indent=4))
    if removeContext == context:
        del js["@context"]
    return js
def rdf2jsonld(self):
    """Split the Nquads and convert the partitions to JSON-LD compacted document form."""
    # Extract tokens from offset + 1 to <n-th offset after> (n = args.docs)
    i = 0
    while i < len(self.newdoc) - 1:
        if i + self.docs >= len(self.newdoc) - 1:
            j = len(self.newdoc) - 1
        else:
            j = i + self.docs
        # Serializing RDF via from_rdf yields the so-called expanded document
        # form, i.e. a format that doesn't contain any namespaces.
        print("Serializing RDF file to JSON-LD")
        expand = jsonld.from_rdf(
            self.nquads[self.offsets[self.newdoc[i]] +
                        1:self.offsets[self.newdoc[j]]])
        i = j
        # The compacted JSON-LD document form can include a context
        # (i.e. namespaces) and thus reduces redundancy.
        print("Converting to compacted document form")
        compacted = jsonld.compact(expand, self.loadjson(self.frame))
        # TODO: Insert function to append data of contributors to the bibliographicResource
        print("Indexing documents")
        for graph in compacted["@graph"]:
            if self.extcont is True:
                graph["@context"] = path.abspath(args.frame)
            else:
                graph["@context"] = compacted["@context"]
            self.output(graph)
def quads_to_jsonld(quads):
    from pyld import jsonld
    context = AssemblQuadStorageManager.get_jsonld_context(True)
    jsonf = jsonld.from_rdf(quads)
    jsonc = jsonld.compact(jsonf, context)
    jsonc['@context'] = AssemblQuadStorageManager.get_jsonld_context(False)
    return jsonc
def serialize(self):
    compacted_json = jsonld.compact({
        "http://schema.org/word_id": self.word_id,
        "http://schema.org/word": self.word
    }, self.get_context())
    del compacted_json['@context']
    return compacted_json
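# Standalone sketch of what serialize() above does, with a hypothetical
# context standing in for get_context():
from pyld import jsonld

ctx = {'@context': {'word_id': 'http://schema.org/word_id',
                    'word': 'http://schema.org/word'}}
doc = {'http://schema.org/word_id': 42,
       'http://schema.org/word': 'ontology'}
compacted = jsonld.compact(doc, ctx)
del compacted['@context']
# compacted == {'word_id': 42, 'word': 'ontology'}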
def mds_canonicalize(app, data: dict) -> dict:
    # language=rst
    """
    TODO: Documentation of this vital function.
    """
    ctx = mds_context()
    # if '@context' not in data:
    #     _logger.warning("No @context in data to be canonicalized.")
    #     data['@context'] = ctx
    # The expansion is done implicitly by jsonld.compact() below.
    # data = jsonld.expand(data)
    retval = jsonld.compact(data, ctx)
    retval = DATASET.canonicalize(retval)
    if 'dcat:distribution' not in retval:
        retval['dcat:distribution'] = []
    retval['@context'] = ctx
    for distribution in retval['dcat:distribution']:
        if 'ams:distributionType' in distribution:
            if distribution['ams:distributionType'] != 'file':
                distribution.pop('dcat:mediaType', None)
                distribution.pop('dct:byteSize', None)
            if distribution['ams:distributionType'] != 'api':
                distribution.pop('ams:serviceType', None)
    return retval
def find_id_in_jsonld(json_string, jsonld_options):
    input_data = json.loads(json_string)
    result = jsonld.compact(input_data, OPENBADGES_CONTEXT_V2_URI,
                            options=jsonld_options)
    node_id = result.get('id', '')
    return node_id
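# Usage sketch (illustrative values; resolving the Open Badges context needs
# network access unless jsonld_options supplies a caching documentLoader):
badge = ('{"@context": "https://w3id.org/openbadges/v2",'
         ' "id": "https://example.org/assertions/1",'
         ' "type": "Assertion"}')
find_id_in_jsonld(badge, jsonld_options={})
# -> 'https://example.org/assertions/1'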
def parse_field(field_key, field_value, r_list):
    """
    Return a field as a key/value pair, normalising the key to a qname and
    the value to a single value, based on o:id, @id and o:label, in that
    order. Return None, None for fields that don't fit this pattern.

    :param field_key: key for field
    :param field_value: value for field
    :param r_list: row list
    :return: key, value
    """
    # if field_key == u'o:id':
    #     return 'dcterms:identifier', field_value
    # don't try to compact @ids
    if field_key == '@id':
        return None, None
    # compact using the master context to reduce keys to qnames
    ck = jsonld.compact({field_key: field_value}, master_context)
    del ck['@context']  # remove the context
    if not ck:
        return None, None
    for k, v in ck.items():
        if k == 'dcterms:hasPart':
            for z in v:
                parse_expanded([z], row_list=r_list)
            return k, ';'.join([y['dcterms:identifier'] for y in v])
        if isinstance(v, dict):
            for x in ['o:id', '@id', 'o:label']:
                if x in v:
                    return k, v[x]
            return None, None
        return k, v
def to_jsonld(self):
    """
    Use metadata_standard.specification_root to build an expanded JSON-LD
    metadata record.
    """
    namespaced = {
        'dct:title': self.title,
        'dbpedia:StartDateTime': self.start_datetime.isoformat(),
        'dbpedia:EndDateTime': self.end_datetime.isoformat(),
        'dcat:accessURL': 'https://mt.northwestknowledge.net'
                          '/lidd/api/metadata/{}'.format(self.id),
        'dcat:downloadURL': 'https://mt.northwestknowledge.net'
                            '/lidd/api/metadata/{}/raw'.format(self.id),
        'void:dataDump': 'https://mt.northwestknowledge.net'
                         '/lidd/api/metadata/{}/rdf'.format(self.id)
    }
    context = HCLS_PLUS_CONTEXT
    return jsonld.compact(namespaced, context, {'expandContext': context})
def rdf2es(self, string, bibo):
    """
    Does the really interesting stuff: transformation of the triples by
    subject and indexing in ES.

    :param string: The RDF triples as a concatenated string.
    :param bibo: Is the subject a bibo:Document?
    :return: Body for ES indexing
    """
    g = Graph().parse(data=string)
    jldstr = g.serialize(format='json-ld', indent=4)
    if bibo:
        esdoc = jsonld.compact(loads(jldstr.decode('utf-8')),
                               self.loadjson(self.frame))
        doctype = 'document'
    else:
        esdoc = loads(jldstr.decode('utf-8'))
        esdoc = jsonld.frame(esdoc, self.loadjson(self.frame))['@graph'][0]
        esdoc['@context'] = self.loadjson(self.frame)['@context']
        doctype = 'bibliographicResource'
    docid = re.findall(r'\w{9}', esdoc['@id'])[0]
    if self.filemode:
        bulkfile = [{'index': {'_index': self.index,
                               '_type': doctype,
                               '_id': docid}}, esdoc]
        return bulkfile
    else:
        esdoc.update({'_index': self.index, '_type': doctype, '_id': docid})
        return esdoc
def query_ld(q, limit=None, connection=None):
    if connection is None:
        connection = get_connection()
    ld = connection.query(q, limit=limit)
    if ld.get('names') is not None and ld.get('values') is not None:
        return ld
    return jsonld.compact(ld, connection.context)
def usePyld():
    try:
        jsonld
    except NameError:
        print "=== can't do pyld demos as package pyld isn't installed - Download and install from https://github.com/digitalbazaar/pyld"
        return
    # Grab the vitals
    query = {"fmql": "DESCRIBE 120_5 FILTER(.02=2-9&.01>2008-04-01)",
             "format": "JSON-LD"}
    queryURL = FMQLEP + "?" + urllib.urlencode(query)
    jreply = json.loads(urllib2.urlopen(queryURL).read())
    json.dump(jreply, open("fmql_FMQL.json", "w"), indent=2)
    # nix all but @graph and @context
    jreply = {"@context": jreply["@context"], "@graph": jreply["@graph"]}
    # Let's produce different forms of JSON-LD (and RDF) from this
    # 1. Expanded form
    print "pyld expand ..."
    expanded = jsonld.expand(jreply)
    json.dump(expanded, open("pyld_EXP_FMQLEX.json", "w"), indent=2)
    # 2. Compact it - using the basic context
    print "pyld compact ..."
    compact = jsonld.compact(jreply,
                             {"livevista": "http://livevista.caregraf.info/"})
    json.dump(compact, open("pyld_COMP_FMQLEX.json", "w"), indent=2)
    # 3. Dump RDF -- only nquads are supported ... others return errors
    print "pyld tordf ..."
    open("pyld_RDF_FMQLEX.rdf", "w").write(
        jsonld.to_rdf(jreply, {"format": "application/nquads"}))
    print
def translate_distribution_item(b2file, filecontent):
    context = {
        # ontologies used in FDP according to spec
        "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
        "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
        "dcat": "http://www.w3.org/ns/dcat#",
        "xsd": "http://www.w3.org/2001/XMLSchema#",
        "owl": "http://www.w3.org/2002/07/owl#",
        "dct": "http://purl.org/dc/terms/",
        "lang": "http://id.loc.gov/vocabulary/iso639-1/",
        "fdp": "http://rdf.biosemantics.org/ontologies/fdp-o#",
        "foaf": "http://xmlns.com/foaf/",
        "b2": "https://b2share.eudat.eu/ontology/b2share/"
    }
    doc = {
        "@id": filecontent.links.selflink,
        "@type": "dcat:Distribution",
        # "http://purl.org/dc/terms/identifier": b2file.identifier,
        "http://purl.org/dc/terms/issued": filecontent.created,
        "http://purl.org/dc/terms/modified": filecontent.updated,
        "http://purl.org/dc/terms/title": filecontent.key,
        # "http://purl.org/dc/terms/license": filecontent.key,
        # license is set at the dataset level (level 3)
        "http://purl.org/dc/terms/hasVersion": filecontent.version_id,
        "http://purl.org/dc/terms/versionOf": b2file.links.selflink
    }
    return jsonld.compact(doc, context)
def serialize(self):
    compacted_json = jsonld.compact({
        "http://schema.org/player_id": self.player_id,
        "http://schema.org/username": self.username
    }, self.get_context())
    del compacted_json['@context']
    return compacted_json
def jsonld(self, with_context=True, context_uri=None, prefix=None,
           expanded=False):
    ser = self.serializable()
    result = jsonld.compact(ser, self._context, options={
        'base': prefix,
        'expandContext': self._context,
        'senpy': prefix
    })
    if context_uri:
        result['@context'] = context_uri
    if expanded:
        result = jsonld.expand(result, options={
            'base': prefix,
            'expandContext': self._context
        })
    if not with_context:
        del result['@context']
    return result
def serialize(self):
    compacted_json = jsonld.compact(
        {
            "http://schema.org/first_name": self.first_name,
            "http://schema.org/last_name": self.last_name,
            "http://schema.org/id": self.id,
            "http://schema.org/email": self.email,
            "http://schema.org/birthDate":
                self.birthday.isoformat() if self.birthday else "",
            "http://schema.org/telephone": self.phone,
            "http://schema.org/languages": self.languages,
            "http://schema.org/skills": self.skills,
            "http://schema.org/nr_of_reviews": len(self.reviews),
            "http://schema.org/nr_of_interviews": len(self.interviews)
        }, self.get_context())
    return compacted_json
def usePyld():
    # Grab the vitals used in frame 1
    query = {"fmql": "DESCRIBE 120_5 FILTER(.02=2-9&.01>2008-04-01)",
             "format": "JSON-LD"}
    queryURL = FMQLEP + "?" + urllib.urlencode(query)
    jreply = json.loads(urllib2.urlopen(queryURL).read())
    json.dump(jreply, open("fmql_FMQL_F1.json", "w"), indent=2)
    # Grab the vitals used in frame 2
    query = {"fmql": "DESCRIBE 120_5 FILTER(.02=2-9&.01>2008-04-01)",
             "format": "JSON-LD2"}
    queryURL = FMQLEP + "?" + urllib.urlencode(query)
    jreply = json.loads(urllib2.urlopen(queryURL).read())
    json.dump(jreply, open("fmql_FMQL_F2.json", "w"), indent=2)
    # Let's produce different forms of JSON-LD (and RDF) from this
    # 1. Expanded form
    print "pyld expand ..."
    expanded = jsonld.expand(jreply)
    json.dump(expanded, open("pyld_EXP_FMQLEX.json", "w"), indent=2)
    # 2. Compact it - using the basic context of framing 1
    print "pyld compact ..."
    compact = jsonld.compact(jreply, json.load(open("vsfmcontextBase.json")))
    json.dump(compact, open("pyld_COMP_FMQLEX.json", "w"), indent=2)
    # 3. Dump RDF -- only nquads are supported ... others return errors
    print "pyld tordf ..."
    open("pyld_RDF_FMQLEX.rdf", "w").write(
        jsonld.to_rdf(jreply, {"format": "application/nquads"}))
def compact_entity_params(attr, context, compacted_dict, app):
    """Compact an attribute name against the context, caching the result."""
    context_list = []
    attr_key = attr
    default_context_compact = 'https://uri.etsi.org/ngsi-ld/default-context/'
    try:
        if attr in compacted_dict:
            return compacted_dict[attr]
        if default_context_compact in attr:
            attr = attr.replace(default_context_compact, '')
            compacted_dict[attr_key] = attr
            return attr
        if context:
            if context in app.context_dict.keys():
                context_list.append(app.context_dict[context])
            else:
                context_list.append(context)
        context_list.append(app.context_dict[default_context])
        con = {"@context": context_list}
        com = {attr: attr}
        compacted = jsonld.compact(com, con)
        # the first key is '@context'; the compacted attribute follows it
        attr = list(compacted.keys())[1]
        compacted_dict[attr_key] = attr
    except Exception:
        app.logger.error("Error: compact_entity_params")
        app.logger.error(traceback.format_exc())
    return attr
def compact_object(jsonobject):
    if isinstance(jsonobject, list) and len(jsonobject) == 1:
        jsonobject = jsonobject[0]
    if isinstance(jsonobject, dict):
        if (record_field and record_field in jsonobject) or (record_field is None):
            compacted = jsonld.compact(jsonobject, context,
                                       {'skipExpansion': True})
            if context_url:
                compacted['@context'] = context_url
            for date in ["definition"]:
                if isinstance(compacted.get(date), str):
                    compacted.pop(date)
            if isinstance(compacted.get("gndIdentifier"), list):
                compacted["gndIdentifier"] = compacted.pop("gndIdentifier")[0]
            # for fix in ["definition"]:
            #     if isinstance(compacted.get("fix"), (dict, list)):
            #         compacted.pop(fix)
            if (node and compacted.get("@id")
                    and compacted.get("@id").startswith("_:")) or (
                        node and compacted.get("id")
                        and compacted.get("id").startswith("_:")):
                # per-process filenames avoid race conditions
                with gzip.open(
                        pathprefix + str(current_process().name) +
                        "-bnodes.ldj.gz", "at") as fileout:
                    fileout.write(json.dumps(compacted, indent=None) + "\n")
            else:
                with gzip.open(
                        pathprefix + str(current_process().name) + ".ldj.gz",
                        "at") as fileout:
                    _id = compacted.pop("id")
                    compacted["id"] = _id.split("/")[-1]
                    fileout.write(json.dumps(compacted, indent=None) + "\n")
def detect_type(value, allowed_types=None):
    """
    Detect type of `value`, optionally subject to `allowed_types`.
    Return a tuple of the detected value and type.
    """
    detected_value = value
    detected_type = None
    if isinstance(value, six.string_types):
        return detect_raw_type(value, allowed_types)
    if isinstance(value, dict):
        detected_type = 'json'
        result = jsonld.compact(
            value,
            OPENBADGES_CONTEXT_V2_URI,
            options=OB_OPTIONS.get('jsonld_options')
        )
        id_url = result.get('id', '')
        if ob_utils.is_url(id_url):
            detected_value = id_url
            detected_type = 'url'
    if not detected_type:
        raise Exception('unknown type')
    if allowed_types:
        assert detected_type in allowed_types, \
            'unhandled type "%s"' % detected_type
    return (detected_value, detected_type)
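# Behaviour sketch (illustrative): a dict whose compacted 'id' is a URL is
# re-detected as a 'url' input, otherwise it stays 'json'.
node = {'@context': 'https://w3id.org/openbadges/v2',
        'id': 'https://example.org/badges/5',
        'type': 'BadgeClass'}
detect_type(node)
# -> ('https://example.org/badges/5', 'url')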
def process_all_imdb(cursor, imdb):
    for i, mid in enumerate(get_movies_to_process(cursor)):
        print "// %d movies processed." % i
        movie = get_jsonld_from_imdb(imdb, mid)
        movie = jsonld.compact(movie, 'http://schema.org/')
        print json.dumps(movie, indent=4)
def serialize(self): """ Creates dictionary representation of object """ compacted_json = jsonld.compact( { "http://schema.org/first_name": self.first_name, "http://schema.org/last_name": self.last_name, "http://schema.org/id": self.id, "http://schema.org/email": self.email, "http://schema.org/birthDate": self.birthday.isoformat() if self.birthday else "", "http://schema.org/telephone": self.phone, "http://schema.org/languages": self.languages, "http://schema.org/number_of_reviews": len(self.reviews), "http://schema.org/number_of_interviews": len(self.interviews) }, self.get_context()) return compacted_json
def jsonld_compact_data(state, task_meta, **options):
    try:
        data = task_meta.get('data')
        # On Python 3, bytes payloads must be decoded before json.loads.
        if data and not sys.version[:3] < '3' and not isinstance(
                data, six.string_types):
            data = data.decode()
        input_data = json.loads(data)
        expected_class = task_meta.get('expected_class')
    except TypeError:
        return task_result(False, "Could not load data")

    selected_options = options.get('jsonld_options', jsonld_use_cache)
    selected_options['documentLoader'].contexts = set()
    result = jsonld.compact(input_data, OPENBADGES_CONTEXT_V2_URI,
                            options=selected_options)
    new_contexts = list(selected_options['documentLoader'].contexts.copy())
    node_id = result.get('id',
                         task_meta.get('node_id', get_next_blank_node_id()))

    # Handle mismatch between URL node source and declared ID.
    if result.get('id') and task_meta.get(
            'node_id') and result['id'] != task_meta['node_id']:
        refetch_action = add_task(FETCH_HTTP_NODE, url=result['id'])
        if expected_class:
            refetch_action['expected_class'] = expected_class
        actions = [
            report_message(
                "Node fetched from source {} declared its id as {}".format(
                    task_meta['node_id'], node_id),
                MESSAGE_LEVEL_WARNING,
                success=False),
            delete_outdated_node_tasks(task_meta['node_id']),
            refetch_action
        ]
        if task_meta.get('source_node_path'):
            actions.append(
                patch_node_reference(task_meta['source_node_path'], node_id))
        return task_result(
            True,
            "Successfully compacted node {} from source {} and found ID mismatch"
            .format(node_id, task_meta['node_id']), actions)

    actions = [add_node(node_id, data=result)] + _get_extension_actions(
        result, [node_id], new_contexts)
    if expected_class:
        actions.append(
            add_task(VALIDATE_EXPECTED_NODE_CLASS,
                     node_id=node_id,
                     expected_class=expected_class))
    elif task_meta.get('detectAndValidateClass', True):
        actions.append(
            add_task(DETECT_AND_VALIDATE_NODE_CLASS, node_id=node_id))
    return task_result(True,
                       "Successfully compacted node {}".format(node_id),
                       actions)
def compact_and_clean(self, js):
    newjs = compact(js, context_js)
    newjs['@context'] = context
    if "@graph" in newjs:
        for k, v in newjs['@graph'].items():
            newjs[k] = v
        del newjs['@graph']
    return newjs
def compact_with_json_ld_context(input_json, document_loader=None):
    options = {}
    if document_loader:
        options['documentLoader'] = document_loader
    with open(JSON_LD_CONTEXT_V1_2) as context_f:
        ctx = json.load(context_f)
    compacted = jsonld.compact(input_json, ctx, options=options)
    return compacted
def from_graph(
    data: Graph,
    type: Optional[Union[str, List]] = None,
    frame: Dict = None,
    model_context: Optional[Context] = None
) -> Union[Resource, List[Resource]]:
    if not type:
        # types of the data to transform to JSON-LD
        _types = data.triples((None, RDF.type, None))
        _types = [str(_type[2]) for _type in _types]
    else:
        _types = type
    # round-trip through N3 to get curies as keys when the model context
    # is not used
    graph_n3 = data.serialize(format="n3")
    graph_n3 = Graph().parse(data=graph_n3, format="n3")
    graph_string = graph_n3.serialize(format="json-ld", auto_compact=True,
                                      indent=2)
    graph_json = json.loads(graph_string)
    if model_context:
        context = model_context.document
    else:
        context = graph_json[CONTEXT]
    if not frame:
        frame = {"@context": context, "@type": _types, "@embed": True}
    framed = jsonld.frame(graph_json, frame)
    framed = _graph_free_jsonld(framed)
    if isinstance(framed, list):
        framed = [
            jsonld.compact(item, ctx=context,
                           options={'processingMode': 'json-ld-1.1'})
            for item in framed
        ]
    else:
        framed = jsonld.compact(framed, ctx=context,
                                options={'processingMode': 'json-ld-1.1'})
    return from_jsonld(framed)
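# Usage sketch (hypothetical data; the Resource/Context types come from the
# host library):
from rdflib import Graph

g = Graph()
g.parse(data='<http://example.org/p1> a <http://schema.org/Person> .',
        format='turtle')
resource = from_graph(g)  # types inferred from the rdf:type triples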
def serialize_in_json(g, uri):
    context = build_graph_context(g)
    cg = skolemize(g)
    ted_nquads = cg.serialize(format='nquads')
    ld = jsonld.from_rdf(ted_nquads)
    type_ = list(cg.objects(uri, RDF.type)).pop()
    ld = jsonld.frame(ld, {'context': context, '@type': str(type_)})
    return json.dumps(jsonld.compact(ld, context), indent=3, sort_keys=True)
def convert(anno):
    # parse unless we already have a dict
    if not isinstance(anno, dict):
        anno = json.loads(anno)
    # rdf = expand(anno)
    reframed = frame(anno, annoframe)
    outjs = compact(reframed, contextURI)
    return outjs
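# The frame-then-compact pattern above, shown standalone with an inline
# context and frame (the source uses annoframe and contextURI, which are
# defined elsewhere):
from pyld import jsonld

context = {'oa': 'http://www.w3.org/ns/oa#'}
doc = {'@context': context,
       '@graph': [{'@id': 'http://example.org/a1',
                   '@type': 'oa:Annotation'}]}
framed = jsonld.frame(doc, {'@context': context, '@type': 'oa:Annotation'})
compacted = jsonld.compact(framed, context)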
def fix_iiif(manifests):
    with open('contexts/iiif2-jsonld11.json') as f:
        ctx = json.load(f)
    manifests = jsonld.compact(manifests, ctx)
    manifests = patch_image_service(manifests)
    manifests["@context"] = "http://iiif.io/api/presentation/2/context.json"
    return manifests
def serialize(self):
    compacted_json = jsonld.compact({
        "http://schema.org/note_id": self.note_id,
        "http://schema.org/book_id": self.book_id,
        "http://schema.org/user_id": self.user_id,
        "http://schema.org/notes": self.notes
    }, self.get_context())
    del compacted_json['@context']
    return compacted_json
def dump(self, obj):
    """Compact JSON according to context."""
    rec = copy.deepcopy(super(JSONLDSerializer, self).dump(obj))
    rec.update(self.context)
    compacted = jsonld.compact(rec, self.context)
    if not self.expanded:
        return compacted
    return jsonld.expand(compacted)[0]
def transform_jsonld(self, obj):
    """Compact JSON according to context."""
    rec = copy.deepcopy(obj)
    rec.update(self.context)
    compacted = jsonld.compact(rec, self.context)
    if not self.expanded:
        return compacted
    return jsonld.expand(compacted)[0]
def record2jsonld(record, context):
    import copy

    from pyld import jsonld

    rec = copy.deepcopy(record)
    rec.update(context)
    compacted = jsonld.compact(rec, context)
    return jsonld.expand(compacted)
def quads_to_jsonld(self, quads):
    from pyld import jsonld

    context = json.load(open(join(dirname(__file__),
                                  'ontology', 'context.jsonld')))
    server_uri = self.local_uri()
    context["@context"]['local'] = server_uri
    jsonf = jsonld.from_rdf(quads)
    jsonc = jsonld.compact(jsonf, context)
    jsonc['@context'] = [context_url, {'local': server_uri}]
    return jsonc
def clean_jsonld(self, js, uri):
    if isinstance(js, list):
        # find the actual object; NB for fcr:metadata it's not the
        # retrieved URI
        for o in js:
            if o['@id'] == uri or (self.context.id_alias and
                                   o.get(self.context.id_alias, '') == uri):
                js = o
                break
    js = jsonld.compact(js, self.context.data)
    del js['@context']
    return js
def rdf_to_jsonld(self, rdf, fmt):
    g = ConjunctiveGraph()
    g.parse(data=rdf, format=fmt)
    out = g.serialize(format='json-ld')
    j2 = json.loads(out)
    j2 = {"@context": context_js, "@graph": j2}
    framed = frame(j2, frame_js)
    out = compact(framed, context_js)
    # recursively clean blank node ids
    # out = self._clean_bnode_ids(out)
    return out
def test_json_ld(self, book_context, book_schema, simple_book_record):
    import copy
    import json

    from pyld import jsonld
    import rdflib_jsonld  # noqa: registers the rdflib JSON-LD plugin
    from rdflib import Graph

    rec = copy.deepcopy(simple_book_record)
    rec.update(book_context)
    compacted = jsonld.compact(rec, book_context)
    expanded = jsonld.expand(compacted)
    graph = Graph().parse(data=json.dumps(expanded, indent=2),
                          format="json-ld")
    print(graph.serialize(format="json-ld"))
def convert(context, src, dest):
    # context = abspath(context)
    with open(context) as f:
        context = load(f)
    g = ConjunctiveGraph()
    with open(src) as f:
        g.parse(data=f.read(), format='trig')
    with open(dest, 'w') as f:
        # f.write(g.serialize(format='json-ld', indent=4, context=context))
        # Bug in rdflib: the line above loses the TextPositionSelector.
        quads = g.serialize(format='nquads')
        expanded = jsonld.from_rdf(quads)
        jsonc = jsonld.compact(expanded, context)
        dump(jsonc, f, indent=" ")
def convert(input_path, output_path):
    skos = jsonld.from_rdf(get_skos(input_path)
                           .decode('unicode_escape')
                           .encode('utf-8', 'ignore'))
    context = {
        "@vocab": "http://www.w3.org/2004/02/skos/core#",
        "name": {
            "@id": "http://www.w3.org/2004/02/skos/core#prefLabel",
            "@container": "@set"
        },
        "alternateName": {
            "@id": "http://www.w3.org/2004/02/skos/core#altLabel",
            "@container": "@set"
        },
        "narrower": {
            "@id": "http://www.w3.org/2004/02/skos/core#narrower",
            "@container": "@set"
        },
        "description": {
            "@id": "http://purl.org/dc/terms/description",
            "@container": "@set"
        },
        "scopeNote": {"@container": "@set"},
        "notation": {"@container": "@set"},
        "publisher": "http://purl.org/dc/terms/publisher",
        "title": "http://purl.org/dc/terms/title",
        "preferredNamespacePrefix": "http://purl.org/vocab/vann/preferredNamespacePrefix",
        "preferredNamespaceUri": "http://purl.org/vocab/vann/preferredNamespaceUri",
        "source": "http://purl.org/dc/terms/source"
    }
    frame = {
        "@context": context,
        "@type": "ConceptScheme",
        "@explicit": True,
        "hasTopConcept": {
            "@type": "Concept",
            "narrower": {"@type": "Concept"}
        }
    }
    framed = jsonld.compact(jsonld.frame(skos, frame), context)
    del framed['@context']
    with open(output_path, 'w') as output_file:
        json.dump(framed, output_file, indent=2, ensure_ascii=False)
    print "Wrote data for " + input_path + " to " + output_path
def compacted_jsonld(self):
    """Return the ontology in compacted JSON-LD format as an object."""
    content = {'content': self.triples}
    conn = HTTPConnection('rdf-translator.appspot.com')
    # conn.set_debuglevel(1)
    conn.request('POST', '/convert/n3/json-ld/content',
                 urllib.urlencode(content))
    response = conn.getresponse()
    assert response.status == 200
    expanded = json.load(response)
    compacted = jsonld.compact(expanded, self.context)
    # print json.dumps(compacted, indent=4)
    compacted['@graph'] = sorted(compacted['@graph'],
                                 key=lambda x: x['@id'])
    return compacted
def _rdf_to_jsonld(self, b):
    fmt = request.headers['Content-Type']
    if fmt in self.rdflib_format_map:
        rdftype = self.rdflib_format_map[fmt]
        g = Graph()
        g.parse(data=b, format=rdftype)
        out = g.serialize(format='json-ld')
        # rdflib doesn't do framing, so re-parse its output and frame it
        # with pyld
        j2 = json.loads(out)
        j2 = {"@context": self.default_context, "@graph": j2}
        framed = frame(j2, self.annoframe)
        out = compact(framed, self.default_context)
        # recursively clean blank node ids
        out = self._clean_bnode_ids(out)
        return out
def compact_json(metadata, context=DEFAULT_CONTEXT):
    """
    Compact json with the supplied context.

    Note: "free-floating" nodes are removed (e.g. a key just named
    "bazzzzzz" which isn't specified in the context). Something like
    "bazzzzzz:blerp" will stay, though. This is jsonld.compact behavior.
    """
    compacted = jsonld.compact(
        metadata, context,
        options={
            "documentLoader": load_context,
            # This allows things like "license" etc. to be preserved
            "expandContext": context,
            "keepFreeFloatingNodes": False})
    return compacted
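# Usage sketch: thanks to expandContext, bare keys that the context defines
# survive compaction, while unknown free-floating keys are dropped (assumes
# DEFAULT_CONTEXT defines 'license', per the docstring's example).
metadata = {'license': 'CC0-1.0', 'bazzzzzz': 'dropped'}
cleaned = compact_json(metadata)  # keeps 'license', drops 'bazzzzzz'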
def get_jsonld(self, context, new_context={}, format="full"):
    """Return the JSON-LD serialization.

    :param context: the context to use for raw publishing; each
        SmartJsonLD instance is expected to have a default context
        associated.
    :param new_context: the context to use for formatted publishing,
        usually supplied by the client; used by the 'compacted', 'framed',
        and 'normalized' formats.
    :param format: the publishing format; can be 'full', 'inline',
        'compacted', 'expanded', 'flattened', 'framed' or 'normalized'.
        Note that 'full' and 'inline' are synonyms, referring to the
        document form which includes the context; for more information
        see [http://www.w3.org/TR/json-ld/].
    """
    from pyld import jsonld

    if isinstance(context, six.string_types):
        ctx = self.get_context(context)
    elif isinstance(context, dict):
        ctx = context
    else:
        raise TypeError('JSON-LD context must be a string or dictionary')

    try:
        doc = self.translate(context, ctx)
    except NotImplementedError:
        # model does not require translation
        doc = self.dumps(clean=True)

    doc["@context"] = ctx

    if format in ["full", "inline"]:
        return doc
    if format == "compacted":
        return jsonld.compact(doc, new_context)
    elif format == "expanded":
        return jsonld.expand(doc)
    elif format == "flattened":
        return jsonld.flatten(doc)
    elif format == "framed":
        return jsonld.frame(doc, new_context)
    elif format == "normalized":
        return jsonld.normalize(doc, new_context)
    raise ValueError('Invalid JSON-LD serialization format')
def compact(document, context=None, base=None, remove_context=False):
    """Compact OA JSON-LD, shortening forms according to context."""
    # See http://www.w3.org/TR/json-ld-api/#compaction
    if context is None:
        context = default_context()
    if base is None:
        base = default_base()
    options = {}
    if base is not None:
        options['base'] = base
    compacted = jsonld.compact(document, context, options)
    if remove_context:
        compacted.pop('@context', None)
    return compacted
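# Usage sketch with an inline context standing in for default_context():
doc = {'http://www.w3.org/ns/oa#hasBody': {'@id': 'http://example.org/b1'}}
ctx = {'oa': 'http://www.w3.org/ns/oa#'}
compact(doc, context=ctx, remove_context=True)
# -> {'oa:hasBody': {'@id': 'http://example.org/b1'}}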
def main(self):
    print('PyLD Unit Tests')
    print('Use -h or --help to view options.')

    # add program options
    self.parser.add_option('-f', '--file', dest='file',
                           help='The single test file to run', metavar='FILE')
    self.parser.add_option('-d', '--directory', dest='directory',
                           help='The directory full of test files',
                           metavar='DIR')
    self.parser.add_option('-e', '--earl', dest='earl',
                           help='The filename to write the EARL report to',
                           metavar='EARL')
    self.parser.add_option('-v', '--verbose', dest='verbose',
                           action='store_true', default=False,
                           help='Prints verbose test data')

    # parse options
    (self.options, args) = self.parser.parse_args()

    # check if file or directory were specified
    if self.options.file is None and self.options.directory is None:
        raise Exception('No test file or directory specified.')

    # check if file was specified, exists, and is a file
    if self.options.file is not None:
        if (os.path.exists(self.options.file) and
                os.path.isfile(self.options.file)):
            # add manifest file to the file list
            self.manifest_files.append(os.path.abspath(self.options.file))
        else:
            raise Exception('Invalid test file: "%s"' % self.options.file)

    # check if directory was specified, exists, and is a directory
    if self.options.directory is not None:
        if (os.path.exists(self.options.directory) and
                os.path.isdir(self.options.directory)):
            # load manifest files from the test directory
            for test_dir, dirs, files in os.walk(self.options.directory):
                for manifest in files:
                    # add all .jsonld manifest files to the file list
                    if (manifest.find('manifest') != -1 and
                            manifest.endswith('.jsonld')):
                        self.manifest_files.append(join(test_dir, manifest))
        else:
            raise Exception('Invalid test directory: "%s"' %
                            self.options.directory)

    # see if any manifests have been specified
    if len(self.manifest_files) == 0:
        raise Exception('No manifest files found.')

    passed = 0
    failed = 0
    total = 0

    # run the tests from each manifest file
    for manifest_file in self.manifest_files:
        test_dir = os.path.dirname(manifest_file)
        manifest = json.load(open(manifest_file, 'r'))
        count = 1

        for test in manifest['sequence']:
            # skip unsupported types
            skip = True
            test_type = test['@type']
            for tt in test_type:
                if tt in SKIP_TEST_TYPES:
                    skip = True
                    break
                if tt in TEST_TYPES:
                    skip = False
            if skip:
                # print 'Skipping test: "%s" ...' % test['name']
                continue

            print('JSON-LD/%s %04d/%s...' % (
                manifest['name'], count, test['name']), end=' ')

            total += 1
            count += 1

            # read input file
            with open(join(test_dir, test['input'])) as f:
                if test['input'].endswith('.jsonld'):
                    input = json.load(f)
                else:
                    input = f.read().decode('utf8')

            # read expect file
            with open(join(test_dir, test['expect'])) as f:
                if test['expect'].endswith('.jsonld'):
                    expect = json.load(f)
                else:
                    expect = f.read().decode('utf8')

            result = None

            # JSON-LD options
            options = {
                'base': 'http://json-ld.org/test-suite/tests/' +
                        test['input'],
                'useNativeTypes': True
            }

            success = False
            try:
                if 'jld:ExpandTest' in test_type:
                    result = jsonld.expand(input, options)
                elif 'jld:CompactTest' in test_type:
                    ctx = json.load(open(join(test_dir, test['context'])))
                    result = jsonld.compact(input, ctx, options)
                elif 'jld:FlattenTest' in test_type:
                    result = jsonld.flatten(input, None, options)
                elif 'jld:FrameTest' in test_type:
                    frame = json.load(open(join(test_dir, test['frame'])))
                    result = jsonld.frame(input, frame, options)
                elif 'jld:FromRDFTest' in test_type:
                    result = jsonld.from_rdf(input, options)
                elif 'jld:ToRDFTest' in test_type:
                    options['format'] = 'application/nquads'
                    result = jsonld.to_rdf(input, options)
                elif 'jld:NormalizeTest' in test_type:
                    options['format'] = 'application/nquads'
                    result = jsonld.normalize(input, options)

                # check the expected value against the test result
                success = deep_compare(expect, result)

                if success:
                    passed += 1
                    print('PASS')
                else:
                    failed += 1
                    print('FAIL')

                if not success or self.options.verbose:
                    print('Expect:', json.dumps(expect, indent=2))
                    print('Result:', json.dumps(result, indent=2))
            except jsonld.JsonLdError as e:
                print('\nError: ', e)
                failed += 1
                print('FAIL')

            # add EARL report assertion
            EARL['subjectOf'].append({
                '@type': 'earl:Assertion',
                'earl:assertedBy': EARL['doap:developer']['@id'],
                'earl:mode': 'earl:automatic',
                'earl:test': ('http://json-ld.org/test-suite/tests/' +
                              os.path.basename(manifest_file) +
                              test.get('@id', '')),
                'earl:result': {
                    '@type': 'earl:TestResult',
                    'dc:date': datetime.datetime.utcnow().isoformat(),
                    # parenthesize the conditional so failures also get
                    # the 'earl:' prefix
                    'earl:outcome': 'earl:' + ('passed' if success
                                               else 'failed')
                }
            })

    if self.options.earl:
        f = open(self.options.earl, 'w')
        f.write(json.dumps(EARL, indent=2))
        f.close()

    print('Done. Total:%d Passed:%d Failed:%d' % (total, passed, failed))
(options, args) = parser.parse_args()
if len(args) != 1:
    parser.error("Error: incorrect number of arguments, try --help")

doc = get_demo_record(file_name=args[0], verbose=options.verbose)
validate(doc)

if options.verbose:
    from rerodoc.dojson.book import book2marc
    print(json.dumps(book2marc.do(doc), indent=2))

from rerodoc.dojson.utils import get_context
context = get_context("book")
doc.update(context)

if options.verbose:
    print("Input record in json format:")
    print(json.dumps(doc, indent=2))

compacted = jsonld.compact(doc, context)
# print(compacted)
expanded = jsonld.expand(compacted)
# import pprint
# pprint.pprint(expanded)
# flattened = jsonld.flatten(doc)
# framed = jsonld.frame(doc, context)
# normalized = jsonld.normalize(doc, {'format': 'application/nquads'})

graph = Graph().parse(data=json.dumps(compacted, indent=2),
                      format="json-ld")
print(graph.serialize(format=options.format))
def compact(self):
    return jsonld.compact(self, self.get_context(self.context))
    ?producer ldbmovie:producer_name ?producer_name .
    ?rating ldbmovie:content_rating_name ?rating_str .
    ?genre ldbmovie:film_genre_name ?genre_name .
    ?director ldbmovie:director_name ?director_name .
  }
}
""")

# As of July 2015, LinkedMDB's SPARQL endpoint did not support JSON-LD.
sparql.setReturnFormat(Wrapper.N3)
response = sparql.query()
if "response" not in dir(response):
    print response
    sys.exit()
n3 = "".join(response.response.readlines())
print n3

g = Graph()
movie = json.loads(
    g.parse(data=n3, format="n3").serialize(format="json-ld"))
context = "http://schema.org/"
movie = jsonld.compact(movie, {"@context": context})
print json.dumps(movie, indent=4)