def _vcard_name(self):
    """Build and return a graph holding the vCard Name resource for this profile."""
    graph = Graph()
    name_res = Resource(graph, URIRef(self.vcard_name_uri))
    name_res.set(RDF.type, VCARD.Name)
    # Full display name, with surrounding whitespace removed.
    name_res.set(RDFS.label, Literal(self.profile['name'].strip()))
    name_res.set(VCARD.familyName, Literal(self.profile['lastName']))
    name_res.set(VCARD.givenName, Literal(self.profile['firstName']))
    return graph
def new_container(uid):
    """Create an in-memory LDP container resource for *uid*.

    Raises InvalidResourceError for a malformed uid (must start with '/'
    and not be the bare root '/'), and ResourceExistsError when a
    resource with that uid already exists in the store.
    """
    malformed = not uid.startswith('/') or uid == '/'
    if malformed:
        raise InvalidResourceError(uid)
    if rdfly.ask_rsrc_exists(uid):
        raise ResourceExistsError(uid)
    imr = Resource(Graph(), nsc['fcres'][uid])
    return Ldpc(uid, provided_imr=imr)
def _vcard_title(self):
    """Build and return a graph holding the vCard Title for this profile's position."""
    position = self.profile['position_title']
    graph = Graph()
    title_res = Resource(graph, self.vcard_title_uri)
    title_res.set(RDF.type, VCARD.Title)
    title_res.set(RDFS.label, Literal(position))
    title_res.set(VCARD.title, Literal(position))
    return graph
def get_subclasses(self, ontology_class):
    """Return the identifiers of all transitive subclasses of *ontology_class*.

    Results are memoized in ``self.subclass_cache`` keyed by the class.
    """
    cached = self.subclass_cache.get(ontology_class)
    if cached is None:
        class_res = Resource(self.graph, ontology_class)
        cached = [
            sub.identifier
            for sub in class_res.transitive_subjects(RDFS.subClassOf)
        ]
        self.subclass_cache[ontology_class] = cached
    return cached
def _vcard_title(self):
    """Build a graph holding the vCard Title, or None when no academic title is set."""
    if not hasattr(self, 'academictitle'):
        return None
    graph = Graph()
    title_res = Resource(graph, self.vcard_title_uri)
    title_res.set(RDF.type, VCARD.Title)
    title_res.set(RDFS.label, Literal(self.academictitle))
    title_res.set(VCARD.title, Literal(self.academictitle))
    return graph
def getdeftoken(g, uri, qualify=True):
    """Return a token form of a URI.

    :param g: graph used to resolve the URI's qname.
    :param uri: the URI (any str()-able value) to tokenize.
    :param qualify: when true, return the qname of the URI in *g*,
        falling back to the placeholder "dummy" if no qname can be
        derived; otherwise delegate to getonttoken().
    """
    if not qualify:
        return getonttoken(str(uri))
    try:
        return Resource(g, URIRef(str(uri))).qname()
    except Exception:
        # Was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; narrow to Exception.
        return "dummy"
def get_components(cls, item_type: str, url: str, paging: bool = False, page_size: int = 0, page_no: int = 0):
    """Build an LDP BasicContainer of OSLC Components for *item_type*.

    Returns the container Resource on success, or False when the
    (dot-normalized) item_type is not a known item type.
    """
    # Lazily create the singleton and (re)initialize the shared graph.
    if not cls.__instance:
        cls()
    cls.__init_graph()
    if not cls.__item_types:
        cls.__item_types = cls.__get_item_types()
    # Item types are stored with spaces where the URL uses dots.
    if re.sub('\\.', ' ', item_type) in cls.__item_types.values():
        # Normalize: URL-decode, then replace spaces back with dots.
        item_type = unquote(item_type)
        item_type = re.sub(' ', '.', item_type)
        url = unquote(url)
        url = re.sub(' ', '.', url)
        container = Resource(cls.__graph, URIRef(url))
        container.add(RDF.type, LDP.BasicContainer)
        config_ids = load_items(cls.__source_base_uri, item_type, page_size, page_no)
        # __get_paging may trim config_ids to the page and return a
        # ResponseInfo resource when paging is active.
        ri, config_ids = cls.__get_paging(item_type, config_ids, url, paging, page_size, page_no)
        if ri:
            container.add(OSLC.responseInfo, ri)
        # One Component member per configuration id, contained by the container.
        for config_id in config_ids:
            member_url = url + f'/{config_id}'
            member = Resource(cls.__graph, URIRef(member_url))
            member.add(RDF.type, OSLC_CONFIG.Component)
            member.add(DCTERMS.title, Literal(config_ids[config_id]['keyed_name']))
            container.add(LDP.contains, member)
        return container
    else:
        # Unknown item type: signal failure to the caller.
        return False
def __get_response_info(cls, item_type: str, url: str, url_sp: str, paging: bool, page_size: int, page_no: int) -> Resource:
    """Build a resource at *url* listing one rdfs:member per loaded item,
    attaching an oslc:responseInfo resource when paging yields one."""
    loaded = load_items(cls.__source_base_uri, item_type, page_size, page_no)
    info = Resource(cls.__graph, URIRef(url))
    response_info, loaded = cls.__get_paging(item_type, loaded, url, paging, page_size, page_no)
    if response_info:
        info.add(OSLC.responseInfo, response_info)
    for name in loaded:
        # Member URLs use dots in place of spaces.
        member_uri = URIRef(url + '/' + re.sub(' ', '.', name))
        info.add(RDFS.member, Resource(cls.__graph, member_uri))
    return info
def _vcard_email(self):
    """Build a graph holding the vCard Email resource, or None when no email is set."""
    if not hasattr(self, 'email'):
        return None
    graph = Graph()
    email_res = Resource(graph, self.vcard_email_uri)
    email_res.set(RDF.type, VCARD.Email)
    # Label probably not necessary
    email_res.set(RDFS.label, Literal(self.email))
    email_res.set(VCARD.email, Literal(self.email))
    return graph
def _vcard_name(self):
    """Build a graph holding the vCard Name resource, including an
    optional VIVO middle name when present."""
    graph = Graph()
    name_res = Resource(graph, URIRef(self.vcard_name_uri))
    name_res.set(RDF.type, VCARD.Name)
    name_res.set(RDFS.label, Literal(self._label()))
    name_res.set(VCARD.familyName, Literal(self.cffamilynames))
    name_res.set(VCARD.givenName, Literal(self.cffirstnames))
    if hasattr(self, 'middlename'):
        name_res.set(VIVO.middleName, Literal(self.middlename))
    return graph
def to_rdf(self):
    """Serialize this concept as a SKOS Concept graph, merged with its
    researcher and narrower-concept subgraphs."""
    graph = Graph()
    concept = Resource(graph, self.uri)
    concept.set(RDF.type, SKOS.Concept)
    concept.set(RDFS.label, Literal(self.name))
    concept.set(CONVERIS.converisId, Literal(self.cid))
    # Merge in related subgraphs built elsewhere on this object.
    graph += self.has_researchers()
    graph += self.get_narrower()
    return graph
def _date(self, dtype, dv):
    """Build a VIVO DateTimeValue for date string *dv*.

    Returns (date_uri, graph); the graph carries label/dateTime/precision
    only when *dv* parses successfully.
    """
    graph = Graph()
    parsed = converis.convert_date(dv)
    date_uri = URIRef(DATA_NAMESPACE + 'date' + dtype + self.vid)
    date_res = Resource(graph, date_uri)
    date_res.set(RDF.type, VIVO.DateTimeValue)
    if parsed is not None:
        date_res.set(RDFS.label, Literal(dv))
        date_res.set(VIVO.dateTime, Literal(parsed, datatype=XSD.date))
        date_res.set(VIVO.dateTimePrecision, VIVO.yearMonthDayPrecision)
    return date_uri, graph
def unified_orgs(self):
    """Build a graph with one UnifiedOrganization per unified org name
    found across this record's addresses."""
    graph = Graph()
    for address in self.addresses():
        for org_name in address["unified_orgs"]:
            org_res = Resource(graph, waan_uri(org_name))
            org_res.set(RDF.type, WOS.UnifiedOrganization)
            org_res.set(RDFS.label, Literal(org_name))
            # relation set by address
    return graph
def to_rdf(self, graph):
    """Serialize this ResponseInfo into *graph*; returns the Resource."""
    super(ResponseInfo, self).to_rdf(graph)
    base_uri = self.about
    info = Resource(graph, URIRef(base_uri))
    info.add(RDF.type, OSLC.ResponseInfo)
    if self.title:
        info.add(DCTERMS.title, Literal(self.title, datatype=XSD.Literal))
    if self.members:
        # One rdfs:member per item, addressed below the base URI.
        for entry in self.members:
            member_res = Resource(graph, URIRef(base_uri + '/' + entry.identifier))
            info.add(RDFS.member, member_res)
    if self.total_count and self.total_count > 0:
        info.add(OSLC.totalCount, Literal(self.total_count))
    return info
def imr(self, v):
    '''
    Replace in-memory buffered resource.

    @param v (set | rdflib.Graph) New set of triples to populate the
    IMR with. A Resource is accepted too; its backing graph is used.
    '''
    # Unwrap a Resource down to its graph so the += below works uniformly.
    if isinstance(v, Resource):
        v = v.graph
    self._imr = Resource(Graph(), self.uri)
    self._imr.graph += v
def to_rdf(self):
    """Serialize this organization as a FOAF Organization graph.

    Returns an rdflib Graph with the org's label, Converis id, optional
    overview, and one OBO BFO_0000051 ("has part") triple per child org.
    """
    g = Graph()
    org = Resource(g, self.uri)
    org.set(RDF.type, FOAF.Organization)
    org.set(RDFS.label, Literal(self.cfname))
    org.set(CONVERIS.converisId, Literal(self.cid))
    if hasattr(self, 'cfresact'):
        org.set(VIVO.overview, Literal(self.cfresact))
    for child in self.get_children():
        # Has sub-organization. Use add() rather than set(): set() replaces
        # any existing value for the predicate, so the original loop kept
        # only the LAST child organization.
        org.add(OBO['BFO_0000051'], child)
    return g
def __get_query_capability(cls, item_type_name: str, item_type_name_url: str, uri: str) -> Resource:
    """Build an anonymous OSLC QueryCapability for the given item type."""
    capability = Resource(cls.__graph, BNode())
    capability.add(RDF.type, OSLC.QueryCapability)
    capability.add(DCTERMS.title, Literal(f'Query Capability for ItemType: {item_type_name}'))
    capability.add(OSLC.queryBase, URIRef(uri))
    capability.add(OSLC.resourceType, URIRef(ARAS.term(item_type_name_url)))
    # The shape document lives directly under the query base.
    capability.add(OSLC.resourceShape, URIRef(uri + '/resourceShape'))
    return capability
def _createProvenance(self, result):
    """Attach a PROV provenance graph (user agent, software agent,
    activity) to *result*'s IProvenanceData as serialized Turtle."""
    provdata = IProvenanceData(result)
    from rdflib import URIRef, Literal, Namespace, Graph
    from rdflib.namespace import RDF, RDFS, FOAF, DCTERMS, XSD
    from rdflib.resource import Resource
    PROV = Namespace(u"http://www.w3.org/ns/prov#")
    BCCVL = Namespace(u"http://ns.bccvl.org.au/")
    LOCAL = Namespace(u"urn:bccvl:")
    graph = Graph()
    # the user is our agent
    member = api.user.get_current()
    username = member.getProperty('fullname') or member.getId()
    user = Resource(graph, LOCAL['user'])
    user.add(RDF['type'], PROV['Agent'])
    user.add(RDF['type'], FOAF['Person'])
    user.add(FOAF['name'], Literal(username))
    user.add(FOAF['mbox'], URIRef('mailto:{}'.format(member.getProperty('email'))))
    # add software as agent
    software = Resource(graph, LOCAL['software'])
    software.add(RDF['type'], PROV['Agent'])
    software.add(RDF['type'], PROV['SoftwareAgent'])
    software.add(FOAF['name'], Literal('BCCVL ALA Importer'))
    # script content is stored somewhere on result and will be exported with zip?
    # ... or store along with pstats.json ? hidden from user
    # -> execenvironment after import -> log output?
    # -> source code ... maybe some link expression? stored on result ? separate entity?
    activity = Resource(graph, LOCAL['activity'])
    activity.add(RDF['type'], PROV['Activity'])
    # TODO: this is rather queued or created time for this activity ... could capture real start time on running status update (or start transfer)
    now = datetime.now().replace(microsecond=0)
    activity.add(PROV['startedAtTime'], Literal(now.isoformat(), datatype=XSD['dateTime']))
    activity.add(PROV['hasAssociationWith'], user)
    activity.add(PROV['hasAssociationWith'], software)
    # add job parameters to activity
    # NOTE(review): BCCVL namespace is declared but unused here — presumably
    # reserved for the job-parameter triples hinted at above; confirm.
    provdata.data = graph.serialize(format="turtle")
def generate_rdf(data):
    """Build a BIBO/DCO graph of publication metadata from *data*
    ((uri, record) pairs) and write it to pub-info.ttl as Turtle.

    Records that fail to parse (ValueError from Publication) are skipped.
    """
    DCO = Namespace("http://info.deepcarbon.net/schema#")
    VIVO = Namespace("http://vivoweb.org/ontology/core#")
    SKOS = Namespace("http://www.w3.org/2004/02/skos/core#")
    BIBO = Namespace("http://purl.org/ontology/bibo/")
    g = Graph()
    # NOTE(review): SKOS is declared but never bound or used below — confirm
    # whether it is a leftover or reserved for later use.
    g.bind("dco", DCO)
    g.bind("bibo", BIBO)
    g.bind("vivo", VIVO)
    for (uri, record) in data:
        try:
            publication = Publication(record)
            pub = Resource(g, URIRef(uri))
            # Each bibliographic field is optional; only emit triples for
            # values the record actually carries.
            if publication.issue is not None:
                pub.add(BIBO.issue, Literal(publication.issue))
            if publication.issued is not None and publication.issued[
                    "year"] is not None:
                pub.add(
                    DCO.yearOfPublicationYear,
                    Literal(publication.issued["year"], datatype=XSD.gYear))
            if publication.volume is not None:
                pub.add(BIBO.volume, Literal(publication.volume))
            if publication.issn is not None:
                pub.add(BIBO.issn, Literal(publication.issn))
            if publication.pages is not None:
                # A page range like "12-34" is split into start/end; the
                # literal "n/a" on either side is dropped.
                if "-" in publication.pages:
                    pageStart = publication.pages[:publication.pages.find("-")]
                    pageEnd = publication.pages[publication.pages.find("-") + 1:]
                    if pageStart != "n/a":
                        pub.add(BIBO.pageStart, Literal(pageStart))
                    if pageEnd != "n/a":
                        pub.add(BIBO.pageEnd, Literal(pageEnd))
                else:
                    # Single page values are deliberately not emitted.
                    #pub.add(BIBO.pages, Literal(publication.pages))
                    pass
        except ValueError as err:
            # Skip unparseable records silently.
            #print((uri,str(err)))
            pass
    with open("pub-info.ttl", "w") as out:
        out.write(
            g.serialize(format="turtle", encoding="UTF-8").decode(encoding="UTF-8"))
def to_rdf(self, graph):
    """Serialize this PrefixDefinition into *graph* as an anonymous node;
    returns the Resource."""
    super(PrefixDefinition, self).to_rdf(graph)
    definition = Resource(graph, BNode())
    definition.add(RDF.type, OSLC.PrefixDefinition)
    if self.prefix:
        definition.add(OSLC.prefix, Literal(self.prefix))
    if self.prefix_base:
        definition.add(OSLC.prefixBase, URIRef(self.prefix_base.uri))
    return definition
def crawl_graph(self):
    """Index property relationships between classes in the graph.

    Returns a dict keyed by class identifier; each value holds "down"
    entries (property + subclasses of its range, reachable from a domain
    class) and "up" entries (property + subclasses of its domain,
    reachable from a range class).
    """
    ret = {}
    # Walk every declared rdf:Property.
    for ontology_property, p, o in self.graph.triples(
            (None, None, RDF.Property)):
        # For each domain class, record "down" links toward range classes.
        for s, p, domain_class in self.graph.triples(
                (ontology_property, RDFS.domain, None)):
            domain_class = Resource(self.graph, domain_class)
            # transitive_subjects walks subClassOf downward from the class.
            for domain_subclass in domain_class.transitive_subjects(
                    RDFS.subClassOf):
                if domain_subclass.identifier not in ret:
                    ret[domain_subclass.identifier] = {
                        "down": [],
                        "up": []
                    }
                for s, p, range_class in self.graph.triples(
                        (ontology_property, RDFS.range, None)):
                    ret[domain_subclass.identifier]["down"].append({
                        "ontology_property": ontology_property,
                        "ontology_classes": self.get_subclasses(range_class)
                    })
        # For each range class, record "up" links back toward domain classes.
        for s, p, range_class in self.graph.triples(
                (ontology_property, RDFS.range, None)):
            range_class = Resource(self.graph, range_class)
            for range_subclass in range_class.transitive_subjects(
                    RDFS.subClassOf):
                if range_subclass.identifier not in ret:
                    ret[range_subclass.identifier] = {"down": [], "up": []}
                # NOTE(review): this inner triples() query shadows the outer
                # s/p/o names — presumably intentional, but verify.
                for s, p, o in self.graph.triples(
                        (ontology_property, RDFS.domain, None)):
                    ret[range_subclass.identifier]["up"].append({
                        "ontology_property": ontology_property,
                        "ontology_classes": self.get_subclasses(o)
                    })
    return ret
def add_metadata_for_subject (rdf_graph,subject_uri,namespaces,nidm_obj): """ Cycles through triples for a particular subject and adds them to the nidm_obj :param rdf_graph: RDF graph object :param subject_uri: URI of subject to query for additional metadata :param namespaces: Namespaces in NIDM document :param nidm_obj: NIDM object to add metadata :return: None """ #Cycle through remaining metadata and add attributes for predicate, objects in rdf_graph.predicate_objects(subject=subject_uri): #if find qualified association if predicate == URIRef(Constants.PROV['qualifiedAssociation']): #need to get associated prov:Agent uri, add person information to graph for agent in rdf_graph.objects(subject=subject_uri, predicate=Constants.PROV['wasAssociatedWith']): #add person to graph and also add all metadata person = nidm_obj.add_person(uuid=agent) #now add metadata for person add_metadata_for_subject(rdf_graph=rdf_graph,subject_uri=agent,namespaces=namespaces,nidm_obj=person) #get role information for bnode in rdf_graph.objects(subject=subject_uri,predicate=Constants.PROV['qualifiedAssociation']): #for bnode, query for object which is role? How? #term.BNode.__dict__() #create temporary resource for this bnode r = Resource(rdf_graph,bnode) #get the object for this bnode with predicate Constants.PROV['hadRole'] for r_obj in r.objects(predicate=Constants.PROV['hadRole']): #create qualified names for objects obj_nm,obj_term = split_uri(r_obj._identifier) for uris in namespaces: if uris.uri == URIRef(obj_nm): #create qualified association in graph nidm_obj.add_qualified_association(person=person,role=pm.QualifiedName(uris,obj_term)) else: if validators.url(objects): #create qualified names for objects obj_nm,obj_term = split_uri(objects) for uris in namespaces: if uris.uri == URIRef(obj_nm): #prefix = uris.prefix nidm_obj.add_attributes({predicate : pm.QualifiedName(uris,obj_term)}) else: nidm_obj.add_attributes({predicate : get_RDFliteral_type(objects)})
def sub_orgs(self):
    """Build a graph with one SubOrganization resource per sub-organization
    listed under each of this record's addresses.

    Labels take the form "<suborg>, <org>" and the URI is derived from
    the label via sub_org_uri().
    """
    g = Graph()
    for addr in self.addresses():
        org = addr["organization"]
        # Dropped unused locals from the original: the enumerate() index
        # and the addr["number"] binding were never read.
        for suborg in addr['sub_organizations']:
            label = "{}, {}".format(suborg, org)
            r = Resource(g, self.sub_org_uri(label))
            r.set(RDF.type, WOS.SubOrganization)
            r.set(RDFS.label, Literal(label))
            r.set(WOS.organizationName, Literal(org))
            r.set(WOS.subOrganizationName, Literal(suborg))
    return g
def get_metadata(self, uid, ver_uid=None, strict=True):
    '''
    This is an optimized query to get only the administrative metadata.

    @param uid Resource UID; when ver_uid is given, the snapshot UID for
    that version is used instead.
    @param strict When true, the resource status is checked and may raise.
    '''
    logger.debug('Getting metadata for: {}'.format(uid))
    if ver_uid:
        uid = self.snapshot_uid(uid, ver_uid)
    # Union with an empty graph yields a detached copy of the admin graph.
    admin_gr = self.ds.graph(nsc['fcadmin'][uid]) | Graph()
    rsrc = Resource(admin_gr, nsc['fcres'][uid])
    if strict:
        self._check_rsrc_status(rsrc)
    return rsrc
def get_service_provider(cls, url: str):
    """Build an OSLC ServiceProvider resource at *url*, or return False
    when no service can be resolved for the URL."""
    if not cls.__instance:
        cls()
    cls.__init_graph()
    service = cls.__get_service(url=url)
    if not service:
        return False
    provider = Resource(cls.__graph, URIRef(url))
    provider.add(RDF.type, OSLC.ServiceProvider)
    provider.add(OSLC.service, service)
    return provider
def _vcard_email(self):
    """Build a graph of vCard email data from the profile.

    Prefers the pipe-delimited "emails" field, falling back to the
    single "email" field; returns an empty graph when neither exists.
    """
    g = Graph()
    try:
        # split() already returns a list; the original wrapped it in a
        # redundant identity comprehension.
        emails = self.profile["emails"].split("|")
    except KeyError:
        try:
            emails = [self.profile['email']]
        except KeyError:
            emails = []
    for email in emails:
        # NOTE(review): every iteration reuses self.vcard_email_uri, so with
        # multiple addresses each set() overwrites the previous one and only
        # the last email survives — confirm this is intended.
        vt = Resource(g, self.vcard_email_uri)
        vt.set(RDF.type, VCARD.Work)
        # Label probably not necessary
        vt.set(RDFS.label, Literal(email))
        vt.set(VCARD.email, Literal(email))
    return g
def to_rdf(self, graph):
    """Serialize this OAuth configuration into *graph*; returns the Resource.

    Raises:
        Exception: when the `about` URI is missing.
    """
    if not self.about:
        # Fixed error message: the original said "The title is missing",
        # but the attribute being validated here is `about`.
        raise Exception("The about attribute is missing")
    oac = Resource(graph, URIRef(self.about))
    oac.add(RDF.type, URIRef(OSLC.oauthConfiguration))
    if self.authorization_uri:
        oac.add(OSLC.authorizationURI, URIRef(self.authorization_uri))
    if self.oauth_access_token_uri:
        oac.add(OSLC.oauthAccessTokenURI, URIRef(self.oauth_access_token_uri))
    if self.oauth_request_token_uri:
        oac.add(OSLC.oauthRequestTokenURI, URIRef(self.oauth_request_token_uri))
    return oac
def get_dti(self, start, end):
    """Build a VIVO DateTimeInterval from optional start/end date strings.

    Returns (interval_uri, graph), or None when both bounds are missing.
    """
    if (start is None) and (end is None):
        return
    # Date/Time Interval
    graph = Graph()
    interval_uri = D['dti'] + self.vid
    interval = Resource(graph, interval_uri)
    interval.set(RDF.type, VIVO.DateTimeInterval)
    if start is not None:
        start_uri, start_graph = self._date("start", start)
        interval.set(VIVO.start, start_uri)
        graph += start_graph
    if end is not None:
        end_uri, end_graph = self._date("end", end)
        graph += end_graph
        interval.set(VIVO.end, end_uri)
    return interval_uri, graph
def to_rdf(self, graph):
    """Serialize this ServiceProvider into *graph*; returns the Resource."""
    super(ServiceProvider, self).to_rdf(graph)
    # URI is self.about, or about + '/<identifier>' when the identifier is
    # not already embedded in it.
    # NOTE(review): if self.identifier is None the __contains__ call raises
    # TypeError before the trailing `if self.identifier` guard applies —
    # confirm identifier is always set by callers.
    uri = self.about if self.about.__contains__(self.identifier) \
        else self.about + '/{}'.format(self.identifier) if self.identifier else ''
    sp = Resource(graph, URIRef(uri))
    sp.add(RDF.type, OSLC.ServiceProvider)
    if self.identifier:
        sp.add(DCTERMS.identifier, Literal(self.identifier, datatype=XSD.string))
    if self.title:
        # NOTE(review): XSD.Literal is not a standard XSD datatype term —
        # verify against the intended vocabulary (likely XSD.string).
        sp.add(DCTERMS.title, Literal(self.title, datatype=XSD.Literal))
    if self.description:
        sp.add(DCTERMS.description, Literal(self.description))
    if self.publisher:
        sp.add(DCTERMS.publisher, URIRef(self.publisher.about))
    if self.service:
        # Each Service serializes itself and is linked from the provider.
        for s in self.service:
            r = s.to_rdf(graph)
            sp.add(OSLC.service, r)
    if self.details:
        sp.add(OSLC.details, URIRef(self.details))
    if self.oauth_configuration:
        sp.add(OSLC.oauthConfiguration, URIRef(self.oauth_configuration.about))
    if self.prefix_definition:
        for pd in self.prefix_definition:
            r = pd.to_rdf(graph)
            sp.add(OSLC.prefixDefinition, r)
    # Jazz/Global Configuration capability flags.
    sp.add(JAZZ_PROCESS.supportContributionsToLinkIndexProvider, Literal(True, datatype=XSD.boolean))
    sp.add(JAZZ_PROCESS.supportLinkDiscoveryViaLinkIndexProvider, Literal(True, datatype=XSD.boolean))
    sp.add(JAZZ_PROCESS.supportOSLCSimpleQuery, Literal(True, datatype=XSD.boolean))
    # NOTE(review): XSD.String (capital S) is not the standard xsd:string
    # term — confirm whether XSD.string was intended.
    sp.add(JAZZ_PROCESS.globalConfigurationAware, Literal('yes', datatype=XSD.String))
    return sp
def to_rdf(self, graph):
    """Serialize this Preview into *graph* as an anonymous node; returns
    the Resource."""
    super(Preview, self).to_rdf(graph)
    preview = Resource(graph, BNode())
    preview.add(RDF.type, OSLC.Preview)
    if self.document:
        preview.add(OSLC.document, URIRef(self.document))
    # Size hints are serialized as xsd:string literals when present.
    if self.hint_height:
        preview.add(OSLC.hintHeight, Literal(self.hint_height, datatype=XSD.string))
    if self.hint_width:
        preview.add(OSLC.hintWidth, Literal(self.hint_width, datatype=XSD.string))
    if self.initial_height:
        preview.add(OSLC.initialHeight, Literal(self.initial_height, datatype=XSD.string))
    return preview