def process_journal(records, writer, mappings):
    record, fields = majority_vote(records, ('Journal',), mappings)
    if record.get('issn'):
        uri = URIRef('urn:issn:%s' % record['issn'])
        graph_uri = URIRef('/graph/issn/%s' % record['issn'])
    elif record.get('x-nlm-ta'):
        uri = URIRef('/id/journal/%s' % sluggify(record['x-nlm-ta']))
        graph_uri = URIRef('/graph/journal/%s' % sluggify(record['x-nlm-ta']))
    elif record.get('name'):
        uri = URIRef('/id/journal/%s' % sluggify(record['name']))
        graph_uri = URIRef('/graph/journal/%s' % sluggify(record['name']))
    else:
        sys.stderr.write("Unidentifiable: %s" % record)
        return
    for id, _ in fields['id']:
        mappings['id'][id] = uri
    mappings['journal'][uri] = graph_uri.split('/', 3)[-1]
    writer.send((uri, RDF.type, FABIO.Journal, graph_uri))
    for key, predicate in JOURNAL_DATA_PROPERTIES:
        if key in record:
            writer.send((uri, predicate, Literal(record[key]), graph_uri))
    if isinstance(record.get('publisher'), URIRef):
        writer.send((uri, DCTERMS.publisher, record['publisher'], graph_uri))
def detect_namespace(rdf):
    """Try to automatically detect the URI namespace of the vocabulary.

    Return namespace as URIRef.

    """
    # pick a concept
    conc = rdf.value(None, RDF.type, SKOS.Concept, any=True)
    if conc is None:
        logging.critical(
            "Namespace auto-detection failed. "
            "Set namespace using the --namespace option.")
        sys.exit(1)

    ln = localname(conc)
    ns = URIRef(conc.replace(ln, ''))
    if ns.strip() == '':
        logging.critical(
            "Namespace auto-detection failed. "
            "Set namespace using the --namespace option.")
        sys.exit(1)

    logging.info(
        "Namespace auto-detected to '%s' "
        "- you can override this with the --namespace option.", ns)
    return ns
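# A minimal usage sketch for detect_namespace (hypothetical data; assumes the
# localname() helper used above strips everything up to the last '/' or '#'):
#
#   g = Graph()
#   g.add((URIRef('http://example.org/vocab/cat'), RDF.type, SKOS.Concept))
#   detect_namespace(g)   # -> URIRef('http://example.org/vocab/')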
def discussion_as_graph(self, discussion_id):
    self.ensure_discussion_storage(None)
    from assembl.models import Discussion
    d_storage_name = self.discussion_storage_name()
    d_graph_iri = URIRef(self.discussion_graph_iri())
    v = get_virtuoso(self.session, d_storage_name)
    discussion_uri = URIRef(
        Discussion.uri_generic(discussion_id, self.local_uri()))
    subjects = list(v.query(
        """SELECT DISTINCT ?s WHERE {
        ?s assembl:in_conversation %s }""" % (discussion_uri.n3())))
    subjects.append([discussion_uri])
    # print len(subjects)
    cg = ConjunctiveGraph(identifier=d_graph_iri)
    for (s,) in subjects:
        # Absurdly slow. DISTINCT speeds up a lot, but I get numbers.
        for p, o in v.query(
                'SELECT ?p ?o WHERE { graph %s { %s ?p ?o }}' % (
                    d_graph_iri.n3(), s.n3())):
            cg.add((s, p, o))

    for (s, o, g) in v.query(
            '''SELECT ?s ?o ?g WHERE {
            GRAPH ?g {?s catalyst:expressesIdea ?o } .
            ?o assembl:in_conversation %s }''' % (discussion_uri.n3())):
        cg.add((s, CATALYST.expressesIdea, o, g))

    # TODO: Add roles
    return cg
def view(rtype, rid):
    if '.' in rid:
        rid, suffix = rid.rsplit('.', 1)
    else:
        suffix = None
    path = rtype + '/' + rid
    uri = URIRef(app.config['RESOURCE_BASE'] + path)
    #if template:
    services = app.config['SERVICES']
    rq = render_template("queries/%s.rq" % rtype, prefixes=RQ_PREFIXES,
            this=uri.n3(), services=services)
    fmt = _conneg_format(suffix)
    if fmt == 'rq':
        return rq, 200, {'Content-Type': 'text/plain'}
    res = run_query(app.config['ENDPOINT'], rq)
    #else:
    #    url = data_base + path + '.n3'
    #    res = requests.get(url)
    graph = to_graph(res.content)
    this = graph.resource(uri)
    if fmt in ('html', 'xhtml'):
        return render_template(rtype + '.html',
                path=path, this=this, curies=graph.qname)
    else:
        headers = {'Content-Type': MIMETYPES.get(fmt) or 'text/plain'}
        fmt = {'rdf': 'xml', 'ttl': 'turtle'}.get(fmt) or fmt
        return graph.serialize(format=fmt), 200, headers
def get_synthesis_contributors(self, id_only=True):
    # author of important extracts
    from .idea_content_link import Extract
    from .auth import AgentProfile
    from .post import Post
    from sqlalchemy.sql.functions import count
    local_uri = AssemblQuadStorageManager.local_uri()
    discussion_storage = \
        AssemblQuadStorageManager.discussion_storage_name()

    idea_uri = URIRef(self.uri(local_uri))
    clause = '''select distinct ?annotation where {
        %s idea:includes* ?ideaP .
        ?annotation assembl:resourceExpressesIdea ?ideaP }'''
    extract_ids = [x for (x,) in self.db.execute(
        SparqlClause(clause % (
            idea_uri.n3(),),
            quad_storage=discussion_storage.n3()))]
    r = list(self.db.query(AgentProfile.id, count(Extract.id)).join(
        Post, Post.creator_id == AgentProfile.id).join(Extract).filter(
        Extract.important == True, Extract.id.in_(extract_ids)))
    r.sort(key=lambda x: x[1], reverse=True)
    if id_only:
        return [AgentProfile.uri_generic(a) for (a, ce) in r]
    else:
        ids = [a for (a, ce) in r]
        order = {id: order for (order, id) in enumerate(ids)}
        agents = self.db.query(AgentProfile).filter(AgentProfile.id.in_(ids)).all()
        agents.sort(key=lambda a: order[a.id])
        return agents
def discussion_as_graph(self, discussion_id):
    from assembl.models import Discussion, AgentProfile
    local_uri = self.local_uri()
    discussion = Discussion.get(discussion_id)
    d_storage_name = self.discussion_storage_name()
    d_graph_iri = URIRef(self.discussion_graph_iri())
    v = get_virtuoso(self.session, d_storage_name)
    discussion_uri = URIRef(
        Discussion.uri_generic(discussion_id, local_uri))
    subjects = [s for (s,) in v.query(
        """SELECT DISTINCT ?s WHERE {
        ?s assembl:in_conversation %s }""" % (discussion_uri.n3()))]
    subjects.append(discussion_uri)
    participant_ids = list(discussion.get_participants(True))
    profiles = {URIRef(AgentProfile.uri_generic(id, local_uri))
                for id in participant_ids}
    subjects.extend(profiles)
    # add pseudo-accounts
    subjects.extend((URIRef("%sAgentAccount/%d" % (local_uri, id))
                     for id in participant_ids))
    # print len(subjects)
    cg = ConjunctiveGraph(identifier=d_graph_iri)
    self.add_subject_data(v, cg, subjects)
    # add relationships of non-pseudo accounts
    for ((account, p, profile), g) in v.triples((None, SIOC.account_of, None)):
        if profile in profiles:
            cg.add((account, SIOC.account_of, profile, g))
            # Tempting: simplify with this.
            # cg.add((profile, FOAF.account, account, g))
    for (s, o, g) in v.query(
            '''SELECT ?s ?o ?g WHERE {
            GRAPH ?g {?s catalyst:expressesIdea ?o } .
            ?o assembl:in_conversation %s }''' % (discussion_uri.n3())):
        cg.add((s, CATALYST.expressesIdea, o, g))
    return cg
def instance_view_jsonld(request):
    from assembl.semantic.virtuoso_mapping import AssemblQuadStorageManager
    from rdflib import URIRef, ConjunctiveGraph
    ctx = request.context
    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(
        user_id, ctx.get_discussion_id())
    instance = ctx._instance
    if not instance.user_can(user_id, CrudPermissions.READ, permissions):
        return HTTPUnauthorized()
    discussion = ctx.get_instance_of_class(Discussion)
    if not discussion:
        raise HTTPNotFound()
    aqsm = AssemblQuadStorageManager()
    uri = URIRef(aqsm.local_uri() + instance.uri()[6:])
    d_storage_name = aqsm.discussion_storage_name(discussion.id)
    v = get_virtuoso(instance.db, d_storage_name)
    cg = ConjunctiveGraph(v, d_storage_name)
    result = cg.triples((uri, None, None))
    #result = v.query('select ?p ?o ?g where {graph ?g {<%s> ?p ?o}}' % uri)
    # Something is wrong here.
    triples = '\n'.join([
        '%s %s %s.' % (uri.n3(), p.n3(), o.n3())
        for (s, p, o) in result
        if '_with_no_name_entry' not in o])
    return aqsm.quads_to_jsonld(triples)
def post(self):
    chan = URIRef(self.get_argument('chan'))
    sub = self.settings.currentSub()
    chanKey = Literal(chan.rsplit('/', 1)[1])
    old = sub.get_levels().get(chanKey, 0)
    sub.editLevel(chan, 0 if old else 1)
def test_creation_with_unknown_ns(self):
    uri = 'http://localhost:8000/resource/aggregation/ton-smits-huis/454'
    predicate = RDFPredicate(uri)
    graph = Graph()
    graph.add((URIRef(uri), FOAF.name, Literal("sjoerd")))
    subject = list(graph.subjects())[0]
    uri_ref = URIRef(uri)
    # n3() only abbreviates to a prefixed name when given a namespace manager;
    # without one it returns the full '<...>' form, so the original bare
    # uri_ref.n3() call could never equal "ns1:454".
    assert uri_ref.n3(graph.namespace_manager) == "ns1:454"
    assert predicate is not None
    assert predicate.label is not None
def get_service():
    g = new_graph()
    me = URIRef(url_for('get_service', _external=True))
    g.add((me, RDF.type, SERVICE_TYPE))
    for db_resource in service_graph.subjects(RDF.type, PARTITION.Root):
        db_resource = URIRef(
            url_for('get_resource',
                    rid=db_resource.replace(URI_PREFIX, ""),
                    _external=True))
        g.add((me, CONTAINMENT_LINK, db_resource))
    response = make_response(g.serialize(format='turtle'))
    response.headers['Content-Type'] = 'text/turtle'
    return response
def enterNewAnnotationMenu(self):
    '''
    Interactive input for a new annotation
    '''
    self.printNewAnnotationMenu()
    i = 1
    for year in self.yearsAnnotations:
        print '{}) {}'.format(i, year["year"])
        i += 1
    print
    year = raw_input('Table to annotate: ')
    cell = raw_input('Cell to annotate: ')
    author = raw_input('Author: ')
    corrected = raw_input('Corrected value (leave blank if none): ')
    flag = raw_input('Flag: ')

    graphURI = URIRef(self.yearsAnnotations[int(year) - 1]["uri"])
    d2sGraphURI = graphURI.replace("cedar-project.nl", "www.data2semantics.org")
    annoURI = URIRef(d2sGraphURI + '/NOORDBRABANT/' + cell)
    cellURI = annoURI.replace("annotations", "data")

    # Create the new annotation
    query = """
    PREFIX oa: <http://www.w3.org/ns/openannotation/core/>
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    INSERT INTO GRAPH <""" + graphURI + """> {
        <""" + annoURI + """> a oa:Annotation;
        oa:annotated \"""" + str(datetime.datetime.now().strftime("%Y-%m-%d")) + """\"^^xsd:date;
        oa:annotator \"""" + author + """\";
        oa:generated \"""" + str(datetime.datetime.now().strftime("%Y-%m-%d")) + """\"^^xsd:date;
        oa:generator <https://cedar-project.nl/tools/cedar-demo.py>;
        oa:hasBody [ rdf:value \"""" + corrected + ' ' + flag + """\" ];
        oa:hasTarget <""" + cellURI + """>;
        oa:modelVersion <http://www.openannotation.org/spec/core/20120509.html> .
    }
    """
    # query = "INSERT INTO GRAPH <http://cedar-project.nl/annotations/VT_1859_01_H1> {<http://a> rdf:type <http:b>}"
    print query
    self.sparql.setQuery(query)
    self.sparql.setReturnFormat(JSON)
    self.results = self.sparql.query().convert()
def tableView_objectValueForTableColumn_row_(self, tableView, tableColumn, row):
    id = tableColumn.identifier()
    uri = self.resources[row]
    if id == "uri":
        base = self.context
        base = base.split("#", 1)[0]
        uri = URIRef(uri.replace(base, "", 1))  # relativize
        return uri
    elif id == "label":
        return self.redfoot.label(uri, "")
    elif id == "comment":
        return self.redfoot.comment(uri, "")
    else:
        return ""
def check_valid_uri(self, uri):
    """ checks to see if a uri is valid """
    valid = False
    if isinstance(uri, str):
        uri_out = False
        try:
            uri_test = URIRef(uri)
            uri_out = uri_test.n3()
        except:
            # some sort of error thrown, so not valid
            valid = False
        if isinstance(uri_out, str):
            valid = True
    return valid
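# Usage sketch for check_valid_uri: in recent rdflib versions, URIRef.n3()
# raises for URIs containing characters that are illegal in an N3 IRI, which
# is the check relied on above. (Hypothetical host object `obj`.)
#
#   obj.check_valid_uri('http://example.org/ok')        # True
#   obj.check_valid_uri('http://example.org/not valid') # False (space in IRI)
#   obj.check_valid_uri(42)                             # False (not a str)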
def __init__(self, store, identifier=None, graph=None):
    if graph is not None:
        assert identifier is None
        np = store.node_pickler
        identifier = md5()
        s = list(graph.triples((None, None, None)))
        s.sort()
        for t in s:
            identifier.update("^".join((np.dumps(i) for i in t)))
        identifier = URIRef("data:%s" % identifier.hexdigest())
        super(GraphValue, self).__init__(store, identifier)
        for t in graph:
            store.add(t, context=self)
    else:
        super(GraphValue, self).__init__(store, identifier)
def term(self, term, use_prefix=True):
    if isinstance(term, Namespace):
        term = URIRef(term)
    if term is None:
        return RDF.nil
    elif not hasattr(term, 'n3'):
        return self.term(Literal(term))
    elif use_prefix and isinstance(term, URIRef):
        return self.uri(term)
    elif isinstance(term, Literal):
        if term.datatype in (XSD.double, XSD.integer, XSD.float, XSD.boolean):
            return unicode(term).lower()
    elif isinstance(term, Namespace):
        return unicode(term)
    return term.n3()
def bulk_update(self, named_graph, graph, size, is_add=True):
    """
    Bulk adds or deletes. Triples are chunked into n size groups before
    sending to API. This prevents the API endpoint from timing out.
    """
    context = URIRef(named_graph)
    total = len(graph)
    if total > 0:
        for set_size, nt in self.nt_yielder(graph, size):
            if is_add is True:
                logger.debug("Adding {} statements to <{}>.".format(set_size, named_graph))
                self.update(u'INSERT DATA { GRAPH %s { %s } }' % (context.n3(), nt))
            else:
                logger.debug("Removing {} statements from <{}>.".format(set_size, named_graph))
                self.update(u'DELETE DATA { GRAPH %s { %s } }' % (context.n3(), nt))
    return total
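# `nt_yielder` is defined elsewhere; a minimal sketch of what such a chunker
# might look like (an assumption, not the class's actual implementation),
# serialising each triple with .n3() and yielding (chunk_size, ntriples_text):
def _nt_yielder_sketch(graph, size):
    triples = list(graph)
    for start in range(0, len(triples), size):
        chunk = triples[start:start + size]
        nt = ' '.join('%s %s %s .' % (s.n3(), p.n3(), o.n3())
                      for (s, p, o) in chunk)
        yield len(chunk), nt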
def __init__(self, parent, identifier=None, meta=None):
    self.parent = parent
    if identifier is None:
        identifier = BNode()
    self.identifier = URIRef(identifier)
    self.meta = MetaData(self, meta)
    self.meta.generate()
def get_idea_ids_showing_post(cls, post_id):
    "Given a post, give the ID of the ideas that show this message"
    # This works because of a virtuoso bug...
    # where DISTINCT gives IDs instead of URIs.
    from .generic import Content
    discussion_storage = \
        AssemblQuadStorageManager.discussion_storage_name()
    post_uri = URIRef(Content.uri_generic(
        post_id, AssemblQuadStorageManager.local_uri()))
    return [int(id) for (id,) in cls.default_db.execute(SparqlClause(
        '''select distinct ?idea where {
            %s sioc:reply_of* ?post .
            ?post assembl:postLinkedToIdea ?ideaP .
            ?idea idea:includes* ?ideaP }''' % (post_uri.n3(),),
        quad_storage=discussion_storage.n3()))]
class DOI(DOIMetadata):
    """find and download a pdf for the doi given

    >>> DOI('10.1016/0166-218X(92)00170-Q')
    """
    headers = generic_headers
    headers['Host'] = 'gen.lib.rus.ec'
    headers['Referer'] = 'http://gen.lib.rus.ec/scimag/'
    url = 'http://gen.lib.rus.ec/scimag/?s={}&journalid=&v=&i=&p=&redirect=1'

    def __init__(self, doi, *args, **kwargs):
        super().__init__(doi, *args, **kwargs)
        self.url = URIRef(self.url.format(self.doi))
        r = requests.get(self.url, headers=self.headers)
        r.raise_for_status()
        self.links = re.compile(r'a href="([^"]+pdf)"').findall(r.text)
        link, *links = self.links
        r = requests.get(link, stream=True)
        self.filename = Literal('.'.join((doi.replace('/', '_'), 'pdf')))
        with open(self.filename, 'wb') as fd:
            for chunk in r.iter_content(1024 * 10):
                fd.write(chunk)
        self.path = URIRef(urljoin('file:', pjoin(os.getcwd(), self.filename)))
        self.graph.add((self.path,
                        URIRef('http://purl.org/dc/terms/identifier'),
                        self.doi))
        self.graph.commit()
def canvas_and_images_graph(graph, canvas_uri):
    canvas_uri = URIRef(canvas_uri)

    canvas_graph = Graph()
    canvas_graph += graph.triples((canvas_uri, None, None))

    qres = graph.query("""SELECT ?image_anno ?image WHERE {
        ?image_anno a oa:Annotation .
        ?image_anno oa:hasTarget %s .
        ?image_anno oa:hasBody ?image .
        ?image a ?type .
        FILTER(?type = dcmitype:Image || ?type = dms:Image || ?type = dms:ImageChoice) .
    }""" % (canvas_uri.n3()), initNs=ns)

    for image_anno, image in qres:
        canvas_graph += graph.triples_choices(([image_anno, image], None, None))

    return canvas_graph
def term(self, term, use_prefix=True):
    if isinstance(term, (Namespace, ClosedNamespace)):
        term = URIRef(namespace_to_uri(term))
    if term is None:
        return RDF.nil
    elif not hasattr(term, 'n3'):
        return self.term(Literal(term))
    elif use_prefix and isinstance(term, URIRef):
        return self.uri(term)
    elif isinstance(term, Literal):
        if term.datatype in (XSD.double, XSD.integer, XSD.float, XSD.boolean):
            return unicode(term).lower()
        elif use_prefix and term.datatype:
            # Abbreviate datatype if possible
            datatype_term = self.uri(term.datatype)
            return '"%s"^^%s' % (term, datatype_term)
    elif isinstance(term, Namespace):
        return unicode(term)
    return term.n3()
def relativize(self, uri):
    base = URIRef(self.base)
    basedir = URIRef(self.base if base.endswith('/') else base.rsplit('/', 1)[0])
    if base is not None:
        if uri == base:
            uri = URIRef('')
        elif uri == basedir:
            uri = URIRef('.')
        elif uri.startswith(basedir + '/'):
            uri = URIRef(uri.replace(basedir + '/', "", 1))
    return uri
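# Usage sketch, assuming self.base = 'http://example.org/dir/page'
# (so basedir resolves to 'http://example.org/dir'):
#
#   relativize(URIRef('http://example.org/dir/page'))   # -> URIRef('')
#   relativize(URIRef('http://example.org/dir'))        # -> URIRef('.')
#   relativize(URIRef('http://example.org/dir/other'))  # -> URIRef('other')
#   relativize(URIRef('http://elsewhere.org/x'))        # -> unchanged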
class ContentProvider(object):
    #implements(IContentProvider)

    def __init__(self, config):
        self.config = config
        self.jena = JenaHelper(config)
        self.harvester = URIRef(config['harvesteruri'])
        data = self.jena.graphquery("DESCRIBE %s" % self.harvester.n3())
        #import pdb; pdb.set_trace()
        self.originatingSource = data.value(self.harvester, ANDS.originatingSource) or Literal(config['originatingsource'])
        self.groupDescription = data.value(self.harvester, ANDS.groupDescription) or Literal(config['groupdescription'])
        self.item_sparql_query = resource_string(__name__, "item_sparql.sparql")

    def set_logger(self, log):
        """Set the logger instance for this class"""
        self.log = log

    def update(self, from_date=None):
        """Harvests new content added since from_date and returns a list of
        content_ids that were changed/added. This should be called before
        get_contents is called.
        """
        query = resource_string(__name__, "items_to_harvest.sparql")
        self._content = self.jena.selectquery(query % {'harvester': self.harvester})
        return self._content

    def count(self):
        """Returns the number of content objects in the repository, or None
        if the number is unknown. This should not be called before update
        is called.
        """
        return len(self._content)

    def get_content_ids(self):
        """Returns a list/generator of content_ids."""
        return self._content

    def get_content_by_id(self, id):
        """Return content of a specific id."""
        # assume id is a URIRef instance
        g = Graph(identifier=URIRef(id))
        #print self.item_sparql_query % {"subject": id, 'harvester': self.harvester}
        data = self.jena.graphquery(self.item_sparql_query % {"subject": id, 'harvester': self.harvester}, g)
        #print data
        # FIXME: make these conditional
        data.add((URIRef(id), ANDS.originatingSource, self.originatingSource))
        data.add((URIRef(id), ANDS.groupDescription, self.groupDescription))
        return data
def fixit(current_object):
    """
    Read the def data structure and replace all string URIs with URIRef entities

    :param current_object: the piece of the data structure to be fixed
    :return current_object: the piece repaired in place
    """
    from rdflib import URIRef
    if isinstance(current_object, dict):
        for k in current_object.keys():
            current_object[k] = fixit(current_object[k])
    elif isinstance(current_object, list):
        for i in range(0, len(current_object)):
            current_object[i] = fixit(current_object[i])
    elif isinstance(current_object, basestring):
        if current_object.startswith("http://"):
            current_object = URIRef(current_object)
        elif current_object.startswith("xsd:"):
            current_object = cast_to_rdflib(current_object)
    return current_object
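# Usage sketch (Python 2, matching the `basestring` check above;
# cast_to_rdflib is the project's own helper):
#
#   fixit({'type': 'http://xmlns.com/foaf/0.1/Person', 'range': 'xsd:integer'})
#   # -> {'type': URIRef('http://xmlns.com/foaf/0.1/Person'),
#   #     'range': cast_to_rdflib('xsd:integer')}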
def match_sparql_ask(transition, event, token, fsa):
    """
    The 'sparql-ask' matcher.

    With this matcher, transition conditions are interpreted as the WHERE
    clause of a SPARQL Ask query, where variable ?obs is bound to the
    considered obsel, and prefix m: is bound to the source trace URI.
    """
    m_ns = fsa.source.model_uri
    if m_ns[-1] != '/' and m_ns[-1] != '#':
        m_ns += '#'
    history = token and token.get('history_events')
    if history:
        pred = URIRef(history[-1])
        first = URIRef(history[0])
    else:
        pred = None
        first = None
    condition = transition['condition']
    ## this would be the correct way to do it
    # initBindings = { "obs": URIRef(event), "pred": pred, "first": first }
    ## unfortunately, Virtuoso does not support VALUES clauses after the ASK clause,
    ## which is how SPARQLUpdateStore handles initBindings,
    ## so we generate that clause in the condition instead
    condition = """
        BIND (%s as ?obs)
        BIND (%s as ?pred)
        BIND (%s as ?first)
    """ % (
        URIRef(event).n3(),
        pred.n3() if pred else '""',  # simulating NULL
        first.n3() if first else '""',  # simulating NULL
    ) + condition
    ## thank you for nothing Virtuoso :-(

    return fsa.source_obsels_graph.query(
        "ASK { %s }" % condition,
        initNs={"": KTBS, "m": m_ns},
        # initBindings=initBindings,  # not supported by Virtuoso :-(
    ).askAnswer
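# For illustration (hypothetical URIs): for event 'http://example.org/obs/42'
# with an empty history, the string sent to the store would be roughly:
#
#   ASK {
#       BIND (<http://example.org/obs/42> as ?obs)
#       BIND ("" as ?pred)
#       BIND ("" as ?first)
#       ... original transition condition ...
#   }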
def convert(self, name, qname, attrs):
    if name[0] is None:
        name = URIRef(name[1])
    else:
        name = URIRef("".join(name))
    atts = {}
    for (n, v) in attrs.items():  #attrs._attrs.iteritems(): #
        if n[0] is None:
            att = URIRef(n[1])
        else:
            att = URIRef("".join(n))
        if att.startswith(XMLNS) or att[0:3].lower() == "xml":
            pass
        elif att in UNQUALIFIED:
            #if not RDFNS[att] in atts:
            atts[RDFNS[att]] = v
        else:
            atts[URIRef(att)] = v
    return name, atts
def get_contributors(self):
    # anyone who contributed to any of the idea's posts
    local_uri = AssemblQuadStorageManager.local_uri()
    discussion_storage = \
        AssemblQuadStorageManager.discussion_storage_name()

    idea_uri = URIRef(self.uri(local_uri))
    clause = '''select count(distinct ?postP), count(distinct ?post), ?author where {
        %s idea:includes* ?ideaP .
        ?postP assembl:postLinkedToIdea ?ideaP .
        ?post sioc:reply_of* ?postP .
        ?post sioc:has_creator ?author }'''
    r = self.db.execute(
        SparqlClause(clause % (
            idea_uri.n3(),),
            quad_storage=discussion_storage.n3()))
    r = [(int(cpp), int(cp), 'local:AgentProfile/' + a.rsplit('/', 1)[1])
         for (cpp, cp, a) in r]
    r.sort(reverse=True)
    return [a for (cpp, cp, a) in r]
def addDir(self, dirPath):
    """add description of this disk directory and every parent directory"""
    if not dirPath.startswith(self.topDir):
        raise ValueError("%s is not in %s" % (dirPath, self.topDir))

    dirUri = uriOfFilename(self.rootUri, self.topDir, dirPath)
    dirUri = URIRef(dirUri.rstrip('/') + '/')

    stmts = [(dirUri, RDF.type, PHO['DiskDirectory']),
             (dirUri, PHO['filename'], Literal(dirPath)),
             (dirUri, PHO['basename'], Literal(os.path.basename(dirPath)))]

    try:
        parentUri = self.addDir(os.path.dirname(dirPath))
        stmts.append((dirUri, PHO.inDirectory, parentUri))
    except ValueError:
        pass

    self.graph.add(triples=stmts,
                   context=URIRef("http://photo.bigasterisk.com/scan/fs"))
    return dirUri
def buildGraphFromJson(jsonData):
    graph = Graph()
    jsonDict = json.loads(jsonData)
    for sub, predDict in jsonDict.items():
        subToPut = None
        if sub.startswith("http"):
            subToPut = URIRef(sub.encode("utf-8"))
        else:
            subToPut = BNode(sub.encode("utf-8"))

        for pred, objDictList in predDict.items():
            for objDict in objDictList:
                if objDict["type"] == "literal" or objDict["value"] == "*":
                    print("{} {} {} ({})".format(subToPut, pred, objDict["value"].encode("utf-8"), objDict["type"]))
                    graph.add((subToPut, URIRef(pred), Literal(objDict["value"].encode("utf-8"))))
                elif objDict["type"] == "uri":
                    print("{} {} {}".format(subToPut.encode('utf-8'), pred, objDict["value"].encode('utf-8')))
                    graph.add((subToPut, URIRef(pred), URIRef(objDict["value"].encode("utf-8"))))
                else:
                    print("{} {} {}".format(subToPut, pred, objDict["value"].encode("utf-8")))
                    graph.add((subToPut, URIRef(pred), BNode(objDict["value"].encode("utf-8"))))
    return graph
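# Example input in the RDF/JSON-style layout this function expects
# (hypothetical URIs):
example = json.dumps({
    "http://example.org/book/1": {
        "http://purl.org/dc/terms/title": [
            {"type": "literal", "value": "A Title"}
        ],
        "http://purl.org/dc/terms/creator": [
            {"type": "uri", "value": "http://example.org/person/7"}
        ]
    }
})
# buildGraphFromJson(example) then yields a graph with two triples.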
def liftingtabs(mydb, cursor, cursorupdate):
    graph = Graph()  # graph for the dataset
    docgraph1 = Graph()  # graph for the documentation drawing
    docgraph2 = Graph()  # graph for the documentation drawing

    # add namespaces
    graph = apply_namespaces(graph)
    docgraph1 = apply_namespaces(docgraph1)
    docgraph2 = apply_namespaces(docgraph2)

    # get the ones we need here
    STCATH = get_namespace(graph, 'stcath')
    CRM = get_namespace(graph, 'crm')

    doci1 = 2  # msid with no liftingtabs
    doci2 = 1307  # msid with liftingtabs, pagemarker id: 23

    # deal with thesaurus concepts
    graph.add((URIRef("http://w3id.org/lob/concept/2833"), RDF.type, CRM["E55_Type"]))
    graph.add((URIRef("http://w3id.org/lob/concept/2833"), RDFS.label, Literal("board strap markers", lang="en")))
    graph.add((URIRef("http://w3id.org/lob/concept/1658"), RDF.type, CRM["E57_Material"]))
    graph.add((URIRef("http://w3id.org/lob/concept/1658"), RDFS.label, Literal("tanned skin", lang="en")))
    graph.add((URIRef("http://w3id.org/lob/concept/1197"), RDF.type, CRM["E57_Material"]))
    graph.add((URIRef("http://w3id.org/lob/concept/1197"), RDFS.label, Literal("tawed skin", lang="en")))
    graph.add((URIRef("http://w3id.org/lob/concept/5429"), RDF.type, CRM["E55_Type"]))
    graph.add((URIRef("http://w3id.org/lob/concept/5429"), RDFS.label, Literal("adhering", lang="en")))
    graph.add((URIRef("http://w3id.org/lob/concept/4045"), RDF.type, CRM["E55_Type"]))
    graph.add((URIRef("http://w3id.org/lob/concept/4045"), RDFS.label, Literal("nailing", lang="en")))
    graph.add((URIRef("http://stcath.overturnin"), RDF.type, CRM["E55_Type"]))
    graph.add((URIRef("http://stcath.overturnin"), RDFS.label, Literal("over turn-in attaching", lang="en")))
    graph.add((URIRef("http://stcath.underturnin"), RDF.type, CRM["E55_Type"]))
    graph.add((URIRef("http://stcath.underturnin"), RDFS.label, Literal("under turn-in attaching", lang="en")))
    graph.add((URIRef("http://stcath.brokenoff"), RDF.type, CRM["E55_Type"]))
    graph.add((URIRef("http://stcath.brokenoff"), RDFS.label, Literal("broken off", lang="en")))
    graph.add((URIRef("http://stcath.brokenandsewn"), RDF.type, CRM["E55_Type"]))
    graph.add((URIRef("http://stcath.brokenandsewn"), RDFS.label, Literal("broken and sewn", lang="en")))
    graph.add((URIRef("http://stcath.missing"), RDF.type, CRM["E55_Type"]))
    graph.add((URIRef("http://stcath.missing"), RDFS.label, Literal("missing", lang="en")))
    graph.add((URIRef("http://stcath.sound"), RDF.type, CRM["E55_Type"]))
    graph.add((URIRef("http://stcath.sound"), RDFS.label, Literal("sound", lang="en")))
    graph.add((URIRef("http://stcath.worn"), RDF.type, CRM["E55_Type"]))
    graph.add((URIRef("http://stcath.worn"), RDFS.label, Literal("worn", lang="en")))
    graph.add((URIRef("http://stcath.detached"), RDF.type, CRM["E55_Type"]))
    graph.add((URIRef("http://stcath.detached"), RDFS.label, Literal("detached", lang="en")))
    graph.add((URIRef("http://stcath.dangling"), RDF.type, CRM["E55_Type"]))
    graph.add((URIRef("http://stcath.dangling"), RDFS.label, Literal("dangling", lang="en")))

    docgraph1.add((URIRef("http://w3id.org/lob/concept/2833"), RDF.type, CRM["E55_Type"]))
    docgraph1.add((URIRef("http://w3id.org/lob/concept/2833"), RDFS.label, Literal("board strap markers", lang="en")))
    docgraph2.add((URIRef("http://w3id.org/lob/concept/2833"), RDF.type, CRM["E55_Type"]))
    docgraph2.add((URIRef("http://w3id.org/lob/concept/2833"), RDFS.label, Literal("board strap markers", lang="en")))
    docgraph2.add((URIRef("http://w3id.org/lob/concept/1658"), RDF.type, CRM["E55_Type"]))
    docgraph2.add((URIRef("http://w3id.org/lob/concept/1658"), RDFS.label, Literal("tanned skin", lang="en")))
    docgraph2.add((URIRef("http://w3id.org/lob/concept/5429"), RDF.type, CRM["E55_Type"]))
    docgraph2.add((URIRef("http://w3id.org/lob/concept/5429"), RDFS.label, Literal("adhering", lang="en")))
    docgraph2.add((URIRef("http://stcath.underturnin"), RDF.type, CRM["E55_Type"]))
    docgraph2.add((URIRef("http://stcath.underturnin"), RDFS.label, Literal("under turn-in attaching", lang="en")))
    docgraph2.add((URIRef("http://stcath.brokenoff"), RDF.type, CRM["E55_Type"]))
    docgraph2.add((URIRef("http://stcath.brokenoff"), RDFS.label, Literal("broken off", lang="en")))

    # 1_3_LiftingTabs
    cursor.execute(
        "SELECT mss.msuuid, mss.cataloguename, lt.msid, lt.yesnonk FROM MSs mss INNER JOIN `1_3_LiftingTabs` lt on mss.id=lt.msid"
    )
    rows = cursor.fetchall()
    for row in rows:
        msuuid = URIRef(row["msuuid"], str(STCATH))
        if row["yesnonk"] == "no":
            graph.add((msuuid, CRM["NTP46_is_not_composed_of_physical_thing_of_type"], URIRef("http://w3id.org/lob/concept/2833")))
            if row["msid"] == doci1:
                docgraph1.add((msuuid, CRM["NTP46_is_not_composed_of_physical_thing_of_type"], URIRef("http://w3id.org/lob/concept/2833")))
                docgraph1.add((msuuid, RDF.type, CRM["E22_Man-Made_Object"]))
                docgraph1.add((msuuid, RDFS.label, Literal(row["cataloguename"], lang="en")))
        if row["msid"] == doci2:
            docgraph2.add((msuuid, RDF.type, CRM["E22_Man-Made_Object"]))
            docgraph2.add((msuuid, RDFS.label, Literal(row["cataloguename"], lang="en")))

    # LiftingTabs
    cursor.execute(
        "SELECT lt.id, mss.msuuid, mss.cataloguename, lt.msid, lt.partadditionuuid, lt.leftliftingtabuuid, lt.rightliftingtabuuid, lt.location, lt.material, lt.attachment, lt.turnin FROM MSs mss INNER JOIN `LiftingTabs` lt on mss.id=lt.msid"
    )
    rows = cursor.fetchall()
    for row in rows:
        shelfmark = row["cataloguename"]
        msuuid = URIRef(row["msuuid"], str(STCATH))

        # lifting tabs
        locations = row["location"].split(",")
        # TODO: deal with option "Centre, board"
        for location in locations:
            location = location.strip()
            if location == "Left board" or location == "Both boards" or location == "Right board":
                if location == "Left board" or location == "Both boards":
                    if row["leftliftingtabuuid"] is None:
                        newuuid = str(uuid.uuid4())
                        leftliftingtabuuid = URIRef(newuuid, str(STCATH))
                        # update the database
                        sql = "UPDATE LiftingTabs SET leftliftingtabuuid=%s WHERE id=%s"
                        val = (newuuid, row["id"])
                        cursorupdate.execute(sql, val)
                        mydb.commit()
                    else:
                        leftliftingtabuuid = URIRef(row["leftliftingtabuuid"], str(STCATH))
                    graph.add((leftliftingtabuuid, RDF.type, CRM["E22_Man-Made_Object"]))
                    graph.add((leftliftingtabuuid, CRM["P2_has_type"], URIRef("http://w3id.org/lob/concept/2833")))
                    graph.add((leftliftingtabuuid, RDFS.label, Literal("Left board marker of " + shelfmark, lang="en")))
                    if row["material"] == "Tanned leather":
                        graph.add((leftliftingtabuuid, CRM["P45_consists_of"], URIRef("http://w3id.org/lob/concept/1658")))
                    elif row["material"] == "Tawed leather":
                        graph.add((leftliftingtabuuid, CRM["P45_consists_of"], URIRef("http://w3id.org/lob/concept/1197")))
                if location == "Right board" or location == "Both boards":
                    if row["rightliftingtabuuid"] is None:
                        newuuid = str(uuid.uuid4())
                        rightliftingtabuuid = URIRef(newuuid, str(STCATH))
                        # update the database
                        sql = "UPDATE LiftingTabs SET rightliftingtabuuid=%s WHERE id=%s"
                        val = (newuuid, row["id"])
                        cursorupdate.execute(sql, val)
                        mydb.commit()
                    else:
                        rightliftingtabuuid = URIRef(row["rightliftingtabuuid"], str(STCATH))
                    graph.add((rightliftingtabuuid, RDF.type, CRM["E22_Man-Made_Object"]))
                    graph.add((rightliftingtabuuid, CRM["P2_has_type"], URIRef("http://w3id.org/lob/concept/2833")))
                    graph.add((rightliftingtabuuid, RDFS.label, Literal("Right board marker of " + shelfmark, lang="en")))
                    if row["material"] == "Tanned leather":
                        graph.add((rightliftingtabuuid, CRM["P45_consists_of"], URIRef("http://w3id.org/lob/concept/1658")))
                    elif row["material"] == "Tawed leather":
                        graph.add((rightliftingtabuuid, CRM["P45_consists_of"], URIRef("http://w3id.org/lob/concept/1197")))
                if row["partadditionuuid"] is None:
                    newuuid = str(uuid.uuid4())
                    partadditionuuid = URIRef(newuuid, str(STCATH))
                    # update the database
                    sql = "UPDATE LiftingTabs SET partadditionuuid=%s WHERE id=%s"
                    val = (newuuid, row["id"])
                    cursorupdate.execute(sql, val)
                    mydb.commit()
                else:
                    partadditionuuid = URIRef(row["partadditionuuid"], str(STCATH))
                graph.add((partadditionuuid, RDF.type, CRM["E79_Part_Addition"]))
                graph.add((partadditionuuid, RDFS.label, Literal("Addition of board markers to " + shelfmark, lang="en")))
                graph.add((partadditionuuid, CRM["P110_augmented"], msuuid))
                if location == "Both boards" or location == "Left board":
                    graph.add((partadditionuuid, CRM["P111_added"], leftliftingtabuuid))
                if location == "Both boards" or location == "Right board":
                    graph.add((partadditionuuid, CRM["P111_added"], rightliftingtabuuid))
                if row['attachment'] == "Glued":
                    graph.add((partadditionuuid, CRM["P32_used_general_technique"], URIRef("http://w3id.org/lob/concept/5429")))
                elif row['attachment'] == "Nailed":
                    graph.add((partadditionuuid, CRM["P32_used_general_technique"], URIRef("http://w3id.org/lob/concept/4045")))
                if row['turnin'] == "Under turn-in":
                    graph.add((partadditionuuid, CRM["P32_used_general_technique"], URIRef("http://stcath.underturnin")))
                elif row['turnin'] == "Over turn-in":
                    graph.add((partadditionuuid, CRM["P32_used_general_technique"], URIRef("http://stcath.overturnin")))

                if location == "Both boards":
                    # TODO: Mark the location of the lifting tabs when the board foredge place is available
                    #graph.add((leftliftingtabuuid, CRM["P55_has_current_location"], ...))
                    #graph.add((rightliftingtabuuid, CRM["P55_has_current_location"], ...))
                    pass
                elif location == "Right board":
                    # TODO: Mark the location of the lifting tabs when the board foredge place is available
                    #graph.add((rightliftingtabuuid, CRM["P55_has_current_location"], ...))
                    pass
                elif location == "Left board":
                    # TODO: Mark the location of the lifting tabs when the board foredge place is available
                    #graph.add((leftliftingtabuuid, CRM["P55_has_current_location"], ...))
                    pass

                if row["msid"] == doci2:
                    docgraph2.add((leftliftingtabuuid, RDF.type, CRM["E22_Man-Made_Object"]))
                    docgraph2.add((leftliftingtabuuid, RDFS.label, Literal("Left board marker of " + shelfmark, lang="en")))
                    docgraph2.add((leftliftingtabuuid, CRM["P2_has_type"], URIRef("http://w3id.org/lob/concept/2833")))
                    docgraph2.add((rightliftingtabuuid, RDF.type, CRM["E22_Man-Made_Object"]))
                    docgraph2.add((rightliftingtabuuid, RDFS.label, Literal("Right board marker of " + shelfmark, lang="en")))
                    docgraph2.add((rightliftingtabuuid, CRM["P2_has_type"], URIRef("http://w3id.org/lob/concept/2833")))
                    docgraph2.add((partadditionuuid, RDF.type, CRM["E79_Part_Addition"]))
                    docgraph2.add((partadditionuuid, RDFS.label, Literal("Addition of board markers to " + shelfmark, lang="en")))
                    docgraph2.add((partadditionuuid, CRM["P111_added"], leftliftingtabuuid))
                    docgraph2.add((partadditionuuid, CRM["P111_added"], rightliftingtabuuid))
                    docgraph2.add((partadditionuuid, CRM["P110_augmented"], msuuid))
                    docgraph2.add((leftliftingtabuuid, RDF.type, CRM["E22_Man-Made_Object"]))
                    docgraph2.add((leftliftingtabuuid, RDFS.label, Literal("Left board marker of " + shelfmark, lang="en")))
                    docgraph2.add((rightliftingtabuuid, RDF.type, CRM["E22_Man-Made_Object"]))
                    docgraph2.add((rightliftingtabuuid, RDFS.label, Literal("Right board marker of " + shelfmark, lang="en")))
                    docgraph2.add((partadditionuuid, RDF.type, CRM["E79_Part_Addition"]))
                    docgraph2.add((partadditionuuid, RDFS.label, Literal("Addition of board markers to " + shelfmark, lang="en")))
                    docgraph2.add((partadditionuuid, CRM["P111_added"], leftliftingtabuuid))
                    docgraph2.add((partadditionuuid, CRM["P111_added"], rightliftingtabuuid))
                    docgraph2.add((partadditionuuid, CRM["P110_augmented"], msuuid))
                    docgraph2.add((leftliftingtabuuid, CRM["P45_consists_of"], URIRef("http://w3id.org/lob/concept/1658")))
                    docgraph2.add((rightliftingtabuuid, CRM["P45_consists_of"], URIRef("http://w3id.org/lob/concept/1658")))
                    docgraph2.add((partadditionuuid, CRM["P32_used_general_technique"], URIRef("http://w3id.org/lob/concept/5429")))
                    docgraph2.add((partadditionuuid, CRM["P32_used_general_technique"], URIRef("http://stcath.underturnin")))

    # LiftingTabsCondition
    cursor.execute(
        "SELECT ltc.id, lt.msid, mss.msuuid, mss.cataloguename, lt.leftliftingtabuuid, lt.rightliftingtabuuid, ltc.condition, ltc.conditionuuid, ltc.leftboard FROM `LiftingTabsCondition` ltc LEFT JOIN `LiftingTabs` lt ON ltc.liftingtabid=lt.id INNER JOIN MSs mss ON mss.id=lt.msid"
    )
    rows = cursor.fetchall()
    for row in rows:
        shelfmark = row["cataloguename"]
        msuuid = URIRef(row["msuuid"], str(STCATH))
        if row['leftliftingtabuuid'] is not None:
            leftliftingtabuuid = URIRef(row["leftliftingtabuuid"], str(STCATH))
        # elif row['leftliftingtabuuid'] is None:
        #     if row["leftboard"] == 1:  # we have a left lifting tab condition but no left lifting tab
        #         print(str(row["msid"]) + ", ")
        if row['rightliftingtabuuid'] is not None:
            rightliftingtabuuid = URIRef(row["rightliftingtabuuid"], str(STCATH))
        # elif row['rightliftingtabuuid'] is None:
        #     if row["leftboard"] == 0:  # we have a right lifting tab condition but no right lifting tab
        #         print(str(row["msid"]) + ", ")

        if row["leftboard"] == 1:  # this is the left board
            if row["conditionuuid"] is None:
                newuuid = str(uuid.uuid4())
                conditionuuid = URIRef(newuuid, str(STCATH))
                # update the database
                sql = "UPDATE LiftingTabsCondition SET conditionuuid=%s WHERE id=%s"
                val = (newuuid, row["id"])
                cursorupdate.execute(sql, val)
                mydb.commit()
            else:
                conditionuuid = URIRef(row["conditionuuid"], str(STCATH))
            graph.add((conditionuuid, RDF.type, CRM["E3_Condition_State"]))
            graph.add((conditionuuid, RDFS.label, Literal("Condition of left board marker of " + shelfmark, lang="en")))
            graph.add((leftliftingtabuuid, CRM["P44_has_condition"], conditionuuid))
            if row["msid"] == doci2:
                docgraph2.add((conditionuuid, RDF.type, CRM["E3_Condition_State"]))
                docgraph2.add((conditionuuid, RDFS.label, Literal("Condition of left board marker of " + shelfmark, lang="en")))
                docgraph2.add((leftliftingtabuuid, CRM["P44_has_condition"], conditionuuid))
                docgraph2.add((conditionuuid, CRM["P2_has_type"], URIRef("http://stcath.brokenoff")))
        elif row["leftboard"] == 0:  # this is the right board
            if row["conditionuuid"] is None:
                newuuid = str(uuid.uuid4())
                conditionuuid = URIRef(newuuid, str(STCATH))
                # update the database
                sql = "UPDATE LiftingTabsCondition SET conditionuuid=%s WHERE id=%s"
                val = (newuuid, row["id"])
                cursorupdate.execute(sql, val)
                mydb.commit()
            else:
                conditionuuid = URIRef(row["conditionuuid"], str(STCATH))
            graph.add((conditionuuid, RDF.type, CRM["E3_Condition_State"]))
            graph.add((conditionuuid, RDFS.label, Literal("Condition of right board marker of " + shelfmark, lang="en")))
            graph.add((rightliftingtabuuid, CRM["P44_has_condition"], conditionuuid))
            if row["msid"] == doci2:
                docgraph2.add((conditionuuid, RDF.type, CRM["E3_Condition_State"]))
                docgraph2.add((conditionuuid, RDFS.label, Literal("Condition of right board marker of " + shelfmark, lang="en")))
                docgraph2.add((rightliftingtabuuid, CRM["P44_has_condition"], conditionuuid))
                docgraph2.add((conditionuuid, CRM["P2_has_type"], URIRef("http://stcath.brokenoff")))

        if row["condition"] == "Broken off":
            graph.add((conditionuuid, CRM["P2_has_type"], URIRef("http://stcath.brokenoff")))
        elif row["condition"] == "Broken and Sewn":
            graph.add((conditionuuid, CRM["P2_has_type"], URIRef("http://stcath.brokenandsewn")))
        elif row["condition"] == "Missing":
            graph.add((conditionuuid, CRM["P2_has_type"], URIRef("http://stcath.missing")))
        elif row["condition"] == "Sound":
            graph.add((conditionuuid, CRM["P2_has_type"], URIRef("http://stcath.sound")))
        elif row["condition"] == "Worn":
            graph.add((conditionuuid, CRM["P2_has_type"], URIRef("http://stcath.worn")))
        elif row["condition"] == "Detached":
            graph.add((conditionuuid, CRM["P2_has_type"], URIRef("http://stcath.detached")))
        elif row["condition"] == "Dangling":
            graph.add((conditionuuid, CRM["P2_has_type"], URIRef("http://stcath.dangling")))

    # documentation drawing
    dot1 = visualise_graph(docgraph1, 'MS without board strap markers', "forth")
    dot2 = visualise_graph(docgraph2, 'MS with board strap markers', "forth")
    dot1.render('liftingtabs/liftingtabs-1.gv', format='svg')
    dot2.render('liftingtabs/liftingtabs-2.gv', format='svg')

    # serialise the graph
    graph.serialize(destination='liftingtabs/liftingtabs.ttl', format='turtle', encoding="utf-8")
    docgraph1.serialize(destination='liftingtabs/liftingtabs-doc-1.n3', format='n3', encoding="utf-8")
    docgraph2.serialize(destination='liftingtabs/liftingtabs-doc-2.n3', format='n3', encoding="utf-8")
def extract_metadata(item):
    g = item.get_graph()
    m = defaultdict(list)
    #for s,p,o in g.triples((URIRef(item.uri), ag.NAMESPACES[dc]['identifier'], None)):
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dc']['title']):
        m['title'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dc']['identifier']):
        m['identifier'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dc']['description']):
        m['description'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dc']['creator']):
        m['creator'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dc']['subject']):
        m['subject'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dcterms']['abstract']):
        m['abstract'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dcterms']['created']):
        try:
            dt = formatDate(str(o))
        except:
            dt = o
        m['created'].append(dt)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dcterms']['description']):
        m['description'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dcterms']['hasVersion']):
        m['hasVersion'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dcterms']['identifier']):
        m['identifier'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dcterms']['isVersionOf']):
        m['isVersionOf'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dcterms']['license']):
        m['license'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dcterms']['mediator']):
        m['mediator'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dcterms']['modified']):
        try:
            dt = formatDate(str(o))
        except:
            dt = o
        m['modified'].append(dt)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dcterms']['publisher']):
        m['publisher'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dcterms']['rights']):
        m['rights'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dcterms']['subject']):
        m['subject'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['dcterms']['title']):
        m['title'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['oxds']['isEmbargoed']):
        m['isEmbargoed'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['oxds']['embargoedUntil']):
        try:
            dt = formatDate(str(o))
        except:
            dt = o
        m['embargoedUntil'].append(dt)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['oxds']['currentVersion']):
        m['currentVersion'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['bibo']['doi']):
        m['doi'].append(o)
    for o in g.objects(URIRef(item.uri), ag.NAMESPACES['ore']['aggregates']):
        m['aggregates'].append(o)
    return dict(m)
import io
import threading  # needed for the Lock() objects below (missing in the original)

from rdflib import URIRef

from sdoterm import *
from localmarkdown import Markdown

VOCABURI = "http://schema.org/"
CORE = "core"
DEFTRIPLESFILESGLOB = ["data/*.ttl", "data/ext/*/*.ttl"]
LOADEDDEFAULT = False
TERMS = {}
EXPANDEDTERMS = {}
TERMSLOCK = threading.Lock()
RDFLIBLOCK = threading.Lock()

DATATYPEURI = URIRef(VOCABURI + "DataType")
ENUMERATIONURI = URIRef(VOCABURI + "Enumeration")
THINGURI = URIRef(VOCABURI + "Thing")

class SdoTermSource():

    TYPE = "Class"
    PROPERTY = "Property"
    DATATYPE = "Datatype"
    ENUMERATION = "Enumeration"
    ENUMERATIONVALUE = "Enumerationvalue"
    REFERENCE = "Reference"
    TERMCOUNTS = None

    SOURCEGRAPH = None
    MARKDOWNPROCESS = True
def entity_uri(value):
    return URIRef(ui_url('entities', value))
def collection_uri(value):
    return URIRef(ui_url('collections', value))
s = row['s'].strip().replace(' ', '_')
p = row['p']
o = row['o'].strip().replace(' ', '_')
triples += [(s, p, o)]

# RDF generation
cso_namespace = 'https://cso.kmi.open.ac.uk/topics/'
swkg_namespace = 'https://swkg.kmi.open.ac.uk/entity/'
relation_namespace = 'https://swkg.kmi.open.ac.uk/relation/'

g = Graph()
for (s, p, o) in triples:
    s_URI = ''
    if s in csoTopics:
        s_URI = URIRef(cso_namespace + s)
    else:
        s_URI = URIRef(swkg_namespace + s)
    o_URI = ''
    # was `if s in csoTopics`, which always mirrored the subject test
    # instead of checking the object
    if o in csoTopics:
        o_URI = URIRef(cso_namespace + o)
    else:
        o_URI = URIRef(swkg_namespace + o)
    p_URI = URIRef(relation_namespace + p)
    g.add((s_URI, p_URI, o_URI))

print(g.serialize("SemWebKG.rdf", format="xml"))
print(g.serialize("SemWebKG.nt", format="nt"))
def country_uri(value):
    return URIRef('iso-3166-1:%s' % value)
def BNode():
    return URIRef(uuid4().urn)
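# This shadows rdflib's BNode with a function that returns a skolem-style
# URIRef such as URIRef('urn:uuid:9a9e...'), so generated nodes are globally
# unique URIs rather than store-local blank nodes. Usage sketch (assumes
# `from uuid import uuid4` and `from rdflib import URIRef` in scope):
#
#   node = BNode()          # -> URIRef('urn:uuid:....')
#   graph.add((node, RDF.type, FOAF.Person))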
def eventos(uri, nombre, fecha, descripcion, lugar, media, image, uriTime, mapa, duracion, linkURI):
    if media != "No disponible":
        g.add((URIRef(uri), EVENT.illustrate, URIRef(media)))
    g.add((URIRef(uri), RDF.type, EVENT.Event))
    g.add((URIRef(uri), FOAF.depiction, URIRef(image)))
    g.add((URIRef(uri), RDFS.label, Literal(nombre, lang='es')))
    g.add((URIRef(uri), RDFS.comment, Literal(descripcion, lang='es')))

    # Date and duration
    g.add((URIRef(uri), EVENT.time, URIRef(uriTime)))
    g.add((URIRef(uriTime), RDF.type, TIME.Interval))
    g.add((URIRef(uriTime), TIME.at, Literal(fecha, datatype=XSD.dateTime)))
    g.add((URIRef(uriTime), TIME.duration, Literal(duracion, datatype=XSD.duration)))

    # Place
    g.add((URIRef(uri), EVENT.place, URIRef(mapa)))
    g.add((URIRef(mapa), RDFS.label, Literal(lugar, lang='es')))

    g.add((URIRef(uri), UMBEL.isAbout, URIRef(rutaAnillo['Eventos.rdf'])))
    g.add((URIRef(uri), RDFS.seeAlso, URIRef(linkURI)))
    g.add((URIRef(uri), VCARD.category, Literal("EVENTOS", lang='es')))
def main():
    parser = argparse.ArgumentParser(description='Feature extraction')
    parser.add_argument('-im', '--im', metavar='image', nargs='+',
                        dest='im', type=str, required=True,
                        help='Images to calculate features on')
    parser.add_argument('-md', '--md', metavar='metadata', dest='md',
                        type=str, required=False, nargs='+',
                        help='Clinical data on patient (DICOM)')
    parser.add_argument('-sem', '--sem', metavar='semantics', dest='sem',
                        type=str, required=False, nargs='+',
                        help='Semantic Features')
    parser.add_argument('-seg', '--seg', metavar='segmentation', dest='seg',
                        type=str, required=True, nargs='+',
                        help='Segmentation to calculate features on')
    parser.add_argument('-para', '--para', metavar='Parameters', nargs='+',
                        dest='para', type=str, required=False,
                        help='Parameters')
    parser.add_argument('-out', '--out', metavar='Features', dest='out',
                        type=str, required=False,
                        help='Patient features output (HDF)')
    args = parser.parse_args()

    if type(args.im) is list:
        args.im = ''.join(args.im)
    if type(args.seg) is list:
        args.seg = ''.join(args.seg)
    if type(args.out) is list:
        args.out = ''.join(args.out)

    featureVector = CalcFeatures(image=args.im, mask=args.seg)

    if 'rdf' in args.out:
        # Write output to rdf
        # import rdflib and some namespace
        from rdflib import Graph, URIRef, BNode, Literal, Namespace
        from rdflib.namespace import RDF, FOAF

        # convert python object to RDF
        print "-----------------------------------------------------------"
        print " RDF Output:"
        print ""
        Img = Graph()
        lung1_image = URIRef("http://example.org/CT-Image")
        Img.add((lung1_image, RDF.type, FOAF.Image))

        list_key = featureVector.keys()
        list_value = featureVector.values()
        for i in range(len(list_key)):
            tmp_value = Literal(list_value[i])
            tmp_name = list_key[i]
            # was FOAF.tmp_name, which hard-coded the literal attribute name
            # instead of using each feature's own name
            Img.add((lung1_image, FOAF[tmp_name], tmp_value))
        print Img.serialize(format='turtle')

        # Create a rdf file for storing output
        Img.serialize(args.out, format="pretty-xml")

    elif 'hdf5' in args.out:
        # Write output to hdf5
        import numpy as np
        import pandas as pd

        # Assign features to corresponding groups
        shape_labels = list()
        shape_features = list()
        histogram_labels = list()
        histogram_features = list()
        GLCM_labels = list()
        GLCM_features = list()
        GLRLM_labels = list()
        GLRLM_features = list()
        GLSZM_labels = list()
        GLSZM_features = list()

        for featureName in featureVector.keys():
            if 'shape' in featureName:
                shape_labels.append(featureName)
                shape_features.append(featureVector[featureName])
            if 'firstorder' in featureName:
                histogram_labels.append(featureName)
                histogram_features.append(featureVector[featureName])
            if 'glcm' in featureName:
                GLCM_labels.append(featureName)
                GLCM_features.append(featureVector[featureName])
            if 'glrlm' in featureName:
                GLRLM_labels.append(featureName)
                GLRLM_features.append(featureVector[featureName])
            if 'glszm' in featureName:
                GLSZM_labels.append(featureName)
                GLSZM_features.append(featureVector[featureName])

        # Convert feature to single dictionary containing PD series
        features = dict()
        pandas_dict = dict(zip(shape_labels, shape_features))
        shape_dict = dict()
        shape_dict['all'] = pd.Series(pandas_dict)
        shape_features = pd.Series(shape_dict)
        features['shape_features'] = shape_features

        pandas_dict = dict(zip(histogram_labels, histogram_features))
        histogram_dict = dict()
        histogram_dict['all'] = pd.Series(pandas_dict)
        histogram_features = pd.Series(histogram_dict)
        features['histogram_features'] = histogram_features

        GLCM_dict = dict(zip(GLCM_labels, GLCM_features))
        GLRLM_dict = dict(zip(GLRLM_labels, GLRLM_features))
        GLSZM_dict = dict(zip(GLSZM_labels, GLSZM_features))

        texture_features = dict()
        texture_features['GLCM'] = pd.Series(GLCM_dict)
        texture_features['GLRLM'] = pd.Series(GLRLM_dict)
        texture_features['GLSZM'] = pd.Series(GLSZM_dict)
        texture_features = pd.Series(texture_features)
        features['texture_features'] = texture_features

        # We also return just the array
        image_feature_array = list()
        for _, feattype in features.iteritems():
            for _, imfeatures in feattype.iteritems():
                image_feature_array.extend(imfeatures.values)

        image_feature_array = np.asarray(image_feature_array)
        image_feature_array = image_feature_array.ravel()

        panda_labels = ['image_features', 'image_features_array']
        panda_data = pd.Series([features, image_feature_array],
                               index=panda_labels,
                               name='Image features')

        panda_data.to_hdf(args.out, 'image_features')
g.bind("dcterms", DCTERMS)
g.bind("owl", OWL)
g.bind("pro", pro)
g.bind("proles", proles)
g.bind("prov", prov)
g.bind("ti", ti)

#############################
#                           #
#          Persons          #
#                           #
#############################

for person in root.findall('.//tei:person', tei):
    person_id = person.get('{http://www.w3.org/XML/1998/namespace}id')
    person_uri = URIRef(base_uri + '/person/' + person_id)
    person_ref = '#' + person_id

    # person
    g.add((person_uri, RDF.type, crm.E21_Person))

    # same as
    same_as = person.get('sameAs')
    if same_as is not None:
        same_as = same_as.split()
        i = 0
        while i < len(same_as):
            same_as_uri = URIRef(same_as[i])
            g.add((person_uri, OWL.sameAs, same_as_uri))
            i += 1
def test_read(self):
    sx = None
    for s in self.g.subjects(
            predicate=URIRef("https://example.org/e"),
            object=URIRef("https://example.org/f")):
        sx = s
    assert sx == URIRef("https://example.org/d")
def __init__(self, uri: URIRef, namespaces: Union[dict, NamespaceManager, None]):
    self.uri = URIRef(uri)
    self.namespaces = namespaces
# -*- coding: utf-8 -*-
import json

from pyexcel_xlsx import get_data
from rdflib import Graph, URIRef, Literal, RDF, RDFS, XSD, Namespace

ns = Namespace("sw-kreusch")
g = Graph()
typePLZ = URIRef("http://dbpedia.org/ontology/zipCode")
typeCounty = URIRef("http://dbpedia.org/ontology/county")
typeiPhoneModel = URIRef("https://www.wikidata.org/wiki/Q2766")
hasVEK = URIRef("hasVEK")
isInCounty = URIRef("isInCounty")  # was "isInisInCounty", an apparent typo
inceptedOn = URIRef("inceptedOn")
g.add((hasVEK, RDF.type, RDF.Property))
g.add((hasVEK, RDFS.range, XSD.nonNegativeInteger))
g.add((inceptedOn, RDF.type, RDF.Property))
g.add((inceptedOn, RDFS.range, XSD.dateTime))

# Parsing the zipCode to county mapping
data = get_data("AuszugGV1QAktuell.xlsx")
counties = data["Onlineprodukt_Gemeinden_310317"]
for community in counties:
    if len(community) > 13:
        # Testing if the current row contains a string or not
        try:
            regCode = int(str(community[2]))
        except UnicodeEncodeError:
            continue
        except ValueError:
            continue
        # concatenating the regCode/county code
def parse_template_instance(parent_template_id: int, instance: Group,
                            nsm: NamespaceManager = None) -> AbstractTemplate:
    """Parse a stOTTR template instance.

    Arguments:
        * parent_template_id: ID of the template that owns the scope of the instance.
        * instance: instance to parse
        * nsm: Namespace manager used to expand prefixed URIs.

    Returns:
        An OTTR template.
    """
    cross_variable = None
    # if the instance uses a cross expansion
    if instance.type == "cross":
        # first, find the repeated variable
        classic_args = list()
        for pos in range(len(instance.content.arguments)):
            arg = instance.content.arguments[pos]
            if type(arg) is not str:
                cross_variable = parse_term(arg[0], nsm=nsm)
                classic_args.append(arg[0])
            else:
                classic_args.append(arg)
        # if not found, raise error
        # TODO improve error reporting
        if cross_variable is None:
            raise SyntaxError(
                "Found an expansion mode 'cross' without a repeated variable.")
        # then, replace the current instance by the inner instance
        instance = instance.content
        instance.arguments = classic_args
    # TODO supports other expansion modes

    # parse arguments to RDF Terms
    ottr_arguments = list()
    for arg in instance.arguments:
        arg = parse_term(arg, nsm=nsm)
        # unify variables found during the process, so they are unique to the local scope
        if type(arg) is Variable:
            new_arg = unify_var(arg, parent_template_id)
            # replace the cross variable when it gets renamed
            if arg == cross_variable:
                cross_variable = new_arg
            arg = new_arg
        ottr_arguments.append(arg)

    # build the concrete template object
    template_name = parse_term(instance.name, nsm=nsm)
    ottr_instance = None
    # case 1: handle base templates (using a generic method)
    if template_name in BASE_TEMPLATES:
        TemplateConstructor, nb_arguments = BASE_TEMPLATES[template_name]
        if len(ottr_arguments) != nb_arguments:
            raise Exception(
                f"The {template_name.n3()} template takes exactly {nb_arguments} arguments, but {len(ottr_arguments)} were provided"
            )
        params = parse_instance_arguments(parent_template_id, ottr_arguments, nsm=nsm)
        ottr_instance = TemplateConstructor(*params)
    else:
        # case 2: a non-base template instance
        ottr_instance = NonBaseInstance(
            template_name,
            parse_instance_arguments(parent_template_id, ottr_arguments, nsm=nsm))
    # use a cross expansion operator if needed
    if cross_variable is not None:
        cross_name = URIRef(f"http://pyOTTR?cross={template_name}")
        return CrossTemplate(cross_name, ottr_instance, cross_variable)
    return ottr_instance
def test_mulpath_n3():
    uri = "http://example.com/foo"
    n3 = (URIRef(uri) * ZeroOrMore).n3()
    assert n3 == "<" + uri + ">*"
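# Related property-path sketches from rdflib.paths (hedged; the exact
# rendering depends on the rdflib version):
#
#   (URIRef(uri) * OneOrMore).n3()   # -> '<http://example.com/foo>+'
#   (URIRef(uri) * ZeroOrOne).n3()   # -> '<http://example.com/foo>?'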
def email_uri(value):
    return URIRef('mailto:%s' % value)
def factual_data_graph(results):
    for row in results:
        if row != []:
            for el in row['categoria']:  # each el is a dictionary
                factual_data.add((URIRef(categ + el['uri']), RDF.type, ecrm.E1_CRM_Entity))
                factual_data.add((URIRef(categ + el['uri']), rdfs.label, Literal(el['label'], datatype=XSD.string)))
                if len(el) > 2:  # i.e. it has the 'superclasse' key in addition to 'uri' and 'label'
                    factual_data.add((URIRef(categ + el['superclasse']), RDF.type, ecrm.E1_CRM_Entity))
                    factual_data.add((URIRef(categ + el['uri']), ecrm.P67_refers_to, URIRef(categ + el['superclasse'])))
            if row['riscritture_letterarie'] != {}:
                for el2 in row['riscritture_letterarie']:  # el2 = dictionary
                    try:
                        factual_data.add((URIRef(work + el2['work_uri']), RDF.type, efrbroo.F1_Work))
                        factual_data.add((URIRef(work + el2['work_uri']), rdfs.label, Literal(el2['work_label'], datatype=XSD.string)))
                        # subwork_uri and subwork_label are missing
                        for autore_uri in el2['autore_uri']:
                            factual_data.add((URIRef(work + autore_uri), RDF.type, efrbroo.F10_Person))
                            factual_data.add((URIRef(work + el2['work_uri']), rdfs.label, (URIRef(work + autore_uri))))
                            factual_data.add((URIRef(work + el2['work_uri'] + '-wc'), RDF.type, efrbroo.F27_Work_Conception))
                            factual_data.add((URIRef(work + el2['work_uri'] + '-wc'), ecrm.P14_carried_out_by, (URIRef(work + autore_uri))))
                            factual_data.add((URIRef(work + el2['work_uri'] + '-wc'), efrbroo.R16_initiated, (URIRef(work + el2['work_uri']))))
                            # the link between work uri and work label is missing
                            # the date is missing !!!!
                    except:
                        print('Something seems to be missing in riscritture_letterarie')
            if row['fonti_medievali_e_moderne'] != {}:
                for el2 in row['fonti_medievali_e_moderne']:  # el2 = dictionary
                    try:
                        factual_data.add((URIRef(work + el2['work_uri']), RDF.type, efrbroo.F1_Work))
                        factual_data.add((URIRef(work + el2['work_uri']), rdfs.label, Literal(el2['work_label'], datatype=XSD.string)))
                        # subwork_uri and subwork_label are missing
                        # the date is missing !!!!
                        for autore_uri in el2['autore_uri']:
                            factual_data.add((URIRef(work + autore_uri), RDF.type, efrbroo.F10_Person))
                            factual_data.add((URIRef(work + el2['work_uri'] + '-wc'), RDF.type, efrbroo.F27_Work_Conception))
                            factual_data.add((URIRef(work + el2['work_uri'] + '-wc'), ecrm.P14_carried_out_by, (URIRef(work + autore_uri))))
                            factual_data.add((URIRef(work + el2['work_uri'] + '-wc'), efrbroo.R16_initiated, (URIRef(work + el2['work_uri']))))
                            # the link between autore uri and autore label is missing
                            # the date is missing !!!!
                    except:
                        print('Something seems to be missing in fonti_medievali_e_moderne')
            if row['riscritture_cinematografiche'] != {}:
                for el3 in row['riscritture_cinematografiche']:
                    try:
                        factual_data.add((URIRef(work + el3['titolo_film_uri']), RDF.type, efrbroo.F1_Work))
                        factual_data.add((URIRef(work + el3['titolo_film_uri']), rdfs.label, Literal(el3['titolo_film'], datatype=XSD.string)))
                        # the date is missing !!!!
                        # autore_uri + autore label are missing
                        # work conception is missing
                    except:
                        print('Something is wrong in CINEMA')

    print(factual_data.serialize(format='trig').decode('UTF-8'))
    factual_data.serialize(destination='discarica/rdf_liv0_works.trig', format='trig')
    return factual_data
def phone_uri(value):
    return URIRef('tel:%s' % value)
def createHotelId(hotelId):
    hotelId = URIRef('http://data.linkedevents.org/london/hotels/' + hotelId)
    return hotelId
def document_uri(value):
    return URIRef(ui_url('documents', value))
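# These one-line constructors (entity_uri, collection_uri, country_uri,
# email_uri, phone_uri, document_uri) all normalise identifiers into URIRefs.
# Sketch, assuming ui_url builds an absolute URL for the UI host:
#
#   document_uri('123')    # -> URIRef('<ui-base>/documents/123')
#   email_uri('a@b.org')   # -> URIRef('mailto:a@b.org')
#   country_uri('de')      # -> URIRef('iso-3166-1:de')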
def replaced_by(uri, title):
    new_uri = lookup(title)
    if (new_uri != ''):
        yse_skos.remove((None, None, URIRef(uri)))
        yse_skos.add((URIRef(uri), dct.isReplacedBy, URIRef(new_uri)))
        yse_skos.add((URIRef(uri), OWL.deprecated, Literal('true', datatype=XSD.boolean)))
def term(self, name):
    from FuXi.Syntax.infixOWL import Class
    return Class(URIRef(self + name))
g.parse('NewBase.ttl', format='n3')

with open('StudentRecord.csv', "r") as student_file:
    student = csv.reader(student_file, delimiter=',')
    counter = 0
    for row in student:
        if counter == 0:
            counter += 1
        else:
            student_id = row[0]
            student_firstname = str(row[1]).strip()
            student_lastname = str(row[2]).strip()
            student_email = str(row[3])
            program = str(row[4])
            student_enrolled_subject = row[5]
            student_enrolled_snumber = row[6]
            course_name = URIRef("http://focu.io/data#" + str(row[7]).replace(" ", "%"))
            student_grade = row[8]
            student_enrolled_term = row[9]
            b_node = BNode()
            print("http://focu.io/data#" + quote(str(student_lastname + student_firstname)))
            student_node = URIRef("http://focu.io/data#" + quote(str(student_lastname + student_firstname)))
            print(student_node)
            g.add((student_node, rdf.type, focu.Student))
            g.add((student_node, foaf.givenName, Literal(student_firstname)))
            g.add((student_node, foaf.familyName, Literal(student_lastname)))
            g.add((student_node, v.email, Literal(student_email)))
            g.add((student_node, focu.student_id, Literal(student_id)))
            g.add((b_node, foaf.name, course_name))
def delete_triples(uri):
    yse_skos.remove((URIRef(uri), None, None))
    yse_skos.remove((None, None, URIRef(uri)))
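# Note: the two remove() calls detach the URI both as subject and as
# object, so no triple in yse_skos references it afterwards (a URI used
# only in the predicate position would survive).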
def check_ro(base_path: Path, nested: bool = False) -> None:
    manifest_file = base_path / "metadata" / "manifest.json"
    assert manifest_file.is_file(), "Can't find {}".format(manifest_file)
    arcp_root = find_arcp(base_path)
    base = urllib.parse.urljoin(arcp_root, "metadata/manifest.json")
    g = Graph()

    # Avoid resolving JSON-LD context https://w3id.org/bundle/context
    # so this test works offline
    context = Path(get_data("tests/bundle-context.jsonld")).as_uri()
    with open(manifest_file, "r", encoding="UTF-8") as fh:
        jsonld = fh.read()
        # replace with file:/// URI
        jsonld = jsonld.replace("https://w3id.org/bundle/context", context)
    g.parse(data=jsonld, format="json-ld", publicID=base)
    if os.environ.get("DEBUG"):
        print("Parsed manifest:\n\n")
        g.serialize(sys.stdout, format="ttl")

    ro = None
    for ro in g.subjects(ORE.isDescribedBy, URIRef(base)):
        break
    assert ro is not None, "Can't find RO with ore:isDescribedBy"

    profile = None
    for dc in g.objects(ro, DCTERMS.conformsTo):
        profile = dc
        break
    assert profile is not None, "Can't find profile with dct:conformsTo"
    assert profile == URIRef(provenance.CWLPROV_VERSION), (
        "Unexpected cwlprov version " + profile
    )

    paths = []
    externals = []
    for aggregate in g.objects(ro, ORE.aggregates):
        if not arcp.is_arcp_uri(aggregate):
            externals.append(aggregate)
            # Won't check external URIs existence here
            # TODO: Check they are not relative!
            continue
        lfile = _arcp2file(base_path, aggregate)
        paths.append(os.path.relpath(lfile, base_path))
        assert os.path.isfile(lfile), "Can't find aggregated {}".format(lfile)

    assert paths, "Didn't find any arcp aggregates"
    assert externals, "Didn't find any data URIs"

    for ext in ["provn", "xml", "json", "jsonld", "nt", "ttl"]:
        f = "metadata/provenance/primary.cwlprov.%s" % ext
        assert f in paths, "provenance file missing " + f

    for f in [
        "workflow/primary-job.json",
        "workflow/packed.cwl",
        "workflow/primary-output.json",
    ]:
        assert f in paths, "workflow file missing " + f
    # Can't test snapshot/ files directly as their name varies

    # TODO: check urn:hash::sha1 thingies
    # TODO: Check OA annotations

    packed = urllib.parse.urljoin(arcp_root, "/workflow/packed.cwl")
    primary_job = urllib.parse.urljoin(arcp_root, "/workflow/primary-job.json")
    primary_prov_nt = urllib.parse.urljoin(
        arcp_root, "/metadata/provenance/primary.cwlprov.nt"
    )
    uuid = arcp.parse_arcp(arcp_root).uuid

    highlights = set(g.subjects(OA.motivatedBy, OA.highlighting))
    assert highlights, "Didn't find highlights"
    for h in highlights:
        assert (h, OA.hasTarget, URIRef(packed)) in g

    describes = set(g.subjects(OA.motivatedBy, OA.describing))
    for d in describes:
        assert (d, OA.hasBody, URIRef(arcp_root)) in g
        assert (d, OA.hasTarget, URIRef(uuid.urn)) in g

    linked = set(g.subjects(OA.motivatedBy, OA.linking))
    for l in linked:
        assert (l, OA.hasBody, URIRef(packed)) in g
        assert (l, OA.hasBody, URIRef(primary_job)) in g
        assert (l, OA.hasTarget, URIRef(uuid.urn)) in g

    has_provenance = set(g.subjects(OA.hasBody, URIRef(primary_prov_nt)))
    for p in has_provenance:
        assert (p, OA.hasTarget, URIRef(uuid.urn)) in g
        assert (p, OA.motivatedBy, PROV.has_provenance) in g

        # Check all prov elements are listed
        formats = set()
        for prov in g.objects(p, OA.hasBody):
            assert (prov, DCTERMS.conformsTo, URIRef(provenance.CWLPROV_VERSION)) in g
            # NOTE: DC.format is a Namespace method and does not resolve like other terms
            formats.update(set(g.objects(prov, DC["format"])))
        assert formats, "Could not find media types"

        expected = set(
            Literal(f)
            for f in (
                "application/json",
                "application/ld+json",
                "application/n-triples",
                'text/provenance-notation; charset="UTF-8"',
                'text/turtle; charset="UTF-8"',
                "application/xml",
            )
        )
        assert formats == expected, "Did not match expected PROV media types"

    if nested:
        # Check for additional PROVs
        # Let's try to find the other wf run ID
        otherRuns = set()
        for p in g.subjects(OA.motivatedBy, PROV.has_provenance):
            if (p, OA.hasTarget, URIRef(uuid.urn)) in g:
                continue
            otherRuns.update(set(g.objects(p, OA.hasTarget)))
        assert otherRuns, "Could not find nested workflow run prov annotations"
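# `_arcp2file` is used above but defined elsewhere in the test module; a
# minimal sketch of what it has to do (an assumption, not the original
# helper): map an arcp:// aggregate URI back onto the extracted bundle
# directory by joining its path component onto base_path.
def _arcp2file_sketch(base_path: Path, uri: str) -> str:
    path = urllib.parse.urlparse(uri).path.lstrip("/")
    return os.path.join(base_path, path)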
from unittest import TestCase

from rdflib import URIRef, RDF, Graph

from ldp import NS as LDP
from ldp.dataset import NamedContextDataset, context as dataset
from ldp.globals import continents, capitals, aggregation

CONTINENTS = URIRef('http://www.telegraphis.net/data/continents')
CAPITALS = URIRef('http://www.telegraphis.net/data/capitals')


class TestNamedContext(TestCase):
    def test_single_context_parse(self):
        ds = NamedContextDataset()
        ds.g['cont'] = ds.parse(source='test/continents.rdf', publicID=CONTINENTS)
        self.assertEqual(len(list(ds.g['cont'][::])), 112)

    def test_multiple_context_parse(self):
        ds = NamedContextDataset()
        ds.g['cont'] = ds.parse(source='test/continents.rdf', publicID=CONTINENTS)
        self.assertEqual(len(list(ds.g['cont'][::])), 112)
        # capitals.rdf is parsed with the same publicID, so per the assert
        # below the 'cont' context now holds both files' triples.
        ds.g['capitals'] = ds.parse(source='test/capitals.rdf', publicID=CONTINENTS)
        self.assertEqual(len(list(ds.g['cont'][::])), 2584)

    def test_multiple_context_aggregation(self):
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF, RDFS, FOAF, OWL, XSD, DC, DCTERMS
import json
import os

path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
data = json.load(open(path + "/DatiPerElaborazione/Pub.geojson", "r"))

g = Graph()

cmo = Namespace("http://www.comune.milano.it/ontology/")
schema = Namespace("https://schema.org/")
g.bind("cmo", cmo)
g.bind("schema", schema)

for element in data:
    uri = element["URI"]
    g.add([URIRef(uri), RDF.type, cmo.Pub])
    g.add([URIRef(uri), RDFS.label, Literal(element["nome"])])
    g.add([URIRef(uri), cmo.localBusinessWebsite, Literal(element["website"])])
    g.add([URIRef(uri), schema.email, Literal(element["email"])])
    g.add([URIRef(uri), cmo.localBusinessPostalCode, Literal(element["cap"])])
    g.add([URIRef(uri), schema.address, Literal(element["indirizzo"])])
    g.add([URIRef(uri), cmo.latitude, Literal(element["lat"], datatype=XSD.float)])
    g.add([URIRef(uri), cmo.longitude, Literal(element["long"], datatype=XSD.float)])

g.serialize(destination=path + '/Turtles/pub.ttl', format='turtle')
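# Quick check sketch (hypothetical, not in the original script): reload
# the serialized Turtle file and count the typed pubs.
check = Graph()
check.parse(path + '/Turtles/pub.ttl', format='turtle')
print(len(list(check.subjects(RDF.type, cmo.Pub))), "pubs serialized")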
def read_manifest(item, manifest_file):
    triples = []
    namespaces = {}
    seeAlsoFiles = []
    oxdsClasses = ['http://vocab.ox.ac.uk/dataset/schema#Grouping',
                   'http://vocab.ox.ac.uk/dataset/schema#DataSet']

    aggregates = item.list_rdf_objects(item.uri, "ore:aggregates")

    g = ConjunctiveGraph()
    gparsed = g.parse(manifest_file, format='xml')
    namespaces = dict(g.namespaces())

    # Get the subjects
    subjects = {}
    for s in gparsed.subjects():
        if s in subjects:
            continue
        if type(s).__name__ == 'URIRef':
            if str(s).startswith('file://'):
                ss = str(s).replace('file://', '')
                if manifest_file in ss:
                    subjects[s] = URIRef(item.uri)
                else:
                    manifest_file_path, manifest_file_name = os.path.split(manifest_file)
                    ss = ss.replace(manifest_file_path, '').strip('/')
                    for file_uri in aggregates:
                        if ss in str(file_uri):
                            subjects[s] = URIRef(file_uri)
                            break
                    if s not in subjects:
                        subjects[s] = URIRef(item.uri)
            else:
                subjects[s] = URIRef(s)
        elif type(s).__name__ == 'BNode':
            replace_subject = True
            for o in gparsed.objects():
                if o == s:
                    replace_subject = False
            if replace_subject:
                subjects[s] = URIRef(item.uri)
            else:
                subjects[s] = s

    # Get the dataset type
    # set the subject uri to item uri if it is of type as defined in oxdsClasses
    datasetType = False
    for s, p, o in gparsed.triples((None, RDF.type, None)):
        if str(o) in oxdsClasses:
            if type(s).__name__ == 'URIRef' and len(s) > 0 and str(s) != str(item.uri) and str(subjects[s]) != str(item.uri):
                namespaces['owl'] = URIRef("http://www.w3.org/2002/07/owl#")
                triples.append((item.uri, 'owl:sameAs', s))
                triples.append((item.uri, RDF.type, o))
            elif type(s).__name__ == 'BNode' or len(s) == 0 or str(s) == str(item.uri) or str(subjects[s]) == str(item.uri):
                gparsed.remove((s, p, o))
                subjects[s] = item.uri

    # Get the uri for the see also files
    for s, p, o in gparsed.triples((None, URIRef('http://www.w3.org/2000/01/rdf-schema#seeAlso'), None)):
        if type(o).__name__ == 'URIRef' and len(o) > 0:
            obj = str(o)
            if obj.startswith('file://'):
                obj_path, obj_name = os.path.split(obj)
                obj = obj.replace(obj_path, '').strip('/')
                for file_uri in aggregates:
                    if obj in str(file_uri):
                        seeAlsoFiles.append(file_uri)
            gparsed.remove((s, p, o))

    # Add remaining triples
    for s, p, o in gparsed.triples((None, None, None)):
        triples.append((subjects[s], p, o))
    return namespaces, triples, seeAlsoFiles
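# Summary note: read_manifest normalises every subject in the manifest to
# either the item URI or one of its aggregated file URIs, records an
# owl:sameAs when an external URI claims an oxds Grouping/DataSet type,
# and pulls rdfs:seeAlso file references out into seeAlsoFiles before
# returning the remaining triples.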
def browser_cerca():
    """
    Allows communication with the agent from a browser via a form
    """
    global product_list

    if request.method == 'GET':
        return render_template('cerca.html', products=None)
    elif request.method == 'POST':
        # Search request
        if request.form['submit'] == 'Cerca':
            logger.info("Sending search request")

            # Content of the message
            contentResult = ECSDI['Cerca_productes_' + str(get_count())]

            # Graph creation
            gr = Graph()
            gr.add((contentResult, RDF.type, ECSDI.Cerca_productes))

            # Add name restriction
            nom = request.form['nom']
            if nom:
                # Subject nom
                subject_nom = ECSDI['RestriccioNom' + str(get_count())]
                gr.add((subject_nom, RDF.type, ECSDI.RestriccioNom))
                gr.add((subject_nom, ECSDI.Nom, Literal(nom, datatype=XSD.string)))
                # Add restriction to content
                gr.add((contentResult, ECSDI.Restringe, URIRef(subject_nom)))

            marca = request.form['marca']
            if marca:
                subject_marca = ECSDI['Restriccion_Marca_' + str(get_count())]
                gr.add((subject_marca, RDF.type, ECSDI.Restriccion_Marca))
                gr.add((subject_marca, ECSDI.Marca, Literal(marca, datatype=XSD.string)))
                gr.add((contentResult, ECSDI.Restringe, URIRef(subject_marca)))

            min_price = request.form['min_price']
            max_price = request.form['max_price']
            if min_price or max_price:
                subject_preus = ECSDI['Restriccion_Preus_' + str(get_count())]
                gr.add((subject_preus, RDF.type, ECSDI.Rango_precio))
                if min_price:
                    gr.add((subject_preus, ECSDI.Precio_min, Literal(min_price)))
                if max_price:
                    gr.add((subject_preus, ECSDI.Precio_max, Literal(max_price)))
                gr.add((contentResult, ECSDI.Restringe, URIRef(subject_preus)))

            seller = get_agent_info(agn.SellerAgent, DirectoryAgent, UserPersonalAgent, get_count())

            gr2 = send_message(
                build_message(gr, perf=ACL.request, sender=UserPersonalAgent.uri,
                              receiver=seller.uri, msgcnt=get_count(),
                              content=contentResult),
                seller.address)

            index = 0
            subject_pos = {}
            product_list = []
            for s, p, o in gr2:
                if s not in subject_pos:
                    subject_pos[s] = index
                    product_list.append({})
                    index += 1
                if s in subject_pos:
                    subject_dict = product_list[subject_pos[s]]
                    if p == RDF.type:
                        subject_dict['url'] = s
                    elif p == ECSDI.Marca:
                        subject_dict['marca'] = o
                    elif p == ECSDI.Modelo:
                        subject_dict['modelo'] = o
                    elif p == ECSDI.Precio:
                        subject_dict['precio'] = o
                    elif p == ECSDI.Nombre:
                        subject_dict['nombre'] = o
                    elif p == ECSDI.Peso:
                        subject_dict['peso'] = o
                    product_list[subject_pos[s]] = subject_dict

            return render_template('cerca.html', products=product_list)

        # --------------------------------------------------------------------------------------------------------------

        # Purchase request
        elif request.form['submit'] == 'Comprar':
            products_checked = []
            for item in request.form.getlist("checkbox"):
                item_checked = []
                item_map = product_list[int(item)]
                item_checked.append(item_map['marca'])
                item_checked.append(item_map['modelo'])
                item_checked.append(item_map['nombre'])
                item_checked.append(item_map['precio'])
                item_checked.append(item_map['url'])
                item_checked.append(item_map['peso'])
                products_checked.append(item_checked)

            logger.info("Creating the purchase request")

            # Content of the message
            content = ECSDI['Peticion_compra_' + str(get_count())]

            # Graph creation
            gr = Graph()
            gr.add((content, RDF.type, ECSDI.Peticion_compra))
            # Give the request a priority (we use the message counter)
            gr.add((content, ECSDI.Prioridad, Literal(get_count(), datatype=XSD.integer)))

            # City creation (Barcelona for now); the random upper bound must be an integer --------------------------
            subject_ciudad = ECSDI['Ciudad_' + str(random.randint(1, sys.maxsize))]
            gr.add((subject_ciudad, RDF.type, ECSDI.Ciudad))
            gr.add((subject_ciudad, ECSDI.Nombre, Literal('Barcelona', datatype=XSD.string)))
            gr.add((subject_ciudad, ECSDI.Latitud, Literal(41.398373, datatype=XSD.float)))
            gr.add((subject_ciudad, ECSDI.Longitud, Literal(2.188247, datatype=XSD.float)))

            # Envelope creation (Compra) ------------------------------------------------------------------------------
            subject_sobre = ECSDI['Compra_' + str(random.randint(1, sys.maxsize))]
            gr.add((subject_sobre, RDF.type, ECSDI.Compra))
            gr.add((subject_sobre, ECSDI.Pagat, Literal(0, datatype=XSD.integer)))
            gr.add((subject_sobre, ECSDI.Enviar_a, URIRef(subject_ciudad)))

            total_price = 0.0
            for item in products_checked:
                total_price += float(item[3])
                # Product creation --------------------------------------------------------------------------------
                subject_producto = item[4]
                gr.add((subject_producto, RDF.type, ECSDI.Producto))
                gr.add((subject_producto, ECSDI.Marca, Literal(item[0], datatype=XSD.string)))
                gr.add((subject_producto, ECSDI.Modelo, Literal(item[1], datatype=XSD.string)))
                gr.add((subject_producto, ECSDI.Nombre, Literal(item[2], datatype=XSD.string)))
                gr.add((subject_producto, ECSDI.Precio, Literal(item[3], datatype=XSD.float)))
                gr.add((subject_producto, ECSDI.Peso, Literal(item[5], datatype=XSD.float)))
                gr.add((subject_sobre, ECSDI.Productos, URIRef(subject_producto)))

            gr.add((subject_sobre, ECSDI.Precio_total, Literal(total_price, datatype=XSD.float)))
            gr.add((content, ECSDI.Sobre, URIRef(subject_sobre)))

            seller = get_agent_info(agn.SellerAgent, DirectoryAgent, UserPersonalAgent, get_count())

            answer = send_message(
                build_message(gr, perf=ACL.request, sender=UserPersonalAgent.uri,
                              receiver=seller.uri, msgcnt=get_count(),
                              content=content),
                seller.address)

            products_matrix = []
            for item in answer.subjects(RDF.type, ECSDI.Producto):
                product = [
                    answer.value(subject=item, predicate=ECSDI.Marca),
                    answer.value(subject=item, predicate=ECSDI.Modelo),
                    answer.value(subject=item, predicate=ECSDI.Nombre),
                    answer.value(subject=item, predicate=ECSDI.Precio)
                ]
                products_matrix.append(product)

            return render_template('endSell.html', products=products_matrix)
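# Note on the message flow above: both branches build an ECSDI request
# graph, send it to the SellerAgent resolved through the DirectoryAgent,
# and render the response triples back into the form templates; the
# message counter doubles as a crude priority and as a uniquifier for the
# minted ECSDI subjects.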