def test_rdfxml(self):
    """Fetch the system view rendered as RDF/XML and check the parsed graph size.

    Builds the view URL for this test's system, parses the response as
    RDF/XML, and asserts the graph contains at least 9 triples.
    """
    rdf_url = urljoin(
        get_server_base(),
        'view/%s?%s' % (quote(self.system.fqdn.encode('utf8')),
                        urlencode({'tg_format': 'rdfxml'})))
    graph = rdflib.graph.Graph()
    # 'xml' selects rdflib's RDF/XML parser; parsing straight from the URL.
    graph.parse(location=rdf_url, format='xml')
    # assertGreaterEqual replaces the deprecated assert_ alias and reports
    # both operands on failure instead of a bare boolean.
    self.assertGreaterEqual(len(graph), 9)
def getROEvolution(self, rouri):
    """Fetch the evolution (lifecycle) information for a Research Object.

    Returns a 4-tuple (status, reason, data, evo_type) where evo_type is an
    EvoType value or None when the RO / its evolution data is unavailable.
    NOTE(review): Python 2 code (print statement); the 401 branch returns
    bare None instead of the usual 4-tuple — callers must handle both shapes.
    """
    #if len(rouri.split(self._srsuri))>1:
    #rouri = rouri.split(self._srsuri)[-1]
    # Probe the manifest first; a 404 here means the RO does not exist.
    (manifest_status, manifest_reason, manifest_headers, manifest_data) = (
        self.doRequest(uripath=urljoin(rouri,".ro/manifest.rdf"), accept="application/rdf+xml"))
    if manifest_status == 404:
        return (manifest_status, manifest_reason, manifest_data, None)
    # Re-request the RO itself; the previous response values are overwritten.
    (manifest_status, manifest_reason, manifest_headers, manifest_data) = self.doRequest(uripath=rouri, accept="application/rdf+xml")
    if manifest_status == 404 or not "link" in manifest_headers:
        if manifest_status == 401:
            # Inconsistent return shape: None rather than a 4-tuple.
            print "Unauthorised operation"
            return None
        return (manifest_status, manifest_reason, manifest_data, None)
    # The Link header looks like '<uri>; rel=...' — strip '<' and the rel part
    # to recover the evolution resource URI.
    parssed_header_uri = manifest_headers["link"].split(">; rel=")[0][1:]
    (evolution_status, evolution_reason, evolution_headers, evolution_data) = self.doRequest(uripath=parssed_header_uri,accept="text/turtle")
    if evolution_status == 404:
        return (evolution_status, evolution_reason, evolution_data, EvoType.UNDEFINED)
    graph = rdflib.Graph()
    # Evolution data is Turtle; rdflib's "n3" parser accepts it.
    graph.parse(data=evolution_data, format="n3")
    try:
        # If any roevo:isFinalized triple exists, report UNDEFINED.
        (graph.objects(getResourceUri(self._srsuri, rouri), ROEVO.isFinalized)).next()
        return (evolution_status, evolution_reason, evolution_data, EvoType.UNDEFINED)
    except StopIteration as error:
        try:
            # Otherwise classify the RO by its rdf:type values.
            return (evolution_status, evolution_reason, evolution_data, self.checkType(graph.objects(getResourceUri(self._srsuri, rouri), RDF.type)))
        except StopIteration as error:
            # No type information either — evolution state unknown.
            return (evolution_status, evolution_reason, evolution_data, EvoType.UNDEFINED)
def session(request, department_Name, session_Start):
    """Render the session page for a department: students joined with their guides.

    Loads the session's student RDF file and the department's professor RDF
    file into one graph, then queries the combined data.
    """
    dep = get_object_or_404(Department, Name=department_Name)
    session = get_object_or_404(Session, Start=session_Start, Department=dep)
    rdfextras.registerplugins()

    graph = rdflib.Graph()
    for rdf_file in (session.Student_file, dep.Professor_file):
        graph.parse(rdf_file)

    student_query = """
        PREFIX nit: <http://nitdgp.ac.in/>
        SELECT ?fname ?lname ?email ?roll ?dept ?regno ?address
               ?prof_fname ?prof_lname ?bgroup ?height ?weight
        WHERE {
            ?x nit:fname ?fname.
            ?x nit:lname ?lname.
            ?x nit:email ?email.
            ?x nit:roll ?roll.
            ?x nit:dept ?dept.
            ?x nit:regno ?regno.
            ?x nit:address ?address.
            ?x nit:email ?email.
            ?x nit:researchUnder ?researchUnder.
            ?x nit:bgroup ?bgroup.
            ?x nit:height ?height.
            ?x nit:weight ?weight.
            ?researchUnder nit:fname ?prof_fname.
            ?researchUnder nit:lname ?prof_lname
        }
        """
    rows = graph.query(student_query)

    return render(request, 'department/session.html', {
        'dep': dep,
        'res1': rows,
        'session': session,
    })
def test_rdfxml(self):
    """The RDF/XML export of the system view should parse into >= 9 triples."""
    query = urlencode({"tg_format": "rdfxml"})
    path = "view/%s?%s" % (quote(self.system.fqdn.encode("utf8")), query)
    rdf_url = urljoin(get_server_base(), path)
    graph = rdflib.graph.Graph()
    graph.parse(location=rdf_url, format="xml")
    self.assert_(len(graph) >= 9)
def test_rdfxml(self):
    """Parse the system page's RDF/XML rendering and sanity-check its size."""
    encoded_fqdn = quote(self.system.fqdn.encode('utf8'))
    params = urlencode({'tg_format': 'rdfxml'})
    rdf_url = urljoin(get_server_base(),
                      'view/{0}?{1}'.format(encoded_fqdn, params))
    graph = rdflib.graph.Graph()
    graph.parse(location=rdf_url, format='xml')
    self.assert_(len(graph) >= 9)
def init_graph(self, name):
    """Build, populate and register the RDF graph configured as 'graph_<name>'.

    Reads the graph's description from the 'configuration' utility: its
    storage backend, optional identifier, optional seed data URI
    ('load_from'), and an optional 'contains' list that turns it into an
    aggregate Dataset of other already-initialised graphs.
    The finished graph is stored in self.graphs and registered as an
    IGraph utility under *name*.
    NOTE(review): assumes any graph named in 'contains' was initialised
    earlier — ordering dependency on the caller.
    """
    config=getUtility(Interface, name='configuration')
    graph_descr=config['graph_'+name]
    contains=graph_descr.get('contains', None)
    identifier=graph_descr.get('id', name)
    storage=graph_descr['storage']
    load=graph_descr.get('load_from', None)
    # Storage entry maps to (rdflib store plugin name, on-disk path or None).
    sto_driver, sto_filepath=self.storages[storage]
    if contains == None:
        # Plain single graph backed by the configured store.
        graph=rdflib.graph.Graph(
            store=sto_driver,
            identifier=identifier,
            namespace_manager=self.ns_man
        )
        if sto_filepath is not None:
            sto_filepath = os.path.abspath(sto_filepath)
            if not os.path.exists(sto_filepath):
                os.makedirs(sto_filepath)
            graph.open(sto_filepath, create=True)
    else:
        # Aggregate graph composed of other named graphs.
        if sto_driver != 'default':
            graph=rdflib.graph.ConjunctiveGraph(sto_driver, identifier=identifier)
            if sto_filepath != None:
                graph.open(sto_filepath,create=True)
        else:
            graph=rdflib.graph.Dataset(sto_driver)
        for cg in contains.split(','):
            cg=cg.strip()
            g=self.graphs[cg]
            graph.add_graph(g)
    # Seed an empty graph from the configured URI, best-effort.
    if len(graph)==0 and load != None:
        try:
            graph.parse(load) # FIXME slashes in Windows
            graph.commit()
        except IOError:
            logger.warning ("Cannot load graph from URI:" + load)
    # Bind the project-wide namespace prefixes on every graph.
    for nk,nv in NAMESPACES.items():
        graph.bind(nk,nv)
    if name=='ns':
        # The 'ns' graph doubles as the shared namespace manager.
        self.ns_man=graph
    self.graphs[name]=graph
    GSM=getGlobalSiteManager()
    GSM.registerUtility(graph, IGraph, name=name)
def get_asset_ids(assets_data, content_type):
    """
    Extract source_id, source_id_type and entity_uri for every asset in the data.

    Loads the triples into an in-memory RDFLib graph and runs a SPARQL query
    over it.
    * Caution this is potentially very cpu expensive and could use a lot of
      memory
    :param assets_data: A blob of triples
    :param content_type: The type of the blob
    :return: an array of dicts that show the ids of assets
    """
    graph = rdflib.graph.Graph()
    graph.parse(data=assets_data, format=TYPE_MAPPING[content_type])
    rows = graph.query(SPARQL_PREFIXES + ASSET_QUERY_ALL_ENTITY_IDS)
    # Keep only the last path segment of each entity URI.
    return [{u'entity_id': row[u'entity_id'].split('/')[-1]} for row in rows]
def display(request):
    """Render a student's semester result looked up by registration number.

    Query params: dept, session, regno, sem. Loads the matching Result's RDF
    file and selects the row for the requested registration number.
    """
    dept_P = request.GET['dept']
    session_P = request.GET['session']
    regno_P = request.GET['regno']
    sem_P = request.GET['sem']
    dep = get_object_or_404(Department, Name=dept_P)
    session = get_object_or_404(Session, Start=session_P, Department=dep)
    result = get_object_or_404(Result, Session=session, Department=dep,
                               Semester=sem_P)
    rdfextras.registerplugins()
    g = rdflib.Graph()
    g.parse(result.Result_file)
    # SECURITY: the original spliced the raw 'regno' request value straight
    # into the SPARQL text, allowing query injection. Bind it as a query
    # variable via initBindings instead.
    S = """
        PREFIX nit: <http://nitdgp.ac.in/>
        SELECT ?fname ?lname ?roll ?regno ?cgpa ?sgpa ?BT501LG ?BT502LG
               ?BT503LG ?OELG ?BT551LG ?BT552LG ?CH581LG
        WHERE {
            ?x nit:fname ?fname.
            ?x nit:lname ?lname.
            ?x nit:roll ?roll.
            ?x nit:regno ?regno.
            ?x nit:cgpa ?cgpa.
            ?x nit:sgpa ?sgpa.
            ?x nit:BT501LG ?BT501LG.
            ?x nit:BT502LG ?BT502LG.
            ?x nit:BT503LG ?BT503LG.
            ?x nit:OELG ?OELG.
            ?x nit:BT551LG ?BT551LG.
            ?x nit:BT552LG ?BT552LG.
            ?x nit:CH581LG ?CH581LG
        }
        """
    res1 = g.query(S, initBindings={'regno': rdflib.Literal(regno_P)})
    context = {
        'dep': dep,
        'res1': res1,
        'session': session,
        'sem': sem_P,
    }
    return render(request, 'department/display.html', context)
def get_asset_source_ids(assets_data, content_type, entity_id):
    """
    Extract source_id and source_id_type for every asset in the data.

    Loads the triples into an in-memory RDFLib graph and queries for the
    identifiers of the given entity.
    * Caution this is potentially very cpu expensive and could use a lot of
      memory
    :param assets_data: A blob of triples
    :param content_type: The type of the blob
    :return: an array of dicts that show the ids of assets
    """
    graph = rdflib.graph.Graph()
    graph.parse(data=assets_data, format=TYPE_MAPPING[content_type])
    query = SPARQL_PREFIXES + ASSET_GET_ALSO_IDENTIFIED.format(
        entity_id=Asset.normalise_id(entity_id))
    rows = graph.query(query)
    # Column 0 is the id-type URI (keep its last segment); column 1 the id.
    return [
        {'source_id_type': row[0].split('/')[-1], 'source_id': str(row[1])}
        for row in rows
    ]
def detail(request, department_Name):
    """Render a department's detail page: its professors and its sessions."""
    dep = get_object_or_404(Department, Name=department_Name)
    sessions = dep.session_set.all()
    rdfextras.registerplugins()

    graph = rdflib.Graph()
    graph.parse(dep.Professor_file)
    professor_query = """
        PREFIX nit: <http://nitdgp.ac.in/>
        SELECT ?fname ?lname ?email ?research ?designation
        WHERE {
            ?x nit:fname ?fname.
            ?x nit:lname ?lname.
            ?x nit:email ?email.
            ?x nit:designation ?designation.
            ?x nit:research ?research
        }
        """
    rows = graph.query(professor_query)

    return render(request, 'department/detail.html', {
        'dep': dep,
        'res1': rows,
        'sessions': sessions,
    })
# NOTE(review): this span was unresolved merge-conflict residue (`=======`,
# `>>>>>>> a976707e...`) followed by stray shell text (`git commit`). Both
# sides are kept sequentially and the markers removed; confirm against the
# intended merge result.
testimonial = TextBlob(help)
if testimonial.sentiment.polarity < 0:
    wantsTalk = False
    print("Alright have a nice day!")

# To get keyword phrases ranked highest to lowest.
# Uses stopwords for english from NLTK, and all punctuation characters.
r = Rake()
r.extract_keywords_from_text(help)
#print(r.get_ranked_phrases())  # To get keyword phrases ranked highest to lowest.

print("HOLA MUNDO")
# Fixed syntax error: `from rdflib import Graph()` is invalid — the call
# parentheses do not belong in an import statement.
from rdflib import Graph

g = Graph()
PATH_TO_DATA = "92771.ttl"
g.parse(PATH_TO_DATA)
for i in g.objects():
    print(i)