コード例 #1
0
    def _parse_hextuple(self, cg: ConjunctiveGraph, tup: List[Union[str, None]]):
        """Convert one HexTuple row into a triple/quad and add it to *cg*.

        The row layout is (subject, predicate, value, datatype, language,
        graph); only the language and graph columns may be None.
        """
        subj_v, pred_v, value_v, dtype_v, lang_v, graph_v = (
            tup[0], tup[1], tup[2], tup[3], tup[4], tup[5])
        if None in (subj_v, pred_v, value_v, dtype_v):
            raise ValueError("subject, predicate, value, datatype cannot be None")

        # Subject: a blank node when written "_:xyz", otherwise an IRI.
        s: Union[URIRef, BNode]
        if subj_v.startswith("_"):
            s = BNode(value=subj_v.replace("_:", ""))
        else:
            s = URIRef(subj_v)

        # Predicate is always an IRI.
        p = URIRef(pred_v)

        # Object: the datatype column selects IRI / blank node / literal.
        o: Union[URIRef, BNode, Literal]
        if dtype_v == "globalId":
            o = URIRef(value_v)
        elif dtype_v == "localId":
            o = BNode(value=value_v.replace("_:", ""))
        elif lang_v is None:
            o = Literal(value_v, datatype=URIRef(dtype_v))
        else:
            o = Literal(value_v, lang=lang_v)

        # A non-empty graph column turns the statement into a quad.
        if graph_v is None:
            cg.add((s, p, o))
        else:
            cg.add((s, p, o, URIRef(graph_v)))
コード例 #2
0
ファイル: AppPoolViews.py プロジェクト: jo-tud/aof
def fill_graph_by_subject(basegraph, newgraph, subject, loop_count=0):
    """
    Copy every triple with the given subject from *basegraph* into
    *newgraph*, recursively including the descriptions of object nodes
    up to a depth of 5.
    :param basegraph: Graph holding the data for the new Graph
    :param newgraph: instance of the new Graph that receives the triples
    :param subject: subject (BNode or URIRef) looked up in the basegraph
    :return: Graph
    """
    node_types = (BNode, URIRef)

    if not issubclass(type(basegraph), Graph):
        log.error("The given basegraph is not a subclass of Graph!")
        return ConjunctiveGraph()
    elif subject == "":
        log.info("The given subject was empty. Returning the basegraph")
        return basegraph
    elif type(subject) not in node_types:
        log.info("The given subject was not of type BNode or URIRef. Returning the basegraph")
        return basegraph
    elif not issubclass(type(newgraph), Graph):
        newgraph = ConjunctiveGraph()

    loop_count += 1
    for trip_s, trip_p, trip_o in basegraph.triples((subject, None, None)):
        newgraph.add((trip_s, trip_p, trip_o))
        # Descend into the object's own description, but never deeper than 5:
        # (S1,P1,O1) -> if O1 has its own description: (O1,P2,O2) ... 5 times.
        if type(trip_o) in node_types and loop_count < 6:
            newgraph = fill_graph_by_subject(basegraph, newgraph, trip_o, loop_count)
    return newgraph
コード例 #3
0
ファイル: virtuoso_mapping.py プロジェクト: iilab/assembl
    def discussion_as_graph(self, discussion_id):
        """Assemble a ConjunctiveGraph describing one discussion from the
        per-discussion Virtuoso storage.

        Gathers every subject declared `assembl:in_conversation` with the
        discussion, copies all their properties from the discussion's named
        graph, then adds `catalyst:expressesIdea` quads.
        """
        self.ensure_discussion_storage(None)
        from assembl.models import Discussion
        d_storage_name = self.discussion_storage_name()
        d_graph_iri = URIRef(self.discussion_graph_iri())
        v = get_virtuoso(self.session, d_storage_name)
        discussion_uri = URIRef(
            Discussion.uri_generic(discussion_id, self.local_uri()))
        # All subjects that are part of this conversation; each query row is
        # a 1-tuple, and the discussion itself is appended in the same shape.
        subjects = list(v.query(
            """SELECT DISTINCT ?s WHERE {
            ?s assembl:in_conversation %s }""" % (discussion_uri.n3())))
        subjects.append([discussion_uri])
        # print len(subjects)
        cg = ConjunctiveGraph(identifier=d_graph_iri)
        # One property query per subject, scoped to the discussion graph.
        for (s,) in subjects:
            # Absurdly slow. DISTINCT speeds up a lot, but I get numbers.
            for p, o in v.query(
                'SELECT ?p ?o WHERE { graph %s { %s ?p ?o }}' % (
                        d_graph_iri.n3(), s.n3())):
                    cg.add((s, p, o))

        # Ideas expressed in this conversation, kept with their source graph.
        for (s, o, g) in v.query(
                '''SELECT ?s ?o ?g WHERE {
                GRAPH ?g {?s catalyst:expressesIdea ?o } .
                ?o assembl:in_conversation %s }''' % (discussion_uri.n3())):
            cg.add((s, CATALYST.expressesIdea, o, g))

        # TODO: Add roles

        return cg
コード例 #4
0
class Topic(object):
    """RDF description of an eventseer topic entity.

    The entity is typed as a SIOC 'Tag' and its named graph receives a
    dcterms:modified timestamp at construction time.
    """
    def __init__(self, entity_name, entity_id):
        '''
        Constructor.

        :param entity_name: name of the entity
            (NOTE(review): currently unused here — confirm whether it
            should be stored or removed)
        :param entity_id: identifier used for both the LDES resource and
            the named-graph URI
        '''
        # Get the event page and compute its id
        self.entity_id = entity_id
        self.resource = LDES[self.entity_id]
        
        # Create the graph and bind the common prefixes
        self.graph = ConjunctiveGraph()
        self.graph.bind('swc', SWC)
        self.graph.bind('cfp', CFP)
        self.graph.bind('ical', ICAL)
        self.graph.bind('foaf', FOAF)
        self.graph.bind('dct', DCT)
        self.graph.bind('lode', LODE)
        
        # Declare the type of the resource
        self.graph.add((self.resource, RDF.type, SIOCT['Tag']))
        self.graph.add((self.named_graph(), DCT['modified'], Literal(datetime.now()))) 
        
    def get_rdf_data(self):
        """Return the serialized RDF for this topic."""
        return self.graph.serialize()
    
    def named_graph(self):
        """URI of the named graph this entity's data lives in."""
        return URIRef(NAMED_GRAPHS_BASE + self.entity_id + '.rdf')
    
    def process(self, record, entity_url):
        """Fetch the entity page from eventseer.

        NOTE(review): the parsed document is immediately discarded, so this
        currently only performs the HTTP fetch — confirm intent.
        """
        # Get the document
        document = BeautifulSoup(urllib2.urlopen("http://eventseer.net" + entity_url).read())
        del document
コード例 #5
0
ファイル: models.py プロジェクト: delving/nave
    def get_graph_from_sparql_results(sparql_json, named_graph=None):
        """Build a graph from a SPARQL JSON result set.

        :param sparql_json: parsed SPARQL JSON response (head/results dict)
        :param named_graph: graph identifier; when absent and the result
            set carries a ?g variable, the first binding's graph value is used
        :return: tuple of (ConjunctiveGraph, number of context levels)
        """
        if len(sparql_json['results']['bindings']) == 0:
            return ConjunctiveGraph(), 0
        sparql_vars = sparql_json['head']['vars']
        if 'g' in sparql_vars:
            if not named_graph:
                named_graph = sparql_json['results']['bindings'][0]['g']['value']
            # NOTE(review): this mutates the 'vars' list inside sparql_json
            # before it is read again on the next line — confirm intended.
            sparql_vars.remove('g')
        triple_levels = RDFModel.get_context_triples(sparql_json['head']['vars'])
        nr_levels = len(triple_levels)
        if named_graph:
            named_graph = URIRef(named_graph)
        graph = ConjunctiveGraph(identifier=named_graph)

        graph.namespace_manager = namespace_manager
        for binding in sparql_json['results']['bindings']:
            # Number of (s, p, o) variable triples present in this binding.
            binding_levels = RDFModel.get_context_levels(len(binding.keys()))
            for s, p, o in triple_levels[:binding_levels]:
                subject = URIRef(binding[s]['value'])
                # Blank-node results must be rebuilt as BNodes, not URIRefs.
                if binding[s]['type'] == 'bnode':
                    subject = BNode(binding[s]['value'])
                predicate = URIRef(binding[p]['value'])
                obj = RDFModel.get_object_from_sparql_result(binding[o])
                graph.add((subject, predicate, obj))
        # materialize inferences: invert ore:isAggregatedBy into ore:aggregates
        for subject, obj in graph.subject_objects(
                predicate=URIRef("http://www.openarchives.org/ore/terms/isAggregatedBy")):
            graph.add((obj, URIRef("http://www.openarchives.org/ore/terms/aggregates"), subject))
            graph.remove((subject, URIRef("http://www.openarchives.org/ore/terms/isAggregatedBy"), obj))
        return graph, nr_levels
コード例 #6
0
    def query_graph(self, subj=None, pred=None, obj=None, exhaustive=False):
        """Return a graph of all triples whose subject is in `subj`, whose
        predicate matches `pred`, OR whose object matches `obj`.

        If `exhaustive`, match all sub-elements of the given arguments
        (if subj is http://127.0.0.1/api/v1/wine/, also match
        http://127.0.0.1/api/v1/wine/{s} for every s).  Arguments must be
        of type URIRef or Literal.
        """
        result = ConjunctiveGraph()
        # `subj` may be a single term or a list of terms.
        if not isinstance(subj, list):
            subj = [subj]
        for sub in subj:
            for uri_s, uri_p, uri_o in sorted(self.graph):
                s, p, o = str(uri_s), str(uri_p), str(uri_o)
                if exhaustive:
                    # Compare on the parent path so children match too.
                    s = s.rpartition('/')[0]
                    p = p.rpartition('/')[0]
                    o = o.rpartition('/')[0]
                else:
                    # Normalize away a single trailing slash before comparing.
                    s = s[:-1] if s.endswith('/') else s
                    p = p[:-1] if p.endswith('/') else p
                    o = o[:-1] if o.endswith('/') else o
                if (sub and sub == s) or (pred and pred == p) or (obj and obj == o):
                    result.add((uri_s, uri_p, uri_o))
        return result
コード例 #7
0
ファイル: test_wsgi.py プロジェクト: ujjwalsh/flask_rdf
def make_ctx_graph():
    """Build a ConjunctiveGraph with one named context holding two FOAF
    statements about a test person."""
    root = URIRef('http://example.com/#root')
    g = ConjunctiveGraph('IOMemory', root)
    subject = URIRef('http://example.com/#person')
    g.add((subject, RDF.type, FOAF.Person))
    g.add((subject, FOAF.age, Literal(15, datatype=XSD.integer)))
    return g
コード例 #8
0
def get_rdf_template(item_uri, item_id):
    """Return an RDF/XML document identifying *item_uri* by *item_id*."""
    graph = ConjunctiveGraph(identifier=item_uri)
    graph.bind('rdf', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#')
    graph.bind('dcterms', 'http://purl.org/dc/terms/')
    identifier_pred = URIRef('http://purl.org/dc/terms/identifier')
    graph.add((URIRef(item_uri), identifier_pred, Literal(item_id)))
    return graph.serialize(format='xml', encoding="utf-8") + '\n'
コード例 #9
0
def change_status(vocabprefix, uri, predicate, message, action):
    """Add or remove a status triple in a vocabulary's status.rdf file.

    :param vocabprefix: directory name of the vocabulary under ag.vocabulariesdir
    :param uri: subject URI of the status triple
    :param predicate: 'ns:term' string resolved through the global `namespaces`
    :param message: object of the triple; treated as a URIRef when it looks
        like an http:// or file:// reference, else as a Literal
    :param action: 'add' or 'remove'
    :return: True on success, False on bad action or missing status file
    """
    if action not in ('add', 'remove'):
        return False
    vocab_uri = URIRef(uri)
    vocabdir = os.path.join(ag.vocabulariesdir, vocabprefix)
    vocabstatusfile = os.path.join(vocabdir, "status.rdf")
    if not os.path.isfile(vocabstatusfile):
        return False
    graph = Graph()
    graph.parse(vocabstatusfile)
    parts = predicate.split(':')
    ns = parts[0]
    term = parts[1]
    if message and message.startswith(('http://', 'file://')):
        message = URIRef(message)
    elif message:
        message = Literal(message)
    if action == 'add':
        for prefix, url in namespaces.iteritems():
            graph.bind(prefix, URIRef(url))
        graph.add((vocab_uri, namespaces[ns][term], message))
    elif action == 'remove':
        graph.remove((vocab_uri, namespaces[ns][term], message))

    rdf_str = graph.serialize()
    # Context manager guarantees the handle is closed even if the write fails.
    with codecs.open(vocabstatusfile, 'w', 'utf-8') as f:
        f.write(rdf_str)
    return True
コード例 #10
0
def update_mediator(params):
    """Write user metadata for a mediator and save the RDF file.

    :param params: dict that must contain a non-empty 'username'; may also
        contain 'firstname', 'lastname', 'email', 'title' (single-valued)
        and 'department' (semicolon-separated, multi-valued)
    :return: True on success, False when no username was supplied
    """
    if not ('username' in params and params['username']):
        return False
    det = get_mediator_details(params['username'])
    mediator_file = os.path.join(ag.mediatorsdir, '%s.rdf' % params['username'])
    graph = Graph()
    graph.parse(mediator_file)
    for prefix, url in namespaces.iteritems():
        graph.bind(prefix, URIRef(url))
    uri = URIRef(det['uri'])
    # Single-valued fields: drop any existing value, then set the new one.
    single_fields = [
        ('firstname', 'foaf', 'firstName'),
        ('lastname', 'foaf', 'lastName'),
        ('email', 'foaf', 'mbox'),
        ('title', 'foaf', 'title'),
    ]
    for param_key, ns, term in single_fields:
        if param_key in params and params[param_key]:
            graph.remove((uri, namespaces[ns][term], None))
            graph.add((uri, namespaces[ns][term], Literal(params[param_key])))
    # 'department' is multi-valued: a semicolon-separated list of names.
    if 'department' in params and params['department']:
        graph.remove((uri, namespaces['dcterms']['isPartOf'], None))
        for d in params['department'].split(';'):
            graph.add((uri, namespaces['dcterms']['isPartOf'], Literal(d.strip())))
    rdf_str = graph.serialize()
    # Context manager ensures the file handle is closed on error.
    with codecs.open(mediator_file, 'w', 'utf-8') as f:
        f.write(rdf_str)
    return True
コード例 #11
0
ファイル: virtuoso_mapping.py プロジェクト: rmoorman/assembl
 def discussion_as_graph(self, discussion_id):
     """Assemble one ConjunctiveGraph describing a discussion from the
     per-discussion Virtuoso storage.

     Collects the data of every subject in the conversation, the discussion
     itself, participant profiles and their (pseudo-)accounts, then adds
     account-of and expressesIdea relationships as quads.
     """
     from assembl.models import Discussion, AgentProfile
     local_uri = self.local_uri()
     discussion = Discussion.get(discussion_id)
     d_storage_name = self.discussion_storage_name()
     d_graph_iri = URIRef(self.discussion_graph_iri())
     v = get_virtuoso(self.session, d_storage_name)
     discussion_uri = URIRef(
         Discussion.uri_generic(discussion_id, local_uri))
     # Every subject that declares itself part of this conversation.
     subjects = [s for (s,) in v.query(
         """SELECT DISTINCT ?s WHERE {
         ?s assembl:in_conversation %s }""" % (discussion_uri.n3()))]
     subjects.append(discussion_uri)
     participant_ids = list(discussion.get_participants(True))
     profiles = {URIRef(AgentProfile.uri_generic(id, local_uri))
                 for id in participant_ids}
     subjects.extend(profiles)
     # add pseudo-accounts
     subjects.extend((URIRef("%sAgentAccount/%d" % (local_uri, id))
                      for id in participant_ids))
     # print len(subjects)
     cg = ConjunctiveGraph(identifier=d_graph_iri)
     self.add_subject_data(v, cg, subjects)
     # add relationships of non-pseudo accounts
     for ((account, p, profile), g) in v.triples((None, SIOC.account_of, None)):
         if profile in profiles:
             cg.add((account, SIOC.account_of, profile, g))
             # Tempting: simplify with this.
             # cg.add((profile, FOAF.account, account, g))
     # Ideas expressed in this conversation, kept with their source graph g.
     for (s, o, g) in v.query(
             '''SELECT ?s ?o ?g WHERE {
             GRAPH ?g {?s catalyst:expressesIdea ?o } .
             ?o assembl:in_conversation %s }''' % (discussion_uri.n3())):
         cg.add((s, CATALYST.expressesIdea, o, g))
     return cg
コード例 #12
0
def get_rdf_template(item_uri, item_id):
    """Serialize a minimal RDF/XML record linking *item_uri* to *item_id*."""
    g = ConjunctiveGraph(identifier=item_uri)
    for prefix, namespace in (('rdf', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'),
                              ('dcterms', 'http://purl.org/dc/terms/')):
        g.bind(prefix, namespace)
    g.add((URIRef(item_uri),
           URIRef('http://purl.org/dc/terms/identifier'),
           Literal(item_id)))
    return g.serialize(format='xml', encoding="utf-8") + '\n'
コード例 #13
0
ファイル: rdf_cleanup2.py プロジェクト: koo5/hackery2
def run(input_file, input_format_hint, output_format):
    """Load *input_file*, re-insert its triples sorted by subject, and print
    the resulting serialization line by line.

    :param input_file: path or stream understood by Graph.parse
    :param input_format_hint: rdflib parser format name
    :param output_format: rdflib serializer format name
    """
    g = ConjunctiveGraph(store=OrderedAndIndexedStore())
    g.parse(input_file, format=input_format_hint)

    # Snapshot the triples, then clear and re-add them in subject order so
    # the (order-preserving) store hands them back sorted.
    triples = sorted(g.triples((None, None, None)), key=lambda t: t[0])
    g.remove((None, None, None))
    for t in triples:
        g.add(t)

    for line in g.serialize(format=output_format).splitlines():
        print(line.decode())
コード例 #14
0
def test_sqlalchemy_luuu():
    """Exercise the rdflib-sqlalchemy store with a deliberately malformed
    quad (a Literal in subject position, per the 'sic' marker below)."""
    logger.info(f'Python version: {python_version}')
    logger.info(f'RDFLib version: {rdflib_version}')
    logger.info(f'RDFLib-SQLAlchemy version: {rdflib_sqlalchemy_version}')

    identifier = URIRef('local://test_sqlalchemy_luuu/')

    # Resolve the SQLAlchemy-backed store plugin and instantiate it.
    store = plugin.get(
        'SQLAlchemy',
        Store,
    )(identifier=identifier, )

    graph = ConjunctiveGraph(
        store=store,
        identifier=identifier,
    )

    # In-memory SQLite database, created on open.
    graph.open('sqlite:///', create=True)

    # Four-element list with a Literal subject and an explicit context —
    # RDF does not allow Literal subjects; kept intentionally (see 'sic').
    graph.add([
        Literal('https://example.org'),  # <--- sic!
        RDF.type,
        SDO.WebSite,
        URIRef('https://example.org/about/'),
    ])
コード例 #15
0
    def test_exclude_xhtml(self):
        """Triples whose predicate lies in the XHTML vocabulary namespace can
        be selected — and then deleted — via SPARQL strstarts() filters."""
        ns = "http://www.w3.org/1999/xhtml/vocab#"
        kg = ConjunctiveGraph()
        kg.add((
            BNode(),
            URIRef("http://www.w3.org/1999/xhtml/vocab#role"),
            URIRef("http://www.w3.org/1999/xhtml/vocab#button"),
        ))
        print(kg.serialize(format="turtle"))

        q_xhtml = ('SELECT * WHERE { ?s ?p ?o . FILTER (strstarts(str(?p), "' +
                   ns + '"))}')
        print(q_xhtml)

        res = kg.query(q_xhtml)
        # assertEqual, not the deprecated assertEquals alias (removed in 3.12).
        self.assertEqual(len(res), 1)

        q_del = (
            'DELETE {?s ?p ?o} WHERE { ?s ?p ?o . FILTER (strstarts(str(?p), "'
            + ns + '"))}')
        kg.update(q_del)
        print(kg.serialize(format="turtle"))

        res = kg.query(q_xhtml)
        self.assertEqual(len(res), 0)
コード例 #16
0
class Serializer(PythonSerializer):
    """
    Convert a queryset to RDF.

    Each serialized object becomes a blank node carrying its primary key
    and its selected fields as FOAF-namespaced literal properties.
    """
    # Django serializer flag: this serializer may be exposed publicly.
    internal_use_only = False

    def end_serialization(self):
        """Build self.graph from self.objects (populated by PythonSerializer)."""
        FOAF = Namespace('http://xmlns.com/foaf/0.1/')
        # NOTE(review): DC is bound but never used in this method.
        DC = Namespace('http://purl.org/dc/elements/1.1/')
        
        self.graph = ConjunctiveGraph()
        self.options.pop('stream', None)
        # A comma-separated 'fields' option limits which model fields are
        # emitted.  NOTE(review): under Python 3, filter() returns an
        # iterator, so `if not fields` below would never be true — this
        # looks like Python 2-era code; confirm the target interpreter.
        fields = filter(None, self.options.pop('fields','').split(','))
        meta = None
        subject = None
        for object in self.objects:
            if not fields:
                fields = object['fields'].keys()
            newmeta = object['model']
            if newmeta != meta:
                meta = newmeta
            # One blank node per object, keyed by model name + primary key.
            subject = BNode('%s.%s'%(FOAF[newmeta],object['pk']))
            self.graph.add((subject,FOAF['pk'],Literal(object['pk'])))
            for k in fields:
                if k:
                    self.graph.add((subject,FOAF[k],Literal(object['fields'][k])))

    def getvalue(self):
        """Return the serialized graph (mirrors Django's getvalue contract)."""
        if callable(getattr(self.graph, 'serialize', None)):
            return self.graph.serialize()
コード例 #17
0
 def discussion_as_graph(self, discussion_id):
     """Build the discussion's graph from its Virtuoso storage.

     Subjects in the conversation, the discussion node, participant
     profiles and pseudo-account URIs are gathered first; their data is
     copied via add_subject_data, then relationship quads are added.
     """
     from assembl.models import Discussion, AgentProfile
     local_uri = self.local_uri()
     discussion = Discussion.get(discussion_id)
     d_storage_name = self.discussion_storage_name()
     d_graph_iri = URIRef(self.discussion_graph_iri())
     v = get_virtuoso(self.session, d_storage_name)
     discussion_uri = URIRef(
         Discussion.uri_generic(discussion_id, local_uri))
     # Subjects declared to be in this conversation.
     subjects = [s for (s,) in v.query(
         """SELECT DISTINCT ?s WHERE {
         ?s assembl:in_conversation %s }""" % (discussion_uri.n3()))]
     subjects.append(discussion_uri)
     participant_ids = list(discussion.get_participants(True))
     profiles = {URIRef(AgentProfile.uri_generic(id, local_uri))
                 for id in participant_ids}
     subjects.extend(profiles)
     # add pseudo-accounts
     subjects.extend((URIRef("%sAgentAccount/%d" % (local_uri, id))
                      for id in participant_ids))
     # print len(subjects)
     cg = ConjunctiveGraph(identifier=d_graph_iri)
     self.add_subject_data(v, cg, subjects)
     # add relationships of non-pseudo accounts
     for ((account, p, profile), g) in v.triples((None, SIOC.account_of, None)):
         if profile in profiles:
             cg.add((account, SIOC.account_of, profile, g))
             # Tempting: simplify with this.
             # cg.add((profile, FOAF.account, account, g))
     # expressesIdea statements keep their originating graph g as context.
     for (s, o, g) in v.query(
             '''SELECT ?s ?o ?g WHERE {
             GRAPH ?g {?s catalyst:expressesIdea ?o } .
             ?o assembl:in_conversation %s }''' % (discussion_uri.n3())):
         cg.add((s, CATALYST.expressesIdea, o, g))
     return cg
コード例 #18
0
 def save_item(self, item):
     '''
     Convert the item into a graph and PUT the graph into the triple store.

     Each (key, values) metadata entry becomes one triple per value, all
     with the item's resource as subject; the serialized RDF/XML is sent
     with an HTTP PUT to /data/<resource> on this store's URL.
     '''
     # Delete the previous triples associated to that resource
     #conn = httplib.HTTPConnection(self.url)
     #conn.request("DELETE", "/data/%s" % item.get_resource())
     #conn.close()
     # Generate the new graph
     graph = ConjunctiveGraph()
     for (key, values) in item.get_metadata():
         # A metadata value may be a single term or a list of terms.
         if type(values) == type([]):
             for value in values:
                 graph.add((item.get_resource(), key, value))
         else:
             graph.add((item.get_resource(), key, values))
     # Save it
     #print graph.serialize()
     headers = {'Accept': '*/*', 'Content-Type': 'application/rdf+xml'}
     conn = httplib.HTTPConnection(self.url)
     conn.request("PUT",
                  "/data/%s" % item.get_resource(),
                  body=graph.serialize(),
                  headers=headers)
     conn.getresponse()
     conn.close()
コード例 #19
0
ファイル: util.py プロジェクト: zepheira/Library.Link
def rdf_from_site(site, rules=None):
    '''
    Build an rdflib graph from the RDFa on a Library.Link site page.

    *rules* may contain 'ignore-predicates' (predicate prefixes to drop)
    and 'rename-predicates' (exact-match renames):

    >>> from librarylink.util import rdf_from_site
    >>> g = rdf_from_site('http://link.denverlibrary.org')
    >>> s = g.serialize(format='json-ld', indent=2)
    >>> with open('denverlibrary.ld.json', 'wb') as fp: fp.write(s)

    >>> rules = {'ignore-predicates': ['http://bibfra.me/', 'http://library.link/'], 'rename-predicates': {'http://library.link/vocab/branchOf': 'http://schema.org/branch'}}
    >>> g = rdf_from_site('http://link.denverlibrary.org', rules=rules)
    >>> s = g.serialize(format='json-ld', indent=2)
    >>> with open('denverlibrary.ld.json', 'wb') as fp: fp.write(s)
    '''
    from rdflib import ConjunctiveGraph, URIRef, Literal, RDF, RDFS
    from versa.writer.rdf import mock_bnode, prep, RDF_TYPE
    # Also requires: pip install rdflib-jsonld
    rules = rules or {}
    ignore_pred = rules.get('ignore-predicates', set())
    rename_pred = rules.get('rename-predicates', {})
    model, sitetext = load_rdfa_page(site)
    if not model:
        return None
    g = ConjunctiveGraph()
    # Hoover up everything with a type.
    for origin, rel, target, attrs in model.match():
        # Apply exact-match renames first, then prefix-based ignores.
        for oldp, newp in rename_pred.items():
            if rel == oldp:
                rel = newp
        if not any(rel.startswith(prefix) for prefix in ignore_pred):
            g.add(prep(origin, rel, target))
    return g
コード例 #20
0
def loadSOGraph(
    filename=None,
    data=None,
    publicID=None,
    normalize=True,
    deslop=True,
    format="json-ld",
):
    """
    Load RDF text or a file into an RDFLib ConjunctiveGraph.

    Builds a ConjunctiveGraph from the provided file or text; when both
    are given, the text wins.

    NOTE: When ``normalize`` is True, namespace use of
    ``<http://schema.org>``, ``<https://schema.org>``, or
    ``<http://schema.org/>`` is rewritten to ``<https://schema.org/>``.

    NOTE: When ``deslop`` is True, the case of ``SO:`` properties in
    `SO_TERMS` is adjusted for consistency.

    Args:
        filename (string): path to RDF file on disk
        data (string): RDF text
        publicID (string): (from rdflib) logical URI used as the document base;
            the document location is used when None
        normalize (boolean): normalize the use of the schema.org namespace
        deslop (boolean): adjust schema.org terms for case consistency
        format (string): serialization format of the RDF to load

    Returns:
        ConjunctiveGraph: the loaded graph

    Example:

    .. jupyter-execute:: examples/code/eg_loadsograph_01.py

    """
    source = ConjunctiveGraph()
    if data is not None:
        source.parse(data=data, format=format, publicID=publicID)
    elif filename is not None:
        source.parse(filename, format=format, publicID=publicID)
    if not (normalize or deslop):
        return source
    # Rebuild the graph term by term, normalizing namespace use to
    # https://schema.org/ as we go.
    nsm = NamespaceManager(source)
    nsm.bind(SO_PREFIX, SCHEMA_ORG, override=True, replace=True)
    result = ConjunctiveGraph()
    result.namespace_manager = nsm
    for triple in source:
        terms = list(triple)
        if normalize:
            terms = [_normalizeTerm(t) for t in terms]
        if deslop:
            terms = [_desloppifyTerm(source, t) for t in terms]
        result.add(terms)
    return result
コード例 #21
0
 def as_graph(self, d_storage_name, graphs=()):
     """Collect the triples of the given contexts (all contexts when none
     are named) from Virtuoso into one ConjunctiveGraph of quads."""
     virtuoso = get_virtuoso(self.session, d_storage_name)
     if not graphs:
         graphs = virtuoso.contexts()
     combined = ConjunctiveGraph()
     for ctx in graphs:
         for ((subj, pred, obj), _g) in virtuoso.triples((None, None, None), ctx):
             combined.add((subj, pred, obj, ctx))
     return combined
コード例 #22
0
 def load_sentence(self, rdf_triples):
     """Push the given triples into the triple store, replacing its
     previous contents."""
     graph = ConjunctiveGraph()
     graph.bind("base", BASE)
     for t in rdf_triples:
         graph.add(t)
     self.soh.add_triples(graph, clear=True)
コード例 #23
0
 def as_graph(self, d_storage_name, graphs=()):
     """Return a ConjunctiveGraph holding, as quads, every triple of the
     requested contexts; with no *graphs* argument, all contexts are used."""
     store = get_virtuoso(self.session, d_storage_name)
     if not graphs:
         graphs = store.contexts()
     result = ConjunctiveGraph()
     for context in graphs:
         for (triple, _graph) in store.triples((None, None, None), context):
             s, p, o = triple
             result.add((s, p, o, context))
     return result
コード例 #24
0
ファイル: syntaxtree.py プロジェクト: anonymous-1/syntaxrules
 def load_sentence(self, rdf_triples):
     """Build a graph from *rdf_triples* and hand it to the store handler,
     clearing whatever was loaded before."""
     sentence_graph = ConjunctiveGraph()
     sentence_graph.bind("base", BASE)
     for triple in rdf_triples:
         sentence_graph.add(triple)
     self.soh.add_triples(sentence_graph, clear=True)
コード例 #25
0
ファイル: common.py プロジェクト: bredeson/pypeFLOW
    def _RDFGraph(self):
        """Build a graph linking this object's URL to the URL of every
        public attribute that itself carries a URL."""
        graph = Graph()
        for attr_name, attr_value in self.__dict__.iteritems():
            # Skip the node's own URL and private bookkeeping attributes.
            if attr_name == "URL" or attr_name[0] == "_":
                continue
            if hasattr(attr_value, "URL"):
                graph.add((URIRef(self.URL), pypeNS[attr_name], URIRef(attr_value.URL)))
        return graph
コード例 #26
0
ファイル: common.py プロジェクト: RobinQi/EnhancedFALCON
    def _RDFGraph(self):
        """Collect (self.URL, pypeNS[attr], attr.URL) triples for every
        URL-bearing, non-private attribute of this object."""
        graph = Graph()
        for key, value in self.__dict__.iteritems():
            if key == "URL":
                continue
            if key[0] == "_":
                continue
            if hasattr(value, "URL"):
                graph.add((URIRef(self.URL), pypeNS[key], URIRef(value.URL)))
        return graph
コード例 #27
0
def example_1():
    """Creates a ConjunctiveGraph and performs some BerkeleyDB tasks with it:
    open or create a persistent store, add triples, commit, close, reopen
    to show persistence, and finally remove the database files.
    """
    # NOTE(review): mktemp only reserves a name (race-prone); the example
    # relies on the path not existing so the first open() reports NO_STORE.
    path = mktemp()

    # Declare we are using a BerkeleyDB Store
    graph = ConjunctiveGraph("BerkeleyDB")

    # Open previously created store, or create it if it doesn't exist yet
    # (always doesn't exist in this example as using temp file location)
    rt = graph.open(path, create=False)

    if rt == NO_STORE:
        # There is no underlying BerkeleyDB infrastructure, so create it
        print("Creating new DB")
        graph.open(path, create=True)
    else:
        print("Using existing DB")
        assert rt == VALID_STORE, "The underlying store is corrupt"

    print("Triples in graph before add:", len(graph))
    print("(will always be 0 when using temp file for DB)")

    # Now we'll add some triples to the graph & commit the changes
    EG = Namespace("http://example.net/test/")
    graph.bind("eg", EG)

    graph.add((EG["pic:1"], EG.name, Literal("Jane & Bob")))
    graph.add((EG["pic:2"], EG.name, Literal("Squirrel in Tree")))

    graph.commit()

    print("Triples in graph after add:", len(graph))
    print("(should be 2)")

    # display the graph in Turtle
    print(graph.serialize())

    # close when done, otherwise BerkeleyDB will leak lock entries.
    graph.close()

    graph = None

    # reopen the graph to demonstrate the triples were persisted
    graph = ConjunctiveGraph("BerkeleyDB")

    graph.open(path, create=False)

    print("Triples still in graph:", len(graph))
    print("(should still be 2)")

    graph.close()

    # Clean up the temp folder to remove the BerkeleyDB database files...
    for f in os.listdir(path):
        os.unlink(path + "/" + f)
    os.rmdir(path)
コード例 #28
0
def exportRDFGraph(mi):
    """Export the model index *mi* to an rdflib ConjunctiveGraph.

    Instances without a URI (or marked blind by isBlind) become blank
    nodes; every instance is typed via its classURI and its property sets
    are emitted as URI, bnode or Literal objects as appropriate.

    :raises ExportException: for objects with unknown/missing classURI,
        properties without a propertyURI, or non-Literal values without a URI
    """
    g = ConjunctiveGraph()
    bnodes = {}
    for NSName, NSuriStr in mi.namespaceBindings.iteritems():
        g.namespace_manager.bind(NSName, URIRef(NSuriStr))

    modelAttrs = [model.__dict__[c] for c in model.__dict__.keys()]
    knownTypes = dict([(c.classURI, c) for c in modelAttrs if hasattr(c, "classURI")])
    knownInstances = dict([(i.URI, i) for i in modelAttrs if hasattr(i, "URI")])

    # Assign blind nodes: pre-create a BNode for every subject and every
    # property value that has no usable URI, keyed by its (possibly None) URI.
    for s in mi.MainIdx.values():
        if s.URI == None or isBlind(s):
            snode = BNode()
            bnodes[s.URI] = snode
        for propName, propSet in s._props.iteritems():
            for v in propSet:
                if type(v) not in propSet.Lits and isBlind(v):
                    if not bnodes.has_key(v.URI):
                        vnode = BNode()
                        bnodes[v.URI] = vnode


    for s in mi.MainIdx.values():
        if not hasattr(s, "classURI") or s.classURI not in knownTypes.keys():
            raise ExportException("Object "+str(s)+" has no classURI, or classURI is not known in the SAO model.")
            # FIXME : Maybe use a Resource ?

        # Reuse the pre-assigned BNode for blind subjects, else mint a URIRef.
        if s.URI == None or isBlind(s):
            snode = bnodes[s.URI]
        else:
            snode = URIRef(s.URI)

        g.add((snode, RDF.type, URIRef(s.classURI)))

        for propName, propSet in s._props.iteritems():
            for v in propSet:
                if not hasattr(propSet, "propertyURI"):
                    raise ExportException("Property "+str(propName)+" on object "+str(s)+" has no propertyURI !")

                # Non-literal values are linked by URI or by their BNode.
                if type(v) not in propSet.Lits and not isinstance(v, Literal):
                    if not hasattr(v, "URI"):
                        raise ExportException("Property value "+str(v)+" is not a Literal, but has no URI !")
                    if isBlind(v):
                        g.add((snode, URIRef(propSet.propertyURI), bnodes[v.URI]))
                    else:
                        g.add((snode, URIRef(propSet.propertyURI), URIRef(v.URI)))
                else:
                    if isinstance(v, Literal):
                        g.add((snode, URIRef(propSet.propertyURI), v))
                    else:
                        g.add((snode, URIRef(propSet.propertyURI), Literal(v)))

        info("Added "+str(type(s))+" @ "+str(snode))

    return g
コード例 #29
0
def load_text_file(path):
    """Parse a tab-separated triples file into a ConjunctiveGraph.

    Each line holds three fields (subject, predicate, object); every field
    is prefixed with "http://" to form the URIs.

    :param path: path to the TSV file
    :return: ConjunctiveGraph with one triple per input line
    """
    g = ConjunctiveGraph()
    # Text mode (not "rb"): the fields are split/stripped with str
    # separators, which would raise TypeError on bytes under Python 3.
    with open(path, "r", encoding="utf-8") as file:
        for line in file:
            s, p, o = (field.strip() for field in line.split("\t"))
            g.add((URIRef("http://" + s), URIRef("http://" + p), URIRef("http://" + o)))
    return g
コード例 #30
0
ファイル: __init__.py プロジェクト: ChunHungLiu/watchdog-1
def _mangled_copy(g):
    "Makes a copy of the graph, replacing all bnodes with the bnode ``_blank``."
    mangled = ConjunctiveGraph()
    for triple in g:
        mangled.add(tuple(_blank if isinstance(term, BNode) else term
                          for term in triple))
    return mangled
コード例 #31
0
 def to_rdf(self, edge_info, edge):
     """Return an in-memory graph containing the domain resource, its
     ontology class, and the range tile data as a string literal."""
     graph = Graph()
     if edge_info['range_tile_data'] is not None:
         domain = edge_info['d_uri']
         graph.add((domain, RDF.type, URIRef(edge.domainnode.ontologyclass)))
         graph.add((domain, URIRef(edge.ontologyproperty),
                    Literal(edge_info['range_tile_data'])))
     return graph
コード例 #32
0
ファイル: __init__.py プロジェクト: AuroraSkywalker/watchdog
def _mangled_copy(g):
    "Makes a copy of the graph, replacing all bnodes with the bnode ``_blank``."
    clone = ConjunctiveGraph()
    for s, p, o in g:
        clone.add((_blank if isinstance(s, BNode) else s,
                   _blank if isinstance(p, BNode) else p,
                   _blank if isinstance(o, BNode) else o))
    return clone
コード例 #33
0
 def prepare_graph(self, data, many, **kwargs):
     """Create a ConjunctiveGraph with this schema's bindings and fill it
     with *data* — either one triple sequence, or (when *many*) several."""
     graph = ConjunctiveGraph()
     self.add_bindings(graph=graph)
     groups = data if many else [data]
     for group in groups:
         for triple in group:
             graph.add(triple)
     return graph
コード例 #34
0
ファイル: simulation.py プロジェクト: NetherNova/grakelasso
def generate_process(id, num_classes):
    """
    Provides the kind of process to simulate
    :return:
    """
    g = ConjunctiveGraph()
    g.add((process_uri, RDF.type, process_uri))
    g.add((process_uri, ID, Literal(id)))
    label = np.zeros(num_classes)
    # label { 0 = generic qa failure, 1 = welding-temp anomaly,
        # 2 = product-welding qa fail, 3 = special part scratched qa fail,
        # 4 = special part shaky qa fail, 5 = frame fitting qa fail }
        # correlation between 1-2-5 and 3-4
    # correlation between assigned equipment, product type, and failure (label)
    if np.random.random() < 0.5:
        process(g, type="normal", stage=0)
    elif np.random.random() < 0.2:    # 10 percent cases a generic failure
        label[0] = 1
        process(g, type="generic", stage=0)
    else:
        stage = 0
        if np.random.random() < 0.7:
            if np.random.random() < 0.5:
                stage = 1
                label[1] = 1
                label[2] = 1
            elif np.random.random() < 0.5:
                stage = 2
                label[2] = 1
                label[5] = 1
            elif np.random.random() < 0.2:
                stage = 3
                label[1] = 1
                label[5] = 1
            else:
                stage = 4
                label[1] = 1
                label[2] = 1
                label[5] = 1
            process(g, type="welding", stage=stage)
        else:
            stage = 0
            if np.random.random() < 0.5:
                label[3] = 1
                stage = 1
            elif np.random.random() < 0.5:
                label[4] = 1
                stage = 2
            else:
                label[3] = 1
                label[4] = 1
                stage = 3
            process(g, type="special", stage=stage)
    return g, label
コード例 #35
0
ファイル: graphs.py プロジェクト: Sunnepah/oort.python
def replace_uri(graph, old, new, predicates=False):
    """Return a copy of *graph* with *old* URIs rewritten to *new*.

    Subjects are always rewritten, predicates only when *predicates* is
    True, and objects only when they are URIRefs (literals pass through
    untouched). Namespace bindings are carried over to the copy.
    """
    result = ConjunctiveGraph()
    for prefix, namespace in graph.namespace_manager.namespaces():
        result.namespace_manager.bind(prefix, namespace)
    for subj, pred, obj in graph:
        subj = _change_uri(subj, old, new)
        pred = _change_uri(pred, old, new) if predicates else pred
        obj = _change_uri(obj, old, new) if isinstance(obj, URIRef) else obj
        result.add((subj, pred, obj))
    return result
コード例 #36
0
ファイル: graphs.py プロジェクト: niklasl/oort.python
def replace_uri(graph, old, new, predicates=False):
    """Build a new graph in which occurrences of *old* become *new*.

    Rewrites every subject, optionally every predicate, and every URIRef
    object via _change_uri; literals are left as-is. Prefix bindings of
    the source graph are preserved on the returned graph.
    """
    rewritten = ConjunctiveGraph()
    nm = rewritten.namespace_manager
    for pfx, uri in graph.namespace_manager.namespaces():
        nm.bind(pfx, uri)
    for s, p, o in graph:
        new_s = _change_uri(s, old, new)
        new_p = _change_uri(p, old, new) if predicates else p
        if isinstance(o, URIRef):
            new_o = _change_uri(o, old, new)
        else:
            new_o = o
        rewritten.add((new_s, new_p, new_o))
    return rewritten
コード例 #37
0
ファイル: query.py プロジェクト: t00m/Vazaar
    def create_temporal_graph(self, resources):
        """Collect all triples whose subject is one of *resources*.

        Returns a fresh ConjunctiveGraph; when *resources* is empty or
        falsy the returned graph is empty and no namespaces are bound.
        """
        temporal = ConjunctiveGraph()
        if resources:
            for uri in resources:
                for triple in self.graph.triples((URIRef(uri), None, None)):
                    temporal.add(triple)

            # Carry the well-known namespace prefixes over to the result.
            for prefix in NSBINDINGS:
                temporal.bind(prefix, NSBINDINGS[prefix])

        return temporal
コード例 #38
0
def ConvertToSQLLITE (filename,destinationFileName):
    """
    Copy an N-Triples file into an rdflib SQLite-backed store.

    :param filename: path to the source file in N-Triples format
    :param destinationFileName: path of the SQLite database to create
    """
    _graph = ConjunctiveGraph()
    _graph.parse(filename, format="nt")

    sql = ConjunctiveGraph('SQLite')
    sql.open(destinationFileName, create=True)

    # Ensure the store is always closed, even if adding or committing
    # fails part-way through (the original leaked the store on error).
    try:
        for t in _graph.triples((None,None,None)):
            sql.add(t)
        sql.commit()
    finally:
        sql.close()
コード例 #39
0
    def prepare_graph(self, data, many, **kwargs):
        """Build a ConjunctiveGraph (with bindings) from triple data.

        When *many* is true, *data* is a sequence of triple collections
        that gets flattened; otherwise it is a single triple collection.
        """
        graph = ConjunctiveGraph()
        self.add_bindings(graph=graph)

        if many:
            # Flatten the per-item triple collections into one list.
            triples = [t for chunk in data for t in chunk]
        else:
            triples = data

        for triple in triples:
            graph.add(triple)
        return graph
コード例 #40
0
ファイル: crawler.py プロジェクト: shirshen12/collection
def table2RDFNTriplesConverter(logFile, predList):
  """
  Convert a comma-separated crawl log into an N-Triples file.

  :param logFile: path to the log file; column 0 of each record is a URL,
      the remaining columns are data items
  :param predList: predicate names; predList[p] labels column p
  :return: path of the generated .nt file (BASECONTENTDIR + sitename + '.nt')
  """

  global sitename

  # Define a namespace, this is constant for all data in our KB/DB

  KAST = Namespace('http://www.kast.com/data/')

  # A ConjunctiveGraph helps us trivially merge various sub components.

  g = ConjunctiveGraph()

  # Now read the log file

  f = file(logFile, 'r')
  records = f.readlines()
  f.close()

  # Now loop through all the records and also through predicate list

  for record in records:

    # guid is the only unique primary key in our entire database:
    # the id namespace plus a hash of the record's URL.

    guid = 'http://www.kast.com/data/id/'
    guid = guid + KAST[record.split(',')[0]].md5_term_hash()

    for p in range(1, len(predList)):

      # Column p maps to predicate predList[p].

      pred = KAST[predList[p]]
      dataItem = record.split(',')[p]
      obj = KAST[dataItem]

      # Now add the triples in the defined Conjunctive Graph.
      # BUG FIX: the literal must be the third element of the triple
      # tuple; previously it was passed as a stray second argument to
      # add(), which raises a TypeError at runtime.

      g.add((guid, pred, obj))
      g.add((obj, KAST['hasvalue'], rdflib.Literal(dataItem)))

  # Now after adding all the triples, serialize it to N-Triples format.

  o = file(BASECONTENTDIR + sitename + '.nt', 'w')
  o.write(g.serialize(format="nt"))
  o.close()

  return BASECONTENTDIR + sitename + '.nt'
コード例 #41
0
    def test_base_url(self):
        """Exercise @base handling during parsing and serialize(base=...).

        Regression scenario for https://github.com/RDFLib/rdflib/issues/1003.
        Fixes the ``selfself`` typo in the original method signature
        (backward-compatible: the first positional parameter is still the
        bound instance).
        """
        # Turtle document WITH an explicit @base directive.
        rdf_triples_base = """
        @prefix category: <http://example.org/> .
        @prefix dct: <http://purl.org/dc/terms/> .
        @prefix skos: <http://www.w3.org/2004/02/skos/core#> .
        @base <http://example.org/> .

        <> a skos:ConceptScheme ;
            dct:creator <https://creator.com> ;
            dct:description "Test Description"@en ;
            dct:source <nick> ;
            dct:title "Title"@en .
        """
        kg = ConjunctiveGraph()
        kg.parse(data=rdf_triples_base, format="turtle")
        print(kg.serialize(format="turtle"))

        # Same document WITHOUT a @base directive; relative IRIs must
        # still resolve (against the default public ID).
        rdf_triples_NO_base = """
        @prefix category: <http://example.org/> .
        @prefix dct: <http://purl.org/dc/terms/> .
        @prefix skos: <http://www.w3.org/2004/02/skos/core#> .

        <> a skos:ConceptScheme ;
            dct:creator <https://creator.com> ;
            dct:description "Test Description"@en ;
            dct:source <nick> ;
            dct:title "Title"@en .
        """
        kg = ConjunctiveGraph()
        kg.parse(data=rdf_triples_NO_base, format="turtle")
        print(kg.serialize(format="turtle"))

        # Graph built from scratch, serialized with an explicit base.
        kg2 = ConjunctiveGraph()
        kg2.add(
            (
                URIRef("http://fair-checker/example/qs"),
                URIRef("http://value"),
                Literal("2"),
            )
        )
        print(kg2.serialize(format="turtle", base="http://fair-checker/example/"))

        # Merge of a parsed graph and a hand-built graph, again with base.
        kg3 = ConjunctiveGraph()
        kg3.parse(
            data="@base <http://example.org/> . <> a <http://example.org/Class> .",
            format="turtle",
        )
        kg3 = kg3 + kg2
        print(kg3.serialize(format="turtle", base="http://fair-checker/example/"))
コード例 #42
0
def add_ref_vocab(vocabprefix, source_uri):
    """
    Record that a local vocabulary is a version of an external source.

    Parses the existing reference file (if any), adds a
    dcterms:isVersionOf triple for the vocabulary, and rewrites the file.

    :param vocabprefix: short name of the local vocabulary
    :param source_uri: URI of the vocabulary it is derived from
    :return: True on success
    """
    vocab_uri = URIRef("http://vocab.ox.ac.uk/%s"%vocabprefix)
    graph = Graph()
    if os.path.isfile(ag.vocabulariesref):
        graph.parse(ag.vocabulariesref)
    for prefix, url in namespaces.iteritems():
        graph.bind(prefix, URIRef(url))
    graph.add((URIRef(vocab_uri), namespaces['dcterms']['isVersionOf'], URIRef(source_uri)))
    # (Removed the dead ``rdf_str = None`` assignment that was
    # immediately overwritten.)
    rdf_str = graph.serialize()
    # Close the file even if the write fails.
    f = codecs.open(ag.vocabulariesref, 'w', 'utf-8')
    try:
        f.write(rdf_str)
    finally:
        f.close()
    return True
コード例 #43
0
ファイル: publish.py プロジェクト: oeg-upm/agora-wot
def skolemize(g):
    """Return a copy of *g* with every blank node replaced by a skolem URI.

    Each distinct BNode maps to one stable URIRef under SKOLEM_BASE, so
    co-referring blank nodes stay co-referring in the result.
    """
    mapping = {}

    def to_skolem(node):
        # Reuse a prior mapping so identical BNodes get identical URIs.
        if node not in mapping:
            mapping[node] = URIRef('/'.join([SKOLEM_BASE, str(node)]))
        return mapping[node]

    result = ConjunctiveGraph()
    for s, p, o in g:
        if isinstance(s, BNode):
            s = to_skolem(s)
        if isinstance(o, BNode):
            o = to_skolem(o)
        result.add((s, p, o))
    return result
コード例 #44
0
ファイル: 04_oci.py プロジェクト: miku/index-oci
    def test_citation_data_ttl(self):
        """Graph built from the citations must match the on-disk data file."""
        expected = ConjunctiveGraph()
        expected.load(self.citation_data_ttl_path, format="nt11")

        actual = ConjunctiveGraph()
        citations = (self.citation_1, self.citation_2, self.citation_3,
                     self.citation_4, self.citation_5, self.citation_6)
        for citation in citations:
            for triple in citation.get_citation_rdf(self.base_url, False,
                                                    False, False):
                actual.add(triple)

        self.assertTrue(isomorphic(expected, actual))
コード例 #45
0
ファイル: 04_oci.py プロジェクト: miku/index-oci
    def test_citation_prov_ttl(self):
        """Provenance quads from the citations must match the nquads file."""
        expected = ConjunctiveGraph()
        expected.load(self.citation_prov_ttl_path, format="nquads")

        actual = ConjunctiveGraph()
        citations = (self.citation_1, self.citation_2, self.citation_3,
                     self.citation_4, self.citation_5, self.citation_6)
        for citation in citations:
            prov = citation.get_citation_prov_rdf(self.base_url)
            for quad in prov.quads((None, None, None, None)):
                actual.add(quad)

        self.assertTrue(isomorphic(expected, actual))
コード例 #46
0
ファイル: models.py プロジェクト: delving/nave
 def _populate_graph(self):
     """Build and return the RDF graph describing this document.

     Mapping keys from get_graph_mapping() may be URIRefs, full
     "http://..." strings, or "prefix:label" strings resolved via
     self.ns_dict; one triple is added per truthy value.

     :raises ValueError: for a mapping key in none of the known forms
     :return: the populated ConjunctiveGraph
     """
     graph = ConjunctiveGraph(identifier=self._generate_namedgraph_uri())
     graph.namespace_manager = namespace_manager
     subject = URIRef(self._get_document_uri())
     graph.add((subject, RDFS.isDefinedBy, URIRef(self._generate_about_uri())))
     self._add_about_triples(graph)
     graph.add((subject, RDF.type, self.ns[self.get_rdf_type()]))
     if self.source_uri and self.source_uri != self.document_uri:
         graph.add((subject, OWL.sameAs, URIRef(self.source_uri)))
     for key, value in self.get_graph_mapping().items():
         # Resolve the mapping key to a predicate URIRef.
         if isinstance(key, URIRef):
             predicate = key
         elif isinstance(key, str) and key.startswith('http://'):
             predicate = URIRef(key)
         elif isinstance(key, str) and ":" in key:
             ns, label = key.split(":")
             ns = self.ns_dict.get(ns)
             predicate = URIRef("{}/{}".format(str(ns).rstrip('/'), label))
         else:
             # Fixed: the original format string had only one placeholder
             # for two arguments, so the value was silently dropped.
             raise ValueError("unknown predicate key in mapping dict: {} => {}".format(key, value))
         if type(value) in [str, float, int] and value:
             if isinstance(value, str) and any([value.startswith(uri_prefix) for uri_prefix in ["http", "urn"]]):
                 value = URIRef(value)
             else:
                 value = Literal(value)
         elif type(value) in [Literal, URIRef]:
             value = value
         else:
             # NOTE(review): falsy plain values (0, "") also fall through to
             # here and get warned about -- confirm that is acceptable.
             # logger.warn is a deprecated alias; use warning().
             logger.warning("Unsupported datatype {} for value {}".format(type(value), value))
         if value:
             graph.add((subject, predicate, value))
     graph.namespace_manager = namespace_manager
     return graph
コード例 #47
0
ファイル: link_v1.py プロジェクト: Data2Semantics/raw2ld
def addMatchesToGraph(matches):
    g = ConjunctiveGraph()
    g.bind("aers", "http://aers.data2semantics.org/resource/")
    g.bind("dbpedia", "http://dbpedia.org/resource/")
    g.bind("owl", "http://www.w3.org/2002/07/owl#")
    g.bind("sider", "http://www4.wiwiss.fu-berlin.de/sider/resource/sider/")
    g.bind("skos","http://www.w3.org/2004/02/skos/core#")
    print "Adding to graph..."
    for m in matches :
        for subj in m :
            for obj in m :
                if subj != obj :
                    g.add((URIRef(subj),SKOS['exactMatch'],URIRef(obj)))
    print "... done"
    return g
コード例 #48
0
ファイル: rdfns.py プロジェクト: niklasl/vim-rdf
class GraphCache(object):
    """Cache of vocabulary graphs: one aggregate ConjunctiveGraph holding a
    context per loaded URL, plus an on-disk Turtle copy per URL under
    ``cachedir``."""

    def __init__(self, cachedir):
        # Aggregate graph; each loaded vocabulary lives in its own context.
        self.graph = ConjunctiveGraph()
        # url -> st_mtime at last parse, so unchanged local files are skipped.
        self.mtime_map = {}
        self.cachedir = cachedir
        if not os.path.isdir(cachedir):
            os.makedirs(cachedir)

    def load(self, url):
        """Return the graph for *url*, parsing or fetching only when needed.

        Lookup order: re-parse a changed local file; reuse an already
        loaded context; parse the on-disk cached copy; finally fetch the
        remote source and write a local Turtle cache of it.
        """
        # Allow overriding where a vocabulary is actually fetched from.
        src = VOCAB_SOURCE_MAP.get(str(url), url)
        if os.path.isfile(url):
            context_id = create_input_source(url).getPublicId()
            last_vocab_mtime = self.mtime_map.get(url)
            vocab_mtime = os.stat(url).st_mtime
            # Only re-parse when the file changed since the last load.
            if not last_vocab_mtime or last_vocab_mtime < vocab_mtime:
                logger.debug("Parse file: '%s'", url)
                self.mtime_map[url] = vocab_mtime
                # use CG as workaround for json-ld always loading as dataset
                graph = ConjunctiveGraph()
                graph.parse(src, format=guess_format(src))
                # Replace the old context with the freshly parsed triples.
                self.graph.remove_context(context_id)
                for s, p, o in graph:
                    self.graph.add((s, p, o, context_id))
                return graph
        else:
            context_id = url

        # Already loaded? Serve the existing context.
        if any(self.graph.triples((None, None, None), context=context_id)):
            logger.debug("Using context <%s>" % context_id)
            return self.graph.get_context(context_id)

        cache_path = self.get_fs_path(url)
        if os.path.exists(cache_path):
            logger.debug("Load local copy of <%s> from '%s'", context_id, cache_path)
            return self.graph.parse(cache_path, format='turtle', publicID=context_id)
        else:
            logger.debug("Fetching <%s> to '%s'", context_id, cache_path)
            # Guess RDFa for HTML pages, otherwise let rdflib choose.
            graph = self.graph.parse(src,
                    format='rdfa' if url.endswith('html') else None)
            with open(cache_path, 'w') as f:
                graph.serialize(f, format='turtle')
            return graph

    def get_fs_path(self, url):
        """Filesystem path of the cached Turtle copy for *url*."""
        return os.path.join(self.cachedir, quote(url, safe="")) + '.ttl'
コード例 #49
0
ファイル: manifest.py プロジェクト: benosteen/RDFobject
 def get_graph(self):
     """Merge all item graphs into one ConjunctiveGraph.

     Prefix bindings are taken from the first item plus the global
     helper; each item is marked unaltered after merging. Returns an
     empty string when there is nothing to merge (legacy behaviour kept
     for callers that test the result's truthiness).
     """
     if not (self.items and self.items_rdfobjects):
         return ""
     merged = ConjunctiveGraph()
     # Bindings from the first item's graph...
     first_ns = self.items_rdfobjects[self.items[0]].namespaces
     for prefix in first_ns:
         merged.bind(prefix, first_ns[prefix])
     # ...plus the global prefix bindings.
     for prefix in self.uh.namespaces:
         merged.bind(prefix, self.uh.namespaces[prefix])

     for item in self.items_rdfobjects:
         rdfobj = self.items_rdfobjects[item]
         for triple in rdfobj.list_triples():
             merged.add(triple)
         rdfobj.altered = False
     return merged
コード例 #50
0
ファイル: test_finalnewline.py プロジェクト: RDFLib/rdflib
def testFinalNewline():
    """
    http://code.google.com/p/rdflib/issues/detail?id=5
    """
    import sys

    graph = ConjunctiveGraph()
    triple = (URIRef("http://ex.org/a"),
              URIRef("http://ex.org/b"),
              URIRef("http://ex.org/c"))
    graph.add(triple)

    failed = set()
    newline = b("\n")
    for plugin in rdflib.plugin.plugins(None, rdflib.plugin.Serializer):
        serialized = graph.serialize(format=plugin.name)
        # Every serializer must emit exactly one trailing newline.
        if newline not in serialized or serialized.split(newline)[-1] != b(''):
            failed.add(plugin.name)
    assert len(failed) == 0, "No final newline for formats: '%s'" % failed
コード例 #51
0
ファイル: rdfobject.py プロジェクト: benosteen/RDFobject
 def get_graph(self):
     """Materialize this object's triples as a ConjunctiveGraph.

     :raises URINotSetException: if no subject URI has been set yet
     :return: graph identified by self.uri with local and global
         namespace bindings applied
     """
     if not self.uri:
         raise URINotSetException()
     g = ConjunctiveGraph(identifier=self.uri)

     for ns in self.namespaces:
         g.bind(ns, self.namespaces[ns])
     # Add global prefix bindings as well.
     for ns in self.urihelper.namespaces:
         g.bind(ns, self.urihelper.namespaces[ns])

     # Add rdf:type statement(s). Renamed the loop variable: the
     # original shadowed the builtin ``type``.
     for rdf_type in self.types:
         g.add((self.uri, self.namespaces['rdf']['type'], rdf_type))

     for triple in self.triples:
         g.add((self.uri, triple[0], triple[1]))
     return g
コード例 #52
0
    def create_match_graph(self, graph):
        """For optimization, creating a sparse graph of "best matching"
        for all resources and querying only that at runtime.

        Each resource whose type is a handled type (or any transitive
        rdfs:subClassOf descendant of one) is tagged in the sparse graph
        with that handled type.
        """
        typeAspects = self.typeAspects

        def list_with_sub_types(rdfType, seen=None):
            # Yield rdfType plus all transitive subtypes that are not
            # themselves handled types. ``seen`` guards against infinite
            # recursion on cyclic rdfs:subClassOf data (the original
            # recursed forever on such input).
            if seen is None:
                seen = set()
            if rdfType in seen:
                return
            seen.add(rdfType)
            yield rdfType
            for subType in graph.subjects(RDFS.subClassOf, rdfType):
                if subType not in typeAspects:
                    for subSub in list_with_sub_types(subType, seen):
                        yield subSub

        sparseGraph = ConjunctiveGraph()
        for handledType in typeAspects:
            for anySub in list_with_sub_types(handledType):
                for resource in graph.subjects(RDF.type, anySub):
                    sparseGraph.add((resource, RDF.type, handledType))

        return sparseGraph
コード例 #53
0
ファイル: dyncommands.py プロジェクト: drewp/magma
    def makeFactGraph(self):
        """Assemble the fact graph from config.n3 plus live status graphs.

        Stores the merged graph on self.factGraph and returns (via
        returnValue) a (fileParsing, httpReading) pair of elapsed seconds
        for the two phases.
        NOTE(review): this method yields and calls returnValue, so it is
        presumably decorated with @inlineCallbacks where it is defined --
        the decorator is outside this excerpt; confirm.
        """
        fileParsing = httpReading = 0
        g = ConjunctiveGraph()
        # Phase 1: parse the static config file, timing just the parse.
        fileParsing -= time.time()
        g.parse("config.n3", format="n3")
        fileParsing += time.time()
        # Mark the requesting user in the fact graph.
        g.add((self.user, RDF.type, CL.CurrentUser))

        # Phase 2: fetch live status graphs over HTTP, in parallel.
        httpReading -= time.time()

        @inlineCallbacks
        def addData(source):
            # Fetch one TriG document and merge its quads into g.
            # Parse failures are logged and swallowed so a single bad
            # source does not abort the whole gather.
            try:
                trig = (yield fetch(source)).body
            except AttributeError:
                print vars()
                raise
            try:
                g.addN(parseTrig(trig))
            except Exception:
                import traceback
                print "fetching %s:" % source
                traceback.print_exc()

        yield DeferredList(map(addData,
            # compare to reasoning and reasoning/input/startup.n3
            ["http://bang:9072/bang/processStatus",
             #"http://bang:9055/graph", # heater, etc
             #"http://bang:9069/graph", # door/arduino inputs
             "http://bang:9070/graph", # wifi
             "http://bang:9075/graph", # env
             "http://slash:9080/graph", # frontdoor
             #"http://dash:9107/graph", # xidle
             #"http://dash:9095/graph", # dpms
             #"http://bang:9095/graph", # dpms
             #"http://star:9095/graph", # dpms
             "http://slash:9095/graph", # dpms

             ]))

        httpReading += time.time()
        self.factGraph = g
        returnValue((fileParsing, httpReading))