예제 #1
0
class TestSparqlASK(unittest.TestCase):
    """Exercise SPARQL ASK queries against a tiny in-memory graph."""

    def setUp(self):
        # Minimal fixture: one class and one instance of it.
        n3_source = StringIO("""
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix : <http://goonmill.org/2007/skill.n3#> .

:Foo a rdfs:Class .

:bar a :Foo .
""")
        self.graph = Graph()
        self.graph.load(n3_source, format='n3')

        # Remember the global compliance flag, then disable it for the
        # duration of the test; tearDown restores it.
        self.compliance_setting = algebra.DAWG_DATASET_COMPLIANCE
        algebra.DAWG_DATASET_COMPLIANCE = False

    def tearDown(self):
        # Restore the flag saved in setUp.
        algebra.DAWG_DATASET_COMPLIANCE = self.compliance_setting

    def test_ask_true(self):
        """
        Ask for a triple that exists, assert that the response is True.
        """
        res = self.graph.query('ASK { <http://goonmill.org/2007/skill.n3#bar> a <http://goonmill.org/2007/skill.n3#Foo> } ')
        self.assertEquals(res.askAnswer, True, "The answer should have been that the triple was found")

    def test_ask_false(self):
        """
        Ask for a triple that does not exist, assert that the response is False.
        """
        res = self.graph.query('ASK { <http://goonmill.org/2007/skill.n3#baz> a <http://goonmill.org/2007/skill.n3#Foo> } ')
        self.assertEquals(res.askAnswer, False, "The answer should have been that the triple was not found")
예제 #2
0
class GraphAggregates2(unittest.TestCase):
    """Aggregate SPARQL over three named graphs (plus the RDFS vocabulary)
    held in a single IOMemory store and queried via a ConjunctiveGraph.
    """

    # presumably flags this test as a known failure for the runner — confirm
    known_issue = True
    sparql = True

    def setUp(self):
        # One shared store; each Graph below is a named graph inside it.
        memStore = plugin.get('IOMemory',Store)()
        self.graph1 = Graph(memStore,URIRef("http://example.com/graph1"))
        self.graph2 = Graph(memStore,URIRef("http://example.com/graph2"))
        self.graph3 = Graph(memStore,URIRef("http://example.com/graph3"))
    
        for n3Str,graph in [(testGraph1N3,self.graph1),
                            (testGraph2N3,self.graph2),
                            (testGraph3N3,self.graph3)]:
            graph.parse(StringIO(n3Str),format='n3')
    
        # graph4 holds the RDFS vocabulary, fetched from its namespace URI.
        self.graph4 = Graph(memStore,RDFS)
        self.graph4.parse(RDFS.uri)
        self.G = ConjunctiveGraph(memStore)

    def testAggregateSPARQL(self):    
        print sparqlQ
        rt =  self.G.query(sparqlQ)
        assert len(rt) > 1
        #print rt.serialize(format='xml')
        LOG_NS = Namespace(u'http://www.w3.org/2000/10/swap/log#')
        # Re-query, restricted to graph3 via an initial ?graph binding.
        rt=self.G.query(sparqlQ2,initBindings={u'?graph' : URIRef("http://example.com/graph3")})
        #print rt.serialize(format='json')
        assert rt.serialize('python')[0] == LOG_NS.N3Document,repr(list(rt.serialize('python')))
예제 #3
0
class TestIssue06(unittest.TestCase):
    """Regression test for issue 6: FILTERs inside a UNION plus ORDER BY
    must at least execute without error."""

    debug = False
    sparql = True

    def setUp(self):
        # Parse the module-level fixture under a fixed public ID.
        self.graph = ConjunctiveGraph()
        self.graph.parse(data=testgraph, publicID="testgraph")

    def test_issue_6(self):
        sparql_query = """
        PREFIX ex: <http://temp.example.org/terms/>
        PREFIX loc: <http://simile.mit.edu/2005/05/ontologies/location#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>

        SELECT *
        WHERE {
            {?event ex:date ?date .
            FILTER (xsd:date(?date) >= xsd:date("2007-12-31") && xsd:date(?date) <= xsd:date("2008-01-11"))}

            UNION

            {?event ex:starts ?start; ex:finishes ?end .
             FILTER (xsd:date(?start) >= xsd:date("2008-01-02") && xsd:date(?end) <= xsd:date("2008-01-10"))}
        }
        ORDER BY ?event
        """
        # Only checks that the query runs; no assertion on the rows.
        self.graph.query(sparql_query, DEBUG=False)
class GraphAggregates2(unittest.TestCase):
    """Aggregate SPARQL over named graphs in a SQLAlchemy (sqlite) store.

    The assertions are currently skipped (see testAggregateSPARQL).
    """
    # known_issue = True

    def setUp(self):
        memStore = plugin.get("SQLAlchemy", Store)(identifier="rdflib_test", configuration=Literal("sqlite://"))
        self.graph1 = Graph(memStore, URIRef("http://example.com/graph1"))
        self.graph2 = Graph(memStore, URIRef("http://example.com/graph2"))
        self.graph3 = Graph(memStore, URIRef("http://example.com/graph3"))

        for n3Str, graph in [(testGraph1N3, self.graph1), (testGraph2N3, self.graph2), (testGraph3N3, self.graph3)]:
            graph.parse(StringIO(n3Str), format="n3")
        self.graph4 = Graph(memStore, RDFS.uri)
        self.graph4.parse(RDFS.uri)
        self.G = ConjunctiveGraph(memStore)

    def testAggregateSPARQL(self):
        # Skipped up front: everything below is intentionally unreachable
        # until the SELECT-from-NAMED issue is resolved.
        raise SkipTest("known_issue with SELECT from NAMED")
        rt = self.G.query(sparqlQ)
        assert len(rt) > 1
        rt = self.G.query(sparqlQ2, initBindings={u"?graph": URIRef(u"http://example.com/graph3")})
        try:
            import json

            assert json
        except ImportError:
            import simplejson as json
        res = json.loads(rt.serialize(format="json").decode("utf-8"))
        assert len(res["results"]["bindings"]) == 20, len(res["results"]["bindings"])
예제 #5
0
class GraphAggregates2(unittest.TestCase):
    """Aggregate SPARQL over named graphs in a SQLAlchemy (sqlite) store.

    The assertions are currently skipped (see testAggregateSPARQL).
    """
    # known_issue = True

    def setUp(self):
        memStore = plugin.get('SQLAlchemy',
                              Store)(identifier="rdflib_test",
                                     configuration=Literal("sqlite://"))
        self.graph1 = Graph(memStore, URIRef("http://example.com/graph1"))
        self.graph2 = Graph(memStore, URIRef("http://example.com/graph2"))
        self.graph3 = Graph(memStore, URIRef("http://example.com/graph3"))

        for n3Str, graph in [(testGraph1N3, self.graph1),
                             (testGraph2N3, self.graph2),
                             (testGraph3N3, self.graph3)]:
            graph.parse(StringIO(n3Str), format='n3')

        self.graph4 = Graph(memStore, RDFS.uri)
        self.graph4.parse(RDFS.uri)
        self.G = ConjunctiveGraph(memStore)

    def testAggregateSPARQL(self):
        # Skipped up front: the code below is intentionally unreachable
        # until the SELECT-from-NAMED issue is resolved.
        raise SkipTest("known_issue with SELECT from NAMED")
        rt = self.G.query(sparqlQ)
        assert len(rt) > 1
        rt = self.G.query(
            sparqlQ2,
            initBindings={u'?graph': URIRef(u"http://example.com/graph3")})
        try:
            import json
            assert json
        except ImportError:
            import simplejson as json
        res = json.loads(rt.serialize(format='json').decode('utf-8'))
        assert len(res['results']['bindings']) == 20, len(
            res['results']['bindings'])
class GraphAggregates2(unittest.TestCase):
    """Aggregate SPARQL over three named graphs (plus the RDFS vocabulary)
    in a SQLAlchemy-backed sqlite store, queried via a ConjunctiveGraph.
    """

    def setUp(self):
        memStore = plugin.get('SQLAlchemy', Store)(
            identifier="rdflib_test", configuration=Literal("sqlite://"))
        self.graph1 = Graph(memStore, URIRef("http://example.com/graph1"))
        self.graph2 = Graph(memStore, URIRef("http://example.com/graph2"))
        self.graph3 = Graph(memStore, URIRef("http://example.com/graph3"))

        for n3Str,graph in [(testGraph1N3, self.graph1),
                            (testGraph2N3, self.graph2),
                            (testGraph3N3, self.graph3)]:
            graph.parse(StringIO(n3Str), format='n3')

        self.graph4 = Graph(memStore, RDFS.uri)
        self.graph4.parse(RDFS.uri)
        self.G = ConjunctiveGraph(memStore)

    def testAggregateSPARQL(self):
        rt =  self.G.query(sparqlQ)
        assert len(rt) > 1
        # Restrict the second query to graph3 via an initial ?graph binding.
        rt = self.G.query(sparqlQ2, initBindings={u'?graph' : URIRef(u"http://example.com/graph3")})
        try:
            import json
        except ImportError:
            import simplejson as json
        # serialize() may return bytes; decode before json.loads so json
        # libraries that reject bytes input still work (matches the other
        # variants of this test).
        res = json.loads(rt.serialize(format='json').decode('utf-8'))
        assert len(res['results']['bindings']) == 20, len(res['results']['bindings'])
예제 #7
0
class Query(unittest.TestCase):
    """Five fixture queries with known solution counts."""

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.parse(StringIO(test_data), format="n3")

    def _count(self, query):
        # Helper: run *query* and return its number of solutions.
        return len(list(self.graph.query(query)))

    def test1(self):
        self.assertEqual(self._count(test_query1), 1)

    def test2(self):
        self.assertEqual(self._count(test_query2), 1)

    def test3(self):
        self.assertEqual(self._count(test_query3), 1)

    def test4(self):
        self.assertEqual(self._count(test_query4), 1)

    def test5(self):
        # This query is expected to produce no solutions at all.
        self.assertEqual(self._count(test_query5), 0)
예제 #8
0
 def testLimit2(self):
     """Expect exactly one (title, price) row from test_query2."""
     graph = ConjunctiveGraph(plugin.get("IOMemory", Store)())
     graph.parse(StringIO(test_data2), format="n3")
     results = list(graph.query(test_query2, DEBUG=True))
     # NOTE(review): this print re-runs the query a second time purely for
     # the debug dump.
     print graph.query(test_query2).serialize(format="xml")
     self.failUnless(len(results) == 1)
     for title, price in results:
         # Only the title is checked; price is unpacked but unused.
         self.failUnless(title in [Literal("Java Tutorial"), Literal("COBOL Tutorial")])
예제 #9
0
 def testLimit2(self):
     """Expect exactly one (title, price) row from test_query2."""
     graph = ConjunctiveGraph(plugin.get('IOMemory', Store)())
     graph.parse(StringIO(test_data2), format="n3")
     results = list(graph.query(test_query2, DEBUG=True))
     # NOTE(review): this print re-runs the query a second time purely for
     # the debug dump.
     print graph.query(test_query2).serialize(format='xml')
     self.failUnless(len(results) == 1)
     for title, price in results:
         # Only the title is checked; price is unpacked but unused.
         self.failUnless(
             title in [Literal("Java Tutorial"),
                       Literal("COBOL Tutorial")])
class TestSparqlJsonResults(unittest.TestCase):
    """Drive the JSON result serialiser through several query shapes.

    The concrete test methods are generated by the module-level
    make_method() factory, one per named query fixture.
    """
    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.parse(StringIO(test_data), format="n3")

    def _query_result_contains(self, query, correct):
        """Run *query* and compare its JSON serialisation with *correct*,
        ignoring the order of head variables and of bindings."""
        results = self.graph.query(query)
        result_json = json.loads(
            results.serialize(format='json').decode('utf-8'))

        msg = "Expected:\n %s \n- to contain:\n%s" % (result_json, correct)
        self.assertEqual(sorted(result_json["head"], key=repr),
                         sorted(correct["head"], key=repr), msg)

        # Sort by repr - rather a hack, but currently the best way I can think
        # of to ensure the results are in the same order.
        result_bindings = sorted(result_json["results"]["bindings"], key=repr)
        correct_bindings = sorted(correct["results"]["bindings"], key=repr)
        msg = "Expected:\n %s \n- to contain:\n%s" % (result_bindings,
                                                      correct_bindings)
        self.failUnless(result_bindings == correct_bindings, msg)

    testOptional = make_method('optional')

    testWildcard = make_method('wildcard')

    testUnion = make_method('union')

    testUnion3 = make_method('union3')

    testSelectVars = make_method('select_vars')

    testWildcardVars = make_method('wildcard_vars')
예제 #11
0
 def test4_DAWG_DATASET_COMPLIANCE_is_True(self):
     """Skipped: DAWG dataset compliance is a known open issue.

     The code below is intentionally unreachable until it is fixed.
     """
     raise SkipTest("known DAWG_DATATSET_COMPLIANCE SPARQL issue")
     graph = Graph()
     graph.parse(data=test4data, format='n3')
     res = graph.query(test4query, dSCompliance=True)
     # print("json", res.serialize(format='json'))
     assert len(res) == 2
예제 #12
0
class TestSparqlEquals(unittest.TestCase):
    """A FILTER( ?uri = <...> ) equality must keep exactly the one
    matching resource."""

    PREFIXES = {'rdfs': "http://www.w3.org/2000/01/rdf-schema#"}

    def setUp(self):
        # Three labelled documents; only doc/1 should pass the filter.
        testContent = """
            @prefix rdfs: <%(rdfs)s> .
            <http://example.org/doc/1> rdfs:label "Document 1"@en .
            <http://example.org/doc/2> rdfs:label "Document 2"@en .
            <http://example.org/doc/3> rdfs:label "Document 3"@en .
        """ % self.PREFIXES
        self.graph = ConjunctiveGraph()
        self.graph.load(StringIO(testContent), format='n3')

    def test_uri_equals(self):
        uri = URIRef("http://example.org/doc/1")
        # The target URI is spliced directly into the query text.
        template = ("""
            PREFIX rdfs: <%(rdfs)s>

            SELECT ?uri WHERE {
                ?uri rdfs:label ?label .
                FILTER( ?uri = <""" + uri + """> )
            }
        """)
        rows = self.graph.query(template % self.PREFIXES)
        self.assertEqual(list(rows), [(uri, )])
 def test4_DAWG_DATASET_COMPLIANCE_is_True(self):
     """Skipped: DAWG dataset compliance is a known open issue.

     The code below is intentionally unreachable until it is fixed.
     """
     raise SkipTest("known DAWG_DATATSET_COMPLIANCE SPARQL issue")
     graph = Graph()
     graph.parse(data=test4data, format='n3')
     res = graph.query(test4query, dSCompliance=True)
     # print("json", res.serialize(format='json'))
     assert len(res) == 2
예제 #14
0
class TestSparqlEquals(unittest.TestCase):
    """FILTER( ?uri = <...> ) must select exactly the one matching URI."""

    PREFIXES = {
        'rdfs': "http://www.w3.org/2000/01/rdf-schema#"
    }

    def setUp(self):
        # Three labelled documents; only doc/1 should pass the filter.
        testContent = """
            @prefix rdfs: <%(rdfs)s> .
            <http://example.org/doc/1> rdfs:label "Document 1"@en .
            <http://example.org/doc/2> rdfs:label "Document 2"@en .
            <http://example.org/doc/3> rdfs:label "Document 3"@en .
        """ % self.PREFIXES
        self.graph = ConjunctiveGraph()
        self.graph.load(StringIO(testContent), format='n3')

    def test_uri_equals(self):
        uri = URIRef("http://example.org/doc/1")
        # The target URI is spliced directly into the query text.
        query = ("""
            PREFIX rdfs: <%(rdfs)s>

            SELECT ?uri WHERE {
                ?uri rdfs:label ?label .
                FILTER( ?uri = <"""+uri+"""> )
            }
        """) % self.PREFIXES
        res = self.graph.query(query)
        expected = [(uri,)]
        self.assertEqual(list(res),expected)
예제 #15
0
class TestSparqlJsonResults(unittest.TestCase):
    """Drive the JSON result serialiser through several query shapes.

    The concrete test methods are generated by the module-level
    make_method() factory, one per named query fixture.
    """

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.parse(StringIO(test_data), format="n3")

    def _query_result_contains(self, query, correct):
        """Run *query* and compare its JSON serialisation with *correct*,
        ignoring the order of head variables and of bindings."""
        results = self.graph.query(query)
        result_json = json.loads(results.serialize(format='json').decode('utf-8'))

        msg = "Expected:\n %s \n- to contain:\n%s" % (result_json, correct)
        self.assertEqual(sorted(result_json["head"], key=repr),
                         sorted(correct["head"], key=repr), msg)

        # Sort by repr - rather a hack, but currently the best way I can think
        # of to ensure the results are in the same order.
        result_bindings = sorted(result_json["results"]["bindings"], key=repr)
        correct_bindings = sorted(correct["results"]["bindings"], key=repr)
        msg = "Expected:\n %s \n- to contain:\n%s" % (result_bindings, correct_bindings)
        self.failUnless(result_bindings==correct_bindings, msg)

    testOptional = make_method('optional')

    testWildcard = make_method('wildcard')

    testUnion = make_method('union')

    testUnion3 = make_method('union3')

    testSelectVars = make_method('select_vars')
    
    testWildcardVars = make_method('wildcard_vars')
예제 #16
0
    def compute_obsels(self, computed_trace, from_scratch=False):
        """I implement :meth:`.interface.IMethod.compute_obsels`.

        Fill the 'sparql' parameter template with the trace parameters,
        run it against data selected by the 'scope' parameter, and replace
        the obsels of *computed_trace* with the resulting graph.  Failures
        are recorded in *diag* rather than raised.

        :param computed_trace: trace whose obsels are being (re)computed.
        :param from_scratch: accepted but not used in the visible body —
            NOTE(review): confirm whether incremental computation is meant.
        """
        diag = Diagnosis("sparql.compute_obsels")

        source = computed_trace.source_traces[0]
        parameters = computed_trace.parameters_as_dict
        # Expose the destination/source URIs to the SPARQL template.
        parameters["__destination__"] = computed_trace.uri
        parameters["__source__"] = source.uri

        scope = parameters.get('scope', 'trace')
        try:
            if scope == 'store':
                # Query across every graph of the service's store.
                data = ConjunctiveGraph(source.service.store)
            elif scope == 'base':
                # Query across the graphs under the source's base only.
                data = PrefixConjunctiveView(source.base.uri,
                                             source.service.store)
            else:
                # scope == 'trace'
                data = source.obsel_collection.get_state({"refresh": "no"})
            sparql = parameters["sparql"] % parameters
            result = data.query(sparql, base=source.obsel_collection.uri).graph
            replace_obsels(computed_trace, result, ("inherit" in parameters))
        except Exception, exc:
            # Best-effort: log the traceback and record the failure in diag.
            LOG.warn(traceback.format_exc())
            diag.append(unicode(exc))
class TestSparqlJsonResults(unittest.TestCase):
    """Drive the JSON result serialiser through several query shapes.

    The concrete test methods are generated by the module-level
    make_method() factory, one per named query fixture.
    """

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.parse(StringIO(test_data), format="n3")

    def _query_result_contains(self, query, correct):
        """Run *query* and compare its JSON serialisation with *correct*,
        ignoring the order of bindings."""
        results = self.graph.query(query)
        result_json = json.loads(results.serialize(format='json'))

        msg = "Expected:\n %s \n- to contain:\n%s" % (result_json, correct)
        self.failUnless(result_json["head"]==correct["head"], msg)

        # Sort by repr: the bindings are dicts, and plain sorted() on dicts
        # raises TypeError on Python 3 (and has no meaningful order on
        # Python 2).  Matches the other variants of this test.
        result_bindings = sorted(result_json["results"]["bindings"], key=repr)
        correct_bindings = sorted(correct["results"]["bindings"], key=repr)
        msg = "Expected:\n %s \n- to contain:\n%s" % (result_bindings, correct_bindings)
        self.failUnless(result_bindings==correct_bindings, msg)

    testOptional = make_method('optional')

    testWildcard = make_method('wildcard')

    testUnion = make_method('union')

    testUnion3 = make_method('union3')

    testSelectVars = make_method('select_vars')
    
    testWildcardVars = make_method('wildcard_vars')
예제 #18
0
 def testLimit2(self):
     """Expect exactly one (title, price) row; the title must be one of
     the two tutorial literals."""
     graph = ConjunctiveGraph(plugin.get('IOMemory',Store)())
     graph.parse(data=test_data2, format="n3")
     results = list(graph.query(test_query2,DEBUG=False))
     self.assertEqual(len(results), 1)
     expected_titles = [Literal("Java Tutorial"),
                        Literal("COBOL Tutorial")]
     # Only the title is checked; the price column is ignored.
     for title, _price in results:
         self.assertTrue(title in expected_titles)
예제 #19
0
    def testOrderBy(self):
        """Rows must come back in the order Alice, Bob, Charlie, Dave."""
        graph = ConjunctiveGraph(plugin.get('IOMemory', Store)())
        graph.parse(StringIO(test_data), format="n3")
        results = graph.query(test_query)

        expected = ['Alice', 'Bob', 'Charlie', 'Dave']
        # Pairwise-compare the first column of each row with the
        # expected name at the same position.
        self.failUnless(all(row[0] == name
                            for row, name in zip(results, expected)))
예제 #20
0
 def testLimit2(self):
     """Expect exactly one (title, price) row from test_query2."""
     graph = ConjunctiveGraph(plugin.get('IOMemory', Store)())
     graph.parse(data=test_data2, format="n3")
     results = list(graph.query(test_query2, DEBUG=False))
     self.assertEqual(len(results), 1)
     for title, price in results:
         # Only the title is checked; price is unpacked but unused.
         self.assertTrue(
             title in [Literal("Java Tutorial"),
                       Literal("COBOL Tutorial")])
예제 #21
0
class TestSparqlOPT_FILTER(unittest.TestCase):
    """With ?label pre-bound to rdfs:label, QUERY must yield [(doc2,)]."""

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.load(StringIO(testContent), format="n3")

    def test_OPT_FILTER(self):
        results = self.graph.query(QUERY, DEBUG=False, initBindings={"?label": RDFS.label})
        print results.vars
        self.failUnless(list(results) == [(doc2,)], "expecting : %s, got %s" % (repr([(doc2,)]), repr(list(results))))
class TestSparqlOPT_FILTER2(unittest.TestCase):
    """With no initial bindings, the python-serialised result is [doc1]."""

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.load(StringIO(testContent), format="n3")

    def test_OPT_FILTER(self):
        results = self.graph.query(QUERY, DEBUG=False).serialize(format="python")
        results = list(results)
        self.failUnless(results == [doc1], "expecting : %s .  Got: %s" % ([doc1], repr(results)))
예제 #23
0
class TestIssue10(unittest.TestCase):
    """Issue 10: SELECT * inside GRAPH must bind the same number of rows
    as naming the variables explicitly."""

    debug = False
    sparql = True

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.parse(StringIO(testgraph1), publicID="testgraph1")
        self.graph.parse(StringIO(testgraph2), publicID="testgraph2")

    def test_issue_10(self):
        def run(sparql_text):
            # Both variants go through the sparql processor with no extra
            # namespaces or bindings.
            return self.graph.query(sparql_text, processor="sparql",
                                    initNs={}, initBindings={}, DEBUG=False)

        res_var = run("SELECT ?g ?s ?p ?o WHERE { GRAPH ?g { ?s ?p ?o } } ")
        res_star = run("SELECT * WHERE { GRAPH ?g { ?s ?p ?o } } ")

        self.assertTrue(len(list(res_var)) == len(list(res_star)))
예제 #24
0
class TestSparqlOPT_FILTER(unittest.TestCase):
    """With ?label pre-bound to rdfs:label, the python-serialised result
    must be [doc2]."""
    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.load(StringIO(testContent), format='n3')
    def test_OPT_FILTER(self):
        results = self.graph.query(QUERY,
                                   DEBUG=False,
                                   initBindings={'?label':RDFS.label}).serialize(format='python')
        self.failUnless(list(results) == [doc2],
                "expecting : %s"%repr([doc2]))
예제 #25
0
class TestSparqlASK(unittest.TestCase):
    """SPARQL ASK queries; this variant expects list-valued askAnswer
    results, and the positive case is flagged as a known issue below."""

    def setUp(self):
        self.graph = Graph()

        io = StringIO("""
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix : <http://goonmill.org/2007/skill.n3#> .

:Foo a rdfs:Class .

:bar a :Foo .
""")

        self.graph.load(io, format='n3')

        # Swap out the global compliance flag; tearDown restores it.
        self.compliance_setting, algebra.DAWG_DATASET_COMPLIANCE = algebra.DAWG_DATASET_COMPLIANCE, False

    def tearDown(self):
        algebra.DAWG_DATASET_COMPLIANCE = self.compliance_setting

    def test_ask_true(self):
        """
        Ask for a triple that exists, assert that the response is True.
        """
        res = self.graph.query(
            'ASK { <http://goonmill.org/2007/skill.n3#bar> a <http://goonmill.org/2007/skill.n3#Foo> } '
        )
        # NOTE(review): compares askAnswer against a list, not a bool.
        self.assertEquals(
            res.askAnswer, [True],
            "The answer should have been that the triple was found")

    # Flagged known_issue — presumably because of the list-valued compare.
    test_ask_true.known_issue = True

    def test_ask_false(self):
        """
        Ask for a triple that does not exist, assert that the response is False.
        """
        res = self.graph.query(
            'ASK { <http://goonmill.org/2007/skill.n3#baz> a <http://goonmill.org/2007/skill.n3#Foo> } '
        )
        self.assertEquals(
            res.askAnswer, [False],
            "The answer should have been that the triple was not found")
예제 #26
0
class Query(unittest.TestCase):
    """Literal lookup, resource lookup, and ORDER BY behaviour."""
    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.parse(StringIO(test_data), format="n3")

    def testQuery1(self):
        """Expect exactly one solution for test_query_literal."""
        r = list(self.graph.query(test_query_literal))
        print r
        self.assertEqual(len(r), 1)

    def testQuery2(self):
        """Expect exactly one solution for test_query_resource."""
        r = list(self.graph.query(test_query_resource))
        print r
        self.assertEqual(len(r), 1)

    def testQuery3(self):
        """Expect Carol before Emerson, in that order."""
        r = list(self.graph.query(test_query_order))
        print r
        self.assertEqual(list(r), [(Literal("Carol"), ),
                                   (Literal("Emerson"), )])
예제 #27
0
class TestSparqlJsonResults(unittest.TestCase):
    """The first solution of test_query must be the literal 'Alice'."""

    sparql = True

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.parse(StringIO(test_data), format="n3")

    def test_base_ref(self):
        rows = list(self.graph.query(test_query))
        first_value = rows[0][0]
        self.failUnless(first_value == Literal("Alice"),
                        "Expected:\n 'Alice' \nGot:\n %s" % rows)
예제 #28
0
class TestSparqlOPT_FILTER(unittest.TestCase):
    """With ?label pre-bound to rdfs:label, QUERY must yield [(doc2,)]."""
    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.load(StringIO(testContent), format='n3')
    def test_OPT_FILTER(self):
        results = self.graph.query(QUERY,
                                   DEBUG=False,
                                   initBindings={'?label':RDFS.label})
        print results.vars
        self.failUnless(list(results) == [(doc2,)],
                "expecting : %s, got %s"%(repr([(doc2,)]), repr(list(results))))
class TestSparqlOPT_FILTER2(unittest.TestCase):
    """With no initial bindings, QUERY must yield the single row (doc1,)."""
    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.load(StringIO(testContent), format='n3')

    def test_OPT_FILTER(self):
        results = self.graph.query(QUERY, DEBUG=False)
        results = list(results)
        self.failUnless(
            results == [(doc1, )],
            "expecting : %s .  Got: %s" % ([(doc1, )], repr(results)))
예제 #30
0
 def test_simple_recursion(self):
     """Recursive KNOWS query over BASIC_KNOWS_DATA: both persons must
     reach 'person 3', and person1 also yields an unbound (None) row."""
     graph = ConjunctiveGraph()
     graph.load(StringIO(BASIC_KNOWS_DATA), format='n3')
     results = graph.query(KNOWS_QUERY,
                           DEBUG=False).serialize(format='python')
     # Normalise rows to a set of tuples; row order is irrelevant.
     results = set([tuple(result) for result in results])
     person1 = URIRef('ex:person.1')
     person2 = URIRef('ex:person.2')
     nose.tools.assert_equal(
       results,
       set([(person1, None), (person1, Literal('person 3')),
            (person2, Literal('person 3'))]))
예제 #31
0
class TestSparqlJsonResults(unittest.TestCase):
    """The first solution of test_query must be the literal 'Alice'."""

    sparql = True

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.parse(StringIO(test_data), format="n3")

    def test_base_ref(self):
        rt = list(self.graph.query(test_query))
        self.failUnless(rt[0][0] == Literal("Alice"),
                        "Expected:\n 'Alice' \nGot:\n %s" % rt)
예제 #32
0
File: test_json.py  Project: alcides/rdflib
class JSON(unittest.TestCase):
    """Textual checks on the raw JSON result serialisation."""

    def setUp(self):
        store = plugin.get('IOMemory', Store)()
        self.graph = ConjunctiveGraph(store)
        self.graph.parse(StringIO(test_data), format="n3")

    def testComma(self):
        """
        Verify the serialisation of the data as json contains an exact
        substring, with the comma in the correct place.
        """
        serialised = self.graph.query(test_query).serialize(format='json')
        self.failUnless(serialised.find(correct) > 0)

    def testHeader(self):
        """
        Verify that the "x", substring is omitted from the serialised output.
        """
        serialised = self.graph.query(test_header_query).serialize(format='json')
        self.failUnless(serialised.find('"x",') == -1)
예제 #33
0
 def test_simple_recursion(self):
     """Recursive KNOWS query over BASIC_KNOWS_DATA: both persons must
     reach 'person 3', and person1 also yields an unbound (None) row."""
     graph = ConjunctiveGraph()
     graph.load(StringIO(BASIC_KNOWS_DATA), format='n3')
     results = graph.query(KNOWS_QUERY,
                           DEBUG=False).serialize(format='python')
     # Normalise rows to a set of tuples; row order is irrelevant.
     results = set([tuple(result) for result in results])
     person1 = URIRef('ex:person.1')
     person2 = URIRef('ex:person.2')
     nose.tools.assert_equal(
         results,
         set([(person1, None), (person1, Literal('person 3')),
              (person2, Literal('person 3'))]))
예제 #34
0
 def test_secondary_recursion(self):
     """Recursive SUBCLASS query: ex:ob must be paired with all three of
     ex:class.1/2/3."""
     graph = ConjunctiveGraph()
     graph.load(StringIO(SUBCLASS_DATA), format='n3')
     results = graph.query(SUBCLASS_QUERY,
                           DEBUG=False).serialize(format='python')
     # Normalise rows to a set of tuples; row order is irrelevant.
     results = set([tuple(result) for result in results])
     ob = URIRef('ex:ob')
     class1 = URIRef('ex:class.1')
     class2 = URIRef('ex:class.2')
     class3 = URIRef('ex:class.3')
     nose.tools.assert_equal(
         results, set([(ob, class1), (ob, class2), (ob, class3)]))
예제 #35
0
class TestIssue43(unittest.TestCase):
    """Issue 43: every FILTER variant here must yield zero solutions."""

    debug = False
    sparql = True
    known_issue = True

    def setUp(self):
        NS = u"http://example.org/"
        self.graph = ConjunctiveGraph()
        self.graph.parse(data=testgraph, format="n3", publicID=NS)

    def _assert_no_rows(self, query, debug):
        # Each variant is expected to produce no solutions at all.
        rt = self.graph.query(query, initNs={'rdf': RDF}, DEBUG=debug)
        self.assertEquals(len(list(rt)), 0)

    def testSPARQL_disjunction(self):
        self._assert_no_rows(disjunctionquery, False)

    def testSPARQL_conjunction(self):
        self._assert_no_rows(conjunctionquery, False)

    def testSPARQL_disjunction_with_conjunction(self):
        self._assert_no_rows(testquery, True)
예제 #36
0
class TestIssue11(unittest.TestCase):
    """Issue 11: a less-than FILTER involving a negative integer must
    only match ex:bar."""

    debug = False
    sparql = True

    def setUp(self):
        NS = u"http://example.org/"
        self.graph = ConjunctiveGraph()
        self.graph.parse(data=testgraph, format="n3", publicID=NS)

    def testSPARQL_lessthan_filter_using_negative_integer(self):
        rows = self.graph.query(testquery, initNs={'rdf': RDF}, DEBUG=True)
        for row in rows:
            # Every surviving row must be the ex:bar resource.
            assert str(row[0]) == "http://example.org/bar"
예제 #37
0
 def test_secondary_recursion(self):
     """Recursive SUBCLASS query: ex:ob must be paired with all three of
     ex:class.1/2/3."""
     graph = ConjunctiveGraph()
     graph.load(StringIO(SUBCLASS_DATA), format='n3')
     results = graph.query(SUBCLASS_QUERY,
                           DEBUG=False).serialize(format='python')
     # Normalise rows to a set of tuples; row order is irrelevant.
     results = set([tuple(result) for result in results])
     ob = URIRef('ex:ob')
     class1 = URIRef('ex:class.1')
     class2 = URIRef('ex:class.2')
     class3 = URIRef('ex:class.3')
     nose.tools.assert_equal(
       results,
       set([(ob, class1), (ob, class2), (ob, class3)]))
예제 #38
0
class Query(unittest.TestCase):
    """Literal lookup, resource lookup, and ORDER BY behaviour."""

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.parse(StringIO(test_data), format="n3")


    def testQuery1(self):
        """Expect exactly one solution for test_query_literal."""
        r=list(self.graph.query(test_query_literal))
        print r
        self.assertEqual(len(r), 1)

    def testQuery2(self):
        """Expect exactly one solution for test_query_resource."""
        r=list(self.graph.query(test_query_resource))
        print r
        self.assertEqual(len(r), 1)


    def testQuery3(self):
        """Expect Carol before Emerson, in that order."""
        r=list(self.graph.query(test_query_order))
        print r
        self.assertEqual(list(r), [(Literal("Carol"), ), (Literal("Emerson"),)])
예제 #39
0
class TestSparqlOPT_FILTER(unittest.TestCase):
    """With ?label pre-bound to rdfs:label, the python-serialised result
    must be [doc2]."""

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.load(StringIO(testContent), format='n3')

    def test_OPT_FILTER(self):
        results = self.graph.query(QUERY,
                                   DEBUG=False,
                                   initBindings={
                                       '?label': RDFS.label
                                   }).serialize(format='python')
        self.failUnless(
            list(results) == [doc2], "expecting : %s" % repr([doc2]))
예제 #40
0
class TestIssue11(unittest.TestCase):
    """Issue 11: SPARQL less-than FILTER behaviour on a SQLite store.

    Each test parses its own fixture into an on-disk SQLite store created
    in setUp; tearDown closes the store and removes the database file
    (the original leaked the mkstemp descriptor and left the file behind).
    """

    debug = True

    def setUp(self):
        import os
        self.graph = ConjunctiveGraph(store="SQLite")
        fp, path = tempfile.mkstemp(suffix='.sqlite')
        # mkstemp returns an open OS-level descriptor: close it right away
        # so it is not leaked (the store reopens the file by path).
        os.close(fp)
        self._path = path
        self.graph.open(path, create=True)

    def tearDown(self):
        import os
        # Close the store and delete the temporary database file.
        self.graph.close()
        try:
            os.remove(self._path)
        except OSError:
            pass

    def testSPARQL_SQLite_lessthan_filter_a(self):
        self.graph.parse(data=testgraph1, format="n3", publicID=NS)
        rt = self.graph.query(good_testquery,
                initNs={'rdf': RDF, 'xsd': XSD}, DEBUG=True)
        # rt = self.graph.query(good_testquery, DEBUG=True)
        # assert str(list(rt)[0][0]) == "http://example.org/bar", list(rt)
        assert len(list(rt)) == 1, list(rt)

    def testSPARQL_SQLite_lessthan_filter_b(self):
        self.graph.parse(data=testgraph2, format="n3", publicID=NS)
        # Counter-example demo
        rt = self.graph.query(bad_testquery,
                initNs={'rdf': RDF, 'xsd': XSD}, DEBUG=True)
        # rt = self.graph.query(bad_testquery, DEBUG=True)
        assert len(list(rt)) == 3, list(rt)
예제 #41
0
class Query(unittest.TestCase):
    """Five fixture queries with known solution counts."""
    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.parse(StringIO(test_data), format="n3")

    def test1(self):
        """test_query1 has exactly one solution."""
        r = list(self.graph.query(test_query1))
        self.assertEqual(len(r), 1)

    def test2(self):
        """test_query2 has exactly one solution."""
        r = list(self.graph.query(test_query2))
        self.assertEqual(len(r), 1)

    def test3(self):
        """test_query3 has exactly one solution."""
        r = list(self.graph.query(test_query3))
        self.assertEqual(len(r), 1)

    def test4(self):
        """test_query4 has exactly one solution."""
        r = list(self.graph.query(test_query4))
        self.assertEqual(len(r), 1)

    def test5(self):
        """test_query5 has no solutions at all."""
        r = list(self.graph.query(test_query5))
        self.assertEqual(len(r), 0)
예제 #42
0
class TestIssue43(unittest.TestCase):
    """Issue 43: every FILTER variant here must yield zero solutions."""

    debug = False
    sparql = True
    known_issue = True

    def setUp(self):
        NS = u"http://example.org/"
        self.graph = ConjunctiveGraph()
        self.graph.parse(data=testgraph, format="n3", publicID=NS)

    def testSPARQL_disjunction(self):
        rt = self.graph.query(
            disjunctionquery, initNs={'rdf': RDF}, DEBUG=False)
        self.assertEquals(len(list(rt)), 0)

    def testSPARQL_conjunction(self):
        rt = self.graph.query(
            conjunctionquery, initNs={'rdf': RDF}, DEBUG=False)
        self.assertEquals(len(list(rt)), 0)

    def testSPARQL_disjunction_with_conjunction(self):
        rt = self.graph.query(
            testquery, initNs={'rdf': RDF}, DEBUG=True)
        self.assertEquals(len(list(rt)), 0, list(rt))
예제 #43
0
class DateFilterTest(unittest.TestCase):
    """Each of three equivalent date-FILTER queries must yield [ANSWER1]."""
    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.load(StringIO(testContent), format='n3')
    def test_DATE_FILTER1(self):
        for query in [QUERY1,QUERY2,QUERY3]:
            print query
            #pQuery = Parse(query)
            #print RenderSPARQLAlgebra(pQuery)
            results = self.graph.query(query,
                                       DEBUG=False).serialize(format='python')
            results = list(results)
            self.failUnless(
                len(results) and results == [ANSWER1],
                "expecting : %s .  Got: %s"%([ANSWER1],repr(results)))
class DateFilterTest(unittest.TestCase):
    # Reformatted duplicate of the DateFilterTest defined earlier in this
    # file; being the later definition, this is the one actually collected.
    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.load(StringIO(testContent), format='n3')

    def test_DATE_FILTER1(self):
        """Run QUERY1..QUERY3; each must return the single expected row."""
        for query in [QUERY1, QUERY2, QUERY3]:
            print query
            #pQuery = Parse(query)
            #print RenderSPARQLAlgebra(pQuery)
            results = self.graph.query(query,
                                       DEBUG=False).serialize(format='python')
            results = list(results)
            self.failUnless(
                len(results) and results == [ANSWER1],
                "expecting : %s .  Got: %s" % ([ANSWER1], repr(results)))
class GraphAggregates3(unittest.TestCase):
    """Aggregation tests over three named graphs sharing a single
    SQLAlchemy-backed (in-memory SQLite) store."""

    def setUp(self):
        store = plugin.get("SQLAlchemy", Store)(
            identifier="rdflib_test", configuration=Literal("sqlite://"))
        self.graph1 = Graph(store, URIRef("graph1"))
        self.graph2 = Graph(store, URIRef("graph2"))
        self.graph3 = Graph(store, URIRef("graph3"))

        # One N3 fixture per named graph, all parsed into the shared store.
        sources = ((testGraph1N3, self.graph1),
                   (testGraph2N3, self.graph2),
                   (testGraph3N3, self.graph3))
        for n3_data, target in sources:
            target.parse(StringIO(n3_data), format="n3")
        self.G = ConjunctiveGraph(store)

    def testDefaultGraph(self):
        """The conjunctive graph sees triples from all three named graphs;
        an individual named graph does not."""
        assert self.G.query(sparqlQ3), "CG as default graph should *all* triples"
        assert not self.graph2.query(sparqlQ3), "Graph as default graph should *not* include triples from other graphs"
예제 #46
0
class GraphAggregates3(unittest.TestCase):
    # Same aggregate-graph scenario, but over a plain in-memory (IOMemory)
    # store rather than a SQL-backed one.

    def setUp(self):
        memStore = plugin.get('IOMemory',Store)()
        self.graph1 = Graph(memStore,URIRef("graph1"))
        self.graph2 = Graph(memStore,URIRef("graph2"))
        self.graph3 = Graph(memStore,URIRef("graph3"))

        # Load one N3 fixture into each named graph of the shared store.
        for n3Str,graph in [(testGraph1N3,self.graph1),
                            (testGraph2N3,self.graph2),
                            (testGraph3N3,self.graph3)]:
            graph.parse(StringIO(n3Str),format='n3')
        self.G = ConjunctiveGraph(memStore)

    def testDefaultGraph(self):
        #test that CG includes triples from all 3
        assert self.G.query(sparqlQ3),"CG as default graph should *all* triples"
        assert not self.graph2.query(sparqlQ3),"Graph as default graph should *not* include triples from other graphs"
예제 #47
0
def testSPARQLNotEquals():
    """FILTER (?val != 1) must exclude :foo (value 1), leaving only :bar."""
    NS = u"http://example.org/"
    graph = ConjunctiveGraph()
    fixture = """
       @prefix    : <http://example.org/> .
       @prefix rdf: <%s> .
       :foo rdf:value 1.
       :bar rdf:value 2.""" % RDF.uri
    graph.parse(StringInputSource(fixture), format="n3")
    query = """SELECT ?node 
                        WHERE {
                                ?node rdf:value ?val.
                                FILTER (?val != 1)
                               }"""
    results = graph.query(query, initNs={'rdf': RDF.uri}, DEBUG=False)
    expected = URIRef("http://example.org/bar")
    for item in results:
        assert item == expected, "unexpected item of '%s'" % repr(item)
class TestSparqlXmlResults(unittest.TestCase):
    # Verifies that the SPARQL XML serialization of a query result contains
    # each expected literal fragment.

    sparql = True

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.parse(StringIO(test_data), format="n3")

    def testSimple(self):
        """The canned query's XML output must contain every expected fragment."""
        self._query_result_contains(query, expected_fragments)

    def _query_result_contains(self, query, fragments):
        # Serialize to SPARQL XML and whitespace-normalize before the
        # substring checks.
        results = self.graph.query(query)
        result_xml = results.serialize(format='xml')
        result_xml = normalize(result_xml) # TODO: poor mans c14n..
        # print result_xml
        for frag in fragments:
            # print frag
            self.failUnless(frag in result_xml)
class TestSparqlXmlResults(unittest.TestCase):
    # Duplicate of the TestSparqlXmlResults variant above, except the debug
    # prints are active (Python 2 print statements).

    sparql = True

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.parse(StringIO(test_data), format="n3")

    def testSimple(self):
        """The canned query's XML output must contain every expected fragment."""
        self._query_result_contains(query, expected_fragments)

    def _query_result_contains(self, query, fragments):
        results = self.graph.query(query)
        result_xml = results.serialize(format='xml')
        result_xml = normalize(result_xml)  # TODO: poor mans c14n..
        print result_xml
        for frag in fragments:
            print frag
            self.failUnless(frag in result_xml)
예제 #50
0
File: app.py — Project: kai5263499/rdf-poc
def generate_rootNodes(limit):
    """Sample up to *limit* random root nodes from the RDF store.

    A root node is any node appearing as an rgml:source of a link. The
    sampled nodes are added to a fresh networkx MultiDiGraph, persisted to
    rootnodes.json, and returned as a Flask JSON response together with the
    query duration (seconds spent in the SPARQL query only).
    """
    start_time = time.time()
    g = Graph(store=getStore())
    sg = nx.MultiDiGraph()

    query = '''SELECT DISTINCT ?node {
  ?link rgml:source ?node
}
ORDER BY RAND()
LIMIT %d''' % (limit)
    qres = g.query(query)

    query_duration = time.time() - start_time

    addRootNodesToGraph(sg, qres)

    # Serialize the graph once so the file on disk and the HTTP response
    # are guaranteed to agree (and we avoid doing the work twice).
    node_link = json_graph.node_link_data(sg)
    with open('rootnodes.json', 'w') as outfile:
        json.dump(node_link, outfile)

    return jsonify({'roots': limit, 'duration': query_duration, 'graph': node_link})
예제 #51
0
class GraphAggregates3(unittest.TestCase):
    # SQLAlchemy/SQLite-backed variant of the aggregate-graph scenario.
    def setUp(self):
        memStore = plugin.get('SQLAlchemy',
                              Store)(identifier="rdflib_test",
                                     configuration=Literal("sqlite://"))
        self.graph1 = Graph(memStore, URIRef("graph1"))
        self.graph2 = Graph(memStore, URIRef("graph2"))
        self.graph3 = Graph(memStore, URIRef("graph3"))

        # One N3 fixture per named graph, all in the same store.
        for n3Str, graph in [(testGraph1N3, self.graph1),
                             (testGraph2N3, self.graph2),
                             (testGraph3N3, self.graph3)]:
            graph.parse(StringIO(n3Str), format='n3')
        self.G = ConjunctiveGraph(memStore)

    def testDefaultGraph(self):
        #test that CG includes triples from all 3
        assert self.G.query(
            sparqlQ3), "CG as default graph should *all* triples"
        assert not self.graph2.query(sparqlQ3), "Graph as " + \
                "default graph should *not* include triples from other graphs"
예제 #52
0
class TestSparqlXmlResults(unittest.TestCase):
    # XML-serialization fragment checks; <sparql:result> fragments are
    # skipped as known false negatives.

    sparql = True

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.parse(data=test_data, format="n3")

    def testSimple(self):
        """The canned query's XML output must contain every expected fragment."""
        self._query_result_contains(query, expected_fragments)

    def _query_result_contains(self, query, fragments):
        results = self.graph.query(query)
        result_xml = results.serialize(format='xml')
        result_xml = normalize(result_xml)  # TODO: poor mans c14n..
        # print result_xml
        for frag in fragments:
            # print(frag, result_xml)
            # Result-row fragments are known to mismatch; skip rather than fail.
            if frag.startswith(b('<sparql:result>')):
                raise SkipTest("False negative.")
            self.failUnless(frag in result_xml)
예제 #53
0
 def testQueryPlus(self):
     """The JSON serialization of the query result must contain `correct`."""
     graph = ConjunctiveGraph()
     graph.parse(StringIO(test_data), format="n3")
     result_json = graph.query(test_query).serialize(format='json')
     self.failUnless(result_json.find(correct) > 0)
예제 #54
0
def main():
    """Command-line SPARQL runner against an rdflib SQL-backed store.

    usage: prog [options] <DB connection string> <DB table identifier>
                [<SPARQL query string>]

    Parses options, opens the store, optionally primes the store's
    literal/resource property sets from an OWL or RDFS ontology, then either
    renders the generated SQL (--render) or evaluates the query and prints
    the result as SPARQL XML.
    """
    from optparse import OptionParser
    usage = '''usage: %prog [options] \\
    <DB connection string> <DB table identifier> <SPARQL query string>'''
    op = OptionParser(usage=usage)
    op.add_option('-s',
                  '--storeKind',
                  metavar='STORE',
                  help='Use this type of DB')
    op.add_option(
        '--owl',
        help='Owl file used to help identify literal and resource properties')
    op.add_option(
        '--rdfs',
        help='RDFS file used to help identify literal and resource properties')
    op.add_option('-d',
                  '--debug',
                  action='store_true',
                  help='Enable (store-level) debugging')
    op.add_option('--sparqlDebug',
                  action='store_true',
                  help='Enable (SPARQL evaluation) debugging')
    op.add_option('--file', default=None, help='File to load SPARQL from')
    op.add_option('--render',
                  action='store_true',
                  default=False,
                  help='Render a SPARQL snippet')
    op.add_option(
        '--flatten',
        action='store_true',
        default=False,
        help=
        'Used with --render to determine if the SQL should be flattened or not'
    )
    op.add_option('-l',
                  '--literal',
                  action='append',
                  dest='literal_properties',
                  metavar='URI',
                  help='Add URI to the list of literal properties')
    op.add_option('-p',
                  '--profile',
                  action='store_true',
                  help='Enable profiling statistics')
    op.add_option('--originalSPARQL',
                  action='store_true',
                  default=False,
                  help='Bypass SPARQL-to-SQL method?')
    op.add_option('-r',
                  '--resource',
                  action='append',
                  dest='resource_properties',
                  metavar='URI',
                  help='Add URI to the list of resource properties')
    op.add_option('--inMemorySQL',
                  action='store_true',
                  default=False,
                  help="Force in memory SPARQL evaluation?")

    op.set_defaults(debug=False, storeKind='MySQL')
    (options, args) = op.parse_args()

    # At least a connection string and table identifier are required;
    # op.error() prints the message and exits.
    if len(args) < 2:
        op.error('You need to provide a connection string ' +
                 '\n(of the form "user=...,password=...,db=...,host=..."), ' +
                 '\na table identifier, and a query string.')

    from rdflib.sparql import Algebra
    Algebra.DAWG_DATASET_COMPLIANCE = False
    # NOTE(review): when only two positional args are given and --file is not
    # supplied, `query` is never bound and the evaluation path below will
    # raise NameError. Confirm intended usage before changing.
    if len(args) == 3:
        connection, identifier, query = args
    else:
        connection, identifier = args
    store = plugin.get(options.storeKind, Store)(identifier)
    ontGraph = None
    if options.owl:
        ontGraph = Graph().parse(options.owl)
    elif options.rdfs:
        ontGraph = Graph().parse(options.rdfs)
    # Prime the store's property sets from the OWL ontology (MySQL only).
    # The post-loop debug prints show only the *last* row of the query.
    if options.storeKind == 'MySQL' and options.owl:
        for litProp, resProp in ontGraph.query(OWL_PROPERTIES_QUERY,
                                               initNs={u'owl': OWL_NS}):
            if litProp:
                store.literal_properties.add(litProp)
            if resProp:
                store.resource_properties.add(resProp)
        if options.debug:
            print "literalProperties: ", litProp
            print "resourceProperties: ", resProp
    # Same priming from an RDFS ontology.
    # NOTE(review): initNs still binds the 'owl' prefix here — confirm the
    # RDFS_PROPERTIES_QUERY really expects that prefix.
    if options.storeKind == 'MySQL' and options.rdfs:
        for litProp, resProp in ontGraph.query(RDFS_PROPERTIES_QUERY,
                                               initNs={u'owl': OWL_NS}):
            if litProp:
                store.literal_properties.add(litProp)
            if resProp:
                store.resource_properties.add(resProp)
        if options.debug:
            print "literalProperties: ", litProp
            print "resourceProperties: ", resProp
    # NOTE(review): the open() result `rt` is never checked.
    rt = store.open(connection, create=False)
    dataset = ConjunctiveGraph(store)
    if options.inMemorySQL:
        dataset.store.originalInMemorySQL = True

    if options.literal_properties:
        store.literal_properties.update(
            [URIRef(el) for el in options.literal_properties])
    if options.resource_properties:
        store.resource_properties.update(
            [URIRef(el) for el in options.resource_properties])
    if options.debug:
        print_set('literal_properties', store.literal_properties)
        print_set('resource_properties', store.resource_properties)
        store.debug = True
    # Optional hotshot profiling run of the query (independent of the
    # render/evaluate paths below).
    if options.profile:
        import hotshot, hotshot.stats
        prof = hotshot.Profile("sparqler.prof")
        res = prof.runcall(dataset.query, query, DEBUG=options.sparqlDebug)
        prof.close()
        stats = hotshot.stats.load("sparqler.prof")
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        print "===" * 20
        stats.print_stats(20)
        print "===" * 20
    if options.originalSPARQL:
        dataset.store.originalInMemorySQL = True
    if options.render:
        # --render: show the SQL that would be generated (or, with --file,
        # evaluate the file's query with debugging on).
        flags = DEFAULT_OPT_FLAGS.copy()
        if options.flatten:
            flags[OPT_FLATTEN] = False
        sb = RdfSqlBuilder(Graph(store), optimizations=flags)
        if options.file:
            query = prepQuery(open(options.file).read(), ontGraph)
            res = dataset.query(query, DEBUG=True)
            print res
        else:
            query = prepQuery(query, ontGraph)
            root = ParseQuery(query, sb)
            print repr(root)
            root.GenSql(sb)
            sql = sb.Sql()
            print sql
        return
    else:
        # Normal path: evaluate the query and fall through to print XML.
        flags = DEFAULT_OPT_FLAGS.copy()
        if options.flatten:
            flags[OPT_FLATTEN] = False
        dataset.store.optimizations = flags
        if options.file:
            query = prepQuery(open(options.file).read(), ontGraph)
            res = dataset.query(query, DEBUG=options.sparqlDebug)
        else:
            query = prepQuery(query, ontGraph)
            res = dataset.query(
                query,
                DEBUG=options.sparqlDebug,
                initNs=ontGraph
                and dict(ontGraph.namespace_manager.namespaces()) or {})
    print res.serialize(format='xml')
예제 #55
0
File: querytest.py — Project: foaf/foaftown
#!/usr/bin/env python

# Queries an RDF quadstore: lists openids that appear (via foaf:member /
# foaf:openid) in two *different* source graphs of the store.

from rdflib.graph import ConjunctiveGraph

# Open (creating if necessary) the on-disk Sleepycat store in ./store.
g = ConjunctiveGraph("Sleepycat")
g.open("store", create=True)

q1 = """PREFIX foaf: <http://xmlns.com/foaf/0.1/>
    SELECT ?src1 ?src2 ?x 
    WHERE {
        GRAPH ?src1 { ?gr1 foaf:member [ foaf:openid ?x ] }
        GRAPH ?src2 { ?gr2 foaf:member [ foaf:openid ?x ] }
        FILTER ( ?src1 != ?src2 )
    }"""

# One line per (graph, graph, openid) match (Python 2 print statement).
for src1, src2, x in g.query(q1):
    print src1, src2, x

g.close()
예제 #56
0
File: main.py — Project: cbp44/whyis
class App(Empty):

    managed = False

    def configure_extensions(self):
        """Set up Celery (tasks, once-semantics, periodic inference tasks),
        Jinja template loaders, the nanopublication manager, and the
        optional cache.

        Ordering matters: the Celery app and the ContextTask /
        ContextQueueOnce wrappers must exist before any task below is
        declared, and the nanopub manager is created last so it can use
        the update listener defined here.
        """

        Empty.configure_extensions(self)
        self.celery = Celery(self.name,
                             broker=self.config['CELERY_BROKER_URL'],
                             beat=True)
        self.celery.conf.update(self.config)
        self.celery.conf.ONCE = {
            'backend': 'celery_once.backends.Redis',
            'settings': {
                'url': self.config['CELERY_BROKER_URL'],
                'default_timeout': 60 * 60 * 24
            }
        }

        # Run every Celery task inside a Flask application context.
        # (`app` is the enclosing function's local, assigned below; it is
        # resolved lazily when the task is actually called.)
        class ContextTask(self.celery.Task):
            def __call__(self, *args, **kwargs):
                with app.app_context():
                    return self.run(*args, **kwargs)

        self.celery.Task = ContextTask

        # Make QueueOnce app context aware.
        class ContextQueueOnce(QueueOnce):
            def __call__(self, *args, **kwargs):
                with app.app_context():
                    return super(ContextQueueOnce,
                                 self).__call__(*args, **kwargs)

        # Attach to celery object for easy access.
        self.celery.QueueOnce = ContextQueueOnce

        app = self

        if 'root_path' in self.config:
            self.root_path = self.config['root_path']

        # Prepend any configured template directories to the default loader.
        if 'WHYIS_TEMPLATE_DIR' in self.config and app.config[
                'WHYIS_TEMPLATE_DIR'] is not None:
            my_loader = jinja2.ChoiceLoader([
                jinja2.FileSystemLoader(p)
                for p in self.config['WHYIS_TEMPLATE_DIR']
            ] + [app.jinja_loader])
            app.jinja_loader = my_loader

        # Run a named inferencer over the whole knowledge graph; QueueOnce
        # prevents concurrent duplicate runs.
        @self.celery.task(base=QueueOnce, once={'graceful': True})
        def process_resource(service_name, taskid=None):
            service = self.config['inferencers'][service_name]
            service.process_graph(app.db)

        # Run a named inferencer over a single (still-current) nanopub.
        @self.celery.task
        def process_nanopub(nanopub_uri, service_name, taskid=None):
            service = self.config['inferencers'][service_name]
            print(service, nanopub_uri)
            if app.nanopub_manager.is_current(nanopub_uri):
                nanopub = app.nanopub_manager.get(nanopub_uri)
                service.process_graph(nanopub)
            else:
                print("Skipping retired nanopub", nanopub_uri)

        # Wire a configured inference task up with its Celery entry points.
        def setup_periodic_task(task):
            @self.celery.task
            def find_instances():
                print("Triggered task", task['name'])
                for x, in task['service'].getInstances(app.db):
                    task['do'](x)

            @self.celery.task
            def do_task(uri):
                print("Running task", task['name'], 'on', uri)
                resource = app.get_resource(uri)

                # result never used
                task['service'].process_graph(resource.graph)

            task['service'].app = app
            task['find_instances'] = find_instances
            task['do'] = do_task

            return task

        app.inference_tasks = []
        if 'inference_tasks' in self.config:
            app.inference_tasks = [
                setup_periodic_task(task)
                for task in self.config['inference_tasks']
            ]

        for name, task in list(self.config['inferencers'].items()):
            task.app = app

        # Scheduled tasks go on the beat; unscheduled ones fire immediately.
        for task in app.inference_tasks:
            if 'schedule' in task:
                #print "Scheduling task", task['name'], task['schedule']
                self.celery.add_periodic_task(crontab(**task['schedule']),
                                              task['find_instances'].s(),
                                              name=task['name'])
            else:
                task['find_instances'].delay()

        @self.celery.task()
        def update(nanopub_uri):
            '''gets called whenever there is a change in the knowledge graph.
            Performs a breadth-first knowledge expansion of the current change.'''
            #print "Updating on", nanopub_uri
            #if not app.nanopub_manager.is_current(nanopub_uri):
            #    print("Skipping retired nanopub", nanopub_uri)
            #    return
            nanopub = app.nanopub_manager.get(nanopub_uri)
            nanopub_graph = ConjunctiveGraph(nanopub.store)
            if 'inferencers' in self.config:
                for name, service in list(self.config['inferencers'].items()):
                    service.app = self
                    if service.query_predicate == self.NS.whyis.updateChangeQuery:
                        if service.getInstances(nanopub_graph):
                            print("invoking", name, nanopub_uri)
                            process_nanopub.apply_async(kwargs={
                                'nanopub_uri': nanopub_uri,
                                'service_name': name
                            },
                                                        priority=1)
                for name, service in list(self.config['inferencers'].items()):
                    service.app = self
                    if service.query_predicate == self.NS.whyis.globalChangeQuery:
                        process_resource.apply_async(
                            kwargs={'service_name': name}, priority=5)

        def run_update(nanopub_uri):
            update.apply_async(args=[nanopub_uri], priority=9)

        self.nanopub_update_listener = run_update

        app = self

        # Import an external entity; retried with backoff on failure, and
        # deduplicated via QueueOnce.
        @self.celery.task(base=self.celery.QueueOnce,
                          once={'graceful': True},
                          retry_backoff=True,
                          retry_jitter=True,
                          autoretry_for=(Exception, ),
                          max_retries=4,
                          bind=True)
        def run_importer(self, entity_name):
            entity_name = URIRef(entity_name)
            print('importing', entity_name)
            importer = app.find_importer(entity_name)
            if importer is None:
                return
            importer.app = app
            modified = importer.last_modified(entity_name, app.db,
                                              app.nanopub_manager)
            updated = importer.modified(entity_name)
            if updated is None:
                updated = datetime.now(pytz.utc)
            print("Remote modified:", updated, type(updated),
                  "Local modified:", modified, type(modified))
            # Only re-import when the remote copy is newer by more than the
            # importer's minimum interval.
            if modified is None or (updated - modified
                                    ).total_seconds() > importer.min_modified:
                importer.load(entity_name, app.db, app.nanopub_manager)

        self.run_importer = run_importer

        # Modules exposed to templates under configured names.
        self.template_imports = {}
        if 'template_imports' in self.config:
            for name, imp in list(self.config['template_imports'].items()):
                try:
                    m = importlib.import_module(imp)
                    self.template_imports[name] = m
                except Exception:
                    print(
                        "Error importing module %s into template variable %s."
                        % (imp, name))
                    raise

        self.nanopub_manager = NanopublicationManager(
            self.db.store,
            Namespace('%s/pub/' % (self.config['lod_prefix'])),
            self,
            update_listener=self.nanopub_update_listener)

        if 'CACHE_TYPE' in self.config:
            from flask_caching import Cache
            self.cache = Cache(self)
        else:
            self.cache = None

    _file_depot = None  # cached DepotManager depot; created on first access

    @property
    def file_depot(self):
        """Lazily configure and return the 'files' depot (uploaded file storage)."""
        if self._file_depot is None:
            if DepotManager.get('files') is None:
                DepotManager.configure('files', self.config['file_archive'])
            self._file_depot = DepotManager.get('files')
        return self._file_depot

    _nanopub_depot = None  # cached depot; stays None while 'nanopub_archive' is unconfigured

    @property
    def nanopub_depot(self):
        """Lazily configure and return the 'nanopublications' depot, or None
        when no 'nanopub_archive' is configured."""
        if self._nanopub_depot is None and 'nanopub_archive' in self.config:
            if DepotManager.get('nanopublications') is None:
                DepotManager.configure('nanopublications',
                                       self.config['nanopub_archive'])
            self._nanopub_depot = DepotManager.get('nanopublications')
        return self._nanopub_depot

    def configure_database(self):
        """
        Set up RDF namespaces, the admin and knowledge stores, the
        vocabulary graph (default vocab plus the configured custom vocab),
        and Flask-Security's user datastore.
        """
        self.NS = NS
        self.NS.local = rdflib.Namespace(self.config['lod_prefix'] + '/')

        self.admin_db = database.engine_from_config(self.config, "admin_")
        self.db = database.engine_from_config(self.config, "knowledge_")
        self.db.app = self

        # Both vocab files parse into the same backing store, so self.vocab
        # exposes their union.
        self.vocab = ConjunctiveGraph()
        #print URIRef(self.config['vocab_file'])
        default_vocab = Graph(store=self.vocab.store)
        default_vocab.parse(source=os.path.abspath(
            os.path.join(os.path.dirname(__file__), "default_vocab.ttl")),
                            format="turtle",
                            publicID=str(self.NS.local))
        custom_vocab = Graph(store=self.vocab.store)
        custom_vocab.parse(self.config['vocab_file'],
                           format="turtle",
                           publicID=str(self.NS.local))

        self.datastore = WhyisUserDatastore(self.admin_db, {},
                                            self.config['lod_prefix'])
        self.security = Security(self,
                                 self.datastore,
                                 register_form=ExtendedRegisterForm)

    def __weighted_route(self, *args, **kwargs):
        """
        Override the match_compare_key function of the Rule created by invoking Flask.route.
        This can only be done on the app, not in a blueprint, because blueprints lazily add Rule's when they are registered on an app.
        """
        def decorator(view_func):
            # Pop our private option so Flask.route never sees it.
            compare_key = kwargs.pop('compare_key', None)
            # register view_func with route
            self.route(*args, **kwargs)(view_func)

            if compare_key is not None:
                # The rule just registered is the last in the map; pin its
                # ordering key so URL matching prefers/defers it as requested.
                rule = self.url_map._rules[-1]
                rule.match_compare_key = lambda: compare_key

            return view_func

        return decorator

    def map_entity(self, name):
        """Return (mapped_name, importer) for the first configured namespace
        importer that matches *name*; (None, None) when none match."""
        for candidate in self.config['namespaces']:
            if not candidate.matches(name):
                continue
            #print 'Found mapped URI', candidate.map(name)
            return candidate.map(name), candidate
        return None, None

    def find_importer(self, name):
        """Return the first namespace importer whose resource pattern
        matches *name*, or None when no importer matches."""
        candidates = (imp for imp in self.config['namespaces']
                      if imp.resource_matches(name))
        return next(candidates, None)

    class Entity(rdflib.resource.Resource):
        # An rdflib Resource with a lazily-computed detailed form (`this`)
        # and a lazily-computed label-enriched description subgraph.
        _this = None

        def this(self):
            """Fetch (once) and cache the fully-resolved entity via the app."""
            if self._this is None:
                self._this = self._graph.app.get_entity(self.identifier)
            return self._this

        _description = None

        def description(self):
            """Build (once) a CONSTRUCTed subgraph describing this entity:
            its assertion-graph triples plus labels/titles/names for the
            objects and their attributes; returns it as a Resource."""
            if self._description is None:
                #                try:
                result = Graph()
                #                try:
                for quad in self._graph.query(
                        '''
construct {
    ?e ?p ?o.
    ?o rdfs:label ?label.
    ?o skos:prefLabel ?prefLabel.
    ?o dc:title ?title.
    ?o foaf:name ?name.
    ?o ?pattr ?oattr.
    ?oattr rdfs:label ?oattrlabel
} where {
    graph ?g {
      ?e ?p ?o.
    }
    ?g a np:Assertion.
    optional {
      ?e sio:hasAttribute|sio:hasPart ?o.
      ?o ?pattr ?oattr.
      optional {
        ?oattr rdfs:label ?oattrlabel.
      }
    }
    optional {
      ?o rdfs:label ?label.
    }
    optional {
      ?o skos:prefLabel ?prefLabel.
    }
    optional {
      ?o dc:title ?title.
    }
    optional {
      ?o foaf:name ?name.
    }
}''',
                        initNs=NS.prefixes,
                        initBindings={'e': self.identifier}):
                    # Results may come back as triples or quads; only the
                    # first three terms are kept either way.
                    if len(quad) == 3:
                        s, p, o = quad
                    else:
                        # Last term is never used
                        s, p, o, _ = quad
                    result.add((s, p, o))
#                except:
#                    pass
                self._description = result.resource(self.identifier)
#                except Exception as e:
#                    print str(e), self.identifier
#                    raise e
            return self._description

    def get_resource(self, entity, async_=True, retrieve=True):
        """Resolve *entity* to an Entity resource, optionally importing it.

        When retrieve is True the URI may be rewritten by a matching
        namespace importer. The importer runs inline on first sight (no
        local copy yet) or when async_ is False; otherwise it is re-run
        asynchronously unless the importer is import-once.
        """
        if retrieve:
            mapped_name, importer = self.map_entity(entity)

            if mapped_name is not None:
                entity = mapped_name

            if importer is None:
                importer = self.find_importer(entity)
            print(entity, importer)

            if importer is not None:
                modified = importer.last_modified(entity, self.db,
                                                  self.nanopub_manager)
                # modified is None => never imported locally: fetch now.
                if modified is None or async_ is False:
                    self.run_importer(entity)
                elif not importer.import_once:
                    print("Type of modified is", type(modified))
                    self.run_importer.delay(entity)

        return self.Entity(self.db, entity)

    def configure_template_filters(self):
        """Install the built-in template filters, then register any
        user-configured filters found under config['filters']."""
        filters.configure(self)
        custom = self.config['filters'] if 'filters' in self.config else {}
        for filter_name, filter_fn in custom.items():
            self.template_filter(filter_name)(filter_fn)

    def add_file(self, f, entity, nanopub):
        """Store uploaded file *f* in the depot and assert its metadata on
        *entity* inside *nanopub*.

        Returns a list of (np_uri, np_assertion) pairs for nanopubs that
        previously attached a file to this entity (the caller is expected
        to retire them). Raises Unauthorized if the current user cannot
        edit any of those earlier nanopubs.
        """
        entity = rdflib.URIRef(entity)
        old_nanopubs = []
        # Find every existing nanopub that already asserts a file ID for
        # this entity; permission to replace each one is required.
        for np_uri, np_assertion, in self.db.query(
                '''select distinct ?np ?assertion where {
    hint:Query hint:optimizer "Runtime" .
    graph ?assertion {?e whyis:hasFileID ?fileid}
    ?np np:hasAssertion ?assertion.
}''',
                initNs=NS.prefixes,
                initBindings=dict(e=rdflib.URIRef(entity))):
            if not self._can_edit(np_uri):
                raise Unauthorized()
            old_nanopubs.append((np_uri, np_assertion))
        # Persist the file bytes; only the depot ID goes into the graph.
        fileid = self.file_depot.create(f.stream, f.filename, f.mimetype)
        nanopub.add((nanopub.identifier, NS.sio.isAbout, entity))
        nanopub.assertion.add((entity, NS.whyis.hasFileID, Literal(fileid)))
        if current_user._get_current_object() is not None and hasattr(
                current_user, 'identifier'):
            nanopub.assertion.add(
                (entity, NS.dc.contributor, current_user.identifier))
        nanopub.assertion.add(
            (entity, NS.dc.created, Literal(datetime.utcnow())))
        nanopub.assertion.add(
            (entity, NS.ov.hasContentType, Literal(f.mimetype)))
        # Type the entity by both its full media type and its major type
        # (e.g. image/png and image).
        nanopub.assertion.add((entity, NS.RDF.type, NS.mediaTypes[f.mimetype]))
        nanopub.assertion.add(
            (NS.mediaTypes[f.mimetype], NS.RDF.type, NS.dc.FileFormat))
        nanopub.assertion.add(
            (entity, NS.RDF.type, NS.mediaTypes[f.mimetype.split('/')[0]]))
        nanopub.assertion.add((NS.mediaTypes[f.mimetype.split('/')[0]],
                               NS.RDF.type, NS.dc.FileFormat))
        nanopub.assertion.add((entity, NS.RDF.type, NS.pv.File))

        if current_user._get_current_object() is not None and hasattr(
                current_user, 'identifier'):
            nanopub.pubinfo.add((nanopub.assertion.identifier,
                                 NS.dc.contributor, current_user.identifier))
        nanopub.pubinfo.add((nanopub.assertion.identifier, NS.dc.created,
                             Literal(datetime.utcnow())))

        return old_nanopubs

    def delete_file(self, entity):
        """Retire every nanopub that attaches a file ID to *entity*.

        Raises Unauthorized as soon as one of them is not editable by the
        current user (any already-processed nanopubs stay retired).
        """
        for np_uri, in self.db.query('''select distinct ?np where {
    hint:Query hint:optimizer "Runtime" .
    graph ?np_assertion {?e whyis:hasFileID ?fileid}
    ?np np:hasAssertion ?np_assertion.
}''',
                                     initNs=NS.prefixes,
                                     initBindings=dict(e=entity)):
            if not self._can_edit(np_uri):
                raise Unauthorized()
            self.nanopub_manager.retire(np_uri)

    def add_files(self, uri, files, upload_type=NS.pv.File):
        """Attach uploaded *files* to *uri* in a single new nanopub.

        The shape of the assertions depends on *upload_type*: a dcmitype
        Collection gets one dc:hasPart file per upload, a dcat:Dataset gets
        dcat:distribution entries, and anything else stores only the first
        non-empty file directly on *uri*. Nanopubs superseded by this
        upload are retired and recorded as prov:wasRevisionOf. Nothing is
        published if no file was actually added.
        """
        nanopub = self.nanopub_manager.new()

        added_files = False

        old_nanopubs = []
        nanopub.assertion.add((uri, self.NS.RDF.type, upload_type))
        if upload_type == URIRef("http://purl.org/dc/dcmitype/Collection"):
            # One part per file, each under its own sub-URI.
            for f in files:
                filename = secure_filename(f.filename)
                if filename != '':
                    file_uri = URIRef(uri + "/" + filename)
                    old_nanopubs.extend(self.add_file(f, file_uri, nanopub))
                    nanopub.assertion.add((uri, NS.dc.hasPart, file_uri))
                    added_files = True
        elif upload_type == NS.dcat.Dataset:
            # Each file becomes a dcat:Distribution of the dataset.
            for f in files:
                filename = secure_filename(f.filename)
                if filename != '':
                    file_uri = URIRef(uri + "/" + filename)
                    old_nanopubs.extend(self.add_file(f, file_uri, nanopub))
                    nanopub.assertion.add(
                        (uri, NS.dcat.distribution, file_uri))
                    nanopub.assertion.add(
                        (file_uri, NS.RDF.type, NS.dcat.Distribution))
                    nanopub.assertion.add(
                        (file_uri, NS.dcat.downloadURL, file_uri))
                    added_files = True
        else:
            # Plain file upload: only the first non-empty file is used.
            for f in files:
                if f.filename != '':
                    old_nanopubs.extend(self.add_file(f, uri, nanopub))
                    nanopub.assertion.add((uri, NS.RDF.type, NS.pv.File))
                    added_files = True
                    break

        if added_files:
            # Record provenance against, and retire, the superseded nanopubs,
            # then publish the new one.
            for old_np, old_np_assertion in old_nanopubs:
                nanopub.pubinfo.add((nanopub.assertion.identifier,
                                     NS.prov.wasRevisionOf, old_np_assertion))
                self.nanopub_manager.retire(old_np)

            for n in self.nanopub_manager.prepare(nanopub):
                self.nanopub_manager.publish(n)

    def _can_edit(self, uri):
        if self.managed:
            return True
        if current_user._get_current_object() is None:
            # This isn't null even when not authenticated, unless we are an autonomic agent.
            return True
        if not hasattr(current_user,
                       'identifier'):  # This is an anonymous user.
            return False
        if current_user.has_role('Publisher') or current_user.has_role(
                'Editor') or current_user.has_role('Admin'):
            return True
        if self.db.query('''ask {
    ?nanopub np:hasAssertion ?assertion; np:hasPublicationInfo ?info.
    graph ?info { ?assertion dc:contributor ?user. }
}''',
                         initBindings=dict(nanopub=uri,
                                           user=current_user.identifier),
                         initNs=dict(np=self.NS.np, dc=self.NS.dc)):
            #print "Is owner."
            return True
        return False

    def configure_views(self):
        def sort_by(resources, property):
            return sorted(resources, key=lambda x: x.value(property))

        def camel_case_split(identifier):
            matches = finditer(
                '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)',
                identifier)
            return [m.group(0) for m in matches]

        label_properties = [
            self.NS.skos.prefLabel, self.NS.RDFS.label, self.NS.schema.name,
            self.NS.dc.title, self.NS.foaf.name, self.NS.schema.name,
            self.NS.skos.notation
        ]

        @lru_cache(maxsize=1000)
        def get_remote_label(uri):
            for db in [self.db, self.admin_db]:
                g = Graph()
                try:
                    db.nsBindings = {}
                    g += db.query('''select ?s ?p ?o where {
                        hint:Query hint:optimizer "Runtime" .

                         ?s ?p ?o.}''',
                                  initNs=self.NS.prefixes,
                                  initBindings=dict(s=uri))
                    db.nsBindings = {}
                except:
                    pass
                resource_entity = g.resource(uri)
                if len(resource_entity.graph) == 0:
                    #print "skipping", db
                    continue
                for property in label_properties:
                    labels = self.lang_filter(resource_entity[property])
                    if len(labels) > 0:
                        return labels[0]

                if len(labels) == 0:
                    name = [
                        x.value for x in [
                            resource_entity.value(self.NS.foaf.givenName),
                            resource_entity.value(self.NS.foaf.familyName)
                        ] if x is not None
                    ]
                    if len(labels) == 0:
                        name = [
                            x.value for x in [
                                resource_entity.value(
                                    self.NS.schema.givenName),
                                resource_entity.value(
                                    self.NS.schema.familyName)
                            ] if x is not None
                        ]
                        if len(name) > 0:
                            label = ' '.join(name)
                            return label
            try:
                label = self.db.qname(uri).split(":")[1].replace("_", " ")
                return ' '.join(camel_case_split(label)).title()
            except Exception as e:
                print(str(e), uri)
                return str(uri)

        def get_label(resource):
            for property in label_properties:
                labels = self.lang_filter(resource[property])
                #print "mem", property, label
                if len(labels) > 0:
                    return labels[0]
            return get_remote_label(resource.identifier)

        self.get_label = get_label

        def initialize_g():
            if not hasattr(g, "initialized"):
                g.initialized = True
                g.ns = self.NS
                g.get_summary = get_summary
                g.get_label = get_label
                g.labelize = self.labelize
                g.get_resource = self.get_resource
                g.get_entity = self.get_entity
                g.rdflib = rdflib
                g.isinstance = isinstance
                g.current_user = current_user
                g.slugify = slugify
                g.db = self.db

        self.initialize_g = initialize_g

        @self.before_request
        def load_forms():
            if 'authenticators' in self.config:
                for authenticator in self.config['authenticators']:
                    user = authenticator.authenticate(request, self.datastore,
                                                      self.config)
                    if user is not None:
                        #    login_user(user)
                        break
            initialize_g()

        @self.login_manager.user_loader
        def load_user(user_id):
            if user_id != None:
                #try:
                user = self.datastore.find_user(id=user_id)
                return user
                #except:
                #    return None
            else:
                return None

        # def get_graphs(graphs):
        #     query = '''select ?s ?p ?o ?g where {
        #         hint:Query hint:optimizer "Runtime" .
        #
        #         graph ?g {?s ?p ?o}
        #         } values ?g { %s }'''
        #     query = query % ' '.join([graph.n3() for graph in graphs])
        #     #print query
        #     quads = self.db.store.query(query, initNs=self.NS.prefixes)
        #     result = rdflib.Dataset()
        #     result.addN(quads)
        #     return result

#         def explain(graph):
#             values = ')\n  ('.join([' '.join([x.n3() for x in triple]) for triple in graph.triples((None,None,None))])
#             values = 'VALUES (?s ?p ?o)\n{\n('+ values + ')\n}'
#
#             try:
#                 nanopubs = self.db.query('''select distinct ?np where {
#     hint:Query hint:optimizer "Runtime" .
#     ?np np:hasAssertion?|np:hasProvenance?|np:hasPublicationInfo? ?g;
#         np:hasPublicationInfo ?pubinfo;
#         np:hasAssertion ?assertion;
#     graph ?assertion { ?s ?p ?o.}
# }''' + values, initNs=self.NS.prefixes)
#                 result = ConjunctiveGraph()
#                 for nanopub_uri, in nanopubs:
#                     self.nanopub_manager.get(nanopub_uri, result)
#             except Exception as e:
#                 print(str(e), entity)
#                 raise e
#             return result.resource(entity)

        def get_entity_sparql(entity):
            try:
                statements = self.db.query(
                    '''select distinct ?s ?p ?o ?g where {
    hint:Query hint:optimizer "Runtime" .
            ?np np:hasAssertion?|np:hasProvenance?|np:hasPublicationInfo? ?g;
                np:hasPublicationInfo ?pubinfo;
                np:hasAssertion ?assertion;

            {graph ?np { ?np sio:isAbout ?e.}}
            UNION
            {graph ?assertion { ?e ?p ?o.}}
            graph ?g { ?s ?p ?o }
        }''',
                    initBindings={'e': entity},
                    initNs=self.NS.prefixes)
                result = ConjunctiveGraph()
                result.addN(statements)
            except Exception as e:
                print(str(e), entity)
                raise e
            #print result.serialize(format="trig")
            return result.resource(entity)

#         def get_entity_disk(entity):
#             try:
#                 nanopubs = self.db.query('''select distinct ?np where {
#     hint:Query hint:optimizer "Runtime" .
#             ?np np:hasAssertion?|np:hasProvenance?|np:hasPublicationInfo? ?g;
#                 np:hasPublicationInfo ?pubinfo;
#                 np:hasAssertion ?assertion;
#
#             {graph ?np { ?np sio:isAbout ?e.}}
#             UNION
#             {graph ?assertion { ?e ?p ?o.}}
#         }''',initBindings={'e':entity}, initNs=self.NS.prefixes)
#                 result = ConjunctiveGraph()
#                 for nanopub_uri, in nanopubs:
#                     self.nanopub_manager.get(nanopub_uri, result)
# #                result.addN(nanopubs)
#             except Exception as e:
#                 print(str(e), entity)
#                 raise e
#             #print result.serialize(format="trig")
#             return result.resource(entity)

        get_entity = get_entity_sparql

        self.get_entity = get_entity

        def get_summary(resource):
            summary_properties = [
                self.NS.skos.definition, self.NS.schema.description,
                self.NS.dc.abstract, self.NS.dc.description,
                self.NS.dc.summary, self.NS.RDFS.comment,
                self.NS.dcelements.description,
                URIRef("http://purl.obolibrary.org/obo/IAO_0000115"),
                self.NS.prov.value, self.NS.sio.hasValue
            ]
            if 'summary_properties' in self.config:
                summary_properties.extend(self.config['summary_properties'])
            for property in summary_properties:
                terms = self.lang_filter(resource[property])
                for term in terms:
                    yield (property, term)

        self.get_summary = get_summary

        if 'WHYIS_CDN_DIR' in self.config and self.config[
                'WHYIS_CDN_DIR'] is not None:

            @self.route('/cdn/<path:filename>')
            def cdn(filename):
                return send_from_directory(self.config['WHYIS_CDN_DIR'],
                                           filename)

        def render_view(resource, view=None, args=None, use_cache=True):
            self.initialize_g()
            if view is None and 'view' in request.args:
                view = request.args['view']

            if view is None:
                view = 'view'

            if use_cache and self.cache is not None:
                key = str((str(resource.identifier), view))
                result = self.cache.get(key)
                if result is not None:
                    r, headers = result
                    return r, 200, headers
            template_args = dict()
            template_args.update(self.template_imports)
            template_args.update(
                dict(ns=self.NS,
                     this=resource,
                     g=g,
                     current_user=current_user,
                     isinstance=isinstance,
                     args=request.args if args is None else args,
                     url_for=url_for,
                     app=self,
                     view=view,
                     get_entity=get_entity,
                     get_summary=get_summary,
                     search=search,
                     rdflib=rdflib,
                     config=self.config,
                     hasattr=hasattr,
                     set=set))

            types = []
            if 'as' in request.args:
                types = [URIRef(request.args['as']), 0]

            types.extend(
                (x, 1) for x in self.vocab[resource.identifier:NS.RDF.type])
            if len(
                    types
            ) == 0:  # KG types cannot override vocab types. This should keep views stable where critical.
                types.extend([(x.identifier, 1) for x in resource[NS.RDF.type]
                              if isinstance(x.identifier, rdflib.URIRef)])
            #if len(types) == 0:
            types.append([self.NS.RDFS.Resource, 100])
            type_string = ' '.join(
                ["(%s %d '%s')" % (x.n3(), i, view) for x, i in types])
            view_query = '''select ?id ?view (count(?mid)+?priority as ?rank) ?class ?c ?content_type where {
    values (?c ?priority ?id) { %s }
    ?c rdfs:subClassOf* ?mid.
    ?mid rdfs:subClassOf* ?class.
    ?class ?viewProperty ?view.
    ?viewProperty rdfs:subPropertyOf* whyis:hasView.
    ?viewProperty dc:identifier ?id.
    optional {
        ?viewProperty dc:format ?content_type
    }
} group by ?c ?class ?content_type order by ?rank
''' % type_string

            #print view_query
            views = list(
                self.vocab.query(view_query,
                                 initNs=dict(whyis=self.NS.whyis,
                                             dc=self.NS.dc)))
            if len(views) == 0:
                abort(404)

            headers = {'Content-Type': "text/html"}
            extension = views[0]['view'].value.split(".")[-1]
            if extension in DATA_EXTENSIONS:
                headers['Content-Type'] = DATA_EXTENSIONS[extension]
            print(views[0]['view'], views[0]['content_type'])
            if views[0]['content_type'] is not None:
                headers['Content-Type'] = views[0]['content_type']

            # default view (list of nanopubs)
            # if available, replace with class view
            # if available, replace with instance view
            return render_template(views[0]['view'].value,
                                   **template_args), 200, headers

        self.render_view = render_view

        # Register blueprints
        self.register_blueprint(nanopub_blueprint)
        self.register_blueprint(sparql_blueprint)
        self.register_blueprint(entity_blueprint)
        self.register_blueprint(tableview_blueprint)

    def get_entity_uri(self, name, format):
        """Resolve a request's (name, format) pair to an entity URI.

        Returns (entity, content_type). A known `format` extension maps to
        its content type; an unknown one is treated as part of the entity
        name. Falls back to a `uri` query argument, then to the local Home
        entity, when no name is given.
        """
        content_type = None
        if format is not None:
            if format in DATA_EXTENSIONS:
                content_type = DATA_EXTENSIONS[format]
            elif name is not None:
                # Guarded: joining with a None name raised TypeError before
                # the `name is not None` check below could run.
                name = '.'.join([name, format])
        if name is not None:
            entity = self.NS.local[name]
        elif 'uri' in request.args:
            entity = URIRef(request.args['uri'])
        else:
            entity = self.NS.local.Home
        return entity, content_type

    def get_send_file_max_age(self, filename):
        """Return the cache max-age for a static file.

        Disables caching entirely in debug mode so edits show up
        immediately; otherwise defers to the Empty base implementation.
        """
        if self.debug:
            return 0
        return Empty.get_send_file_max_age(self, filename)