Code example #1
    def contexts(self, triple=None):
        """
        Iterates over results to "SELECT ?NAME { GRAPH ?NAME { ?s ?p ?o } }"
        or "SELECT ?NAME { GRAPH ?NAME {} }" if triple is `None`.

        Returns a generator over the graph names (the `?name` bindings)
        found by that query.

        Please note that some SPARQL endpoints are not able to find empty named
        graphs.
        """
        self.resetQuery()

        if triple:
            nts = self.node_to_sparql
            s, p, o = triple
            params = (nts(s if s else Variable('s')),
                      nts(p if p else Variable('p')),
                      nts(o if o else Variable('o')))
            self.setQuery('SELECT ?name WHERE { GRAPH ?name { %s %s %s }}' %
                          params)
        else:
            self.setQuery('SELECT ?name WHERE { GRAPH ?name {} }')

        with contextlib.closing(SPARQLWrapper.query(self).response) as res:
            result = Result.parse(res, format=self.returnFormat)

        return (row.name for row in result)
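As a supplementary illustration (not part of the original project), the same kind of result can be parsed without a live endpoint by feeding Result.parse a fabricated SPARQL JSON payload of the shape returned by "SELECT ?name WHERE { GRAPH ?name {} }" and reading the ?name bindings:

from io import BytesIO
from rdflib.query import Result

# Fabricated payload standing in for the endpoint response
payload = (b'{"head": {"vars": ["name"]}, "results": {"bindings": '
           b'[{"name": {"type": "uri", "value": "http://example.org/graph/1"}}]}}')
result = Result.parse(BytesIO(payload), format="json")
print([str(row.name) for row in result])  # ['http://example.org/graph/1']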
Code example #2
File: sparqlstore.py Project: april1452/annotaria
    def query(self, query,
              initNs={},
              initBindings={},
              queryGraph=None,
              DEBUG=False):
        self.debug = DEBUG
        assert isinstance(query, basestring)
        self.setNamespaceBindings(initNs)
        if initBindings:
            if not self.sparql11:
                raise Exception(
                    "initBindings not supported for SPARQL 1.0 Endpoints.")
            v = list(initBindings)

            # VALUES was added to SPARQL 1.1 on 2012/07/24
            query += "\nVALUES ( %s )\n{ ( %s ) }\n"\
                % (" ".join("?" + str(x) for x in v),
                   " ".join(initBindings[x].n3() for x in v))

        self.resetQuery()
        if self.context_aware and queryGraph and queryGraph != '__UNION__':
            self.addDefaultGraph(queryGraph)
        self.setQuery(query)

        return Result.parse(SPARQLWrapper.query(self).response)
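To make the VALUES injection above concrete, here is a small standalone sketch (the bindings are hypothetical, not taken from the original code) that builds the same clause the method appends to the query string:

from rdflib import Literal, URIRef

# Hypothetical bindings standing in for the initBindings argument
initBindings = {"s": URIRef("http://example.org/book/1"),
                "title": Literal("SPARQL Tutorial")}
v = list(initBindings)
values_clause = "\nVALUES ( %s )\n{ ( %s ) }\n" % (
    " ".join("?" + str(x) for x in v),
    " ".join(initBindings[x].n3() for x in v))
print(values_clause)
# -> VALUES ( ?s ?title )
#    { ( <http://example.org/book/1> "SPARQL Tutorial" ) }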
Code example #3
File: sparqlstore.py Project: drewp/rdflib
    def contexts(self, triple=None):
        """
        Iterates over results to "SELECT ?NAME { GRAPH ?NAME { ?s ?p ?o } }"
        or "SELECT ?NAME { GRAPH ?NAME {} }" if triple is `None`.

        Returns a generator over the graph names (the `?name` bindings)
        found by that query.

        Please note that some SPARQL endpoints are not able to find empty named
        graphs.
        """
        self.resetQuery()

        if triple:
            nts = self.node_to_sparql
            s, p, o = triple
            params = (nts(s if s else Variable('s')),
                      nts(p if p else Variable('p')),
                      nts(o if o else Variable('o')))
            self.setQuery('SELECT ?name WHERE { GRAPH ?name { %s %s %s }}' % params)
        else:
            self.setQuery('SELECT ?name WHERE { GRAPH ?name {} }')

        with contextlib.closing(SPARQLWrapper.query(self).response) as res:
            result = Result.parse(res, format=self.returnFormat)

        return ( row.name for row in result )
Code example #4
    def query(self, query, default_graph=None):

        if not self.query_endpoint:
            raise SPARQLConnectorException("Query endpoint not set!")

        params = {'query': query}
        if default_graph:
            params["default-graph-uri"] = default_graph

        headers = {'Accept': _response_mime_types[self.returnFormat]}

        args = dict(self.kwargs)
        args.update(url=self.query_endpoint)

        # merge params/headers dicts
        args.setdefault('params', {})

        args.setdefault('headers', {})
        args['headers'].update(headers)

        if self.method == 'GET':
            args['params'].update(params)
        elif self.method == 'POST':
            args['headers'].update({'Content-Type': 'application/sparql-query'})
            args['data'] = params
        else:
            raise SPARQLConnectorException("Unknown method %s" % self.method)

        res = self.session.request(self.method, **args)

        res.raise_for_status()

        return Result.parse(BytesIO(res.content), content_type=res.headers['Content-type'])
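The final parsing step can be reproduced offline. Below is a minimal sketch with a fabricated response body; it assumes an rdflib version whose JSON result parser is registered for this content type, which is what code example #24 further down exercises:

from io import BytesIO
from rdflib.query import Result

# Fabricated body and Content-Type standing in for res.content and
# res.headers['Content-type']
body = (b'{"head": {"vars": ["s"]}, "results": {"bindings": '
        b'[{"s": {"type": "uri", "value": "http://example.org/a"}}]}}')
result = Result.parse(BytesIO(body), content_type="application/sparql-results+json")
for row in result:
    print(row.s)  # http://example.org/a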
Code example #5
    def query(self,
              query,
              initNs={},
              initBindings={},
              queryGraph=None,
              DEBUG=False):
        self.debug = DEBUG
        assert isinstance(query, basestring), 'Query is not a string'
        self.setNamespaceBindings(initNs)
        if initBindings:
            query = self.inject_sparql_bindings(query, initBindings)

        self.resetQuery()

        if self.context_aware and queryGraph and queryGraph != '__UNION__':
            # we care about context

            if not re.search('[\s{]GRAPH[{\s]', query, flags=re.I):
                # if a GRAPH clause was already specified, move on...

                # insert GRAPH clause after/before first/last { }
                # not 100% sure how rock-steady this is
                i1 = query.index("{") + 1
                i2 = query.rindex("}")
                query = query[:i1] + ' GRAPH %s { ' % queryGraph.n3() + \
                    query[i1:i2] + ' } ' + query[i2:]

        self.setQuery(query)

        return Result.parse(SPARQLWrapper.query(self).response)
Code example #6
    def query(self,
              query,
              initNs={},
              initBindings={},
              queryGraph=None,
              DEBUG=False):
        self.debug = DEBUG
        assert isinstance(query, basestring)
        self.setNamespaceBindings(initNs)
        if initBindings:
            if not self.sparql11:
                raise Exception(
                    "initBindings not supported for SPARQL 1.0 Endpoints.")
            v = list(initBindings)

            # VALUES was added to SPARQL 1.1 on 2012/07/24
            query += "\nVALUES ( %s )\n{ ( %s ) }\n"\
                % (" ".join("?" + str(x) for x in v),
                   " ".join(self.node_to_sparql(initBindings[x]) for x in v))

        self.resetQuery()
        self.setMethod(self.query_method)
        if self._is_contextual(queryGraph):
            self.addParameter("default-graph-uri", queryGraph)
        self.timeout = self._timeout
        self.setQuery(query)

        with contextlib.closing(SPARQLWrapper.query(self).response) as res:
            return Result.parse(res)
Code example #7
File: sparqlconnector.py Project: RDFLib/rdflib
    def query(self, query, default_graph=None):

        if not self.query_endpoint:
            raise SPARQLConnectorException("Query endpoint not set!")

        params = {'query': query}
        if default_graph:
            params["default-graph-uri"] = default_graph

        headers = {'Accept': _response_mime_types[self.returnFormat]}

        args = dict(self.kwargs)
        args.update(url=self.query_endpoint)

        # merge params/headers dicts
        args.setdefault('params', {})

        args.setdefault('headers', {})
        args['headers'].update(headers)

        if self.method == 'GET':
            args['params'].update(params)
        elif self.method == 'POST':
            args['data'] = params
        else:
            raise SPARQLConnectorException("Unknown method %s" % self.method)

        res = self.session.request(self.method, **args)

        res.raise_for_status()

        return Result.parse(BytesIO(res.content), content_type=res.headers['Content-type'])
Code example #8
File: sparqlstore.py Project: zxenia/rdflib
    def query(self,
              query,
              initNs={},
              initBindings={},
              queryGraph=None,
              DEBUG=False):
        self.debug = DEBUG
        assert isinstance(query, basestring)
        self.setNamespaceBindings(initNs)
        if initBindings:
            if not self.sparql11:
                raise Exception(
                    "initBindings not supported for SPARQL 1.0 Endpoints.")
            v = list(initBindings)

            # VALUES was added to SPARQL 1.1 on 2012/07/24
            query += "\nVALUES ( %s )\n{ ( %s ) }\n"\
                % (" ".join("?" + str(x) for x in v),
                   " ".join(initBindings[x].n3() for x in v))

        self.resetQuery()
        if self._is_contextual(queryGraph):
            self.addDefaultGraph(queryGraph)
        self.setQuery(query)

        return Result.parse(SPARQLWrapper.query(self).response)
Code example #9
    def query(self, query, default_graph=None):

        if not self.query_endpoint:
            raise SPARQLConnectorException("Query endpoint not set!")

        params = {"query": query}
        if default_graph:
            params["default-graph-uri"] = default_graph

        headers = {"Accept": _response_mime_types[self.returnFormat]}

        args = dict(self.kwargs)
        args.update(url=self.query_endpoint)

        # merge params/headers dicts
        args.setdefault("params", {})

        args.setdefault("headers", {})
        args["headers"].update(headers)

        if self.method == "GET":
            args["params"].update(params)
        elif self.method == "POST":
            args["headers"].update(
                {"Content-Type": "application/sparql-query"})
            args["data"] = params
        else:
            raise SPARQLConnectorException("Unknown method %s" % self.method)

        res = self.session.request(self.method, **args)

        res.raise_for_status()

        return Result.parse(BytesIO(res.content),
                            content_type=res.headers["Content-type"])
Code example #10
File: sparqlstore.py Project: Dataliberate/rdflib
    def query(self, query,
              initNs={},
              initBindings={},
              queryGraph=None,
              DEBUG=False):
        self.debug = DEBUG
        assert isinstance(query, basestring)
        self.setNamespaceBindings(initNs)
        if initBindings:
            if not self.sparql11:
                raise Exception(
                    "initBindings not supported for SPARQL 1.0 Endpoints.")
            v = list(initBindings)

            # VALUES was added to SPARQL 1.1 on 2012/07/24
            query += "\nVALUES ( %s )\n{ ( %s ) }\n"\
                % (" ".join("?" + str(x) for x in v),
                   " ".join(self.node_to_sparql(initBindings[x]) for x in v))

        self.resetQuery()
        if self._is_contextual(queryGraph):
            self.addParameter("default-graph-uri", queryGraph)
        self.timeout = self._timeout
        self.setQuery(query)

        return Result.parse(SPARQLWrapper.query(self).response)
Code example #11
File: sparqlconnector.py Project: tgbugs/rdflib
    def query(self, query, default_graph: str = None, named_graph: str = None):
        if not self.query_endpoint:
            raise SPARQLConnectorException("Query endpoint not set!")

        params = {}
        # this test ensures we don't have a useless (BNode) default graph URI, which calls to Graph().query() will add
        if default_graph is not None and type(default_graph) != BNode:
            params["default-graph-uri"] = default_graph

        headers = {"Accept": _response_mime_types[self.returnFormat]}

        args = dict(self.kwargs)

        # merge params/headers dicts
        args.setdefault("params", {})

        args.setdefault("headers", {})
        args["headers"].update(headers)

        if self.method == "GET":
            params["query"] = query
            args["params"].update(params)
            qsa = "?" + urlencode(args["params"])
            try:
                res = urlopen(
                    Request(self.query_endpoint + qsa,
                            headers=args["headers"]))
            except Exception as e:
                raise ValueError(
                    "You did something wrong formulating either the URI or your SPARQL query"
                )
        elif self.method == "POST":
            args["headers"].update(
                {"Content-Type": "application/sparql-query"})
            qsa = "?" + urlencode(params)
            try:
                res = urlopen(
                    Request(self.query_endpoint + qsa,
                            data=query.encode(),
                            headers=args["headers"]))
            except HTTPError as e:
                return e.code, str(e), None
        elif self.method == "POST_FORM":
            params["query"] = query
            args["params"].update(params)
            try:
                res = urlopen(
                    Request(self.query_endpoint,
                            data=urlencode(args["params"]).encode(),
                            headers=args["headers"]))
            except HTTPError as e:
                return e.code, str(e), None
        else:
            raise SPARQLConnectorException("Unknown method %s" % self.method)
        return Result.parse(
            BytesIO(res.read()),
            content_type=res.headers["Content-Type"].split(";")[0])
Code example #12
    async def queryGraph(self, query: str, *args, **keywords) -> dict:
        try:
            j = await self.query(query, *args, **keywords)
            data = io.BytesIO()
            data.write(orjson.dumps(j))
            data.seek(0, 0)
        except Exception:
            return None
        else:
            graph = BaseGraph()
            results = Result.parse(data, format='json')
            graph.parse(data=results.serialize(format='xml').decode(),
                        format='xml')
            return graph
Code example #13
    def __len__(self, context=None):
        if not self.sparql11:
            raise NotImplementedError("For performance reasons, this is not " +
                                      "supported for sparql1.0 endpoints")
        else:
            self.resetQuery()
            q = "SELECT (count(*) as ?c) WHERE {?s ?p ?o .}"
            if self._is_contextual(context):
                self.addParameter("default-graph-uri", context.identifier)
            self.setQuery(q)

            with contextlib.closing(SPARQLWrapper.query(self).response) as res:
                result = Result.parse(res, format=self.returnFormat)

            return int(next(iter(result)).c)
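A standalone sketch (using a fabricated COUNT payload instead of an endpoint response) of the int(next(iter(result)).c) idiom used above to pull a single aggregate value out of a parsed result:

from io import BytesIO
from rdflib.query import Result

# Fabricated aggregate result with a single ?c binding
payload = (b'{"head": {"vars": ["c"]}, "results": {"bindings": '
           b'[{"c": {"type": "literal", '
           b'"datatype": "http://www.w3.org/2001/XMLSchema#integer", '
           b'"value": "42"}}]}}')
result = Result.parse(BytesIO(payload), format="json")
print(int(next(iter(result)).c))  # 42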
Code example #14
File: sparqlstore.py Project: drewp/rdflib
    def __len__(self, context=None):
        if not self.sparql11:
            raise NotImplementedError(
                "For performance reasons, this is not " +
                "supported for sparql1.0 endpoints")
        else:
            self.resetQuery()
            q = "SELECT (count(*) as ?c) WHERE {?s ?p ?o .}"
            if self._is_contextual(context):
                self.addParameter("default-graph-uri", context.identifier)
            self.setQuery(q)

            with contextlib.closing(SPARQLWrapper.query(self).response) as res:
                result = Result.parse(res, format=self.returnFormat)

            return int(next(iter(result)).c)
Code example #15
    def query(self,
              query,
              initNs={},
              initBindings={},
              queryGraph=None,
              DEBUG=False):
        self.debug = DEBUG
        assert isinstance(query, basestring)
        self.setNamespaceBindings(initNs)
        if initBindings:
            if not self.sparql11:
                raise Exception(
                    "initBindings not supported for SPARQL 1.0 Endpoints.")
            v = list(initBindings)

            # VALUES was added to SPARQL 1.1 on 2012/07/24
            query+="\nVALUES ( %s )\n{ ( %s ) }\n"\
                %(" ".join("?"+str(x) for x in v),
                  " ".join(initBindings[x].n3() for x in v))

        self.resetQuery()

        if self.context_aware and queryGraph and queryGraph != '__UNION__':
            # we care about context

            if not re.search('[\s{]GRAPH[{\s]', query, flags=re.I):
                # if a GRAPH clause was already specified, move on...

                # insert GRAPH clause after/before first/last { }
                # not 100% sure how rock-steady this is
                i1 = query.index("{") + 1
                i2 = query.rindex("}")
                query=query[:i1]+' GRAPH %s { '%queryGraph.n3()+\
                    query[i1:i2]+' } '+query[i2:]

        self.setQuery(query)

        return Result.parse(SPARQLWrapper.query(self).response)
Code example #16
    def check_violations(self):
        res = []
        headers = {
            'Content-Type':
            'application/x-turtle, text/turtle, application/rdf+xml'
        }
        req = requests.post(
            'http://*****:*****@localhost:5820/test-haris/icv/violations',
            headers=headers)

        resp = decoder.MultipartDecoder.from_response(req)
        for p in resp.parts:
            print(p.headers)
            #if "rdf+xml" in p.headers["Content-Type"]:
            #    g = Graph()
            #    g.parse(data=p.content, format=p.headers["Content-Type"])
            if "sparql-result" in p.headers["Content-Type"]:
                g = Result('SELECT')
                g = g.parse(StringIO.StringIO(
                    p.content))  # , format=p.headers["Content-Type"])
                for r in g:
                    res.append({"type": "path", "entity": r["path"]})
        return res
Code example #17
File: sparqlstore.py Project: cnh/rdflib
    def query(self, query,
              initNs={},
              initBindings={},
              queryGraph=None,
              DEBUG=False):
        self.debug = DEBUG
        assert isinstance(query, basestring)
        self.setNamespaceBindings(initNs)
        if initBindings:
            if not self.sparql11:
                raise Exception(
                    "initBindings not supported for SPARQL 1.0 Endpoints.")
            v = list(initBindings)

            # VALUES was added to SPARQL 1.1 on 2012/07/24
            query += "\nVALUES ( %s )\n{ ( %s ) }\n"\
                % (" ".join("?" + str(x) for x in v),
                   " ".join(initBindings[x].n3() for x in v))

        self.resetQuery()

        if self.context_aware and queryGraph and queryGraph != '__UNION__':
            # we care about context

            if not re.search('[\s{]GRAPH[{\s]', query, flags=re.I):
                # if a GRAPH clause was already specified, move on...

                # insert GRAPH clause after/before first/last { }
                # not 100% sure how rock-steady this is
                i1 = query.index("{") + 1
                i2 = query.rindex("}")
                query = query[:i1] + ' GRAPH %s { ' % queryGraph.n3() + \
                    query[i1:i2] + ' } ' + query[i2:]

        self.setQuery(query)

        return Result.parse(SPARQLWrapper.query(self).response)
Code example #18
    def _parse_response(cls, rsp):
        return list(Result.parse(rsp.buffer))
Code example #19
def query_test(t):
    uri, name, comment, data, graphdata, query, resfile, syntax = t

    # the query-eval tests refer to graphs to load by resolvable filenames
    rdflib_sparql_module.SPARQL_LOAD_GRAPHS = True

    if uri in skiptests:
        raise SkipTest()

    def skip(reason="(none)"):
        print("Skipping %s from now on." % uri)
        with bopen("skiptests.list", "a") as f:
            f.write("%s\t%s\n" % (uri, reason))

    try:
        g = Dataset()
        if data:
            g.default_context.load(data, format=_fmt(data))

        if graphdata:
            for x in graphdata:
                g.load(x, format=_fmt(x))

        if not resfile:
            # no result - syntax test

            if syntax:
                translateQuery(parseQuery(bopen_read_close(query[7:])),
                               base=urljoin(query, "."))
            else:
                # negative syntax test
                try:
                    translateQuery(
                        parseQuery(bopen_read_close(query[7:])),
                        base=urljoin(query, "."),
                    )

                    assert False, "Query should not have parsed!"
                except:
                    pass  # it's fine - the query should not parse
            return

        # eval test - carry out query
        res2 = g.query(bopen_read_close(query[7:]), base=urljoin(query, "."))

        if resfile.endswith("ttl"):
            resg = Graph()
            resg.load(resfile, format="turtle", publicID=resfile)
            res = RDFResultParser().parse(resg)
        elif resfile.endswith("rdf"):
            resg = Graph()
            resg.load(resfile, publicID=resfile)
            res = RDFResultParser().parse(resg)
        else:
            with bopen(resfile[7:]) as f:
                if resfile.endswith("srj"):
                    res = Result.parse(f, format="json")
                elif resfile.endswith("tsv"):
                    res = Result.parse(f, format="tsv")

                elif resfile.endswith("csv"):
                    res = Result.parse(f, format="csv")

                    # CSV is lossy, round-trip our own resultset to
                    # lose the same info :)

                    # write bytes, read strings...
                    s = BytesIO()
                    res2.serialize(s, format="csv")
                    s.seek(0)
                    res2 = Result.parse(s, format="csv")
                    s.close()

                else:
                    res = Result.parse(f, format="xml")

        if not DETAILEDASSERT:
            eq(res.type, res2.type, "Types do not match")
            if res.type == "SELECT":
                eq(set(res.vars), set(res2.vars), "Vars do not match")
                comp = bindingsCompatible(set(res), set(res2))
                assert comp, "Bindings do not match"
            elif res.type == "ASK":
                eq(res.askAnswer, res2.askAnswer, "Ask answer does not match")
            elif res.type in ("DESCRIBE", "CONSTRUCT"):
                assert isomorphic(res.graph,
                                  res2.graph), "graphs are not isomorphic!"
            else:
                raise Exception("Unknown result type: %s" % res.type)
        else:
            eq(
                res.type,
                res2.type,
                "Types do not match: %r != %r" % (res.type, res2.type),
            )
            if res.type == "SELECT":
                eq(
                    set(res.vars),
                    set(res2.vars),
                    "Vars do not match: %r != %r" %
                    (set(res.vars), set(res2.vars)),
                )
                assert bindingsCompatible(set(res), set(res2)), (
                    "Bindings do not match: \nexpected:\n%s\n!=\ngot:\n%s" % (
                        res.serialize(format="txt",
                                      namespace_manager=g.namespace_manager),
                        res2.serialize(format="txt",
                                       namespace_manager=g.namespace_manager),
                    ))
            elif res.type == "ASK":
                eq(
                    res.askAnswer,
                    res2.askAnswer,
                    "Ask answer does not match: %r != %r" %
                    (res.askAnswer, res2.askAnswer),
                )
            elif res.type in ("DESCRIBE", "CONSTRUCT"):
                assert isomorphic(res.graph,
                                  res2.graph), "graphs are not isomorphic!"
            else:
                raise Exception("Unknown result type: %s" % res.type)

    except Exception as e:

        if isinstance(e, AssertionError):
            failed_tests.append(uri)
            fails[str(e)] += 1
        else:
            error_tests.append(uri)
            errors[str(e)] += 1

        if DEBUG_ERROR and not isinstance(e, AssertionError) or DEBUG_FAIL:
            print("======================================")
            print(uri)
            print(name)
            print(comment)

            if not resfile:
                if syntax:
                    print("Positive syntax test")
                else:
                    print("Negative syntax test")

            if data:
                print("----------------- DATA --------------------")
                print(">>>", data)
                print(bopen_read_close(data[7:]))
            if graphdata:
                print("----------------- GRAPHDATA --------------------")
                for x in graphdata:
                    print(">>>", x)
                    print(bopen_read_close(x[7:]))

            print("----------------- Query -------------------")
            print(">>>", query)
            print(bopen_read_close(query[7:]))
            if resfile:
                print("----------------- Res -------------------")
                print(">>>", resfile)
                print(bopen_read_close(resfile[7:]))

            try:
                pq = parseQuery(bopen_read_close(query[7:]))
                print("----------------- Parsed ------------------")
                pprintAlgebra(translateQuery(pq, base=urljoin(query, ".")))
            except:
                print("(parser error)")

            print(decodeStringEscape(str(e)))

            import pdb

            pdb.post_mortem(sys.exc_info()[2])
            # pdb.set_trace()
            # nose.tools.set_trace()
        raise
Code example #20
File: sparqlstore.py Project: drewp/rdflib
    def triples(self, spo, context=None):
        """
        - tuple **(s, p, o)**
            the triple used as filter for the SPARQL select.
            (None, None, None) means anything.
        - context **context**
            the graph effectively calling this method.

        Yields ((s, p, o), context) pairs, executing essentially a SPARQL
        query like SELECT ?subj ?pred ?obj WHERE { ?subj ?pred ?obj }

        **context** may carry three parameters
        to refine the underlying query:
         * LIMIT: an integer to limit the number of results
         * OFFSET: an integer to enable paging of results
         * ORDERBY: an instance of Variable('s'), Variable('p') or
           Variable('o'); by default the first unbound variable of the
           given triple is used

        .. warning::
        - Using LIMIT or OFFSET automatically adds an ORDER BY; without it
          the results would be retrieved in a non-deterministic order
          (it depends on how the endpoint walks the graph)
        - Using OFFSET without defining LIMIT will discard the first
          OFFSET - 1 results

        ``
        a_graph.LIMIT = limit
        a_graph.OFFSET = offset
        triple_generator = a_graph.triples(mytriple)
        # do something with the triples
        # remove LIMIT and OFFSET if not required for the next triples() calls
        del a_graph.LIMIT
        del a_graph.OFFSET
        ``
        """

        s, p, o = spo

        vars = []
        if not s:
            s = Variable('s')
            vars.append(s)

        if not p:
            p = Variable('p')
            vars.append(p)
        if not o:
            o = Variable('o')
            vars.append(o)

        if vars:
            v = ' '.join([term.n3() for term in vars])
            verb = 'SELECT %s '%v
        else:
            verb = 'ASK'

        nts = self.node_to_sparql
        query = "%s { %s %s %s }" % (verb, nts(s), nts(p), nts(o))

        # The ORDER BY is necessary
        if hasattr(context, LIMIT) or hasattr(context, OFFSET) \
                or hasattr(context, ORDERBY):
            var = None
            if isinstance(s, Variable):
                var = s
            elif isinstance(p, Variable):
                var = p
            elif isinstance(o, Variable):
                var = o
            elif hasattr(context, ORDERBY) \
                    and isinstance(getattr(context, ORDERBY), Variable):
                var = getattr(context, ORDERBY)
            query = query + ' %s %s' % (ORDERBY, var.n3())

        try:
            query = query + ' LIMIT %s' % int(getattr(context, LIMIT))
        except (ValueError, TypeError, AttributeError):
            pass
        try:
            query = query + ' OFFSET %s' % int(getattr(context, OFFSET))
        except (ValueError, TypeError, AttributeError):
            pass

        self.resetQuery()
        if self._is_contextual(context):
            self.addParameter("default-graph-uri", context.identifier)
        self.timeout = self._timeout
        self.setQuery(query)

        with contextlib.closing(SPARQLWrapper.query(self).response) as res:
            result = Result.parse(res, format=self.returnFormat)

        if vars:
            for row in result:
                yield (row.get(s, s),
                       row.get(p, p),
                       row.get(o, o)), None # why is the context here not the passed in graph 'context'?
        else:
            if result.askAnswer:
                yield (s,p,o), None
Code example #21
def query_test(t):
    uri, name, comment, data, graphdata, query, resfile, syntax = t

    # the query-eval tests refer to graphs to load by resolvable filenames
    rdflib_sparql_module.SPARQL_LOAD_GRAPHS = True

    if uri in skiptests:
        raise SkipTest()

    def skip(reason='(none)'):
        print "Skipping %s from now on." % uri
        f = open("skiptests.list", "a")
        f.write("%s\t%s\n" % (uri, reason))
        f.close()

    try:
        g = ConjunctiveGraph()
        if data:
            g.default_context.load(data, format=_fmt(data))

        if graphdata:
            for x in graphdata:
                g.load(x, format=_fmt(x))

        if not resfile:
            # no result - syntax test

            if syntax:
                translateQuery(parseQuery(open(query[7:]).read()),
                               base=urljoin(query, '.'))
            else:
                # negative syntax test
                try:
                    translateQuery(parseQuery(open(query[7:]).read()),
                                   base=urljoin(query, '.'))

                    assert False, 'Query should not have parsed!'
                except:
                    pass  # it's fine - the query should not parse
            return

        # eval test - carry out query
        res2 = g.query(open(query[7:]).read(), base=urljoin(query, '.'))

        if resfile.endswith('ttl'):
            resg = Graph()
            resg.load(resfile, format='turtle', publicID=resfile)
            res = RDFResultParser().parse(resg)
        elif resfile.endswith('rdf'):
            resg = Graph()
            resg.load(resfile, publicID=resfile)
            res = RDFResultParser().parse(resg)
        elif resfile.endswith('srj'):
            res = Result.parse(open(resfile[7:]), format='json')
        elif resfile.endswith('tsv'):
            res = Result.parse(open(resfile[7:]), format='tsv')

        elif resfile.endswith('csv'):
            res = Result.parse(open(resfile[7:]), format='csv')

            # CSV is lossy, round-trip our own resultset to
            # lose the same info :)

            # write bytes, read strings...
            s = BytesIO()
            res2.serialize(s, format='csv')
            print s.getvalue()
            s = StringIO(s.getvalue().decode('utf-8'))  # hmm ?
            res2 = Result.parse(s, format='csv')

        else:
            res = Result.parse(open(resfile[7:]), format='xml')

        if not DETAILEDASSERT:
            eq(res.type, res2.type, 'Types do not match')
            if res.type == 'SELECT':
                eq(set(res.vars), set(res2.vars), 'Vars do not match')
                comp = bindingsCompatible(
                    set(frozenset(x.iteritems()) for x in res.bindings),
                    set(frozenset(x.iteritems()) for x in res2.bindings))
                assert comp, 'Bindings do not match'
            elif res.type == 'ASK':
                eq(res.askAnswer, res2.askAnswer, 'Ask answer does not match')
            elif res.type in ('DESCRIBE', 'CONSTRUCT'):
                assert isomorphic(res.graph,
                                  res2.graph), 'graphs are not isomorphic!'
            else:
                raise Exception('Unknown result type: %s' % res.type)
        else:

            eq(res.type, res2.type,
               'Types do not match: %r != %r' % (res.type, res2.type))
            if res.type == 'SELECT':
                eq(
                    set(res.vars), set(res2.vars),
                    'Vars do not match: %r != %r' %
                    (set(res.vars), set(res2.vars)))
                assert bindingsCompatible(
                    set(frozenset(x.iteritems()) for x in res.bindings),
                    set(frozenset(x.iteritems()) for x in res2.bindings)
                ), 'Bindings do not match: \n%s\n!=\n%s' % (_bindingsTable(
                    res.bindings), _bindingsTable(res2.bindings))
            elif res.type == 'ASK':
                eq(
                    res.askAnswer, res2.askAnswer,
                    "Ask answer does not match: %r != %r" %
                    (res.askAnswer, res2.askAnswer))
            elif res.type in ('DESCRIBE', 'CONSTRUCT'):
                assert isomorphic(res.graph,
                                  res2.graph), 'graphs are not isomorphic!'
            else:
                raise Exception('Unknown result type: %s' % res.type)

    except Exception, e:

        if isinstance(e, AssertionError):
            failed_tests.append(uri)
            fails[str(e)] += 1
        else:
            error_tests.append(uri)
            errors[str(e)] += 1

        if DEBUG_ERROR and not isinstance(e, AssertionError) or DEBUG_FAIL:
            print "======================================"
            print uri
            print name
            print comment

            if not resfile:
                if syntax:
                    print "Positive syntax test"
                else:
                    print "Negative syntax test"

            if data:
                print "----------------- DATA --------------------"
                print ">>>", data
                print open(data[7:]).read()
            if graphdata:
                print "----------------- GRAPHDATA --------------------"
                for x in graphdata:
                    print ">>>", x
                    print open(x[7:]).read()

            print "----------------- Query -------------------"
            print ">>>", query
            print open(query[7:]).read()
            if resfile:
                print "----------------- Res -------------------"
                print ">>>", resfile
                print open(resfile[7:]).read()

            try:
                pq = parseQuery(open(query[7:]).read())
                print "----------------- Parsed ------------------"
                pprintAlgebra(translateQuery(pq, base=urljoin(query, '.')))
            except:
                print "(parser error)"

            print decodeStringEscape(unicode(e))

            import pdb
            pdb.post_mortem(sys.exc_info()[2])
            # pdb.set_trace()
            # nose.tools.set_trace()
        raise
Code example #22
                line = line.strip()
                if line == "":
                    continue

                row = ROW.parseString(line, parseAll=True)
                r.bindings.append(
                    dict(zip(r.vars, (self.convertTerm(x) for x in row))))

            return r

        except ParseException, err:
            print err.line
            print " " * (err.column - 1) + "^"
            print err

    def convertTerm(self, t):
        if isinstance(t, CompValue):
            if t.name == 'literal':
                return RDFLiteral(t.string, lang=t.lang, datatype=t.datatype)
            else:
                raise Exception("I dont know how to handle this: %s" % (t,))
        else:
            return t

if __name__ == '__main__':
    import sys
    r = Result.parse(file(sys.argv[1]), format='tsv')
    print r.vars
    print r.bindings
    #print r.serialize(format='json')
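For comparison, a Python 3 sketch (data made up for the example) that feeds the same TSV result parser from an in-memory buffer instead of a file:

from io import StringIO
from rdflib.query import Result

# Hypothetical two-column result in the SPARQL 1.1 TSV format
tsv = '?s\t?p\n<http://example.org/a>\t<http://example.org/b>\n'
r = Result.parse(StringIO(tsv), format='tsv')
print(r.vars)      # e.g. [Variable('s'), Variable('p')]
print(r.bindings)  # e.g. [{Variable('s'): URIRef('http://example.org/a'), ...}]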
Code example #23
    def _parse_response(cls, rsp):
        return list(Result.parse(rsp.buffer))
Code example #24
def test_issue_923():
    with StringIO(RESULT_SOURCE) as result_source:
        Result.parse(
            source=result_source,
            content_type="application/sparql-results+json;charset=utf-8",
        )
Code example #25
    def triples(self, spo, context=None):
        """
        - tuple **(s, p, o)**
            the triple used as filter for the SPARQL select.
            (None, None, None) means anything.
        - context **context**
            the graph effectively calling this method.

        Yields ((s, p, o), context) pairs, executing essentially a SPARQL
        query like SELECT ?subj ?pred ?obj WHERE { ?subj ?pred ?obj }

        **context** may carry three parameters
        to refine the underlying query:
         * LIMIT: an integer to limit the number of results
         * OFFSET: an integer to enable paging of results
         * ORDERBY: an instance of Variable('s'), Variable('p') or
           Variable('o'); by default the first unbound variable of the
           given triple is used

        .. warning::
        - Using LIMIT or OFFSET automatically adds an ORDER BY; without it
          the results would be retrieved in a non-deterministic order
          (it depends on how the endpoint walks the graph)
        - Using OFFSET without defining LIMIT will discard the first
          OFFSET - 1 results

        ``
        a_graph.LIMIT = limit
        a_graph.OFFSET = offset
        triple_generator = a_graph.triples(mytriple)
        # do something with the triples
        # remove LIMIT and OFFSET if not required for the next triples() calls
        del a_graph.LIMIT
        del a_graph.OFFSET
        ``
        """

        s, p, o = spo

        vars = []
        if not s:
            s = Variable('s')
            vars.append(s)

        if not p:
            p = Variable('p')
            vars.append(p)
        if not o:
            o = Variable('o')
            vars.append(o)

        if vars:
            v = ' '.join([term.n3() for term in vars])
        else:
            v = '*'

        nts = self.node_to_sparql
        query = "SELECT %s WHERE { %s %s %s }" % (v, nts(s), nts(p), nts(o))

        # The ORDER BY is necessary
        if hasattr(context, LIMIT) or hasattr(context, OFFSET) \
                or hasattr(context, ORDERBY):
            var = None
            if isinstance(s, Variable):
                var = s
            elif isinstance(p, Variable):
                var = p
            elif isinstance(o, Variable):
                var = o
            elif hasattr(context, ORDERBY) \
                    and isinstance(getattr(context, ORDERBY), Variable):
                var = getattr(context, ORDERBY)
            query = query + ' %s %s' % (ORDERBY, var.n3())

        try:
            query = query + ' LIMIT %s' % int(getattr(context, LIMIT))
        except (ValueError, TypeError, AttributeError):
            pass
        try:
            query = query + ' OFFSET %s' % int(getattr(context, OFFSET))
        except (ValueError, TypeError, AttributeError):
            pass

        self.resetQuery()
        if self._is_contextual(context):
            self.addParameter("default-graph-uri", context.identifier)
        self.timeout = self._timeout
        self.setQuery(query)

        with contextlib.closing(SPARQLWrapper.query(self).response) as res:
            result = Result.parse(res, format=self.returnFormat)

        for row in result:
            yield (row.get(s, s), row.get(p, p), row.get(o, o)), None
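As a standalone illustration of how the SELECT string above is assembled from a triple pattern (the predicate URI is invented for the example, and .n3() stands in for the store's node_to_sparql hook):

from rdflib import URIRef, Variable

# Hypothetical pattern: subject and object unbound, predicate fixed
s, p, o = None, URIRef("http://example.org/knows"), None
s = s if s else Variable('s')
p = p if p else Variable('p')
o = o if o else Variable('o')
vars = [term for term in (s, p, o) if isinstance(term, Variable)]
v = ' '.join(term.n3() for term in vars)
query = "SELECT %s WHERE { %s %s %s }" % (v, s.n3(), p.n3(), o.n3())
print(query)
# SELECT ?s ?o WHERE { ?s <http://example.org/knows> ?o }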
Code example #26
                row = ROW.parseString(line, parseAll=True)
                r.bindings.append(
                    dict(zip(r.vars, (self.convertTerm(x) for x in row))))

            return r

        except ParseException, err:
            print err.line
            print " " * (err.column - 1) + "^"
            print err

    def convertTerm(self, t):
        if t is NONE_VALUE:
            return None
        if isinstance(t, CompValue):
            if t.name == 'literal':
                return RDFLiteral(t.string, lang=t.lang, datatype=t.datatype)
            else:
                raise Exception("I dont know how to handle this: %s" % (t, ))
        else:
            return t


if __name__ == '__main__':
    import sys
    r = Result.parse(file(sys.argv[1]), format='tsv')
    print r.vars
    print r.bindings
    # print r.serialize(format='json')
Code example #27
File: test_dawg.py Project: afujii/rdflib
def query_test(t):
    uri, name, comment, data, graphdata, query, resfile, syntax = t

    # the query-eval tests refer to graphs to load by resolvable filenames
    rdflib_sparql_module.SPARQL_LOAD_GRAPHS = True

    if uri in skiptests:
        raise SkipTest()

    def skip(reason='(none)'):
        print "Skipping %s from now on." % uri
        f = open("skiptests.list", "a")
        f.write("%s\t%s\n" % (uri, reason))
        f.close()

    try:
        g = Dataset()
        if data:
            g.default_context.load(data, format=_fmt(data))

        if graphdata:
            for x in graphdata:
                g.load(x, format=_fmt(x))

        if not resfile:
            # no result - syntax test

            if syntax:
                translateQuery(parseQuery(
                    open(query[7:]).read()), base=urljoin(query, '.'))
            else:
                # negative syntax test
                try:
                    translateQuery(parseQuery(
                        open(query[7:]).read()), base=urljoin(query, '.'))

                    assert False, 'Query should not have parsed!'
                except:
                    pass  # it's fine - the query should not parse
            return

        # eval test - carry out query
        res2 = g.query(open(query[7:]).read(), base=urljoin(query, '.'))

        if resfile.endswith('ttl'):
            resg = Graph()
            resg.load(resfile, format='turtle', publicID=resfile)
            res = RDFResultParser().parse(resg)
        elif resfile.endswith('rdf'):
            resg = Graph()
            resg.load(resfile, publicID=resfile)
            res = RDFResultParser().parse(resg)
        elif resfile.endswith('srj'):
            res = Result.parse(open(resfile[7:]), format='json')
        elif resfile.endswith('tsv'):
            res = Result.parse(open(resfile[7:]), format='tsv')

        elif resfile.endswith('csv'):
            res = Result.parse(open(resfile[7:]), format='csv')

            # CSV is lossy, round-trip our own resultset to
            # lose the same info :)

            # write bytes, read strings...
            s = BytesIO()
            res2.serialize(s, format='csv')
            print s.getvalue()
            s = StringIO(s.getvalue().decode('utf-8'))  # hmm ?
            res2 = Result.parse(s, format='csv')

        else:
            res = Result.parse(open(resfile[7:]), format='xml')

        if not DETAILEDASSERT:
            eq(res.type, res2.type, 'Types do not match')
            if res.type == 'SELECT':
                eq(set(res.vars), set(res2.vars), 'Vars do not match')
                comp = bindingsCompatible(
                    set(res),
                    set(res2)
                )
                assert comp, 'Bindings do not match'
            elif res.type == 'ASK':
                eq(res.askAnswer, res2.askAnswer, 'Ask answer does not match')
            elif res.type in ('DESCRIBE', 'CONSTRUCT'):
                assert isomorphic(
                    res.graph, res2.graph), 'graphs are not isomorphic!'
            else:
                raise Exception('Unknown result type: %s' % res.type)
        else:
            eq(res.type, res2.type,
               'Types do not match: %r != %r' % (res.type, res2.type))
            if res.type == 'SELECT':
                eq(set(res.vars),
                   set(res2.vars), 'Vars do not match: %r != %r' % (
                   set(res.vars), set(res2.vars)))
                assert bindingsCompatible(
                    set(res),
                    set(res2)
                ), 'Bindings do not match: \n%s\n!=\n%s' % (
                    res.serialize(format='txt', namespace_manager=g.namespace_manager),
                    res2.serialize(format='txt', namespace_manager=g.namespace_manager))
            elif res.type == 'ASK':
                eq(res.askAnswer,
                   res2.askAnswer, "Ask answer does not match: %r != %r" % (
                   res.askAnswer, res2.askAnswer))
            elif res.type in ('DESCRIBE', 'CONSTRUCT'):
                assert isomorphic(
                    res.graph, res2.graph), 'graphs are not isomorphic!'
            else:
                raise Exception('Unknown result type: %s' % res.type)

    except Exception, e:

        if isinstance(e, AssertionError):
            failed_tests.append(uri)
            fails[str(e)] += 1
        else:
            error_tests.append(uri)
            errors[str(e)] += 1

        if DEBUG_ERROR and not isinstance(e, AssertionError) or DEBUG_FAIL:
            print "======================================"
            print uri
            print name
            print comment

            if not resfile:
                if syntax:
                    print "Positive syntax test"
                else:
                    print "Negative syntax test"

            if data:
                print "----------------- DATA --------------------"
                print ">>>", data
                print open(data[7:]).read()
            if graphdata:
                print "----------------- GRAPHDATA --------------------"
                for x in graphdata:
                    print ">>>", x
                    print open(x[7:]).read()

            print "----------------- Query -------------------"
            print ">>>", query
            print open(query[7:]).read()
            if resfile:
                print "----------------- Res -------------------"
                print ">>>", resfile
                print open(resfile[7:]).read()

            try:
                pq = parseQuery(open(query[7:]).read())
                print "----------------- Parsed ------------------"
                pprintAlgebra(translateQuery(pq, base=urljoin(query, '.')))
            except:
                print "(parser error)"

            print decodeStringEscape(unicode(e))

            import pdb
            pdb.post_mortem(sys.exc_info()[2])
            # pdb.set_trace()
            # nose.tools.set_trace()
        raise
Code example #28
                row = ROW.parseString(line, parseAll=True)
                r.bindings.append(
                    dict(zip(r.vars, (self.convertTerm(x) for x in row))))

            return r

        except ParseException as err:
            print(err.line)
            print(" " * (err.column - 1) + "^")
            print(err)

    def convertTerm(self, t):
        if t is NONE_VALUE:
            return None
        if isinstance(t, CompValue):
            if t.name == "literal":
                return RDFLiteral(t.string, lang=t.lang, datatype=t.datatype)
            else:
                raise Exception("I dont know how to handle this: %s" % (t, ))
        else:
            return t


if __name__ == "__main__":
    import sys

    r = Result.parse(source=sys.argv[1], format="tsv")
    print(r.vars)
    print(r.bindings)
    # print r.serialize(format='json')