Example #1
class HTTP(Connection):

    @classmethod
    def default_hydrant(cls, profile, graph):
        return JSONHydrant(graph)

    @classmethod
    def open(cls, profile=None, user_agent=None, on_bind=None, on_unbind=None,
             on_release=None, on_broken=None):
        """ Open an HTTP connection to a server.

        :param profile: :class:`.ConnectionProfile` detailing how and
            where to connect
        :param user_agent:
        :param on_bind:
        :param on_unbind:
        :param on_release:
        :param on_broken:
        :returns: :class:`.HTTP` connection object
        :raises: :class:`.ConnectionUnavailable` if a connection cannot
            be opened
        """
        if profile is None:
            profile = ConnectionProfile(scheme="http")
        try:
            http = cls(profile, (user_agent or http_user_agent()),
                       on_bind=on_bind, on_unbind=on_unbind, on_release=on_release)
            http._hello()
            return http
        except HTTPError as error:
            raise_from(ConnectionUnavailable("Cannot open connection to %r", profile), error)

    def __init__(self, profile, user_agent, on_bind=None, on_unbind=None, on_release=None):
        super(HTTP, self).__init__(profile, user_agent,
                                   on_bind=on_bind, on_unbind=on_unbind, on_release=on_release)
        self.http_pool = None
        self.headers = make_headers(basic_auth=":".join(profile.auth),
                                    user_agent=self.user_agent)
        self.__closed = False
        self._make_pool(profile)

    def _make_pool(self, profile):
        if profile.secure:
            from ssl import CERT_NONE, CERT_REQUIRED
            from certifi import where as cert_where
            self.http_pool = HTTPSConnectionPool(
                host=profile.host,
                port=profile.port_number,
                maxsize=1,
                block=True,
                cert_reqs=CERT_REQUIRED if profile.verify else CERT_NONE,
                ca_certs=cert_where()
            )
        else:
            self.http_pool = HTTPConnectionPool(
                host=profile.host,
                port=profile.port_number,
                maxsize=1,
                block=True,
            )

    def close(self):
        self.http_pool.close()
        self.__closed = True

    @property
    def closed(self):
        return self.__closed

    @property
    def broken(self):
        return False

    @property
    def local_port(self):
        raise NotImplementedError

    def _hello(self):
        r = self.http_pool.request(method="GET",
                                   url="/",
                                   headers=dict(self.headers))
        metadata = json_loads(r.data.decode("utf-8"))
        if "neo4j_version" in metadata:
            # {
            #   "bolt_routing" : "neo4j://localhost:7687",
            #   "transaction" : "http://localhost:7474/db/{databaseName}/tx",
            #   "bolt_direct" : "bolt://localhost:7687",
            #   "neo4j_version" : "4.0.0",
            #   "neo4j_edition" : "community"
            # }
            self._neo4j_version = Version(metadata["neo4j_version"])  # Neo4j 4.x
            self._neo4j_edition = metadata["neo4j_edition"]
        else:                               # Neo4j 3.x
            # {
            #   "data" : "http://localhost:7474/db/data/",
            #   "management" : "http://localhost:7474/db/manage/",
            #   "bolt" : "bolt://localhost:7687"
            # }
            r = self.http_pool.request(method="GET",
                                       url="/db/data/",
                                       headers=dict(self.headers))
            metadata = json_loads(r.data.decode("utf-8"))
            # {
            #   "extensions" : { },
            #   "node" : "http://localhost:7474/db/data/node",
            #   "relationship" : "http://localhost:7474/db/data/relationship",
            #   "node_index" : "http://localhost:7474/db/data/index/node",
            #   "relationship_index" : "http://localhost:7474/db/data/index/relationship",
            #   "extensions_info" : "http://localhost:7474/db/data/ext",
            #   "relationship_types" : "http://localhost:7474/db/data/relationship/types",
            #   "batch" : "http://localhost:7474/db/data/batch",
            #   "cypher" : "http://localhost:7474/db/data/cypher",
            #   "indexes" : "http://localhost:7474/db/data/schema/index",
            #   "constraints" : "http://localhost:7474/db/data/schema/constraint",
            #   "transaction" : "http://localhost:7474/db/data/transaction",
            #   "node_labels" : "http://localhost:7474/db/data/labels",
            #   "neo4j_version" : "3.5.12"
            # }
            self._neo4j_version = Version(metadata["neo4j_version"])  # Neo4j 3.x
        self.server_agent = "Neo4j/{}".format(self._neo4j_version)

        # Given the root discovery endpoint isn't authenticated, we don't
        # catch incorrect passwords here, and this wouldn't then be signalled
        # to the user until later on. So here, we make a second call to a
        # different URL for that reason only.
        r = self.http_pool.request(method="GET",
                                   url="/db/data/",
                                   headers=dict(self.headers))
        data = r.data.decode("utf-8")
        rs = HTTPResponse.from_json(r.status, data or "{}")
        rs.audit()

    def fast_forward(self, bookmark):
        raise NotImplementedError("Bookmarking is not yet supported over HTTP")

    def run_prog(self, graph_name, cypher, parameters=None, readonly=False,
                 # after=None, metadata=None, timeout=None
                 ):
        if graph_name and not self.supports_multi():
            raise TypeError("Neo4j {} does not support "
                            "named graphs".format(self.neo4j_version))
        if readonly:
            raise TypeError("Readonly transactions are not supported over HTTP")
        r = self._post(HTTPTransactionRef.autocommit_uri(graph_name), cypher, parameters)
        rs = HTTPResponse.from_json(r.status, r.data.decode("utf-8"))
        self.release()
        rs.audit()
        return HTTPResult(graph_name, rs.result())

    def begin(self, graph_name, readonly=False,
              # after=None, metadata=None, timeout=None
              ):
        if graph_name and not self.supports_multi():
            raise TypeError("Neo4j {} does not support "
                            "named graphs".format(self.neo4j_version))
        if readonly:
            raise TypeError("Readonly transactions are not supported over HTTP")
        # if after:
        #     raise TypeError("Bookmarks are not supported over HTTP")
        # if metadata:
        #     raise TypeError("Transaction metadata is not supported over HTTP")
        # if timeout:
        #     raise TypeError("Transaction timeouts are not supported over HTTP")
        r = self._post(HTTPTransactionRef.begin_uri(graph_name))
        rs = HTTPResponse.from_json(r.status, r.data.decode("utf-8"))
        location_path = urlsplit(r.headers["Location"]).path
        tx = HTTPTransactionRef(graph_name, location_path.rpartition("/")[-1])
        self.release()
        rs.audit(tx)
        return tx

    def commit(self, tx):
        if tx.broken:
            raise ValueError("Transaction is broken")
        try:
            r = self._post(tx.commit_uri())
        except ProtocolError:
            tx.mark_broken()
            raise
        else:
            rs = HTTPResponse.from_json(r.status, r.data.decode("utf-8"))
            self.release()
            rs.audit(tx)
            return Bookmark()

    def rollback(self, tx):
        if tx.broken:
            raise ValueError("Transaction is broken")
        try:
            r = self._delete(tx.uri())
        except ProtocolError:
            tx.mark_broken()
            raise
        else:
            rs = HTTPResponse.from_json(r.status, r.data.decode("utf-8"))
            self.release()
            rs.audit(tx)
            return Bookmark()

    def run_query(self, tx, cypher, parameters=None):
        try:
            r = self._post(tx.uri(), cypher, parameters)
        except ProtocolError:
            tx.mark_broken()
            raise
        else:
            rs = HTTPResponse.from_json(r.status, r.data.decode("utf-8"))
            self.release()
            rs.audit(tx)
            return HTTPResult(tx.graph_name, rs.result(), profile=self.profile)

    def pull(self, result, n=-1):
        pass

    def discard(self, result, n=-1):
        pass

    def sync(self, result):
        pass

    def fetch(self, result):
        record = result.take_record()
        return record

    def _post(self, url, statement=None, parameters=None):
        if statement:
            statements = [
                OrderedDict([
                    ("statement", statement),
                    ("parameters", dehydrate(parameters or {})),
                    ("resultDataContents", ["REST"]),
                    ("includeStats", True),
                ])
            ]
        else:
            statements = []
        try:
            return self.http_pool.request(method="POST",
                                          url=url,
                                          headers=dict(self.headers, **{"Content-Type": "application/json"}),
                                          body=json_dumps({"statements": statements}))
        except HTTPError as error:
            raise_from(ProtocolError("Failed to POST to %r" % url), error)

    def _delete(self, url):
        try:
            return self.http_pool.request(method="DELETE",
                                          url=url,
                                          headers=dict(self.headers))
        except HTTPError as error:
            raise_from(ProtocolError("Failed to DELETE %r" % url), error)

    def supports_multi(self):
        return self._neo4j_version >= Version("4.0")
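
A minimal usage sketch for the class above, not part of the original file: it relies on the default-profile fallback inside open() (ConnectionProfile(scheme="http")) and uses placeholder Cypher text.

# Hypothetical usage of the HTTP connection defined above; the Cypher
# statement is a placeholder and a local Neo4j server is assumed.
cx = HTTP.open()                                   # connect with the default profile
try:
    result = cx.run_prog(None, "RETURN 1 AS n")    # autocommit run, default graph
    record = cx.fetch(result)                      # take a single record
finally:
    cx.close()
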
Example #2
class TestConnectionPool(HTTPDummyServerTestCase):
    def setup_method(self, method):
        self.pool = HTTPConnectionPool(self.host, self.port)

    def teardown_method(self):
        self.pool.close()

    def test_get(self):
        r = self.pool.request("GET",
                              "/specific_method",
                              fields={"method": "GET"})
        assert r.status == 200, r.data

    def test_post_url(self):
        r = self.pool.request("POST",
                              "/specific_method",
                              fields={"method": "POST"})
        assert r.status == 200, r.data

    def test_urlopen_put(self):
        r = self.pool.urlopen("PUT", "/specific_method?method=PUT")
        assert r.status == 200, r.data

    def test_wrong_specific_method(self):
        # To make sure the dummy server is actually returning failed responses
        r = self.pool.request("GET",
                              "/specific_method",
                              fields={"method": "POST"})
        assert r.status == 400, r.data

        r = self.pool.request("POST",
                              "/specific_method",
                              fields={"method": "GET"})
        assert r.status == 400, r.data

    def test_upload(self):
        data = "I'm in ur multipart form-data, hazing a cheezburgr"
        fields = {
            "upload_param": "filefield",
            "upload_filename": "lolcat.txt",
            "upload_size": len(data),
            "filefield": ("lolcat.txt", data),
        }

        r = self.pool.request("POST", "/upload", fields=fields)
        assert r.status == 200, r.data

    def test_one_name_multiple_values(self):
        fields = [("foo", "a"), ("foo", "b")]

        # urlencode
        r = self.pool.request("GET", "/echo", fields=fields)
        assert r.data == b"foo=a&foo=b"

        # multipart
        r = self.pool.request("POST", "/echo", fields=fields)
        assert r.data.count(b'name="foo"') == 2

    def test_request_method_body(self):
        body = b"hi"
        r = self.pool.request("POST", "/echo", body=body)
        assert r.data == body

        fields = [("hi", "hello")]
        with pytest.raises(TypeError):
            self.pool.request("POST", "/echo", body=body, fields=fields)

    def test_unicode_upload(self):
        fieldname = u("myfile")
        filename = u("\xe2\x99\xa5.txt")
        data = u("\xe2\x99\xa5").encode("utf8")
        size = len(data)

        fields = {
            u("upload_param"): fieldname,
            u("upload_filename"): filename,
            u("upload_size"): size,
            fieldname: (filename, data),
        }

        r = self.pool.request("POST", "/upload", fields=fields)
        assert r.status == 200, r.data

    def test_nagle(self):
        """ Test that connections have TCP_NODELAY turned on """
        # This test needs to be here in order to be run. socket.create_connection actually tries
        # to connect to the host provided so we need a dummyserver to be running.
        with HTTPConnectionPool(self.host, self.port) as pool:
            try:
                conn = pool._get_conn()
                pool._make_request(conn, "GET", "/")
                tcp_nodelay_setting = conn._sock.getsockopt(
                    socket.IPPROTO_TCP, socket.TCP_NODELAY)
                assert tcp_nodelay_setting
            finally:
                conn.close()

    def test_socket_options(self):
        """Test that connections accept socket options."""
        # This test needs to be here in order to be run. socket.create_connection actually tries to
        # connect to the host provided so we need a dummyserver to be running.
        pool = HTTPConnectionPool(
            self.host,
            self.port,
            socket_options=[(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)],
        )
        conn = pool._new_conn()
        conn.connect()
        s = conn._sock
        using_keepalive = s.getsockopt(socket.SOL_SOCKET,
                                       socket.SO_KEEPALIVE) > 0
        assert using_keepalive
        s.close()

    def test_disable_default_socket_options(self):
        """Test that passing None disables all socket options."""
        # This test needs to be here in order to be run. socket.create_connection actually tries
        # to connect to the host provided so we need a dummyserver to be running.
        pool = HTTPConnectionPool(self.host, self.port, socket_options=None)
        conn = pool._new_conn()
        conn.connect()
        s = conn._sock
        using_nagle = s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) == 0
        assert using_nagle
        s.close()

    def test_defaults_are_applied(self):
        """Test that modifying the default socket options works."""
        # This test needs to be here in order to be run. socket.create_connection actually tries
        # to connect to the host provided so we need a dummyserver to be running.
        with HTTPConnectionPool(self.host, self.port) as pool:
            # Get the HTTPConnection instance
            conn = pool._new_conn()
            try:
                # Update the default socket options
                conn.default_socket_options += [(socket.SOL_SOCKET,
                                                 socket.SO_KEEPALIVE, 1)]
                conn.connect()
                s = conn._sock
                nagle_disabled = (s.getsockopt(socket.IPPROTO_TCP,
                                               socket.TCP_NODELAY) > 0)
                using_keepalive = (s.getsockopt(socket.SOL_SOCKET,
                                                socket.SO_KEEPALIVE) > 0)
                assert nagle_disabled
                assert using_keepalive
            finally:
                conn.close()
                s.close()

    def test_connection_error_retries(self):
        """ ECONNREFUSED error should raise a connection error, with retries """
        port = find_unused_port()
        pool = HTTPConnectionPool(self.host, port)
        try:
            pool.request("GET", "/", retries=Retry(connect=3))
            self.fail("Should have failed with a connection error.")
        except MaxRetryError as e:
            assert type(e.reason) == NewConnectionError

    def test_timeout_success(self):
        timeout = Timeout(connect=3, read=5, total=None)
        with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
            pool.request("GET", "/")
            # This should not raise a "Timeout already started" error
            pool.request("GET", "/")

        with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
            # This should also not raise a "Timeout already started" error
            pool.request("GET", "/")

        timeout = Timeout(total=None)
        with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
            pool.request("GET", "/")

    def test_bad_connect(self):
        pool = HTTPConnectionPool("badhost.invalid", self.port)
        try:
            pool.request("GET", "/", retries=5)
            self.fail("should raise timeout exception here")
        except MaxRetryError as e:
            assert type(e.reason) == NewConnectionError

    def test_keepalive(self):
        with HTTPConnectionPool(self.host, self.port, block=True,
                                maxsize=1) as pool:
            r = pool.request("GET", "/keepalive?close=0")
            r = pool.request("GET", "/keepalive?close=0")

            assert r.status == 200
            assert pool.num_connections == 1
            assert pool.num_requests == 2

    def test_keepalive_close(self):
        with HTTPConnectionPool(self.host,
                                self.port,
                                block=True,
                                maxsize=1,
                                timeout=2) as pool:
            r = pool.request("GET",
                             "/keepalive?close=1",
                             retries=0,
                             headers={"Connection": "close"})

            assert pool.num_connections == 1

            # The dummyserver will have responded with Connection:close,
            # and httplib will properly clean up the socket.

            # We grab the HTTPConnection object straight from the Queue,
            # because _get_conn() is where the check & reset occurs
            # pylint: disable-msg=W0212
            conn = pool.pool.get()
            assert conn._sock is None
            pool._put_conn(conn)

            # Now with keep-alive
            r = pool.request(
                "GET",
                "/keepalive?close=0",
                retries=0,
                headers={"Connection": "keep-alive"},
            )

            # The dummyserver responded with Connection:keep-alive, so the
            # connection persists.
            conn = pool.pool.get()
            assert conn._sock is not None
            pool._put_conn(conn)

            # Another request asking the server to close the connection. This one
            # should get cleaned up for the next request.
            r = pool.request("GET",
                             "/keepalive?close=1",
                             retries=0,
                             headers={"Connection": "close"})

            assert r.status == 200

            conn = pool.pool.get()
            assert conn._sock is None
            pool._put_conn(conn)

            # Next request
            r = pool.request("GET", "/keepalive?close=0")

    def test_post_with_urlencode(self):
        data = {"banana": "hammock", "lol": "cat"}
        r = self.pool.request("POST",
                              "/echo",
                              fields=data,
                              encode_multipart=False)
        assert r.data.decode("utf-8") == urlencode(data)

    def test_post_with_multipart(self):
        data = {"banana": "hammock", "lol": "cat"}
        r = self.pool.request("POST",
                              "/echo",
                              fields=data,
                              encode_multipart=True)
        body = r.data.split(b"\r\n")

        encoded_data = encode_multipart_formdata(data)[0]
        expected_body = encoded_data.split(b"\r\n")

        # TODO: Get rid of extra parsing stuff when you can specify
        # a custom boundary to encode_multipart_formdata
        """
        We need to loop the return lines because a timestamp is attached
        from within encode_multipart_formdata. When the server echoes back
        the data, it has the timestamp from when the data was encoded, which
        is not equivalent to when we run encode_multipart_formdata on
        the data again.
        """
        for i, line in enumerate(body):
            if line.startswith(b"--"):
                continue

            assert body[i] == expected_body[i]

    def test_post_with_multipart__iter__(self):
        data = {"hello": "world"}
        r = self.pool.request(
            "POST",
            "/echo",
            fields=data,
            preload_content=False,
            multipart_boundary="boundary",
            encode_multipart=True,
        )

        chunks = [chunk for chunk in r]
        assert chunks == [
            b"--boundary\r\n",
            b'Content-Disposition: form-data; name="hello"\r\n',
            b"\r\n",
            b"world\r\n",
            b"--boundary--\r\n",
        ]

    def test_check_gzip(self):
        r = self.pool.request("GET",
                              "/encodingrequest",
                              headers={"accept-encoding": "gzip"})
        assert r.headers.get("content-encoding") == "gzip"
        assert r.data == b"hello, world!"

    def test_check_deflate(self):
        r = self.pool.request("GET",
                              "/encodingrequest",
                              headers={"accept-encoding": "deflate"})
        assert r.headers.get("content-encoding") == "deflate"
        assert r.data == b"hello, world!"

    def test_bad_decode(self):
        with pytest.raises(DecodeError):
            self.pool.request(
                "GET",
                "/encodingrequest",
                headers={"accept-encoding": "garbage-deflate"},
            )

        with pytest.raises(DecodeError):
            self.pool.request("GET",
                              "/encodingrequest",
                              headers={"accept-encoding": "garbage-gzip"})

    def test_connection_count(self):
        with HTTPConnectionPool(self.host, self.port, maxsize=1) as pool:
            pool.request("GET", "/")
            pool.request("GET", "/")
            pool.request("GET", "/")

            assert pool.num_connections == 1
            assert pool.num_requests == 3

    def test_connection_count_bigpool(self):
        with HTTPConnectionPool(self.host, self.port, maxsize=16) as http_pool:
            http_pool.request("GET", "/")
            http_pool.request("GET", "/")
            http_pool.request("GET", "/")

            assert http_pool.num_connections == 1
            assert http_pool.num_requests == 3

    def test_partial_response(self):
        with HTTPConnectionPool(self.host, self.port, maxsize=1) as pool:
            req_data = {"lol": "cat"}
            resp_data = urlencode(req_data).encode("utf-8")

            r = pool.request("GET",
                             "/echo",
                             fields=req_data,
                             preload_content=False)

            assert r.read(5) == resp_data[:5]
            assert r.read() == resp_data[5:]

    def test_lazy_load_twice(self):
        # This test is sad and confusing. Need to figure out what's
        # going on with partial reads and socket reuse.

        pool = HTTPConnectionPool(self.host,
                                  self.port,
                                  block=True,
                                  maxsize=1,
                                  timeout=2)

        payload_size = 1024 * 2
        first_chunk = 512

        boundary = "foo"

        req_data = {"count": "a" * payload_size}
        resp_data = encode_multipart_formdata(req_data, boundary=boundary)[0]

        req2_data = {"count": "b" * payload_size}
        resp2_data = encode_multipart_formdata(req2_data, boundary=boundary)[0]

        r1 = pool.request(
            "POST",
            "/echo",
            fields=req_data,
            multipart_boundary=boundary,
            preload_content=False,
        )

        first_data = r1.read(first_chunk)
        assert len(first_data) > 0
        assert first_data == resp_data[:len(first_data)]

        try:
            r2 = pool.request(
                "POST",
                "/echo",
                fields=req2_data,
                multipart_boundary=boundary,
                preload_content=False,
                pool_timeout=0.001,
            )

            # This branch should generally bail here, but maybe someday it will
            # work? Perhaps by some sort of magic. Consider it a TODO.

            second_data = r2.read(first_chunk)
            assert len(second_data) > 0
            assert second_data == resp2_data[:len(second_data)]

            assert r1.read() == resp_data[len(first_data):]
            assert r2.read() == resp2_data[len(second_data):]
            assert pool.num_requests == 2

        except EmptyPoolError:
            assert r1.read() == resp_data[len(first_data):]
            assert pool.num_requests == 1

        assert pool.num_connections == 1

    def test_for_double_release(self):
        MAXSIZE = 5

        # Check default state
        with HTTPConnectionPool(self.host, self.port, maxsize=MAXSIZE) as pool:
            assert pool.num_connections == 0
            assert pool.pool.qsize() == MAXSIZE

            # Make an empty slot for testing
            pool.pool.get()
            assert pool.pool.qsize() == MAXSIZE - 1

            # Check state after simple request
            pool.urlopen("GET", "/")
            assert pool.pool.qsize() == MAXSIZE - 1

            # Check state without release
            pool.urlopen("GET", "/", preload_content=False)
            assert pool.pool.qsize() == MAXSIZE - 2

            pool.urlopen("GET", "/")
            assert pool.pool.qsize() == MAXSIZE - 2

            # Check state after read
            pool.urlopen("GET", "/").data
            assert pool.pool.qsize() == MAXSIZE - 2

            pool.urlopen("GET", "/")
            assert pool.pool.qsize() == MAXSIZE - 2

    def test_connections_arent_released(self):
        MAXSIZE = 5
        pool = HTTPConnectionPool(self.host, self.port, maxsize=MAXSIZE)
        assert pool.pool.qsize() == MAXSIZE

        pool.request("GET", "/", preload_content=False)
        assert pool.pool.qsize() == MAXSIZE - 1

    def test_dns_error(self):
        pool = HTTPConnectionPool("thishostdoesnotexist.invalid",
                                  self.port,
                                  timeout=0.001)
        with pytest.raises(MaxRetryError):
            pool.request("GET", "/test", retries=2)

    def test_percent_encode_invalid_target_chars(self):
        with HTTPConnectionPool(self.host, self.port) as pool:
            r = pool.request("GET", "/echo_params?q=\r&k=\n \n")
            assert r.data == b"[('k', '\\n \\n'), ('q', '\\r')]"

    def test_source_address(self):
        for addr, is_ipv6 in VALID_SOURCE_ADDRESSES:
            if is_ipv6 and not HAS_IPV6_AND_DNS:
                warnings.warn("No IPv6 support: skipping.", NoIPv6Warning)
                continue
            with HTTPConnectionPool(self.host,
                                    self.port,
                                    source_address=addr,
                                    retries=False) as pool:
                r = pool.request("GET", "/source_address")
                assert r.data == b(addr[0])

    def test_source_address_error(self):
        for addr in INVALID_SOURCE_ADDRESSES:
            pool = HTTPConnectionPool(self.host,
                                      self.port,
                                      source_address=addr,
                                      retries=False)
            with pytest.raises(NewConnectionError):
                pool.request("GET", "/source_address?{0}".format(addr))

    def test_stream_keepalive(self):
        x = 2

        for _ in range(x):
            response = self.pool.request(
                "GET",
                "/chunked",
                headers={"Connection": "keep-alive"},
                preload_content=False,
                retries=False,
            )
            for chunk in response.stream(3):
                assert chunk == b"123"

        assert self.pool.num_connections == 1
        assert self.pool.num_requests == x

    def test_chunked_gzip(self):
        response = self.pool.request("GET",
                                     "/chunked_gzip",
                                     preload_content=False,
                                     decode_content=True)

        assert b"123" * 4 == response.read()

    def test_mixed_case_hostname(self):
        with HTTPConnectionPool("LoCaLhOsT", self.port) as pool:
            response = pool.request("GET", "http://LoCaLhOsT:%d/" % self.port)
            assert response.status == 200
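
The test class above exercises the core urllib3 pooling behaviours (keep-alive reuse, multipart encoding, streaming reads). A compact sketch of that everyday pattern, with placeholder host and port:

# Plain urllib3 usage mirroring the patterns tested above; "localhost"
# and 8080 stand in for a real server.
from urllib3 import HTTPConnectionPool

with HTTPConnectionPool("localhost", 8080, maxsize=1, block=True) as pool:
    # Simple GET with query fields; the body is preloaded by default.
    r = pool.request("GET", "/echo", fields={"foo": "bar"})
    print(r.status, r.data)

    # Streaming read: keep the body unloaded, consume it manually,
    # then hand the connection back to the pool for reuse.
    r = pool.request("GET", "/echo", fields={"foo": "bar"}, preload_content=False)
    first_bytes = r.read(5)
    rest = r.read()
    r.release_conn()
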
Example #3
class HTTPConnector(Connector):

    scheme = "http"

    headers = None

    @property
    def server_agent(self):
        r = self.pool.request(method="GET",
                              url="/db/data/",
                              headers=dict(self.headers))
        return "Neo4j/{neo4j_version}".format(
            **json_loads(r.data.decode("utf-8")))

    def open(self, cx_data):
        self.pool = HTTPConnectionPool(host=cx_data["host"],
                                       port=cx_data["port"])
        self.headers = make_headers(basic_auth=":".join(cx_data["auth"]))

    def close(self):
        self.pool.close()

    def _post(self, url, statement=None, parameters=None):
        if statement:
            statements = [
                OrderedDict([
                    ("statement", statement),
                    ("parameters", parameters or {}),
                    ("resultDataContents", ["REST"]),
                    ("includeStats", True),
                ])
            ]
        else:
            statements = []
        return self.pool.request(method="POST",
                                 url=url,
                                 headers=dict(
                                     self.headers,
                                     **{"Content-Type": "application/json"}),
                                 body=json_dumps({"statements": statements}))

    def _delete(self, url):
        return self.pool.request(method="DELETE",
                                 url=url,
                                 headers=dict(self.headers))

    def run(self,
            statement,
            parameters=None,
            tx=None,
            graph=None,
            keys=None,
            entities=None):
        hydrator = JSONHydrator(version="rest",
                                graph=graph,
                                keys=keys,
                                entities=entities)
        r = self._post("/db/data/transaction/%s" % (tx or "commit"), statement,
                       hydrator.dehydrate(parameters))
        assert r.status == 200  # TODO: other codes
        try:
            raw_result = hydrator.hydrate_result(r.data.decode("utf-8"))
        except HydrationError as e:
            from py2neo.database import GraphError  # TODO: breaks abstraction layers :(
            if tx is not None:
                self.transactions.remove(tx)
            raise GraphError.hydrate(e.args[0])
        else:
            result = CypherResult({
                "connection": self.connection_data,
                "fields": raw_result.get("columns"),
                "plan": raw_result.get("plan"),
                "stats": raw_result.get("stats"),
            })
            hydrator.keys = result.keys()
            result.append_records(
                hydrator.hydrate(record[hydrator.version])
                for record in raw_result["data"])
            result.done()
            return result

    def begin(self):
        r = self._post("/db/data/transaction")
        if r.status == 201:
            location_path = urlsplit(r.headers["Location"]).path
            tx = location_path.rpartition("/")[-1]
            self.transactions.add(tx)
            return tx
        else:
            raise RuntimeError("Can't begin a new transaction")

    def commit(self, tx):
        self._assert_valid_tx(tx)
        self.transactions.remove(tx)
        self._post("/db/data/transaction/%s/commit" % tx)

    def rollback(self, tx):
        self._assert_valid_tx(tx)
        self.transactions.remove(tx)
        self._delete("/db/data/transaction/%s" % tx)

    def sync(self, tx):
        pass
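
A hedged sketch of how this connector might be driven. The constructor and the transactions attribute come from the Connector base class, which is not shown here, so the bare instantiation is an assumption; host, port, and credentials are placeholders.

# Hypothetical driver; assumes the Connector base class allows bare
# construction and provides the `transactions` set used above.
connector = HTTPConnector()
connector.open({"host": "localhost", "port": 7474,
                "auth": ("neo4j", "password")})
try:
    tx = connector.begin()                          # POST /db/data/transaction
    result = connector.run("RETURN 1 AS n", tx=tx)  # run inside the transaction
    connector.commit(tx)                            # POST .../<tx>/commit
finally:
    connector.close()
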
Example #4
class HTTP(Connection):
    @classmethod
    def default_hydrant(cls, profile, graph):
        return JSONHydrant(graph)

    @classmethod
    def open(cls,
             profile,
             user_agent=None,
             on_bind=None,
             on_unbind=None,
             on_release=None):
        http = cls(profile, (user_agent or http_user_agent()),
                   on_bind=on_bind,
                   on_unbind=on_unbind,
                   on_release=on_release)
        http._hello()
        return http

    def __init__(self,
                 profile,
                 user_agent,
                 on_bind=None,
                 on_unbind=None,
                 on_release=None):
        super(HTTP, self).__init__(profile,
                                   user_agent,
                                   on_bind=on_bind,
                                   on_unbind=on_unbind,
                                   on_release=on_release)
        self.http_pool = None
        self.headers = make_headers(basic_auth=":".join(profile.auth))
        self._transactions = set()
        self.__closed = False
        self._make_pool(profile)

    def _make_pool(self, profile):
        if profile.secure:
            from ssl import CERT_NONE, CERT_REQUIRED
            from certifi import where as cert_where
            self.http_pool = HTTPSConnectionPool(
                host=profile.host,
                port=profile.port_number,
                maxsize=1,
                block=True,
                cert_reqs=CERT_REQUIRED if profile.verify else CERT_NONE,
                ca_certs=cert_where())
        else:
            self.http_pool = HTTPConnectionPool(
                host=profile.host,
                port=profile.port_number,
                maxsize=1,
                block=True,
            )

    def close(self):
        self.http_pool.close()
        self.__closed = True

    @property
    def closed(self):
        return self.__closed

    @property
    def broken(self):
        return False

    @property
    def local_port(self):
        raise NotImplementedError

    def _hello(self):
        r = self.http_pool.request(method="GET",
                                   url="/",
                                   headers=dict(self.headers))
        metadata = json_loads(r.data.decode("utf-8"))
        if "neo4j_version" in metadata:
            # {
            #   "bolt_routing" : "neo4j://localhost:7687",
            #   "transaction" : "http://localhost:7474/db/{databaseName}/tx",
            #   "bolt_direct" : "bolt://localhost:7687",
            #   "neo4j_version" : "4.0.0",
            #   "neo4j_edition" : "community"
            # }
            self.neo4j_version = Version(
                metadata["neo4j_version"])  # Neo4j 4.x
        else:  # Neo4j 3.x
            # {
            #   "data" : "http://localhost:7474/db/data/",
            #   "management" : "http://localhost:7474/db/manage/",
            #   "bolt" : "bolt://localhost:7687"
            # }
            r = self.http_pool.request(method="GET",
                                       url="/db/data/",
                                       headers=dict(self.headers))
            metadata = json_loads(r.data.decode("utf-8"))
            # {
            #   "extensions" : { },
            #   "node" : "http://localhost:7474/db/data/node",
            #   "relationship" : "http://localhost:7474/db/data/relationship",
            #   "node_index" : "http://localhost:7474/db/data/index/node",
            #   "relationship_index" : "http://localhost:7474/db/data/index/relationship",
            #   "extensions_info" : "http://localhost:7474/db/data/ext",
            #   "relationship_types" : "http://localhost:7474/db/data/relationship/types",
            #   "batch" : "http://localhost:7474/db/data/batch",
            #   "cypher" : "http://localhost:7474/db/data/cypher",
            #   "indexes" : "http://localhost:7474/db/data/schema/index",
            #   "constraints" : "http://localhost:7474/db/data/schema/constraint",
            #   "transaction" : "http://localhost:7474/db/data/transaction",
            #   "node_labels" : "http://localhost:7474/db/data/labels",
            #   "neo4j_version" : "3.5.12"
            # }
            self.neo4j_version = Version(
                metadata["neo4j_version"])  # Neo4j 3.x
        self.server_agent = "Neo4j/{}".format(self.neo4j_version)

    def auto_run(self,
                 graph_name,
                 cypher,
                 parameters=None,
                 readonly=False,
                 after=None,
                 metadata=None,
                 timeout=None):
        if graph_name and not self.supports_multi():
            raise TypeError("Neo4j {} does not support "
                            "named graphs".format(self.neo4j_version))
        r = self._post(HTTPTransaction.autocommit_uri(graph_name), cypher,
                       parameters)
        assert r.status == 200  # TODO: other codes
        rs = HTTPResponse.from_json(r.data.decode("utf-8"))
        self.release()
        rs.audit()
        return HTTPResult(graph_name, rs.result())

    def begin(self,
              graph_name,
              readonly=False,
              after=None,
              metadata=None,
              timeout=None):
        if graph_name and not self.supports_multi():
            raise TypeError("Neo4j {} does not support "
                            "named graphs".format(self.neo4j_version))
        if readonly:
            raise TypeError(
                "Readonly transactions are not supported over HTTP")
        if after:
            raise TypeError("Bookmarks are not supported over HTTP")
        if metadata:
            raise TypeError("Transaction metadata is not supported over HTTP")
        if timeout:
            raise TypeError("Transaction timeouts are not supported over HTTP")
        r = self._post(HTTPTransaction.begin_uri(graph_name))
        if r.status != 201:
            raise RuntimeError(
                "Can't begin a new transaction")  # TODO: better error
        rs = HTTPResponse.from_json(r.data.decode("utf-8"))
        location_path = urlsplit(r.headers["Location"]).path
        tx = HTTPTransaction(graph_name, location_path.rpartition("/")[-1])
        self._transactions.add(tx)
        self.release()
        rs.audit(tx)
        return tx

    def commit(self, tx):
        self._assert_transaction_open(tx)
        self._transactions.remove(tx)
        r = self._post(tx.commit_uri())
        assert r.status == 200  # TODO: other codes
        rs = HTTPResponse.from_json(r.data.decode("utf-8"))
        self.release()
        rs.audit(tx)
        return Bookmark()

    def rollback(self, tx):
        self._assert_transaction_open(tx)
        self._transactions.remove(tx)
        r = self._delete(tx.uri())
        assert r.status == 200  # TODO: other codes
        rs = HTTPResponse.from_json(r.data.decode("utf-8"))
        self.release()
        rs.audit(tx)
        return Bookmark()

    def run_in_tx(self, tx, cypher, parameters=None):
        r = self._post(tx.uri(), cypher, parameters)
        assert r.status == 200  # TODO: other codes
        rs = HTTPResponse.from_json(r.data.decode("utf-8"))
        self.release()
        rs.audit(tx)
        return HTTPResult(tx.graph_name, rs.result(), profile=self.profile)

    def pull(self, result, n=-1):
        pass

    def discard(self, result, n=-1):
        pass

    def sync(self, result):
        pass

    def fetch(self, result):
        record = result.take_record()
        return record

    def _assert_transaction_open(self, tx):
        if tx not in self._transactions:
            raise ValueError("Transaction %r is not open on this connection",
                             tx)

    def _post(self, url, statement=None, parameters=None):
        if statement:
            statements = [
                OrderedDict([
                    ("statement", statement),
                    ("parameters", parameters or {}),
                    ("resultDataContents", ["REST"]),
                    ("includeStats", True),
                ])
            ]
        else:
            statements = []
        return self.http_pool.request(
            method="POST",
            url=url,
            headers=dict(self.headers, **{"Content-Type": "application/json"}),
            body=json_dumps({"statements": statements}))

    def _delete(self, url):
        return self.http_pool.request(method="DELETE",
                                      url=url,
                                      headers=dict(self.headers))

    def supports_multi(self):
        return self.neo4j_version >= Version("4.0")
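
By way of contrast with the autocommit path in Example #1, a hedged sketch of the explicit transaction flow this variant exposes; it assumes the same ConnectionProfile type as Example #1 and that it accepts host, port, and auth settings alongside the scheme keyword. All values are placeholders.

# Hypothetical transaction flow; the profile settings and Cypher text
# are placeholders, not part of the original listing.
profile = ConnectionProfile(scheme="http", host="localhost", port=7474,
                            auth=("neo4j", "password"))
cx = HTTP.open(profile)
tx = cx.begin(None)                                # explicit transaction, default graph
result = cx.run_in_tx(tx, "CREATE (n:Example) RETURN n")
bookmark = cx.commit(tx)                           # returns an empty Bookmark
cx.close()
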
Example #5
class DXClient:
    def __init__(self, config):
        self.config = config

        logging.info('Start Pool connection to host.')
        options = {'timeout': 0.0000001, 'retries': 0}
        if not self.config.debug:
            self.pool = HTTPSConnectionPool(self.config.api_url, **options)
        else:
            self.pool = HTTPConnectionPool(self.config.api_url,
                                           port=self.config.api_port,
                                           **options)
        # initialize redis database
        self.store = FeatureFlagStore()

    def get_flag(self, flag_key, user, default=None, client_debug=False):
        self.flag_key = flag_key
        self.default = default
        self.client_debug = client_debug
        self.user = user

        if not self.config.offline:
            try:
                r = self.pool.request(
                    'PUT',
                    '/api/client/v1/get_point/{}/{}/'.format(
                        self.config.sdk_key, self.flag_key),
                    headers={'Content-Type': 'application/json'},
                    body=json.dumps(user),
                    timeout=1)
                data = json.loads(r.data)

                if r.status == 200:
                    self.ready_status = self.store.decode_status(data)

                    if hasattr(self.store, 'redis_db') and self.store.redis_db:
                        self.store.init_user(self.config, user, self.flag_key)
                        self.store.save(self.ready_status)

                    return self.ready_status

                elif r.status == 400:
                    raise TypeError(r.data)
                elif r.status == 500:
                    raise ConnectionError(
                        "Error when try to connect to server.")
                    # TODO Добавить логику перевода клиента в статус offline
            except urllib3.exceptions.MaxRetryError:
                self.config.offline = True
                logging.info(
                    "deploy-x.com connection lost or have trouble with internet connection. Offline mode was set On"
                )
                return self._processing_network_error()

        elif self.config.offline:
            return self._processing_network_error()

    def _processing_network_error(self):
        """
        Called when the client is already in offline mode or hits its first
        network error. Depending on client_debug, return either the cached
        Redis value or the default value.
        """
        if self.client_debug:
            final_result = self._get_default_value()
            if final_result is None:
                final_result = self._get_redis_value()
            if final_result is None:
                raise AttributeError(
                    'You have to provide a default value for dxclient.get or install the redis cache.'
                )
            return final_result
        else:
            final_result = self._get_redis_value()
            if final_result is None:
                final_result = self._get_default_value()
            if final_result is None:
                raise AttributeError(
                    'You have to provide a default value for dxclient.get or install the redis cache.'
                )
            return final_result

    def _get_default_value(self):
        if self.default:
            logging.info('Offline. Return default value')
            return self.default
        return None

    def _get_redis_value(self):
        if self.store.redis_db:
            user_unique = '-'
            if 'unique_identifier' in self.user:
                user_unique = self.user['unique_identifier']
            elif 'UNIQUE_IDENTIFIER' in self.user:
                user_unique = self.user['UNIQUE_IDENTIFIER']

            status = self.store.r.hget(
                user_unique, '{}_{}'.format(self.config.sdk_key,
                                            self.flag_key))
            if status:
                logging.info('Offline. Get value from redis')
                return self.store.decode_byte_status(status)
        return None

    def close(self):
        if self.pool.num_connections:
            self.pool.close()
        logging.info('Closing connection in initialized client instance.')
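
A hedged usage sketch for the client above. Config here is a hypothetical stand-in for the project's real settings object and only defines the attributes DXClient actually reads; the flag key and user payload are placeholders.

# Hypothetical usage; Config stands in for the real configuration object.
class Config:
    debug = True            # use plain HTTP against a local server
    offline = False
    api_url = "localhost"
    api_port = 8000
    sdk_key = "my-sdk-key"

client = DXClient(Config())
flag = client.get_flag("new-checkout",
                       {"unique_identifier": "user-42"},
                       default=False)
client.close()
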
Example #6
class PrometheusCounters(CachedCounters, IReferenceable, IOpenable):
    """
    Performance counters that send their metrics to Prometheus service.

    The component is normally used in passive mode in conjunction with :class:`PrometheusMetricsService <pip_services3_prometheus.services.PrometheusMetricsService.PrometheusMetricsService>`.
    Alternatively, when connection parameters are set, it can push metrics to the Prometheus PushGateway.

    ### Configuration parameters ###
        - connection(s):
          - discovery_key:         (optional) a key to retrieve the connection from :class:`IDiscovery <pip_services3_components.connect.IDiscovery.IDiscovery>`
          - protocol:              connection protocol: http or https
          - host:                  host name or IP address
          - port:                  port number
          - uri:                   resource URI or connection string with all parameters in it
        - options:
          - retries:               number of retries (default: 3)
          - connect_timeout:       connection timeout in milliseconds (default: 10 sec)
          - timeout:               invocation timeout in milliseconds (default: 10 sec)

    ### References ###
        - `*:logger:*:*:1.0`           (optional) :class:`ILogger <pip_services3_components.log.ILogger.ILogger>` components to pass log messages
        - `*:counters:*:*:1.0`         (optional) :class:`ICounters <pip_services3_components.count.ICounters.ICounters>` components to pass collected measurements
        - `*:discovery:*:*:1.0`        (optional) :class:`IDiscovery <pip_services3_components.connect.IDiscovery.IDiscovery>` services to resolve connection

    See :class:`RestService <pip_services3_rpc.services.RestService.RestService>`, :class:`CommandableHttpService <pip_services3_rpc.services.CommandableHttpService.CommandableHttpService>`,

    Example:

    .. code-block:: python

        counters = PrometheusCounters()
        counters.configure(ConfigParams.from_tuples(
            "connection.protocol", "http",
            "connection.host", "localhost",
            "connection.port", 8080
        ))

        counters.open("123")

        counters.increment("mycomponent.mymethod.calls")
        timing = counters.begin_timing("mycomponent.mymethod.exec_time")
        try:
            ...
        finally:
            timing.end_timing()

        counters.dump()
    """
    def __init__(self):
        """
        Creates a new instance of the performance counters.
        """
        super(PrometheusCounters, self).__init__()
        self.__logger = CompositeLogger()
        self.__connection_resolver = HttpConnectionResolver()
        self.__opened = False
        self.__source: str = None
        self.__instance: str = None
        self.__push_enabled: bool = None
        self.__client: Any = None
        self.__request_route: str = None

    def configure(self, config: ConfigParams):
        """
        Configures component by passing configuration parameters.

        :param config: configuration parameters to be set.
        """
        super().configure(config)

        self.__connection_resolver.configure(config)
        self.__source = config.get_as_string_with_default(
            'source', self.__source)
        self.__instance = config.get_as_string_with_default(
            'instance', self.__instance)
        self.__push_enabled = config.get_as_boolean_with_default(
            'push_enabled', True)

    def set_references(self, references: IReferences):
        """
        Sets references to dependent components.

        :param references: references to locate the component dependencies.
        """
        self.__logger.set_references(references)
        self.__connection_resolver.set_references(references)

        context_info = references.get_one_optional(
            Descriptor("pip-services", "context-info", "default", "*", "1.0"))
        if context_info is not None and self.__source is None:
            self.__source = context_info.name
        if context_info is not None and self.__instance is None:
            self.__instance = context_info.context_id

    def is_open(self) -> bool:
        """
        Checks if the component is opened.

        :return: true if the component has been opened and false otherwise.
        """
        return self.__opened

    def open(self, correlation_id: Optional[str]):
        """
        Opens the component.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        """
        if self.__opened or not self.__push_enabled:
            return

        self.__opened = True

        try:
            connection = self.__connection_resolver.resolve(correlation_id)

            job = self.__source or 'unknown'
            instance = self.__instance or socket.gethostname()
            self.__request_route = "/metrics/job/" + job + "/instance/" + instance
            uri = connection.get_as_string('uri').split('://')[-1]
            if connection.get_as_string('protocol') == 'https':
                self.__client = HTTPSConnectionPool(uri)
            else:
                self.__client = HTTPConnectionPool(uri)

        except Exception as err:
            self.__client = None
            self.__logger.warn(
                correlation_id,
                "Connection to Prometheus server is not configured: " +
                str(err))

    def close(self, correlation_id: Optional[str]):
        """
        Closes component and frees used resources.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        """
        self.__opened = False
        self.__request_route = None
        try:
            if self.__client:
                self.__client.close()
        finally:
            self.__client = None

    def _save(self, counters: List[Counter]):
        """
        Saves the current counters measurements.

        :param counters: current counters measurements to be saved.
        """
        if self.__client is None or not self.__push_enabled: return

        body = PrometheusCounterConverter.to_string(counters, None, None)
        err = None
        response = None
        try:
            response = self.__client.request('PUT',
                                             self.__request_route,
                                             body=body)
        except Exception as ex:
            err = ex
        finally:
            if err or response.status >= 400:
                self.__logger.error("prometheus-counters", err,
                                    "Failed to push metrics to prometheus")
Example #7
File: http.py Project: motey/py2neo
class HTTP(Connection):

    @classmethod
    def default_hydrant(cls, profile, graph):
        return JSONHydrant(graph)

    @classmethod
    def open(cls, profile=None, user_agent=None, on_release=None, on_broken=None):
        """ Open an HTTP connection to a server.

        :param profile: :class:`.ConnectionProfile` detailing how and
            where to connect
        :param user_agent:
        :param on_release:
        :param on_broken:
        :returns: :class:`.HTTP` connection object
        :raises: :class:`.ConnectionUnavailable` if a connection cannot
            be opened
        """
        if profile is None:
            profile = ConnectionProfile(scheme="http")
        try:
            http = cls(profile, on_release=on_release)
            http._hello(user_agent or http_user_agent())
            return http
        except HTTPError as error:
            raise_from(ConnectionUnavailable("Cannot open connection to %r", profile), error)

    def __init__(self, profile, on_release=None):
        super(HTTP, self).__init__(profile, on_release=on_release)
        self.http_pool = None
        self.headers = {}
        self.__closed = False
        self._make_pool(profile)

    def _make_pool(self, profile):
        if profile.secure:
            from ssl import CERT_NONE, CERT_REQUIRED
            from certifi import where as cert_where
            self.http_pool = HTTPSConnectionPool(
                host=profile.host,
                port=profile.port_number,
                maxsize=1,
                block=True,
                cert_reqs=CERT_REQUIRED if profile.verify else CERT_NONE,
                ca_certs=cert_where()
            )
        else:
            self.http_pool = HTTPConnectionPool(
                host=profile.host,
                port=profile.port_number,
                maxsize=1,
                block=True,
            )

    def close(self):
        self.http_pool.close()
        self.__closed = True

    @property
    def closed(self):
        return self.__closed

    @property
    def broken(self):
        return False

    def _hello(self, user_agent):
        self.headers.update(make_headers(basic_auth=":".join(self.profile.auth),
                                         user_agent=user_agent))
        r = self.http_pool.request(method="GET",
                                   url="/",
                                   headers=dict(self.headers))
        metadata = json_loads(r.data.decode("utf-8"))
        if "neo4j_version" in metadata:
            # {
            #   "bolt_routing" : "neo4j://localhost:7687",
            #   "transaction" : "http://localhost:7474/db/{databaseName}/tx",
            #   "bolt_direct" : "bolt://localhost:7687",
            #   "neo4j_version" : "4.0.0",
            #   "neo4j_edition" : "community"
            # }
            self._neo4j_version = Version(metadata["neo4j_version"])  # Neo4j 4.x
            self._neo4j_edition = metadata["neo4j_edition"]
        else:                               # Neo4j 3.x
            # {
            #   "data" : "http://localhost:7474/db/data/",
            #   "management" : "http://localhost:7474/db/manage/",
            #   "bolt" : "bolt://localhost:7687"
            # }
            r = self.http_pool.request(method="GET",
                                       url="/db/data/",
                                       headers=dict(self.headers))
            metadata = json_loads(r.data.decode("utf-8"))
            # {
            #   "extensions" : { },
            #   "node" : "http://localhost:7474/db/data/node",
            #   "relationship" : "http://localhost:7474/db/data/relationship",
            #   "node_index" : "http://localhost:7474/db/data/index/node",
            #   "relationship_index" : "http://localhost:7474/db/data/index/relationship",
            #   "extensions_info" : "http://localhost:7474/db/data/ext",
            #   "relationship_types" : "http://localhost:7474/db/data/relationship/types",
            #   "batch" : "http://localhost:7474/db/data/batch",
            #   "cypher" : "http://localhost:7474/db/data/cypher",
            #   "indexes" : "http://localhost:7474/db/data/schema/index",
            #   "constraints" : "http://localhost:7474/db/data/schema/constraint",
            #   "transaction" : "http://localhost:7474/db/data/transaction",
            #   "node_labels" : "http://localhost:7474/db/data/labels",
            #   "neo4j_version" : "3.5.12"
            # }
            self._neo4j_version = Version(metadata["neo4j_version"])  # Neo4j 3.x
        self.server_agent = "Neo4j/{}".format(self._neo4j_version)

        # The root discovery endpoint is not authenticated, so an incorrect
        # password would not be caught by the requests above and would only
        # surface to the user later on. For that reason alone, we make a
        # second call here to a URL that does require authentication.
        r = self.http_pool.request(method="GET",
                                   url="/db/data/",
                                   headers=dict(self.headers))
        data = r.data.decode("utf-8")
        rs = HTTPResponse.from_json(r.status, data or "{}")
        rs.audit()
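
    # After _hello() completes, self.server_agent takes a form such as
    # "Neo4j/4.0.0" (4.x) or "Neo4j/3.5.12" (3.x), derived from the discovery
    # metadata shown in the comments above.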

    def fast_forward(self, bookmark):
        raise NotImplementedError("Bookmarking is not yet supported over HTTP")

    def auto_run(self, cypher, parameters=None, graph_name=None, readonly=False,
                 # after=None, metadata=None, timeout=None
                 ):
        try:
            if graph_name and not self.supports_multi():
                raise TypeError("Neo4j {} does not support "
                                "named graphs".format(self.neo4j_version))
            # if readonly:
            #     log.warning("Readonly transactions are not supported over HTTP")
            r = self._post(HTTPTransactionRef.autocommit_uri(graph_name), cypher, parameters)
            rs = HTTPResponse.from_json(r.status, r.data.decode("utf-8"))
            rs.audit()
            return HTTPResult(HTTPTransactionRef(graph_name), rs.result(), profile=self.profile)
        finally:
            self.release()
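
    # Illustrative only (names and values are hypothetical): parameters are
    # dehydrated to JSON before being POSTed, and a named graph requires a
    # server that supports multiple databases (Neo4j 4.x).
    #
    #     result = cx.auto_run("MATCH (p:Person) WHERE p.name = $name RETURN p",
    #                          parameters={"name": "Alice"},
    #                          graph_name="neo4j")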

    def begin(self, graph_name, readonly=False,
              # after=None, metadata=None, timeout=None
              ):
        if graph_name and not self.supports_multi():
            raise TypeError("Neo4j {} does not support "
                            "named graphs".format(self.neo4j_version))
        try:
            # if readonly:
            #     log.warning("Readonly transactions are not supported over HTTP")
            # if after:
            #     raise TypeError("Bookmarks are not supported over HTTP")
            # if metadata:
            #     raise TypeError("Transaction metadata is not supported over HTTP")
            # if timeout:
            #     raise TypeError("Transaction timeouts are not supported over HTTP")
            r = self._post(HTTPTransactionRef.begin_uri(graph_name))
            rs = HTTPResponse.from_json(r.status, r.data.decode("utf-8"))
            location_path = urlsplit(r.headers["Location"]).path
            tx = HTTPTransactionRef(graph_name, location_path.rpartition("/")[-1])
            rs.audit(tx)
            return tx
        finally:
            self.release()

    def commit(self, tx):
        try:
            if tx.broken:
                raise ValueError("Transaction is broken")
            r = self._post(tx.commit_uri())
        except ProtocolError:
            tx.mark_broken()
            raise
        else:
            rs = HTTPResponse.from_json(r.status, r.data.decode("utf-8"))
            rs.audit(tx)
            return Bookmark()
        finally:
            self.release()

    def rollback(self, tx):
        try:
            if tx.broken:
                raise ValueError("Transaction is broken")
            r = self._delete(tx.uri())
        except ProtocolError:
            tx.mark_broken()
            raise
        else:
            rs = HTTPResponse.from_json(r.status, r.data.decode("utf-8"))
            rs.audit(tx)
            return Bookmark()
        finally:
            self.release()

    def run(self, tx, cypher, parameters=None):
        try:
            r = self._post(tx.uri(), cypher, parameters)
        except ProtocolError:
            tx.mark_broken()
            raise
        else:
            rs = HTTPResponse.from_json(r.status, r.data.decode("utf-8"))
            rs.audit(tx)
            return HTTPResult(tx, rs.result(), profile=self.profile)
        finally:
            self.release()
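
    # Sketch of an explicit transaction built from the methods above
    # (begin/run/pull/commit); the Cypher and values are illustrative only:
    #
    #     tx = cx.begin("neo4j")
    #     result = cx.run(tx, "CREATE (a:Person {name: $name}) RETURN id(a)",
    #                     {"name": "Alice"})
    #     cx.pull(result)
    #     cx.commit(tx)  # or cx.rollback(tx) to discard the changes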

    def pull(self, result, n=-1):
        try:
            # TODO: lower-memory algorithm that doesn't require
            #  duplication of potentially large data sets
            if n == -1:
                result._buffer.extend(result._data)
                result._data[:] = []
            else:
                result._buffer.extend(result._data[:n])
                result._data[:n] = []
        finally:
            self.release()

    def discard(self, result):
        try:
            result._data[:] = []
        finally:
            self.release()

    def _get_http_profiles(self):
        scheme = "https" if self.profile.secure else "http"
        profiles = {}
        try:
            result = self.auto_run("CALL dbms.cluster.overview")
            self.pull(result)
            while True:
                record = result.take()
                if record is None:
                    break
                key = None
                value = None
                for address in record[1]:
                    profile = ConnectionProfile(address)
                    if profile.scheme == "bolt":
                        key = profile.address
                    elif profile.scheme == scheme:
                        value = profile.address
                    else:
                        pass  # unusable value (should never happen)
                if key and value:
                    profiles[key] = value
        except Neo4jError as error:
            if error.title == "ProcedureNotFound":
                raise_from(TypeError("Neo4j service does not expose a cluster overview"), error)
            else:
                raise
        else:
            return profiles, scheme

    def route(self, graph_name=None, context=None):
        try:
            if self._neo4j_version >= Version("4.0"):
                routers, readers, writers, ttl = self._route4(graph_name, context)
            else:
                routers, readers, writers, ttl = self._route1(graph_name, context)
            profiles, scheme = self._get_http_profiles()  # Convert Bolt addresses to HTTP
            return ([ConnectionProfile(_, scheme=scheme, address=profiles[_.address]) for _ in routers],
                    [ConnectionProfile(_, scheme=scheme, address=profiles[_.address]) for _ in readers],
                    [ConnectionProfile(_, scheme=scheme, address=profiles[_.address]) for _ in writers],
                    ttl)
        finally:
            self.release()
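
    # For orientation: route() is expected to return three lists of
    # ConnectionProfile objects (routers, readers, writers), with the Bolt
    # addresses reported by the cluster rewritten to their HTTP equivalents
    # via _get_http_profiles(), plus a time-to-live for the routing table.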

    def sync(self, result):
        pass

    def fetch(self, result):
        record = result.take()
        return record

    def _post(self, url, statement=None, parameters=None):
        log.debug("POST %r %r %r", url, statement, parameters)
        if statement:
            statements = [
                OrderedDict([
                    ("statement", statement),
                    ("parameters", dehydrate(parameters or {})),
                    ("resultDataContents", ["REST"]),
                    ("includeStats", True),
                ])
            ]
        else:
            statements = []
        try:
            return self.http_pool.request(method="POST",
                                          url=url,
                                          headers=dict(self.headers, **{"Content-Type": "application/json"}),
                                          body=json_dumps({"statements": statements}))
        except HTTPError as error:
            raise_from(ProtocolError("Failed to POST to %r" % url), error)
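
    # The request body assembled above follows the shape of the Neo4j
    # transactional HTTP API; roughly (statement and parameters illustrative):
    #
    #     {
    #       "statements": [
    #         {
    #           "statement": "RETURN $x",
    #           "parameters": {"x": 1},
    #           "resultDataContents": ["REST"],
    #           "includeStats": true
    #         }
    #       ]
    #     }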

    def _delete(self, url):
        log.debug("DELETE %r", url)
        try:
            return self.http_pool.request(method="DELETE",
                                          url=url,
                                          headers=dict(self.headers))
        except HTTPError as error:
            raise_from(ProtocolError("Failed to DELETE %r" % url), error)

    def supports_multi(self):
        return self._neo4j_version >= Version("4.0")