Example #1
    def on_written():
        """Invoked when the command has been written to the socket"""
        response_future = concurrent.TracebackFuture()
        self._ioloop.add_future(response_future, on_response)
        self._get_response(response_future,
                           self._pipeline_commands[0][1],
                           self._pipeline_commands[0][2])
Example #2
    def exec_func(*args, **kwargs):

        future = concurrent.TracebackFuture()

        # accept optional callback
        callback = kwargs.pop('callback', None)
        if callback:
            future.add_done_callback(callback)

        try:

            io_loop = kwargs.pop('ioloop', None)
            if not io_loop:
                io_loop = ioloop.IOLoop.current()

            def _ioloop_callback(val):
                # print "set result to %s" % val
                future.set_result(val)

            def _callback(val):
                # set the result in the ioloop thread
                # print "callback val is %s " % val
                io_loop.add_callback(_ioloop_callback, val)

            # print "Func %s " % func
            func(callback=_callback, *args, **kwargs)
        except Exception:
            future.set_exc_info(sys.exc_info())

        return future
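
The wrapper above appears to be the inner function of a decorator that closes over `func` and adapts a callback-style API to a future. Below is a minimal sketch of how such a decorator could be assembled and used, assuming Tornado 4.x (where `TracebackFuture` and `set_exc_info` exist); the names `async_callback` and `double_later` are hypothetical.

import sys

from tornado import concurrent, gen, ioloop


def async_callback(func):
    """Hypothetical decorator built around the exec_func pattern above."""
    def exec_func(*args, **kwargs):
        future = concurrent.TracebackFuture()
        io_loop = kwargs.pop('ioloop', None) or ioloop.IOLoop.current()

        def _callback(val):
            # Hop back onto the IOLoop thread before resolving the future
            io_loop.add_callback(future.set_result, val)

        try:
            func(callback=_callback, *args, **kwargs)
        except Exception:
            future.set_exc_info(sys.exc_info())
        return future
    return exec_func


@async_callback
def double_later(value, callback=None):
    """Hypothetical callback-style API being adapted."""
    callback(value * 2)


@gen.coroutine
def main():
    result = yield double_later(21)
    assert result == 42

ioloop.IOLoop.current().run_sync(main)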
Example #3
        def on_response(response):
            """Process the response future

            :param response: The response future
            :type response: :class:`tornado.concurrent.Future`

            """
            exc = response.exception()
            if exc:
                pipeline_responses.append(exc)
            else:
                pipeline_responses.append(response.result())

            index = len(pipeline_responses.values)

            if index == len(self._pipeline_commands):
                self._pipeline = False
                self._pipeline_commands = []
                future.set_result(pipeline_responses.values)
            else:
                response_future = concurrent.TracebackFuture()
                self._ioloop.add_future(response_future, on_response)
                self._get_response(response_future,
                                   self._pipeline_commands[index][1],
                                   self._pipeline_commands[index][2])
Example #4
    def exec_background(*args, **kwargs):
        '''
            Executes a function in a background thread and returns a Future
            that resolves when the thread completes.  Useful for I/O-bound
            work that blocks.  For CPU-bound work consider Celery; do not
            execute CPU-bound tasks in the Tornado process!

            ``ioloop`` is the optional IOLoop used to invoke the callback
            from the worker thread.  This is useful for unit tests that do
            not use the singleton IOLoop.  If omitted or ``None``,
            IOLoop.current() is used.
        '''
        # traceback future maintains python stack in exception
        future = concurrent.TracebackFuture()

        # use explicit ioloop for unit testing
        # Ref: https://github.com/tornadoweb/tornado/issues/663
        io_loop = kwargs.pop('ioloop', None)
        if not io_loop:
            io_loop = ioloop.IOLoop.current()

        # accept optional callback
        callback = kwargs.pop('callback', None)
        if callback:
            future.add_done_callback(callback)

        def _do_task(*args, **kwargs):
            try:
                rtn = func(*args, **kwargs)
                io_loop.add_callback(future.set_result, rtn)
            except Exception:
                logging.debug("Callback exception", exc_info=True)
                io_loop.add_callback(future.set_exc_info, sys.exc_info())
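
The snippet ends before `_do_task` is ever dispatched or the future returned. A plausible completion (an assumption, not the library's verbatim code) runs the task on a daemon thread, relying on `import threading` at module level, and hands the future back to the caller:

        # Hypothetical continuation: execute the blocking work off the IOLoop
        # thread; _do_task resolves the future via io_loop.add_callback.
        worker = threading.Thread(target=_do_task, args=args, kwargs=kwargs)
        worker.daemon = True
        worker.start()
        return future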
Example #5
    def hmset(self, key, value_dict):
        """
        Sets fields to values as in `value_dict` in the hash stored at `key`.

        Sets the specified fields to their respective values in the hash
        stored at `key`.  This command overwrites any specified fields
        already existing in the hash.  If `key` does not exist, a new key
        holding a hash is created.

        .. note::

           **Time complexity**: ``O(N)`` where ``N`` is the number of
           fields being set.

        :param key: The key of the hash
        :type key: :class:`str`, :class:`bytes`
        :param value_dict: field to value mapping
        :type value_dict: :class:`dict`
        :rtype: bool
        :raises: :exc:`~tredis.exceptions.RedisError`

        """
        if not value_dict:
            future = concurrent.TracebackFuture()
            future.set_result(False)
        else:
            command = [b'HMSET', key]
            command.extend(sum(value_dict.items(), ()))
            future = self._execute(command)
        return future
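
A brief usage sketch for the method above, assuming `client` is an already-connected tredis client object exposing `hmset` (Tornado 4.x coroutine style):

from tornado import gen


@gen.coroutine
def store_user(client):
    # HMSET overwrites existing fields; per the docstring above this resolves
    # to True on success and False when the mapping is empty.
    stored = yield client.hmset('user:1', {b'name': b'Alice', b'role': b'admin'})
    raise gen.Return(stored)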
Example #6
    def refresh(self):
        """Load dynamic credentials from the AWS Instance Metadata and user
        data HTTP API.

        :raises: tornado_aws.exceptions.NoCredentialsError

        """
        LOGGER.debug('Refreshing EC2 IAM Credentials')
        is_async = isinstance(self._client, httpclient.AsyncHTTPClient)
        future = concurrent.TracebackFuture() if is_async else None
        try:
            result = self._fetch_credentials(is_async)
            if concurrent.is_future(result):

                def on_complete(response):
                    exception = response.exception()
                    if exception:
                        if isinstance(exception, httpclient.HTTPError) and \
                                exception.code == 599:
                            future.set_exception(
                                exceptions.NoCredentialsError())
                        else:
                            future.set_exception(exception)
                        return
                    self._assign_credentials(response.result())
                    future.set_result(True)

                self._ioloop.add_future(result, on_complete)
            else:
                self._assign_credentials(result)
        except (httpclient.HTTPError, OSError) as error:
            LOGGER.error('Error Fetching Credentials: %s', error)
            raise exceptions.NoCredentialsError()
        return future
Example #7
    def _execute(self, action, parameters, attempt, measurements):
        """Invoke a DynamoDB action

        :param str action: DynamoDB action to invoke
        :param dict parameters: parameters to send into the action
        :param int attempt: Which attempt number this is
        :param list measurements: A list for accumulating request measurements
        :rtype: tornado.concurrent.Future

        """
        future = concurrent.TracebackFuture()
        start = time.time()

        def handle_response(request):
            """Invoked by the IOLoop when fetch has a response to process.

            :param tornado.concurrent.Future request: The request future

            """
            self._on_response(
                action, parameters.get('TableName', 'Unknown'), attempt,
                start, request, future, measurements)

        ioloop.IOLoop.current().add_future(self._client.fetch(
            'POST', '/',
            body=json.dumps(parameters).encode('utf-8'),
            headers={
                'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
                'Content-Type': 'application/x-amz-json-1.0',
            }), handle_response)
        return future
Example #8
    def validate(self):
        """Validate the session can connect or has open connections to
        PostgreSQL

        :rtype: bool

        """
        future = concurrent.TracebackFuture()

        def on_connected(cf):
            if cf.exception():
                future.set_exception(cf.exception())
                return

            connection = cf.result()
            fd = connection.fileno()

            # The connection would have been added to the pool manager, free it
            self._pool_manager.free(self.pid, connection)
            self._ioloop.remove_handler(fd)

            if fd in self._connections:
                del self._connections[fd]
            if fd in self._futures:
                del self._futures[fd]

            # Return the success in validating the connection
            future.set_result(True)

        # Grab a connection to PostgreSQL
        self._ioloop.add_future(self._connect(), on_connected)

        # Return the future for the query result
        return future
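
Usage sketch, assuming the method belongs to a `queries.TornadoSession`-style class; the connection URI below is an assumption:

from tornado import gen, ioloop
import queries


@gen.coroutine
def check_database():
    session = queries.TornadoSession(
        'postgresql://postgres@localhost:5432/postgres')
    # Resolves to True if a connection can be established or reused
    valid = yield session.validate()
    raise gen.Return(valid)

# ioloop.IOLoop.current().run_sync(check_database)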
Example #9
        def on_connected(cf):
            """Invoked by the future returned by self._connect"""
            if cf.exception():
                future.set_exception(cf.exception())
                return

            # Get the psycopg2 connection object and cursor
            conn = cf.result()
            cursor = self._get_cursor(conn)

            def completed(qf):
                """Invoked by the IOLoop when the future has completed"""
                if qf.exception():
                    error = qf.exception()
                    LOGGER.debug('Cleaning cursor due to exception: %r', error)
                    self._exec_cleanup(cursor, conn.fileno())
                    future.set_exception(error)
                else:
                    value = Results(cursor, self._exec_cleanup, conn.fileno())
                    future.set_result(value)

            # Setup a callback to wait on the query result
            self._futures[conn.fileno()] = concurrent.TracebackFuture()

            # Add the future to the IOLoop
            self._ioloop.add_future(self._futures[conn.fileno()], completed)

            # Get the cursor, execute the query
            func = getattr(cursor, method)
            try:
                func(query, parameters)
            except Exception as error:
                future.set_exception(error)
Example #10
def _write_measurements():
    """Write out all of the metrics in each of the databases, returning a
    future that resolves once all of the metrics have been written.

    :rtype: tornado.concurrent.Future

    """
    global _timeout, _writing

    future = concurrent.TracebackFuture()

    if _writing:
        LOGGER.warning('Currently writing measurements, skipping write')
        future.set_result(False)
    elif not _pending_measurements():
        future.set_result(True)
    elif not _sample_batch():
        LOGGER.debug('Skipping batch submission due to sampling')
        future.set_result(True)

    # Exit early if there's an error condition
    if future.done():
        return future

    if not _http_client or _dirty:
        _create_http_client()

    # Keep track of the futures for each batch submission
    futures = []

    # Submit a batch for each database
    for database in _measurements:
        url = '{}?db={}&precision=ms'.format(_base_url, database)

        # Get the measurements to submit
        measurements = _measurements[database][:_max_batch_size]

        # Pop them off the stack of pending measurements
        _measurements[database] = _measurements[database][_max_batch_size:]

        # Create the request future
        LOGGER.debug('Submitting %r measurements to %r',
                     len(measurements), url)
        request = _http_client.fetch(
            url, method='POST', body='\n'.join(measurements).encode('utf-8'))

        # Keep track of each request in our future stack
        futures.append((request, str(uuid.uuid4()), database, measurements))

    # Start the wait cycle for all the requests to complete
    _writing = True
    _futures_wait(future, futures)

    return future
Example #11
    def _maybe_connect(self, callback):
        """Connect to the Redis server, selecting the specified database.

        :raises: :exc:`~tredis.exceptions.ConnectError`
                 :exc:`~tredis.exceptions.RedisError`

        """
        future = concurrent.TracebackFuture()
        self._ioloop.add_future(future, callback)

        if self._stream:
            return future.set_result(True)

        LOGGER.info('Connecting to %s:%i', self._host, self._port)
        connect_future = self._client.connect(self._host, self._port)

        def on_selected(response):
            """Invoked when the default database is selected when connecting

            :param response: The connection response future
            :type response: :class:`~tornado.concurrent.Future`

            """
            exc = response.exception()
            if exc:
                future.set_exception(exceptions.RedisError(exc))
            else:
                future.set_result(response.result() == b'OK')

        def on_connect(response):
            """Invoked when the socket stream has connected

            :param response: The connection response future
            :type response: :class:`~tornado.concurrent.Future`

            """
            exc = response.exception()
            if exc:
                return future.set_exception(exceptions.ConnectError(str(exc)))

            self._stream = response.result()
            self._stream.set_close_callback(self._on_closed)
            if not self._default_db:
                return future.set_result(True)

            def on_written():
                select_future = concurrent.TracebackFuture()
                self._get_response(select_future)
                self._ioloop.add_future(select_future, on_selected)

            LOGGER.debug('Selecting the default db: %r', self._default_db)
            command = self._build_command(['SELECT', ascii(self._default_db)])
            self._stream.write(command, on_written)

        self._ioloop.add_future(connect_future, on_connect)
Example #12
    def _execute(self, parts, expectation=None, format_callback=None):
        """Really execute a redis command

        :param list parts: The list of command parts
        :param mixed expectation: Optional response expectation

        :rtype: :class:`~tornado.concurrent.Future`
        :raises: :exc:`~tredis.exceptions.SubscribedError`

        """
        LOGGER.debug('_execute (%r, %r, %r)',
                     parts, expectation, format_callback)

        command = self._build_command(parts)
        future = concurrent.TracebackFuture()

        def on_ready(connection_ready):
            """Invoked once the connection has been established

            :param connection_ready: The connection future
            :type connection_ready: tornado.concurrent.Future

            """
            connection_error = connection_ready.exception()
            if connection_error:
                return future.set_exception(connection_error)

            def on_written():
                """Invoked when the command has been written to the socket"""
                self._get_response(future, expectation, format_callback)

            try:
                self._stream.write(command, callback=on_written)
            except iostream.StreamClosedError as error:
                future.set_exception(exceptions.ConnectionError(error))

        def on_locked(lock):
            """Invoked once the lock has been acquired.

            :param tornado.concurrent.Future lock: The lock future

            """
            LOGGER.debug('Executing %r (%r) with lock %r',
                         command, expectation, lock)
            self._maybe_connect(on_ready)

        # Start executing once locked
        lock_future = self._busy.acquire()
        self._ioloop.add_future(lock_future, on_locked)

        # Release the lock when the future is complete
        self._ioloop.add_future(future, lambda r: self._busy.release())
        return future
Example #13
    def _create_connection(self, future):
        """Create a new PostgreSQL connection

        :param tornado.concurrent.Future future: future for new conn result

        """
        # Create a new PostgreSQL connection
        kwargs = utils.uri_to_kwargs(self._uri)

        connection = self._psycopg2_connect(kwargs)

        # Add the connection for use in _poll_connection
        fd = connection.fileno()
        self._connections[fd] = connection

        def on_connected(cf):
            """Invoked by the IOLoop when the future is complete for the
            connection

            :param Future cf: The future for the initial connection

            """
            if cf.exception():
                future.set_exception(cf.exception())

            else:

                # Add the connection to the pool
                self._pool_manager.add(self.pid, connection)
                self._pool_manager.lock(self.pid, connection, self)

                # Added in because psycopg2ct connects and leaves the
                # connection in a weird state: consts.STATUS_DATESTYLE,
                # returning from Connection._setup without setting the state
                # as const.STATUS_OK
                if PYPY:
                    connection.status = extensions.STATUS_READY

                # Register the custom data types
                self._register_unicode(connection)
                self._register_uuid(connection)

                # Set the future result
                future.set_result(connection)

        # Add a future that fires once connected
        self._futures[fd] = concurrent.TracebackFuture()
        self._ioloop.add_future(self._futures[fd], on_connected)

        # Add the connection to the IOLoop
        self._ioloop.add_handler(connection.fileno(), self._on_io_events,
                                 ioloop.IOLoop.WRITE)
Example #14
    def send_request(self, method, scheme, host, *path, **kwargs):
        """
        Send an HTTP request.

        :param str method: HTTP method to invoke
        :param str scheme: URL scheme for the request
        :param str host: host to send the request to.  This can be
            a formatted IP address literal or DNS name.
        :param path: resource path to request.  Elements of the path
            are quoted as URL path segments and then joined by a ``/``
            to form the resource path.
        :keyword port: port to send the request to.  If omitted, the
            port will be chosen based on the scheme.
        :param kwargs: additional keyword arguments are passed to the
            :class:`tornado.httpclient.HTTPRequest` initializer.

        :returns: :class:`tornado.concurrent.Future` that resolves to
            a :class:`tornado.httpclient.HTTPResponse` instance
        :raises: :class:`.HTTPError`

        """
        port = kwargs.pop('port', None)
        netloc = host if port is None else '{}:{}'.format(host, port)
        target = '{}://{}/{}'.format(
            scheme, netloc,
            '/'.join(parse.quote(str(s), safe='') for s in path))
        if 'headers' in kwargs:
            headers = self.headers.copy()
            headers.update(kwargs.pop('headers'))
            kwargs['headers'] = headers
        else:
            kwargs['headers'] = self.headers

        request = httpclient.HTTPRequest(target, method=method, **kwargs)
        self.logger.debug('sending %s %s', request.method, request.url)

        future = concurrent.TracebackFuture()

        def handle_response(f):
            try:
                future.set_result(f.result())
            except httpclient.HTTPError as error:
                future.set_exception(
                    HTTPError.from_tornado_error(request, error))
            except Exception as exception:
                future.set_exception(exception)

        coro = self.client.fetch(request)
        self.client.io_loop.add_future(coro, handle_response)

        return future
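
A usage sketch for `send_request`, assuming it is provided by a mixin that also supplies `self.client`, `self.headers`, and `self.logger`; the class name `HTTPClientMixin` and the URL below are placeholders, not names from the source:

from tornado import gen


class ThingFetcher(HTTPClientMixin):  # placeholder for whatever class
                                      # actually defines send_request

    @gen.coroutine
    def fetch_thing(self, thing_id):
        # GET https://api.example.com/things/<thing_id>
        response = yield self.send_request(
            'GET', 'https', 'api.example.com', 'things', thing_id)
        raise gen.Return(response.body)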
Example #15
    def _on_read_only_error(self, command, future):
        """Invoked when a Redis node returns an error indicating it's in
        read-only mode. It will use the ``INFO REPLICATION`` command to
        attempt to find the master server and fail over to it, reissuing
        the command to that server.

        :param command: The command that was being executed
        :type command: tredis.client.Command
        :param future: The execution future
        :type future: tornado.concurrent.Future

        """
        failover_future = concurrent.TracebackFuture()

        def on_replication_info(_):
            common.maybe_raise_exception(failover_future)
            LOGGER.debug('Failover closing current read-only connection')
            self._closing = True
            database = self._connection.database
            self._connection.close()
            self._connected.clear()
            self._connect_future = concurrent.Future()

            info = failover_future.result()
            LOGGER.debug('Failover connecting to %s:%s', info['master_host'],
                         info['master_port'])
            self._connection = _Connection(info['master_host'],
                                           info['master_port'], database,
                                           self._read, self._on_closed,
                                           self.io_loop, self._clustering)

            # When the connection is re-established, re-run the command
            self.io_loop.add_future(
                self._connect_future, lambda f: self._connection.execute(
                    command._replace(connection=self._connection), future))

            # Use the normal connection processing flow when connecting
            self.io_loop.add_future(self._connection.connect(),
                                    self._on_connected)

        if self._clustering:
            command.connection.set_readonly(True)

        LOGGER.debug('%s is read-only, need to failover to new master',
                     command.connection.name)

        cmd = Command(self._build_command(['INFO', 'REPLICATION']),
                      self._connection, None, common.format_info_response)

        self.io_loop.add_future(failover_future, on_replication_info)
        cmd.connection.execute(cmd, failover_future)
Example #16
    def shutdown(self):
        """Invoke on shutdown of your application to stop the periodic
        callbacks and flush any remaining metrics.

        Returns a future that is complete when all pending metrics have been
        submitted.

        :rtype: :class:`~tornado.concurrent.TracebackFuture`

        """
        future = concurrent.TracebackFuture()
        self._callback.stop()
        self._write_metrics()
        self._shutdown_wait(future)
        return future
Example #17
    def request(self, method, url, data=None, query=None, callback=None):
        user_result = concurrent.TracebackFuture()
        if callback is not None:
            user_result.add_done_callback(callback)
        if data is not None:
            data = self.serializer.encode(data)
        if query:
            url = '%s?%s' % (url, urlencode(query, doseq=True))
        request = httpclient.HTTPRequest(url,
                                         method=method.upper(),
                                         body=data,
                                         headers=self.headers)
        start = lambda f: self.start_request(f, request, user_result)
        self.authenticate(request).add_done_callback(start)
        return user_result
Example #18
def flush():
    """Flush all pending measurements to InfluxDB. This will ensure that all
    measurements that are in the buffer for any database are written. If the
    requests fail, it will continue to try and submit the metrics until they
    are successfully written.

    :rtype: :class:`~tornado.concurrent.Future`

    """
    flush_future = concurrent.TracebackFuture()
    if _batch_future and not _batch_future.done():
        LOGGER.debug('Flush waiting on incomplete _batch_future')
        _flush_wait(flush_future, _batch_future)
    else:
        LOGGER.info('Flushing buffer with %i measurements to InfluxDB',
                    _pending_measurements())
        _flush_wait(flush_future, _write_measurements())
    return flush_future
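
Usage sketch: awaiting `flush` from a coroutine, for example as part of an application shutdown hook:

from tornado import gen


@gen.coroutine
def drain_measurements():
    # Resolves once every buffered measurement has been written to InfluxDB
    yield flush()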
Example #19
    def _execute(self, parts, expectation=None, format_callback=None):
        """Really execute a redis command

        :param list parts: The list of command parts
        :param mixed expectation: Optional response expectation

        :rtype: :class:`~tornado.concurrent.Future`
        :raises: :exc:`~tredis.exceptions.SubscribedError`

        """
        future = concurrent.TracebackFuture()

        try:
            command = self._build_command(parts)
        except ValueError as error:
            future.set_exception(error)
            return future

        def on_locked(_):
            if self.ready:
                if self._clustering:
                    cmd = Command(command, self._pick_cluster_host(parts),
                                  expectation, format_callback)
                else:
                    LOGGER.debug('Connection: %r', self._connection)
                    cmd = Command(command, self._connection, expectation,
                                  format_callback)
                LOGGER.debug('_execute(%r, %r, %r) on %s', cmd.command,
                             expectation, format_callback, cmd.connection.name)
                cmd.connection.execute(cmd, future)
            else:
                LOGGER.critical('Lock released & not ready, aborting command')

        # Wait until the cluster is ready, letting cluster discovery through
        if not self.ready and not self._connected.is_set():
            self.io_loop.add_future(
                self._connected.wait(), lambda f: self.io_loop.add_future(
                    self._busy.acquire(), on_locked))
        else:
            self.io_loop.add_future(self._busy.acquire(), on_locked)

        # Release the lock when the future is complete
        self.io_loop.add_future(future, lambda r: self._busy.release())
        return future
Example #20
    def wrapper(*args, **kwargs):

        # When this function gets updated, update gcall also!

        future = concurrent.TracebackFuture()

        def greenlet_base():
            try:
                result = f(*args, **kwargs)
            except Exception:
                future.set_exc_info(sys.exc_info())
            else:
                future.set_result(result)

        gr = greenlet.greenlet(sc_wrap(greenlet_base))
        with NullContext():
            gr.switch()

        return future
Example #21
    def _get_role_async(self):
        """Fetch the IAM role from the ECS Metadata and user data API

        :rtype: :class:`~tornado.concurrent.TracebackFuture`
        :raises: tornado.httpclient.HTTPError

        """
        future = concurrent.TracebackFuture()

        def on_response(response):
            if not self._future_exception(response, future):
                role = response.result()
                future.set_result(role.body.decode('utf-8'))

        url = INSTANCE_ENDPOINT.format(INSTANCE_ROLE_PATH)
        request = self._client.fetch(url,
                                     connect_timeout=HTTP_TIMEOUT,
                                     request_timeout=HTTP_TIMEOUT)
        self._ioloop.add_future(request, on_response)
        return future
Example #22
    def _fetch_credentials_async(self):
        """Return the credentials from the EC2 Instance Metadata and user data
        API using an Async adapter.

        :rtype: :class:`~tornado.concurrent.TracebackFuture`

        """
        future = concurrent.TracebackFuture()

        def on_credentials(response):
            if not self._future_exception(response, future):
                result = response.result()
                future.set_result(result)

        def on_role(response):
            if not self._future_exception(response, future):
                req = self._get_instance_credentials_async(response.result())
                self._ioloop.add_future(req, on_credentials)

        request = self._get_role_async()
        self._ioloop.add_future(request, on_role)
        return future
Example #23
def gcall(f, *args, **kwargs):
    '''
        Calls a function, makes it asynchronous, and returns the result of
        the function as a :class:`tornado.concurrent.Future`. The wrapped
        function may use :func:`gyield` to pseudo-synchronously wait for a
        future to resolve.
        
        This is the same code that :func:`@greenado.groutine <groutine>`
        uses to wrap functions.

        :param f:       Function to call
        :param args:    Function arguments
        :param kwargs:  Function keyword arguments

        :returns: :class:`tornado.concurrent.Future`

        .. warning:: You should not discard the returned Future or exceptions
                     may be silently discarded, similar to a tornado coroutine.
                     See :func:`@gen.coroutine <tornado.gen.coroutine>` for
                     details.
    '''

    # When this function gets updated, update groutine.wrapper also!

    future = concurrent.TracebackFuture()

    def greenlet_base():
        try:
            result = f(*args, **kwargs)
        except Exception:
            future.set_exc_info(sys.exc_info())
        else:
            future.set_result(result)

    gr = greenlet.greenlet(sc_wrap(greenlet_base))
    with NullContext():
        gr.switch()

    return future
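
Usage sketch for `gcall` together with `gyield` (see Example #28 below): an ordinary function pseudo-synchronously waits on a Tornado future. This assumes the greenado package and a Tornado 4.x IOLoop; the URL is illustrative only.

import greenado
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop


def fetch_length(url):
    # gyield suspends this greenlet until the HTTP future resolves
    response = greenado.gyield(AsyncHTTPClient().fetch(url))
    return len(response.body)


# run_sync drives the IOLoop until the future returned by gcall resolves
length = IOLoop.current().run_sync(
    lambda: greenado.gcall(fetch_length, 'http://example.com/'))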
Example #24
    def hdel(self, key, *fields):
        """
        Remove the specified fields from the hash stored at `key`.

        Specified fields that do not exist within this hash are ignored.
        If `key` does not exist, it is treated as an empty hash and this
        command returns zero.

        :param key: The key of the hash
        :type key: :class:`str`, :class:`bytes`
        :param fields: iterable of field names to remove
        :returns: the number of fields that were removed from the hash,
            not including specified but non-existing fields.
        :rtype: int

        """
        if not fields:
            future = concurrent.TracebackFuture()
            future.set_result(0)
        else:
            future = self._execute([b'HDEL', key] + list(fields))
        return future
Example #25
    def auth(self, password):
        """Request for authentication in a password-protected Redis server.
        Redis can be instructed to require a password before allowing clients
        to execute commands. This is done using the ``requirepass`` directive
        in the configuration file.

        If the password does not match, an
        :exc:`~tredis.exceptions.AuthError` exception
        will be raised.

        :param password: The password to authenticate with
        :type password: :class:`str`, :class:`bytes`
        :rtype: bool
        :raises: :exc:`~tredis.exceptions.AuthError`,
                 :exc:`~tredis.exceptions.RedisError`

        """
        future = concurrent.TracebackFuture()

        def on_response(response):
            """Process the redis response

            :param response: The future with the response
            :type response: tornado.concurrent.Future

            """
            exc = response.exception()
            if exc:
                if exc.args[0] == b'invalid password':
                    future.set_exception(exceptions.AuthError(exc))
                else:
                    future.set_exception(exc)
            else:
                future.set_result(response.result())

        execute_future = self._execute([b'AUTH', password], b'OK')
        self._ioloop.add_future(execute_future, on_response)
        return future
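
Usage sketch for `auth`, assuming `client` is a tredis client and mirroring the exception mapping in the snippet:

import logging

from tornado import gen
from tredis import exceptions

LOGGER = logging.getLogger(__name__)


@gen.coroutine
def authenticate(client, password):
    try:
        yield client.auth(password)
    except exceptions.AuthError:
        LOGGER.error('Redis password rejected')
        raise gen.Return(False)
    raise gen.Return(True)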
Example #26
    def _get_instance_credentials_async(self, role):
        """Attempt to get temporary credentials for the specified role from the
        EC2 Instance Metadata and user data API

        :param str role: The role to get temporary credentials for

        :rtype: :class:`~tornado.concurrent.TracebackFuture`
        :raises: tornado.httpclient.HTTPError

        """
        future = concurrent.TracebackFuture()

        def on_response(response):
            if not self._future_exception(response, future):
                body = response.result().body
                future.set_result(json.loads(body.decode('utf-8')))

        url_path = INSTANCE_CREDENTIALS_PATH.format(role)
        result = self._client.fetch(INSTANCE_ENDPOINT.format(url_path),
                                    connect_timeout=HTTP_TIMEOUT,
                                    request_timeout=HTTP_TIMEOUT)
        self._ioloop.add_future(result, on_response)
        return future
Example #27
    def _connect(self):
        """Connect to PostgreSQL, either by reusing a connection from the pool
        if possible, or by creating a new connection.

        :rtype: psycopg2.extensions.connection
        :raises: pool.NoIdleConnectionsError

        """
        future = concurrent.TracebackFuture()

        # Attempt to get a cached connection from the connection pool
        try:
            connection = self._pool_manager.get(self.pid, self)
            self._connections[connection.fileno()] = connection
            future.set_result(connection)

            # Add the connection to the IOLoop
            self._ioloop.add_handler(connection.fileno(), self._on_io_events,
                                     ioloop.IOLoop.WRITE)
        except pool.NoIdleConnectionsError:
            self._create_connection(future)

        return future
Example #28
def gyield(future, timeout=None):
    '''
        This is functionally equivalent to the 'yield' statements used in a
        :func:`@gen.coroutine <tornado.gen.coroutine>`, but doesn't require
        turning all your functions into generators -- so you can use the
        return statement normally, and exceptions won't be accidentally
        discarded.
        
        This can be used on any function that returns a future object, such
        as functions decorated by :func:`@gen.coroutine <tornado.gen.coroutine>`,
        and most of the tornado API as of tornado 4.0.
        
        This function must only be used by functions that either have a
        :func:`@greenado.groutine <groutine>` decorator, or functions that are
        children of functions that have the decorator applied.
        
        :param future:  A :class:`tornado.concurrent.Future` object
        :param timeout: Number of seconds to wait before raising a
                        :exc:`TimeoutError`. Default is no timeout.
                        `Parameter added in version 0.1.8.`

        :returns:       The result set on the future object
        :raises:        * If an exception is set on the future, the exception
                          will be thrown to the caller of gyield.
                        * If the timeout expires, :exc:`TimeoutError` will be
                          raised.
                          
        .. versionchanged:: 0.1.8
           Added timeout parameter
           
        .. versionchanged:: 0.2.0
           If a timeout occurs, the :exc:`TimeoutError` will not be set on the
           future object, but will only be raised to the caller.
           
        .. note:: This cannot be used with :func:`tornado.gen.moment`; use
                  :func:`gmoment` instead.
    '''

    gr = greenlet.getcurrent()
    assert gr.parent is not None, "gyield() can only be called from functions that have the @greenado.groutine decorator in the call stack."

    # don't switch/wait if the future is already ready to go
    if not future.done():

        io_loop = IOLoop.current()
        wait_future = future

        if timeout is not None and timeout > 0:
            # optimization: only do timeout-related work when a timeout is set

            timeout_handle = None
            timeout_future = None

            def on_complete(result):
                if timeout_future.done():
                    # resolve the future so tornado doesn't complain
                    try:
                        result.result()
                    except Exception:
                        # If you don't want to see this error, then implement cancellation
                        # in the thing that the future came from
                        logger.warn(
                            "gyield() timeout expired, and this exception was ignored",
                            exc_info=1)
                else:
                    timeout_future.set_result(True)
                    io_loop.remove_timeout(timeout_handle)
                gr.switch()

            def on_timeout():
                timeout_future.set_exception(
                    TimeoutError("Timeout after %s seconds" % timeout))
                gr.switch()

            wait_future = timeout_future = concurrent.TracebackFuture()
            timeout_handle = io_loop.add_timeout(io_loop.time() + timeout,
                                                 on_timeout)

        else:

            def on_complete(result):
                gr.switch()

        io_loop.add_future(future, on_complete)

        with NullContext():
            gr.parent.switch()

            while not wait_future.done():
                gr.parent.switch()

        wait_future.result()

    return future.result()
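
A small timeout sketch for `gyield`, assuming it is called somewhere under a `@greenado.groutine` frame as the docstring requires; the broad `except` is an assumption, since the exact `TimeoutError` class raised is whichever one greenado imports.

import greenado
from tornado import gen


@greenado.groutine
def wait_with_timeout():
    try:
        # Give up if the future does not resolve within half a second
        greenado.gyield(gen.sleep(10), timeout=0.5)
    except Exception as error:  # the TimeoutError type greenado raises
        return 'timed out: %s' % error
    return 'completed'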
Example #29
def get_future(loop):
    return concurrent.TracebackFuture()
Example #30
    def _execute(self, method, query, parameters=None):
        """Issue a query asynchronously on the server, mogrifying the
        parameters against the sql statement and yielding the results
        as a :py:class:`Results <queries.tornado_session.Results>` object.

        This function reduces duplicate code for callproc and query by getting
        the class attribute for the method passed in as the function to call.

        :param str method: The method attribute to use
        :param str query: The SQL statement or Stored Procedure name
        :param list|dict parameters: A dictionary of query parameters
        :rtype: Results
        :raises: queries.DataError
        :raises: queries.DatabaseError
        :raises: queries.IntegrityError
        :raises: queries.InternalError
        :raises: queries.InterfaceError
        :raises: queries.NotSupportedError
        :raises: queries.OperationalError
        :raises: queries.ProgrammingError

        """
        future = concurrent.TracebackFuture()

        def on_connected(cf):
            """Invoked by the future returned by self._connect"""
            if cf.exception():
                future.set_exception(cf.exception())
                return

            # Get the psycopg2 connection object and cursor
            conn = cf.result()
            cursor = self._get_cursor(conn)

            def completed(qf):
                """Invoked by the IOLoop when the future has completed"""
                if qf.exception():
                    error = qf.exception()
                    LOGGER.debug('Cleaning cursor due to exception: %r', error)
                    self._exec_cleanup(cursor, conn.fileno())
                    future.set_exception(error)
                else:
                    value = Results(cursor, self._exec_cleanup, conn.fileno())
                    future.set_result(value)

            # Setup a callback to wait on the query result
            self._futures[conn.fileno()] = concurrent.TracebackFuture()

            # Add the future to the IOLoop
            self._ioloop.add_future(self._futures[conn.fileno()], completed)

            # Get the cursor, execute the query
            func = getattr(cursor, method)
            try:
                func(query, parameters)
            except Exception as error:
                future.set_exception(error)

        # Ensure the pool exists for the connection
        self._ensure_pool_exists()

        # Grab a connection to PostgreSQL
        self._ioloop.add_future(self._connect(), on_connected)

        # Return the future for the query result
        return future
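
Usage sketch for the public `query`/`callproc` methods that delegate to `_execute` above, assuming a `queries.TornadoSession` and the `Results.free()` cleanup the library documents; the table and column names are illustrative.

from tornado import gen
import queries


@gen.coroutine
def active_usernames(session):
    results = yield session.query(
        'SELECT username FROM users WHERE active = %(active)s',
        {'active': True})
    try:
        names = [row['username'] for row in results]
    finally:
        results.free()  # release the connection back to the pool
    raise gen.Return(names)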