Example #1
File: gen.py Project: yetone/hurray
    def wrapper(*args, **kwargs):
        future = TracebackFuture()

        if replace_callback and 'callback' in kwargs:
            callback = kwargs.pop('callback')
            IOLoop.current().add_future(
                future, lambda future: callback(future.result()))

        try:
            result = func(*args, **kwargs)
        except (Return, StopIteration) as e:
            result = _value_from_stopiteration(e)
        except Exception:
            future.set_exc_info(sys.exc_info())
            return future
        else:
            if isinstance(result, GeneratorType):
                # Inline the first iteration of Runner.run.  This lets us
                # avoid the cost of creating a Runner when the coroutine
                # never actually yields, which in turn allows us to
                # use "optional" coroutines in critical path code without
                # performance penalty for the synchronous case.
                try:
                    orig_stack_contexts = stack_context._state.contexts
                    yielded = next(result)
                    if stack_context._state.contexts is not orig_stack_contexts:
                        yielded = TracebackFuture()
                        yielded.set_exception(
                            stack_context.StackContextInconsistentError(
                                'stack_context inconsistency (probably caused '
                                'by yield within a "with StackContext" block)')
                        )
                except (StopIteration, Return) as e:
                    future.set_result(_value_from_stopiteration(e))
                except Exception:
                    future.set_exc_info(sys.exc_info())
                else:
                    Runner(result, future, yielded)
                try:
                    return future
                finally:
                    # Subtle memory optimization: if next() raised an exception,
                    # the future's exc_info contains a traceback which
                    # includes this stack frame.  This creates a cycle,
                    # which will be collected at the next full GC but has
                    # been shown to greatly increase memory usage of
                    # benchmarks (relative to the refcount-based scheme
                    # used in the absence of cycles).  We can avoid the
                    # cycle by clearing the local variable after we return it.
                    future = None
        future.set_result(result)
        return future
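A consequence of the inlined first iteration above: a decorated function that returns without ever yielding resolves its Future synchronously, and no Runner is allocated. A minimal sketch of that fast path, assuming the decorator is exposed as hurray.server.gen.coroutine (import path inferred from the project layout) and with cache and fetch_from_backend as hypothetical names:

from hurray.server import gen

@gen.coroutine
def cached_lookup(cache, key):
    if key in cache:
        # Synchronous fast path: no yield executes, so the wrapper
        # above resolves the Future without creating a Runner.
        raise gen.Return(cache[key])
    # The first yield hands the generator off to a Runner.
    value = yield fetch_from_backend(key)  # hypothetical async helper
    raise gen.Return(value)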
Example #2
def main():
    options.parse_command_line()

    if len(sys.argv) == 1:
        app_log.warning(
            "Warning: no config file specified, using the default config. "
            "In order to specify a config file use "
            "'hurray --config=/path/to/hurray.conf'")

    # check if base directory exists and is writable (TODO)
    absbase = os.path.abspath(os.path.expanduser(options.base))
    if not os.access(absbase, os.W_OK):
        app_log.error(
            "base directory {} does not exist or is not writable!".format(
                absbase))
        sys.exit(1)

    SWMR_SYNC.set_strategy(options.locking)

    server = HurrayServer(workers=options.workers)

    sockets = []

    if options.port != 0:
        sockets = bind_sockets(options.port, options.host)
        app_log.info("Listening on %s:%d", options.host, options.port)

    if options.socket:
        socket_file = os.path.abspath(os.path.expanduser(options.socket))
        app_log.info("Listening on {}".format(socket_file))
        sockets.append(bind_unix_socket(socket_file))

    if len(sockets) < 1:
        app_log.error('Define a socket and/or a port > 0')
        return

    signal.signal(signal.SIGTERM, partial(sig_handler, server))
    signal.signal(signal.SIGINT, partial(sig_handler, server))

    # Note that it does not make much sense to start >1 (master) processes
    # because they implement an async event loop that creates worker processes
    # itself.
    server.start(options.processes)

    # deregister the multiprocessing exit handler for the forked children.
    # Otherwise they try to join the shared (parent) process manager
    # SWMRSyncManager.
    import atexit
    atexit.unregister(_exit_function)

    server.add_sockets(sockets)
    IOLoop.current().start()
Example #3
File: gen.py Project: yetone/hurray
def sleep(duration):
    """Return a `.Future` that resolves after the given number of seconds.

    When used with ``yield`` in a coroutine, this is a non-blocking
    analogue to `time.sleep` (which should not be used in coroutines
    because it is blocking)::

        yield gen.sleep(0.5)

    Note that calling this function on its own does nothing; you must
    wait on the `.Future` it returns (usually by yielding it).

    .. versionadded:: 4.1
    """
    f = Future()
    IOLoop.current().call_later(duration, lambda: f.set_result(None))
    return f
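A hedged sketch of the coroutine pattern the docstring describes, with poll_once as a hypothetical unit of work:

@gen.coroutine
def poll_forever():
    while True:
        poll_once()           # hypothetical synchronous work
        yield gen.sleep(0.5)  # non-blocking pause; the IOLoop stays responsive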
Example #4
File: gen.py Project: yetone/hurray
    def __init__(self, future, io_loop=None):
        """Adapts a `.Future` to the `YieldPoint` interface.

        .. versionchanged:: 4.1
           The ``io_loop`` argument is deprecated.
        """
        self.future = future
        self.io_loop = io_loop or IOLoop.current()
Example #5
def main():
    options.parse_command_line()
    server = HurrayServer()

    sockets = []

    if options.port != 0:
        sockets = bind_sockets(options.port, options.host)
        app_log.info("Listening on %s:%d", options.host, options.port)

    if options.socket:
        app_log.info("Listening on %s", options.socket)
        sockets.append(bind_unix_socket(options.socket))

    if len(sockets) < 1:
        app_log.error('Define a socket and/or a port > 0')
        return

    server.start(options.processes)
    server.add_sockets(sockets)
    IOLoop.current().start()
Example #6
    def add_sockets(self, sockets):
        """Makes this server start accepting connections on the given sockets.

        The ``sockets`` parameter is a list of socket objects such as
        those returned by `~hurray.server.netutil.bind_sockets`.
        `add_sockets` is typically used in combination with that
        method and `hurray.server.process.fork_processes` to provide greater
        control over the initialization of a multi-process server.
        """
        if self.io_loop is None:
            self.io_loop = IOLoop.current()

        for sock in sockets:
            self._sockets[sock.fileno()] = sock
            add_accept_handler(sock,
                               self._handle_connection,
                               io_loop=self.io_loop)
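The bind-then-fork pattern the docstring refers to might look like the sketch below; the import paths are inferred from the docstring's references and are assumptions, and HurrayServer stands in for any server exposing add_sockets:

from hurray.server.ioloop import IOLoop
from hurray.server.netutil import bind_sockets
from hurray.server.process import fork_processes

sockets = bind_sockets(8888)  # bind once, in the parent process
fork_processes(0)             # fork one child per CPU core
server = HurrayServer()
server.add_sockets(sockets)   # every child accepts on the shared sockets
IOLoop.current().start()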
Example #7
def add_accept_handler(sock, callback, io_loop=None):
    """Adds an `.IOLoop` event handler to accept new connections on ``sock``.

    When a connection is accepted, ``callback(connection, address)`` will
    be run (``connection`` is a socket object, and ``address`` is the
    address of the other end of the connection).  Note that this signature
    is different from the ``callback(fd, events)`` signature used for
    `.IOLoop` handlers.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    if io_loop is None:
        io_loop = IOLoop.current()

    def accept_handler(fd, events):
        # More connections may come in while we're handling callbacks;
        # to prevent starvation of other tasks we must limit the number
        # of connections we accept at a time.  Ideally we would accept
        # up to the number of connections that were waiting when we
        # entered this method, but this information is not available
        # (and rearranging this method to call accept() as many times
        # as possible before running any callbacks would have adverse
        # effects on load balancing in multiprocess configurations).
        # Instead, we use the (default) listen backlog as a rough
        # heuristic for the number of connections we can reasonably
        # accept at once.
        for i in xrange(_DEFAULT_BACKLOG):
            try:
                connection, address = sock.accept()
            except socket.error as e:
                # _ERRNO_WOULDBLOCK indicates we have accepted every
                # connection that is available.
                if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
                    return
                # ECONNABORTED indicates that there was a connection
                # but it was closed while still in the accept queue.
                # (observed on FreeBSD).
                if errno_from_exception(e) == errno.ECONNABORTED:
                    continue
                raise
            callback(connection, address)

    io_loop.add_handler(sock, accept_handler, IOLoop.READ)
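Using this directly requires a non-blocking listening socket, since accept() must never stall the loop. A minimal sketch, with handle_client as a hypothetical callback:

import socket

def handle_client(connection, address):
    print("accepted connection from", address)
    connection.close()

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(False)         # accept() must never block the IOLoop
sock.bind(("127.0.0.1", 8888))
sock.listen(128)

add_accept_handler(sock, handle_client)
IOLoop.current().start()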
Example #8
File: gen.py Project: yetone/hurray
    def __init__(self, gen, result_future, first_yielded):
        self.gen = gen
        self.result_future = result_future
        self.future = _null_future
        self.yield_point = None
        self.pending_callbacks = None
        self.results = None
        self.running = False
        self.finished = False
        self.had_exception = False
        self.io_loop = IOLoop.current()
        # For efficiency, we do not create a stack context until we
        # reach a YieldPoint (stack contexts are required for the historical
        # semantics of YieldPoints, but not for Futures).  When we have
        # done so, this field will be set and must be called at the end
        # of the coroutine.
        self.stack_context_deactivate = None
        if self.handle_yield(first_yielded):
            self.run()
Example #9
def sig_handler(server, sig, frame):
    io_loop = IOLoop.instance()
    tid = process.task_id() or 0

    def stop_loop(deadline):
        now = time.time()
        if now < deadline and (io_loop._callbacks or io_loop._timeouts):
            io_loop.add_timeout(now + 1, stop_loop, deadline)
        else:
            io_loop.stop()
            server.shutdown_pool()
            logging.info('Task %d shutdown complete' % tid)

    def shutdown():
        logging.info('Stopping hurray server task %d' % tid)
        server.stop()
        stop_loop(time.time() + SHUTDOWN_GRACE_PERIOD)

    io_loop.add_callback_from_signal(shutdown)
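This handler expects the server instance bound as its first argument; Example #2 registers it with functools.partial:

import signal
from functools import partial

signal.signal(signal.SIGTERM, partial(sig_handler, server))
signal.signal(signal.SIGINT, partial(sig_handler, server))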
Example #10
File: gen.py Project: yetone/hurray
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
    """Wraps a `.Future` (or other yieldable object) in a timeout.

    Raises `TimeoutError` if the input future does not complete before
    ``timeout``, which may be specified in any form allowed by
    `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
    relative to `.IOLoop.time`).

    If the wrapped `.Future` fails after it has timed out, the exception
    will be logged unless it is of a type contained in ``quiet_exceptions``
    (which may be an exception type or a sequence of types).

    Does not support `YieldPoint` subclasses.

    .. versionadded:: 4.0

    .. versionchanged:: 4.1
       Added the ``quiet_exceptions`` argument and the logging of unhandled
       exceptions.

    .. versionchanged:: 4.4
       Added support for yieldable objects other than `.Future`.
    """
    # TODO: allow YieldPoints in addition to other yieldables?
    # Tricky to do with stack_context semantics.
    #
    # It's tempting to optimize this by cancelling the input future on timeout
    # instead of creating a new one, but A) we can't know if we are the only
    # one waiting on the input future, so cancelling it might disrupt other
    # callers and B) concurrent futures can only be cancelled while they are
    # in the queue, so cancellation cannot reliably bound our waiting time.
    future = convert_yielded(future)
    result = Future()
    chain_future(future, result)
    if io_loop is None:
        io_loop = IOLoop.current()

    def error_callback(future):
        try:
            future.result()
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                app_log.error("Exception in Future %r after timeout",
                              future,
                              exc_info=True)

    def timeout_callback():
        result.set_exception(TimeoutError("Timeout"))
        # In case the wrapped future goes on to fail, log it.
        future.add_done_callback(error_callback)

    timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
    if isinstance(future, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here).
        future.add_done_callback(
            lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(
            future, lambda future: io_loop.remove_timeout(timeout_handle))
    return result
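A minimal usage sketch, assuming fetch() is some hypothetical coroutine and a one-second budget:

import datetime

@gen.coroutine
def fetch_with_deadline():
    try:
        result = yield with_timeout(datetime.timedelta(seconds=1), fetch())
    except TimeoutError:
        # The wrapped future may still complete later; if it fails then,
        # error_callback above logs it unless its type is in quiet_exceptions.
        result = None
    raise gen.Return(result)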