Example #1
def killsock(sock):
    """Attempts to cleanly shutdown a socket. Regardless of cleanliness,
    ensures that upon return, the socket is fully closed, catching any
    exceptions along the way. A safe and prompt way to dispose of the
    socket, freeing system resources.
    """
    if hasattr(sock, '_sock'):
        ml.ld("Killing socket {0}/FD {1}", id(sock), sock._sock.fileno())
    else:
        ml.ld("Killing socket {0}", id(sock))
    try:
        # TODO: better ideas for how to get SHUT_RDWR constant?
        sock.shutdown(gevent.socket.SHUT_RDWR)
    except gevent.socket.error:
        pass  # just being nice to the server, don't care if it fails
    except Exception as e:
        log_rec = context.get_context().log.info("SOCKET", "SHUTDOWN")
        log_rec.failure('error ({exc}) shutting down socket: {socket}',
                        socket=sock, exc=e)
    try:
        sock.close()
    except gevent.socket.error:
        pass  # just being nice to the server, don't care if it fails
    except Exception as e:
        log_rec = context.get_context().log.info("SOCKET", "CLOSE")
        log_rec.failure('error ({exc}) closing socket: {socket}',
                        socket=sock, exc=e)
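A minimal usage sketch (hypothetical host and port; assumes the caller owns the connection's lifetime):

# hypothetical usage: dispose of a client socket without caring
# whether the peer has already gone away
conn = gevent.socket.create_connection(('example.com', 80))
try:
    conn.sendall('GET / HTTP/1.0\r\n\r\n')
finally:
    killsock(conn)  # fully closed on return; never raises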
Example #2
def apply(self, func, args, kwargs):
    done = gevent.event.Event()
    self.in_q.append((done, func, args, kwargs, curtime()))
    context.get_context().stats['cpu_bound.depth'].add(1 + len(self.in_q))
    while not self.in_async:
        gevent.sleep(0.01)  # poll until worker thread has initialized
    self.in_async.send()
    done.wait()
    res = self.results[done]
    del self.results[done]
    if isinstance(res, self._Caught):
        raise res.err
    return res
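A sketch of driving this queue from a greenlet, assuming apply is a method of the CPUThread class seen in later examples (the checksum workload is hypothetical):

def checksum(data):  # hypothetical CPU-bound workload
    return sum(ord(c) for c in data) % 255

cpu_thread = CPUThread()  # assumed constructor, per the later examples
result = cpu_thread.apply(checksum, ('payload',), {})
# an exception raised inside the function comes back wrapped in
# _Caught and is re-raised here in the calling greenlet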
Example #3
def get_sampro_data():
    processed = defaultdict(int)
    profiler = context.get_context().profiler
    if profiler is None:
        return "(sampling disabled; enable with infra.get_context().set_sampling(True))"
    for k, v in profiler.live_data_copy().iteritems():
        code, lineno, parentcode = k
        if parentcode is None:
            # TODO: shorten filenames up to the .git directory
            key = (code.co_name + "(" + code.co_filename +
                   ", " + str(code.co_firstlineno) + ")")
            processed[key] += v
    processed = reversed(sorted([(v, k) for k, v in processed.items()]))
    return dict(processed)
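Per the function's own hint, the sampling profiler must be switched on first; a brief sketch:

# hypothetical: enable sampling, let traffic flow, then read the data
context.get_context().set_sampling(True)
# ... serve some requests ...
print(get_sampro_data())  # {"name(file, firstlineno)": samples, ...}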
Example #4
def _run(self):
    # in_cpubound_thread is sentinel to prevent double thread dispatch
    context.get_context().thread_locals.in_cpubound_thread = True
    try:
        self.in_async = gevent.get_hub().loop.async()
        self.in_q_has_data = gevent.event.Event()
        self.in_async.start(self.in_q_has_data.set)
        while not self.stopping:
            if not self.in_q:
                # wait for more work
                self.in_q_has_data.clear()
                self.in_q_has_data.wait()
                continue
            # arbitrary non-preemptive service discipline can go here
            # FIFO for now, but we should experiment with others
            jobid, func, args, kwargs, enqueued = self.in_q.popleft()
            started = curtime()
            try:
                ret = self.results[jobid] = func(*args, **kwargs)
            except Exception as e:
                ret = self.results[jobid] = self._Caught(e)
            self.out_q.append(jobid)
            self.out_async.send()
            # keep track of some statistics
            queued, duration = started - enqueued, curtime() - started
            size = None
            # ret is set up above before the async send
            if hasattr(ret, '__len__') and callable(ret.__len__):
                size = len(ret)
            _queue_stats('cpu_bound', func.__name__, queued, duration, size)
    except:
        self._error()
Example #5
        def g(*a, **kw):
            enqueued = curtime()
            ctx = context.get_context()
            started = []

            def in_thread(*a, **kw):
                ml.ld3("In thread {0}", f.__name__)
                started.append(curtime())
                return f(*a, **kw)

            # some modules import things lazily; it is too dangerous
            # to run a function in another thread if the import lock is
            # held by the current thread (this happens rarely -- only
            # if the thread dispatched function is being executed at
            # the import time of a module)
            if not ctx.cpu_thread_enabled or imp.lock_held():
                ret = in_thread(*a, **kw)
            elif in_threadpool() is self.pool:
                ret = in_thread(*a, **kw)
            else:
                ctx.stats[self.name + '.depth'].add(1 + len(self.pool))
                ret = self.pool.apply_e((Exception,), in_thread, a, kw)
                ml.ld3("Enqueued to thread {0}/depth {1}", f.__name__, len(pool))
            start = started[0]
            duration = curtime() - start
            queued = start - enqueued
            if hasattr(ret, '__len__') and callable(ret.__len__):
                prsize = ret.__len__()  # parameter-or-return size
            elif a and hasattr(a[0], '__len__') and callable(a[0].__len__):
                prsize = a[0].__len__()
            else:
                prsize = None
            _queue_stats(name, f.__name__, queued, duration, prsize)
            return ret
Example #6
def close_threadpool():
    tlocals = context.get_context().thread_locals
    if hasattr(tlocals, 'cpu_bound_thread'):
        ml.ld2("Closing thread pool {0}", id(tlocals.cpu_thread))
        cpu_thread = tlocals.cpu_bound_thread
        cpu_thread.stopping = True
        del tlocals.cpu_bound_thread
Example #7
def _make_server_sock(address, socket_type=gevent.socket.socket):
    ml.ld("about to bind to {0!r}", address)
    ml2.info('listen_prep').success('about to bind to {addr}', addr=address)
    if isinstance(address, basestring):
        if not hasattr(socket, "AF_UNIX"):
            raise ValueError(
                "attempted to bind to Unix Domain Socket {0:r} "
                "on system without UDS support".format(address))
        if os.path.exists(address):
            os.unlink(address)
        sock = socket_type(socket.AF_UNIX)
    else:
        sock = socket_type()
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(address)

    # NOTE: this is a "hint" to the OS rather than a strict rule about backlog size
    with context.get_context().log.critical('LISTEN') as _log:
        sock.listen(DEFAULT_SOCKET_LISTEN_SIZE)
        if ufork is not None:
            # we may fork, so protect ourselves
            flags = fcntl.fcntl(sock.fileno(), fcntl.F_GETFD)
            fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
        #ml.la("Listen to {0!r} gave this socket {1!r}", address, sock)
        _log.success('Listen to {addr} gave {sock}', addr=address, sock=sock)
    return sock
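A usage sketch (hypothetical addresses); note that a string address selects a Unix domain socket via the basestring branch above:

tcp_sock = _make_server_sock(('0.0.0.0', 8080))    # TCP listener
uds_sock = _make_server_sock('/tmp/example.sock')  # AF_UNIX on POSIX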
Example #8
def _error(self):
    # TODO: something better, but this is darn useful for debugging
    import traceback
    traceback.print_exc()
    ctx = context.get_context()
    tl = ctx.thread_locals
    if hasattr(tl, 'cpu_bound_thread') and tl.cpu_bound_thread is self:
        del tl.cpu_bound_thread
Example #9
def staggered_retries(run, *a, **kw):
    """
    A version of spawn that will block until it is done
    running the function, and which will call the function
    repeatedly as time progresses through the timeouts list.

    Best used for idempotent network calls (e.g. HTTP GETs).

    e.g.::

        user_data = async.staggered_retries(get_data, max_results,
                                            latent_data_ok, public_credential_load,
                                            timeouts_secs=[0.1, 0.5, 1, 2])

    returns None on timeout.
    """
    ctx = context.get_context()
    ready = gevent.event.Event()
    ready.clear()

    def callback(source):
        if source.successful():
            ready.set()

    if 'timeouts_secs' in kw:
        timeouts_secs = kw.pop('timeouts_secs')
    else:
        timeouts_secs = [0.05, 0.1, 0.15, 0.2]
    if timeouts_secs[0] > 0:
        timeouts_secs.insert(0, 0)
    gs = gevent.spawn(run, *a, **kw)
    gs.link_value(callback)
    running = [gs]
    for i in range(1, len(timeouts_secs)):
        this_timeout = timeouts_secs[i] - timeouts_secs[i - 1]
        if ctx.dev:
            this_timeout = this_timeout * 5.0
        ml.ld2("Using timeout {0}", this_timeout)
        try:
            with gevent.Timeout(this_timeout):
                ready.wait()
                break
        except gevent.Timeout:
            ml.ld2("Timed out!")
            log_rec = ctx.log.critical('ASYNC.STAGGER', run.__name__)
            log_rec.failure('timed out after {timeout}',
                            timeout=this_timeout)
            gs = gevent.spawn(run, *a, **kw)
            gs.link_value(callback)
            running.append(gs)
    vals = [l.value for l in running if l.successful()]
    for g in running:
        g.kill()
    if vals:
        return vals[0]
    else:
        return None
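A sketch with a stand-in idempotent call (fetch_profile and its http_get helper are hypothetical):

def fetch_profile(user_id):
    return http_get('/profile/%d' % user_id)  # hypothetical helper

profile = staggered_retries(fetch_profile, 42,
                            timeouts_secs=[0.1, 0.5, 1, 2])
if profile is None:
    pass  # every racing attempt timed out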
Example #10
def start_accepting(self):
    # NB: This is called via BaseServer.start, which is invoked
    # *only* in post_fork.  It is *critical* this thread is *not*
    # started prior to forking, lest it die.
    if self._watcher is None:
        accept_maxlen = context.get_context().accept_queue_maxlen
        self._watcher = ThreadWatcher(self.socket, self.loop,
                                      accept_maxlen)
        self._watcher.start(self._do_read)
Example #11
def __init__(self, socket, address, server, rfile=None):
    if rfile is None and hasattr(socket, "_makefile_refs"):
        rfile = socket.makefile()
        # restore gevent's work-around of not letting wsgi.environ['input']
        # keep the socket alive in CLOSE_WAIT state after client is gone
        # to work with async.SSLSocket
        socket._makefile_refs -= 1
    self.state = context.get_context().markov_stats['wsgi_handler'].make_transitor('new')
    super(MakeFileCloseWSGIHandler, self).__init__(socket, address, server, rfile)
Example #12
    def do_read(self):
        # invoked via BaseServer._do_read.  Whereas
        # StreamServer.do_read calls self.socket.accept, we just
        # need to pop off our queue
        if not self._watcher:
            return
        if not self._watcher.queue:
            raise RuntimeError('QUEUE DISAPPEARED')
        client_socket, address, exc, pushed_at = self._watcher.queue.pop()

        age = nanotime() - pushed_at
        context.get_context().stats[CONN_AGE_STATS].add(age / 1e6)

        if exc is not None:
            # raise the Exception
            raise exc

        return gevent.socket.socket(_sock=client_socket), address
Example #13
def get_web_logs():
    sgrp = context.get_context().server_group
    if not sgrp:
        raise EnvironmentError("context.server_group unset (infra.serve() not called)")
    result = []
    for serv in sgrp.servers:
        if hasattr(serv, "log"):
            result.extend(serv.log.msgs)
    return result
Example #14
def get_connections():
    ctx = context.get_context()
    ret = {}
    for model in ctx.connection_mgr.server_models.values():
        ret[model.address] = {
            "last_error": datetime.datetime.fromtimestamp(
                int(model.last_error)).strftime('%Y-%m-%d %H:%M:%S'),
            "sockets": [repr(s) for s in model.active_connections.values()]
        }
    return ret
Example #15
def _filter_stats(prefix):
    out = {}
    ctx = context.get_context()
    # pick up numerical stats
    stat_dict_names = (
        "stats", "durations", "intervals", "volatile_stats", "markov_stats", "sketches")
    for stat_dict_name in stat_dict_names:
        stats = getattr(ctx, stat_dict_name)
        out.update([(k, v) for k, v in stats.items() if prefix in k])
    return out
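A quick sketch of the substring filter (the stat names are hypothetical):

cpu_stats = _filter_stats('cpu_bound')  # e.g. picks up 'cpu_bound.depth'
for name, stat in sorted(cpu_stats.items()):
    print(name, stat)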
Example #16
def g(*a, **kw):
    ctx = context.get_context()
    # in_cpubound_thread is sentinel to prevent double-thread dispatch
    if (not ctx.cpu_thread_enabled or imp.lock_held()
            or getattr(ctx.thread_locals, 'in_cpubound_thread', False)):
        return f(*a, **kw)
    if not hasattr(ctx.thread_locals, 'cpu_bound_thread'):
        ctx.thread_locals.cpu_bound_thread = CPUThread()
    ml.ld3("Calling in cpu thread {0}", f.__name__)
    return ctx.thread_locals.cpu_bound_thread.apply(f, a, kw)
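This closure reads like the body of a thread-dispatch decorator; a hedged sketch of the wrapper shape (the cpu_bound name and the functools.wraps call are assumptions, and the import-lock guards are trimmed):

import functools

def cpu_bound(f):  # hypothetical decorator built on the closure above
    def g(*a, **kw):
        ctx = context.get_context()
        if not hasattr(ctx.thread_locals, 'cpu_bound_thread'):
            ctx.thread_locals.cpu_bound_thread = CPUThread()
        return ctx.thread_locals.cpu_bound_thread.apply(f, a, kw)
    return functools.wraps(f)(g)

@cpu_bound
def render_report(rows):  # heavy work stays off the event loop
    return '\n'.join(','.join(map(str, r)) for r in rows)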
Example #17
def do_open(self, conn_type, req):
    get_log_record = getattr(context.get_context().log, self.LOG_LEVEL)
    with get_log_record(**self.get_log_kwargs(req)) as log_record:
        self.pre_request(log_record, req)
        log_record['full_url'] = req.get_full_url()
        resp = urllib2.AbstractHTTPHandler.do_open(self, conn_type, req)
        log_record['status_code'] = resp.getcode()
        log_record.success('{record_name} got {status_code}')
        self.post_request(log_record, req, resp)
        return resp
Example #18
    def run(self):
        _nanotime = nanotime
        _DROPPED_CONN_STATS = DROPPED_CONN_STATS
        _DROPPED_EXC_STATS = DROPPED_EXC_STATS

        while self.running:
            sock, addr, exc = None, None, None
            try:
                sock, addr = self.listener.accept()
            except socket.error as exc:
                if exc.errno == errno.EINVAL:
                    ml.la('thread accept resulted in EINVAL; '
                          'assuming socket has been shutdown and terminating '
                          'accept loop')
                    self.running = False
                    return
            except Exception as exc:
                # Maybe call sys.exc_info() here?  Is that safe to
                # send across threads?
                pass
            if len(self.queue) >= self.maxlen:
                ctx = context.get_context()
                stats = ctx.stats
                log_rec = ctx.log.critical('HTTP')
                if sock and addr:
                    sock.close()
                    stats[_DROPPED_CONN_STATS].add(1)
                    log_rec.failure(
                        'thread closed {socket} on {addr} '
                        'because queue is full',
                        socket=sock,
                        addr=addr)
                else:
                    stats[_DROPPED_EXC_STATS].add(1)
                    log_rec.failure(
                        'thread dropped {exc} '
                        'because queue is full',
                        exc=exc)
            else:
                self.queue.appendleft((sock, addr, exc, _nanotime()))
                # wake up the watcher greenlet in the main thread
                self.async.send()
Example #19
    def wrap_socket_and_handle(self, client_socket, address):
        ctx = context.get_context()
        ctx.client_sockets[client_socket] = 1

        if not self.ssl_args:
            raise ValueError('https server requires server-side'
                             ' SSL certificate')
        protocol = _socket_protocol(client_socket)
        if protocol == "ssl":
            with ctx.log.info('HANDLE.SSL', str(address[0])):
                ssl_socket = async.wrap_socket_context(
                    client_socket, **self.ssl_args)
            ctx.sketches['http.ssl.client_ips'].add(address[0])
            return self.handle(ssl_socket, address)
        elif protocol == "http":
            with ctx.log.info('HANDLE.NOSSL', str(address[0])):
                self._no_ssl(client_socket, address)
            ctx.sketches['http.client_ips'].add(address[0])
        else:
            context.get_context().intervals["server.pings"].tick()
Example #20
def _queue_stats(qname, fname, queued_ns, duration_ns, size_B=None):
    ctx = context.get_context()
    fprefix = qname + '.' + fname
    ctx.stats[fprefix + '.queued(ms)'].add(queued_ns * 1000)
    ctx.stats[fprefix + '.duration(ms)'].add(duration_ns * 1000)
    ctx.stats[qname + '.queued(ms)'].add(queued_ns * 1000)
    ctx.stats[qname + '.duration(ms)'].add(duration_ns * 1000)
    if size_B is not None:
        ctx.stats[fprefix + '.len'].add(size_B)
        if duration_ns:  # may be 0
            ctx.stats[fprefix + '.rate(B/ms)'].add(size_B / (duration_ns * 1000.0))
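A sketch mirroring how the worker loop above feeds this function, with timings taken straight from curtime() deltas (do_work is hypothetical):

enqueued = curtime()
# ... the job sits in the queue ...
started = curtime()
result = do_work()  # hypothetical job
_queue_stats('cpu_bound', 'do_work',
             started - enqueued,   # time spent queued
             curtime() - started,  # time spent running
             len(result))          # optional payload size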
Example #21
def get_config_dict():
    'Returns information about the current environment in a dictionary'
    ctx = context.get_context()
    data = []
    keys_handled_directly = ['protected', 'ssl_contexts']
    for k in ctx.__dict__:
        if k not in keys_handled_directly:
            data.append([k, getattr(ctx, k)])
    # TODO: log and ssl_context info

    return dict([(e[0], e[1:]) for e in data])
Ejemplo n.º 33
0
def _queue_stats(qname, fname, queued_ns, duration_ns, size_B=None):
    ctx = context.get_context()
    fprefix = qname + '.' + fname
    ctx.stats[fprefix + '.queued(ms)'].add(queued_ns * 1000)
    ctx.stats[fprefix + '.duration(ms)'].add(duration_ns * 1000)
    ctx.stats[qname + '.queued(ms)'].add(queued_ns * 1000)
    ctx.stats[qname + '.duration(ms)'].add(duration_ns * 1000)
    if size_B is not None:
        ctx.stats[fprefix + '.len'].add(size_B)
        if duration_ns:  # may be 0
            ctx.stats[fprefix + '.rate(B/ms)'].add(size_B / (duration_ns * 1000.0))
Example #22
def get_warnings(path=None):
    warns = context.get_context().get_warnings()
    if path:
        path_segs = path.split('.')
        for seg in path_segs:
            if seg in warns:
                warns = warns[seg]
            else:
                return "(none)"
    warns = _dict_map(warns, _transform)
    return warns
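A sketch of the dotted-path drill-down (the path itself is hypothetical):

get_warnings()                 # the whole transformed warnings tree
get_warnings('sockets.stale')  # just that subtree, or "(none)"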
Example #23
def serve_forever(self):
    ctx = context.get_context()
    ctx.running = True
    try:
        # set up greenlet switch monitoring
        import greenlet
        greenlet.settrace(ctx._trace)
    except AttributeError:
        pass  # oh well
    if not self.prefork:
        self.start()
        log_msg = 'Group initialized and serving forever...'
        ctx.log.critical('GROUP.INIT').success(log_msg)
        if ctx.dev and ctx.dev_service_repl_enabled and os.isatty(0):
            if not hasattr(os, "getpgrp"):  # Windows
                fg = True
            else:
                try:
                    fg = os.getpgrp() == os.tcgetpgrp(sys.stdout.fileno())
                except OSError:
                    fg = False
            if fg:
                # only start REPL on unix machines if running in foreground
                async.start_repl({'server': ctx.server_group})
        try:
            while 1:
                async.sleep(1.0)
        finally:
            self.stop()
        return
    if not ufork:
        raise RuntimeError(
            'attempted to run pre-forked on platform without fork')
    if ctx.tracing:  # a little bit hacky, disable tracing in arbiter
        self.trace_in_child = True
        ctx.set_greenlet_trace(False)
    else:
        self.trace_in_child = False
    self.arbiter = ufork.Arbiter(post_fork=self._post_fork,
                                 child_pre_exit=self.stop,
                                 parent_pre_stop=ctx.stop,
                                 size=self.num_workers,
                                 sleep=async.sleep,
                                 fork=gevent.fork,
                                 child_memlimit=ctx.worker_memlimit)
    if self.daemonize:
        pgrpfile = os.path.join(ctx.process_group_file_path,
                                '{0}.pgrp'.format(ctx.appname))
        self.arbiter.spawn_daemon(pgrpfile=pgrpfile)
    else:
        self.arbiter.run()
Example #24
def get_recent(thing1=None, thing2=None):
    ctx = context.get_context()
    if thing1 is None:
        return [k for k in ctx.recent.keys()]
    if thing2 is None:
        if isinstance(ctx.recent[thing1], (dict, cache.Cache)):
            return dict([(repr(k), list(v))
                         for k, v in ctx.recent[thing1].items()])
        else:
            return list(ctx.recent[thing1])
    else:
        if isinstance(ctx.recent[thing1], (dict, cache.Cache)):
            return dict([(repr(k), list(v)) for k, v in
                         ctx.recent[thing1].items() if thing2 in str(k)])
        else:
            return '{"error":"' + thing1 + ' recent data is not dict."}'
Example #25
def get_connection_mgr():
    connection_mgr = context.get_context().connection_mgr
    server_models, sockpools = {}, {}
    for k in connection_mgr.server_models:
        server_model = connection_mgr.server_models[k]
        server_models[repr(k)] = {
            "info": repr(server_model),
            "fds": [s.fileno() for s in server_model.active_connections.keys()]
        }
    for prot in connection_mgr.sockpools:
        for sock_type in connection_mgr.sockpools[prot]:
            sockpool = connection_mgr.sockpools[prot][sock_type]
            sockpools[repr((prot, sock_type))] = {
                "info": repr(sockpool),
                "addresses": map(repr, sockpool.free_socks_by_addr.keys())
            }
    return {'server_models': server_models, 'sockpools': sockpools}
Example #26
def view_obj(request, obj_id=None):
    import obj_browser

    if obj_id is None:
        return clastic.redirect(
            request.path + '/{0}'.format(id(context.get_context())))

    for obj in gc.get_objects():
        if id(obj) == obj_id:
            break
    else:
        raise ValueError("no Python object with id {0}".format(obj_id))

    path, _, _ = request.path.rpartition('/')
    return clastic.Response(
        obj_browser.render_html(
            obj, lambda id: path + '/{0}'.format(id)),
        mimetype="text/html")
Example #27
    def _post_fork(self):
        # TODO: revisit this with newer gevent release
        hub = gevent.hub.get_hub()
        hub.loop.reinit()  # reinitializes libev
        hub._threadpool = None  # eliminate gevent's internal threadpools
        gevent.sleep(0)  # let greenlets run
        # finally, eliminate our threadpools
        ctx = context.get_context()
        ctx.thread_locals = threading.local()
        # do not print log failures -- they are in stats
        ctx.log_failure_print = False
        if self.daemonize:
            ll.use_std_out()
        if ctx.sampling:
            ctx.set_sampling(False)
            ctx.set_sampling(True)

        if self.post_fork:
            self.post_fork()
        msg = 'successfully started process {pid}'
        ctx.log.critical('WORKER', 'START').success(msg, pid=os.getpid())
        if self.trace_in_child:  # re-enable tracing LONG SPIN detection
            ctx.set_greenlet_trace(True)  # if it was enabled before forking
        self.start()
Example #28
def request(self, next, request, _route):
    url = getattr(_route, 'pattern', '').encode('utf-8')
    ctx = context.get_context()
    with ctx.log.info('URL', url) as request_txn:
        request_txn.msg = {}
        return next(api_cal_trans=request_txn)
Example #29
    def __init__(self,
                 wsgi_apps=(),
                 stream_handlers=(),
                 custom_servers=(),
                 prefork=None,
                 daemonize=None,
                 socket_ark_self=None,
                 socket_ark_peer=None,
                 socket_ark_secret='secret',
                 **kw):
        ctx = context.get_context()
        self.wsgi_apps = list(wsgi_apps or [])
        self.stream_handlers = list(stream_handlers or [])
        self.custom_servers = list(custom_servers or [])

        self.prefork = prefork
        self.daemonize = daemonize
        # max number of concurrent clients per worker
        self.max_clients = kw.pop('max_clients', DEFAULT_MAX_CLIENTS)
        self.num_workers = kw.pop('num_workers', DEFAULT_NUM_WORKERS)
        self.post_fork = kw.pop('post_fork', None)  # post-fork callback
        self.server_log = kw.pop('gevent_log', None)

        self.meta_address = kw.pop('meta_address', None)
        self.console_address = kw.pop('console_address', None)
        self._require_client_auth = kw.pop('require_client_auth', True)

        if kw:
            raise TypeError('unexpected keyword args: %r' % kw.keys())

        self.client_pool = gevent.pool.Pool(ctx.max_concurrent_clients)
        self.servers = []
        self.socks = {}

        # setup socket ark if keeping sockets alive
        self._init_socket_ark(socket_ark_self, socket_ark_peer,
                              socket_ark_secret)

        # we do NOT want a gevent socket if we're going to use a
        # thread to manage our accepts; it's critical that the
        # accept() call *blocks*
        socket_type = gevent.socket.socket if not ufork else socket.socket

        for app, address, ssl in wsgi_apps:
            sock = self.acquire_server_sock(address, socket_type=socket_type)

            if isinstance(ssl, SSLContext):
                ssl_context = ssl
            elif ssl:
                ssl_context = ctx.ssl_context
            else:
                ssl_context = None
            if ssl_context:
                if not self._require_client_auth:
                    server = MultiProtocolWSGIServer(sock,
                                                     app,
                                                     spawn=self.client_pool,
                                                     context=ssl_context)
                else:
                    server = SslContextWSGIServer(sock,
                                                  app,
                                                  spawn=self.client_pool,
                                                  context=ssl_context)
            else:
                server = ThreadQueueWSGIServer(sock, app)
            server.log = self.server_log or RotatingGeventLog()
            self.servers.append(server)
            self.socks[server] = sock
            # prevent a "blocking" call to DNS on each request
            # (NOTE: although the OS won't block, gevent will dispatch
            # to a threadpool which is expensive)
            server.set_environ({
                'SERVER_NAME': socket.getfqdn(address[0]),
                'wsgi.multiprocess': True
            })
        for handler, address in self.stream_handlers:
            sock = self.acquire_server_sock(address)
            server = gevent.server.StreamServer(sock,
                                                handler,
                                                spawn=self.client_pool)
            self.servers.append(server)
        for server_class, address in self.custom_servers:
            # our stated requirement is that users provide a subclass
            # of StreamServer, which would *not* know about our thread
            # queue.  consequently we can't give it a blocking socket
            sock = self.acquire_server_sock(address)
            server = server_class(sock, spawn=self.client_pool)
            self.servers.append(server)
        if self.console_address is not None:
            try:
                sock = self.acquire_server_sock(self.console_address)
            except Exception as e:
                print "WARNING: unable to start backdoor server on port", ctx.backdoor_port, repr(
                    e)
            else:
                server = gevent.server.StreamServer(sock,
                                                    console_sock_handle,
                                                    spawn=self.client_pool)
                self.servers.append(server)
        # set all servers max_accept to 1 since we are in a pre-forked/multiprocess environment
        for server in self.servers:
            server.max_accept = 1
Example #30
def reset_stats():
    import faststat
    context.get_context().stats = defaultdict(faststat.Stats)
    return "OK"
Example #31
import gevent
from gevent import pywsgi
import gevent.server
import gevent.socket
import gevent.pool

import clastic
from faststat import nanotime

from support import async
from support import context
from support.crypto import SSLContext

import ll
ml = ll.LLogger()
ml2 = context.get_context().log.get_module_logger()

# TODO: autoadd console and meta servers

DEFAULT_NUM_WORKERS = 1
DEFAULT_MAX_CLIENTS = 1024  # per worker
DEFAULT_SOCKET_LISTEN_SIZE = 128


class Group(object):
    def __init__(self, wsgi_apps=(), stream_handlers=(), custom_servers=(),
                 prefork=None, daemonize=None, **kw):
        """\
        Create a new Group of servers which can be started/stopped/forked
        as a group.
Example #32
import gevent.server
import gevent.socket
import gevent.pool

import clastic
from faststat import nanotime
from boltons.socketutils import BufferedSocket

from support import async
from support import context
from support.crypto import SSLContext
from support import buffered_socket

import ll
ml = ll.LLogger()
ml2 = context.get_context().log.get_module_logger()

# TODO: autoadd console and meta servers

DEFAULT_NUM_WORKERS = 1
DEFAULT_MAX_CLIENTS = 1024  # per worker
DEFAULT_SOCKET_LISTEN_SIZE = 128


class Group(object):
    """\
    Create a new Group of servers which can be started/stopped/forked
    as a group.

    *wsgi_apps* should be of the form  [ (wsgi_app, address, ssl), ...  ]
Example #33
    def __init__(self, wsgi_apps=(), stream_handlers=(), custom_servers=(),
                 prefork=None, daemonize=None, **kw):
        """\
        Create a new Group of servers which can be started/stopped/forked
        as a group.

        *wsgi_apps* should be of the form  [ (wsgi_app, address, ssl), ...  ]

        *stream_handlers* should be of the form  [ (handler_func, address), ...  ]

        *custom_servers* should be of the form [ (server_class, address), ... ]
        where server_class refers to subclasses of gevent.server.StreamServer
        which define their own handle function

        address here refers to a tuple (ip, port), or more generally anything which is
        acceptable as the address parameter to
        `socket.bind() <http://docs.python.org/2/library/socket.html#socket.socket.bind>`_.

        `handler_func` should have the following signature: f(socket, address), following
        the `convention of gevent <http://www.gevent.org/servers.html>`_.
        """
        ctx = context.get_context()
        self.wsgi_apps = list(wsgi_apps or [])
        self.stream_handlers = list(stream_handlers or [])
        self.custom_servers = list(custom_servers or [])

        self.prefork = prefork
        self.daemonize = daemonize
        # max number of concurrent clients per worker
        self.max_clients = kw.pop('max_clients', DEFAULT_MAX_CLIENTS)
        self.num_workers = kw.pop('num_workers', DEFAULT_NUM_WORKERS)
        self.post_fork = kw.pop('post_fork', None)  # post-fork callback
        self.server_log = kw.pop('gevent_log', None)

        self.meta_address = kw.pop('meta_address', None)
        self.console_address = kw.pop('console_address', None)
        self._require_client_auth = kw.pop('require_client_auth', True)

        if kw:
            raise TypeError('unexpected keyword args: %r' % kw.keys())

        self.client_pool = gevent.pool.Pool(ctx.max_concurrent_clients)
        self.servers = []
        self.socks = {}

        # we do NOT want a gevent socket if we're going to use a
        # thread to manage our accepts; it's critical that the
        # accept() call *blocks*
        socket_type = gevent.socket.socket if not ufork else socket.socket

        for app, address, ssl in wsgi_apps:
            sock = _make_server_sock(address, socket_type=socket_type)

            if isinstance(ssl, SSLContext):
                ssl_context = ssl
            elif ssl:
                ssl_context = ctx.ssl_context
            else:
                ssl_context = None
            if ssl_context:
                if not self._require_client_auth:
                    server = MultiProtocolWSGIServer(
                        sock, app, spawn=self.client_pool, context=ssl_context)
                else:
                    server = SslContextWSGIServer(
                        sock, app, spawn=self.client_pool, context=ssl_context)
            else:
                server = ThreadQueueWSGIServer(sock, app)
            server.log = self.server_log or RotatingGeventLog()
            self.servers.append(server)
            self.socks[server] = sock
            # prevent a "blocking" call to DNS on each request
            # (NOTE: although the OS won't block, gevent will dispatch
            # to a threadpool which is expensive)
            server.set_environ({'SERVER_NAME': socket.getfqdn(address[0]),
                                'wsgi.multiprocess': True})
        for handler, address in self.stream_handlers:
            sock = _make_server_sock(address)
            server = gevent.server.StreamServer(sock, handler, spawn=self.client_pool)
            self.servers.append(server)
        for server_class, address in self.custom_servers:
            # our stated requirement is that users provide a subclass
            # of StreamServer, which would *not* know about our thread
            # queue.  consequently we can't give it a blocking socket
            sock = _make_server_sock(address)
            server = server_class(sock, spawn=self.client_pool)
            self.servers.append(server)
        if self.console_address is not None:
            try:
                sock = _make_server_sock(self.console_address)
            except Exception as e:
                print "WARNING: unable to start backdoor server on port", ctx.backdoor_port, repr(e)
            else:
                server = gevent.server.StreamServer(sock, console_sock_handle, spawn=self.client_pool)
                self.servers.append(server)
        # set all servers max_accept to 1 since we are in a pre-forked/multiprocess environment
        for server in self.servers:
            server.max_accept = 1
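A closing sketch tying this constructor to the serve_forever method from the earlier example (the app and address are hypothetical):

def app(environ, start_response):  # trivial WSGI app
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['hello\n']

group = Group(wsgi_apps=[(app, ('0.0.0.0', 8080), False)])
group.serve_forever()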