Example #1
def get_zmq_classes(env=None):  #{
    """ Returns ZMQ Context and Poller classes that are
        compatible with the current environment.

        Tries to detect a monkey-patched green thread environment
        and chooses an appropriate Context class.

        Gevent, Eventlet and Greenhouse are supported as well as the
        regular PyZMQ Context class.
    """
    env = env or detect_green_env()

    if env == 'gevent':
        from zmq.green import Context, Poller

    elif env == 'greenhouse':
        import greenhouse
        green = greenhouse.patched('zmq')
        Context, Poller = green.Context, green.Poller

    elif env == 'eventlet':
        from eventlet.green.zmq import Context
        class Poller(object):
            def __init__(self, *args, **kwargs):
                raise NotImplementedError('eventlet does not support ZeroMQ Poller')

    else:
        from zmq import Context, Poller

    return Context, Poller
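
A minimal usage sketch for the function above (the endpoint and echo logic are made up for illustration, and pyzmq is assumed to be installed):

import zmq

# Hypothetical driver code, not part of the original example.
Context, Poller = get_zmq_classes()   # green or regular classes, as detected

ctx = Context()
sock = ctx.socket(zmq.REP)
sock.bind('tcp://127.0.0.1:5555')     # made-up endpoint

poller = Poller()
poller.register(sock, zmq.POLLIN)

# poll for one second, then echo a single request back if one arrived
if dict(poller.poll(timeout=1000)).get(sock) == zmq.POLLIN:
    sock.send(sock.recv())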
Example #2
def get_zmq_classes(env='auto'):
    """ Returns ZMQ Context and Poller classes that are
        compatible with the current environment.

        If env is 'auto' (default), tries to detect a monkey-patched
        green thread environment. Gevent, Eventlet and Greenhouse are
        supported as well as the regular PyZMQ Context class.
    """
    if env == 'auto':
        env = detect_green_env()

    if env == 'gevent':
        from zmq.green import Context, Poller

    elif env == 'greenhouse':
        import greenhouse
        green = greenhouse.patched('zmq')
        Context, Poller = green.Context, green.Poller

    elif env == 'eventlet':
        from eventlet.green.zmq import Context
        class Poller(object):
            def __init__(self, *args, **kwargs):
                raise NotImplementedError('eventlet does not support ZeroMQ Poller')

    elif env in [None, 'threading']:
        from zmq import Context, Poller

    else:
        raise ValueError('unsupported environment %r' % env)

    return Context, Poller
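
Both variants rely on a detect_green_env() helper that is not shown in these previews. Purely as an illustration of how such detection could work (the project's real implementation may differ), one option is to inspect which module provides the current socket class:

import socket

def detect_green_env():
    # Illustrative guess only: a monkey-patched socket.socket reports the
    # green library as its __module__.
    mod = getattr(socket.socket, '__module__', '') or ''
    for name in ('gevent', 'eventlet', 'greenhouse'):
        if mod.startswith(name):
            return name
    return None  # plain threading environment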
Example #3
#!/usr/bin/env python

import socket
import traceback

import greenhouse

SocketServer = greenhouse.patched("SocketServer")

greenhouse.global_exception_handler(traceback.print_exception)

PORT = 9000
connections = {}


class NCChatHandler(SocketServer.StreamRequestHandler):
    def handle(self):
        self.connection.sendall("enter your name up to 20 characters\r\n")

        name = self.rfile.readline().rstrip()
        if len(name) > 20:
            self.connection.sendall("name too long!\r\n")
            return

        if name in connections:
            self.connection.sendall("already have a '%s'\r\n" % name)
            return

        connections[name] = self

        greenhouse.schedule(self._broadcast, args=(
Example #4
# vim: fileencoding=utf8:et:sw=4:ts=8:sts=4

import fcntl
import os
import struct
import sys
import time

import greenhouse
import mummy

from . import errors, futures

green_os = greenhouse.patched('os')

CMD_JOB = 1
CMD_CLOSE = 2
RC_GOOD = 1
RC_BAD = 2


class Worker(object):
    def __init__(self, func):
        self.func = func
        self.pid = None
        self.readpipe = None
        self.writepipe = None
        self._inflight = None
        self._childpipes = None

    def start(self):
Example #5
# vim: fileencoding=utf8:et:sw=4:ts=8:sts=4

import collections
import functools

import greenhouse

os = greenhouse.patched("os")
signal = greenhouse.patched("signal")

from . import errors, futures, worker


class Master(object):
    def __init__(self, func, count):
        self._func = func
        self._count = count
        self._workers = [None] * count
        self._available = set()
        self._backlog = collections.deque()
        self._worker_closed = greenhouse.Event()
        self._closed_count = 0
        self._closing = False

    def start(self):
        signal.signal(signal.SIGCHLD, self._sigchld)

        for i in xrange(self._count):
            w = worker.Worker(self._func)
            w.start()
            self._workers[i] = w
Example #6
from __future__ import absolute_import

import sys
import time

import greenhouse
import junction
from . import client, configs, service

signal = greenhouse.patched("signal")


cmds = {}
def command(name):
    # decorator factory: register the decorated function in cmds under
    # `name` and return the function itself unchanged
    return lambda f: (cmds.__setitem__(name, f), f)[1]

def default_cmd(options, *args):
    print """missing command.

available commands:
- %s""" % ("\n- ".join(cmds.keys()),)

def getcmd(name):
    return cmds.get(name, default_cmd)

@command('runservice')
def runservice(options, instance_name):
    conf = configs.get_configs(options.configfile)
    instance = [inst for inst in conf.instances if inst.name == instance_name]
    if not instance:
        print >> sys.stderr, "unknown shard %r" % instance_name
Example #7
#!/usr/bin/env python
# vim: fileencoding=utf8:et:sta:ai:sw=4:ts=4:sts=4

import errno
import logging
import os
import sys
import traceback

import greenhouse
import junction

subprocess = greenhouse.patched("subprocess")


# turn on greenhouse and junction logging, and exception printing
greenhouse.configure_logging(level=1)
junction.configure_logging(level=1)
greenhouse.global_exception_handler(traceback.print_exception)

# but quiet the greenhouse scheduler's logger, since the backdoor
# sets and unsets hooks with every REPL line
logging.getLogger("greenhouse.scheduler").setLevel(logging.WARNING)


BACKDOOR_PORT = 9123

def run_backdoor(finished):
    global BACKDOOR_PORT

    try:
Example #8
'''a bunch of examples of how to get a list of urls in parallel

each of them uses a different greenhouse api to retrieve a list of urls in
parallel and return a dictionary mapping urls to response bodies
'''

import greenhouse

# urllib2 obviously doesn't explicitly use greenhouse sockets, but we can
# import it with the socket module patched so it uses them anyway
urllib2 = greenhouse.patched("urllib2")


#
# simply schedule greenlets and use an event to signal the all clear
#

def _get_one(url, results, count, done_event):
    results[url] = urllib2.urlopen(url).read()
    if len(results) == count:
        done_event.set() # wake up the original greenlet

def get_urls(urls):
    count = len(urls)
    results = {}
    alldone = greenhouse.Event()

    # each url gets its own greenlet to fetch it
    for url in urls:
        greenhouse.schedule(_get_one, args=(url, results, count, alldone))
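
The preview stops before get_urls() waits on the event, but the module docstring says these helpers return a dictionary mapping urls to response bodies. A hypothetical driver (urls made up for illustration) could look like:

# Hypothetical usage, not part of the original example.
if __name__ == '__main__':
    pages = get_urls([
        'http://example.com/',
        'http://example.org/',
    ])
    for url, body in pages.items():
        print '%s: %d bytes' % (url, len(body))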
Example #9
# vim: fileencoding=utf8:et:sw=4:ts=8:sts=4

import os

import databacon
import greenhouse

redis = greenhouse.patched("redis")


redispool = None

def setup_redispool():
    global redispool

    #TODO: cross-mount a unix domain socket
    host = "redis0.redis.dev.docker"
    port = 6379
    if "REDIS_PORT_6379_TCP_ADDR" in os.environ:
        host = os.environ["REDIS_PORT_6379_TCP_ADDR"]
        port = os.environ["REDIS_PORT_6379_TCP_PORT"]

    redispool = redis.ConnectionPool(host=host, port=port, db=0,
            socket_timeout=1.0)


def get_redis():
    return redis.Redis(connection_pool=redispool)


def setup_dbpool():