Example #1
 def __init__(self, server, size, connect_timeout):
     Pool.__init__(self, max_size=size)
     self.server = server
     self._connect_timeout = connect_timeout
     self._parent_class_getter = super(MemcacheConnPool, self).get
     try:
         # call the patched .get() if eventlet is older than 0.9.17
         if StrictVersion(eventlet_version) < StrictVersion('0.9.17'):
             self._parent_class_getter = self._upstream_fixed_get
     except ValueError:
         # "invalid" version number or otherwise error parsing version
         pass
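
These snippets all exercise what appears to be eventlet's Pool class (eventlet.pools.Pool). As a baseline for reading them, here is a minimal sketch of the subclass-and-borrow pattern they share; the class and names below are illustrative, not taken from any of the projects above:

from eventlet.pools import Pool

class EchoConnPool(Pool):  # hypothetical subclass, for illustration only
    def create(self):
        # Called by the pool whenever a fresh item is needed and the
        # pool is still below max_size.
        return object()  # stand-in for a real connection

pool = EchoConnPool(max_size=2)
with pool.item() as conn:  # get() on entry, put() back on exit
    pass  # use conn here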
Example #2
 def _setup_dbs(self):
     for db in 'zodb', 'zodb_session':
         options = copy.deepcopy(self.options.__dict__)
         options['zodb_db'] = db
         connectionFactory = getUtility(IZodbFactoryLookup).get()
         self._dbs[db], _ = connectionFactory.getConnection(**options)
     self._pool = Pool(create=self._create, max_size=20)
Example #3
 def __init__(self, host='localhost', port=8125, ipv6=False, prefix=None,
              maxudpsize=512):
     """Create a new client."""
     fam = socket.AF_INET6 if ipv6 else socket.AF_INET
     family, _, _, _, addr = socket.getaddrinfo(host, port, fam, socket.SOCK_DGRAM)[0]
     self._addr = addr
     self._pool = Pool(max_size=100, min_size=0, create=lambda: socket.socket(family, socket.SOCK_DGRAM))
     self._prefix = prefix
     self._maxudpsize = maxudpsize
Example #4
    def put(self, connection):
        if self.current_size > self.max_size:
            self.current_size -= 1
            # close the connection if needed
            if connection.sock is not None:
                connection.close()
            return

        try:
            response = connection.getresponse()
            response.close()
        except httplib.ResponseNotReady:
            pass
        except Exception:
            connection.close()
            connection = self.create()

        if connection.sock is None:
            connection = self.create()

        Pool.put(self, connection)
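
The current_size bookkeeping above relies on the pool counting every created item, including those currently checked out. A stripped-down sketch of the same shrink-on-put idea, with the HTTP handling removed (hypothetical class, assuming eventlet's Pool):

from eventlet.pools import Pool

class ShrinkingPool(Pool):  # illustrative only
    def create(self):
        return object()  # stand-in for a real connection

    def put(self, item):
        # If the pool was resized below the number of live items,
        # drop this one instead of returning it to the free list.
        if self.current_size > self.max_size:
            self.current_size -= 1
            return
        Pool.put(self, item)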
Example #5
    def __init__(
            self, controllercls, connection_factory, exchange, topic,
            pool=None, poolsize=1000):
        self.nodeid = UIDGEN()

        self.max_workers = poolsize
        if pool is None:
            self.procpool = GreenPool(size=poolsize)
        else:
            self.procpool = pool

        self.controller = controllercls()
        self.service = self.controller
        self.topic = topic
        self.greenlet = None
        self.consume_ready = Event()

        node_topic = "{}.{}".format(self.topic, self.nodeid)
        self.nova_queues = [
            entities.get_topic_queue(exchange, topic),
            entities.get_topic_queue(exchange, node_topic),
            entities.get_fanout_queue(topic), ]

        self._channel = None
        self._consumers = None

        self.connection = connection_factory()
        self.connection_factory = connection_factory

        inject_dependencies(self.controller, self)

        self._connection_pool = Pool(
            max_size=self.procpool.size,
            create=connection_factory
        )

        self.workers = set()
        self._pending_ack_messages = []
        self._pending_requeue_messages = []
        self._do_cancel_consumers = False
        self._consumers_cancelled = Event()

        self._timers = list(get_timers(self.controller))
Example #6
class ConnectionPool(Pool):
    """A pool which gives out saranwrapped MySQLdb connections from a pool
    """
    def __init__(self, min_size=0, max_size=4, *args, **kwargs):
        self._args = args
        self._kwargs = kwargs
        Pool.__init__(self, min_size, max_size)

    def create(self):
        return saranwrap.wrap(MySQLdb).connect(*self._args, **self._kwargs)

    def put(self, conn):
        # rollback any uncommitted changes, so that the next process
        # has a clean slate.  This also pokes the process to see if
        # it's dead or None
        try:
            conn.rollback()
        except (AttributeError, DeadProcess):
            conn = self.create()
        # TODO figure out if we're still connected to the database
        if conn is not None:
            Pool.put(self, conn)
        else:
            self.current_size -= 1
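
A sketch of how a pool like this one is typically consumed; the MySQLdb connection parameters below are hypothetical:

pool = ConnectionPool(max_size=4,
                      host='localhost', user='u', passwd='p', db='test')
conn = pool.get()
try:
    cursor = conn.cursor()
    cursor.execute('SELECT 1')
finally:
    pool.put(conn)  # rolls back any uncommitted work before reuse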
Example #7
class StatsClient(StatsClientBase):
    """A client for statsd."""

    def __init__(self, host='localhost', port=8125, ipv6=False, prefix=None,
                 maxudpsize=512):
        """Create a new client."""
        fam = socket.AF_INET6 if ipv6 else socket.AF_INET
        family, _, _, _, addr = socket.getaddrinfo(host, port, fam, socket.SOCK_DGRAM)[0]
        self._addr = addr
        self._pool = Pool(max_size=100, min_size=0, create=lambda: socket.socket(family, socket.SOCK_DGRAM))
        self._prefix = prefix
        self._maxudpsize = maxudpsize

    def _send(self, data):
        """Send data to statsd."""
        try:
            with self._pool.item() as _sock:
                _sock.sendto(data.encode('ascii'), self._addr)
        except socket.error:
            # No time for love, Dr. Jones!
            pass

    def pipeline(self):
        return Pipeline(self)
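
A usage sketch for the client above, assuming a statsd daemon listening locally; the metric name is made up, and the raw datagram follows the standard statsd counter format (name:value|c):

client = StatsClient(host='127.0.0.1', port=8125, prefix='myapp')
# _send() borrows one of up to 100 pooled UDP sockets per call and
# silently drops the datagram on socket errors.
client._send('myapp.requests:1|c')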
Example #8
def generate_report(conf, policy_name=None):
    global json_output
    json_output = config_true_value(conf.get('dump_json', 'no'))
    if policy_name is None:
        policy = POLICIES.default
    else:
        policy = POLICIES.get_by_name(policy_name)
        if policy is None:
            exit('Unable to find policy: %s' % policy_name)
    if not json_output:
        print('Using storage policy: %s ' % policy.name)

    swift_dir = conf.get('swift_dir', '/etc/swift')
    retries = int(conf.get('retries', 5))
    concurrency = int(conf.get('concurrency', 25))
    endpoint_type = str(conf.get('endpoint_type', 'publicURL'))
    region_name = str(conf.get('region_name', ''))
    container_report = config_true_value(conf.get('container_report', 'yes'))
    object_report = config_true_value(conf.get('object_report', 'yes'))
    if not (object_report or container_report):
        exit("Neither container or object report is set to run")
    user_domain_name = str(conf.get('user_domain_name', ''))
    project_domain_name = str(conf.get('project_domain_name', ''))
    project_name = str(conf.get('project_name', ''))
    insecure = config_true_value(conf.get('keystone_api_insecure', 'no'))

    coropool = GreenPool(size=concurrency)

    os_options = {'endpoint_type': endpoint_type}
    if user_domain_name:
        os_options['user_domain_name'] = user_domain_name
    if project_domain_name:
        os_options['project_domain_name'] = project_domain_name
    if project_name:
        os_options['project_name'] = project_name
    if region_name:
        os_options['region_name'] = region_name

    url, token = get_auth(conf['auth_url'], conf['auth_user'],
                          conf['auth_key'],
                          auth_version=conf.get('auth_version', '1.0'),
                          os_options=os_options,
                          insecure=insecure)
    account = url.rsplit('/', 1)[1]
    connpool = Pool(max_size=concurrency)
    connpool.create = lambda: SimpleClient(
        url=url, token=token, retries=retries)

    container_ring = Ring(swift_dir, ring_name='container')
    object_ring = Ring(swift_dir, ring_name=policy.ring_name)

    output = {}
    if container_report:
        output['container'] = container_dispersion_report(
            coropool, connpool, account, container_ring, retries,
            conf.get('partitions'), policy)
    if object_report:
        output['object'] = object_dispersion_report(
            coropool, connpool, account, object_ring, retries,
            conf.get('partitions'), policy)

    return output
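
Note the two different pools in play: the GreenPool bounds how many greenthreads run at once, while the plain Pool bounds how many client objects exist, with create assigned on the instance rather than via a subclass. A minimal sketch of that pairing, with a stand-in task and client:

from eventlet import GreenPool
from eventlet.pools import Pool

coropool = GreenPool(size=4)        # bounds concurrent greenthreads
connpool = Pool(max_size=4)         # bounds shared client objects
connpool.create = lambda: object()  # stand-in for SimpleClient(...)

def task(n):
    with connpool.item() as client:  # borrow one client per job
        return n

for result in coropool.imap(task, range(8)):
    pass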
Example #9
 def __init__(self):
     alarm_rpc_connection = lambda: RpcConnection(True)
     self.metric_pool = Pool(create=RpcConnection, max_size=500)
     self.alarm_pool = Pool(create=alarm_rpc_connection, max_size=20)
Example #10
class ZAuthServer(CmdBase):

    _dbs = {}
    _pool = None

    def _create(self):
        _tm = TransactionManager()
        return DBConnectionContainer(self._dbs['zodb'],
                self._dbs['zodb_session'], _tm)

    def _setup_dbs(self):
        for db in 'zodb', 'zodb_session':
            options = copy.deepcopy(self.options.__dict__)
            options['zodb_db'] = db
            connectionFactory = getUtility(IZodbFactoryLookup).get()
            self._dbs[db], _ = connectionFactory.getConnection(**options)
        self._pool = Pool(create=self._create, max_size=20)

    @property
    @contextmanager
    def db(self):
        """
        Use this context manager exclusively for database access. It manages
        checking DBConnectionContainers out from the pool, wrapping them in
        a tpool Proxy, and cleaning up transactions afterward.
        """
        with self._pool.item() as conns:
            proxied = tpool.Proxy(conns)
            yield proxied
            proxied.abort()

    def run(self, host, port):
        self._setup_dbs()
        wsgi.server(eventlet.listen((host, port)), self.route)

    def _unauthorized(self, msg, start_response):
        start_response('401 Unauthorized', [('Content-Type', 'text/html')])
        return msg

    def _challenge(self, start_response):
        body = 'Please authenticate'
        headers = [
            ('content-type', 'text/plain'),
            ('content-length', str(len(body))),
            ('WWW-Authenticate', 'Basic realm="%s"' % "ZAuthRealm")]
        start_response('401 Unauthorized', headers)
        return [body]

    def handleLogin(self, env, start_response):
        basic = env.get('HTTP_AUTHORIZATION', None)
        if basic is None:
            return self._challenge(start_response)
        response = WSGIResponse()
        request = HTTPRequest(env['wsgi.input'], env, response)
        with self.db as db:
            db.sync()
            authorization = IAuthorizationTool(db.dmd())
            credentials = authorization.extractCredentials(request)

        login = credentials.get('login', None)
        password = credentials.get('password', None)
        # no credentials to test authentication
        if login is None or password is None:
            return self._unauthorized("Missing Authentication Credentials", start_response)

        # test authentication
        if not authorization.authenticateCredentials(login, password):
            return self._unauthorized( "Failed Authentication", start_response)

        # create the session data
        with self.db as db:
            db.sync()
            db.browser_id_manager().REQUEST = request
            tokenId = db.browser_id_manager().getBrowserId(create=1)
        expires = time.time() + 60 * 20
        token = dict(id=tokenId, expires=expires)
        with self.db as db:
            db.sync()
            session = db.session_data()
            if session.get(tokenId) is None:
                session[tokenId] = token
                db.commit()
        start_response('200 OK', [('Content-Type', 'text/html')])
        return json.dumps(token)

    
    def handleValidate(self, env, start_response):
        queryString = env.get('QUERY_STRING')
        if not queryString:
            return self._unauthorized("Missing Token Id", start_response)

        tokenId = queryString.replace('id=', '')
        if not tokenId:
            return self._unauthorized("Missing Token Id", start_response)
        token = None
        expired = False
        with self.db as db:
            db.sync()
            token = db.session_data().get(tokenId)
        if token is None:
            return self._unauthorized("Unable to find token %s " % tokenId, start_response)
        expired = time.time() >= token['expires']
        if expired:
            return self._unauthorized("Token Expired", start_response)
        start_response('200 OK', [('Content-Type', 'text/html')])
        return json.dumps(token)

    def route(self, env, start_response):
        path = env['PATH_INFO']
        if path == '/authorization/login':
            return self.handleLogin(env, start_response)
        elif path == '/authorization/validate':
            return self.handleValidate(env, start_response)
        elif path == '/':
            return self.index(env, start_response)
        else:
            start_response('404 Not Found', [('Content-Type', 'text/html')])
            return ""

    def index(self, env, start_response):
        start_response('200 OK', [('Content-Type', 'text/html')])
        return """
        <html>
            <head>
                <title>ZAuthService</title>
            </head>
            <body>
                <h1>Menu</h1>
                <ul>
                    <li><a href="/authorization/login">Login</a></li>
                    <li><a href="/authorization/validate">Validate</a></li>
                </ul>
            </body>
        </html>
        """

    def buildOptions(self):
        CmdBase.buildOptions(self)
        connectionFactory = getUtility(IZodbFactoryLookup).get()
        connectionFactory.buildOptions(self.parser)
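
The db accessor above stacks @property on top of @contextmanager, so the attribute itself can be used directly in a with-statement. A self-contained sketch of that stacking, with a hypothetical class:

from contextlib import contextmanager

class Holder(object):
    @property
    @contextmanager
    def item(self):
        resource = object()  # check something out
        try:
            yield resource
        finally:
            pass  # return it / clean up here

holder = Holder()
with holder.item as resource:  # note: no call parentheses
    print(resource)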
Example #11
 def __init__(self, mocks):
     Pool.__init__(self, max_size=2)
     self.mocks = mocks
Example #12
 def __init__(self, min_size=0, max_size=4, *args, **kwargs):
     self._args = args
     self._kwargs = kwargs
     Pool.__init__(self, min_size, max_size)
Example #13
class RemoteProcedureCall(object):
    def __init__(self):
        alarm_rpc_connection = lambda: RpcConnection(True)
        self.metric_pool = Pool(create=RpcConnection, max_size=500)
        self.alarm_pool = Pool(create=alarm_rpc_connection, max_size=20)

    def read_msg(self):
        msg = 'AMQP Connection is closed %d time(s)... retrying.'
        max_retries = 5
        with self.metric_pool.item() as conn:
            for i in range(max_retries + 1):
                try:
                    frame, header, body = conn.channel.basic_get(
                        queue='metric_queue')
                    if frame:
                        conn.channel.basic_ack(delivery_tag=frame.delivery_tag)
                    return frame, body

                except (AMQPConnectionError, AMQPChannelError,
                        ConnectionClosed):
                    if i < max_retries:
                        conn.connect()
                        LOG.warn(_(msg) % i)
                        time.sleep(2 * i)
                    else:
                        raise

    def send_msg(self, message_id, body):
        """
        Args:
            message_id: int
                ex) PUT_METRIC_DATA_MSG_ID (0x0001)
                    PUT_METRIC_ALARM_MSG_ID (0x0002)
                    ...
            body: dict object (will be converted into json format)
            
        """
        def publish(body, properties, use_metric_pool=True):
            msg = 'AMQP Connection is closed %d time(s)... retrying.'
            max_retries = 5
            mq_pool = self.metric_pool if use_metric_pool else self.alarm_pool

            with mq_pool.item() as conn:
                for i in range(max_retries + 1):
                    try:
                        return conn.channel.basic_publish(
                            exchange='',
                            routing_key='metric_queue',
                            body=body,
                            properties=properties)
                    except ConnectionClosed:
                        if i < max_retries:
                            conn.connect()
                            LOG.warn(_(msg) % i)
                            time.sleep(2 * i)
                        else:
                            raise

        if type(message_id) is not int:
            raise RpcInvokeException()

        message_uuid = str(uuid.uuid4())
        body.setdefault('message_id', message_id)
        body.setdefault('message_uuid', message_uuid)

        properties = pika.BasicProperties(delivery_mode=2)
        use_metric_pool = (message_id == PUT_METRIC_DATA_MSG_ID)
        publish(json.dumps(body), properties, use_metric_pool)

        LOG.info(_("send_msg - id(%03d), %s"), message_id, message_uuid)
        LOG.debug(_("send_msg - body(%s)"), str(body))
Example #14
 def __init__(self, server, size, connect_timeout):
     Pool.__init__(self, max_size=size)
     self.host, self.port = utils.parse_socket_string(
         server, DEFAULT_MEMCACHED_PORT)
     self._connect_timeout = connect_timeout
Example #15
 def get(self):
     fp, sock = Pool.get(self)
     if fp is None:
         # An error happened previously, so we need a new connection
         fp, sock = self.create()
     return fp, sock
Example #16
 def __init__(self, uri, use_proxy=False, min_size=0, max_size=4):
     self.uri = uri
     self.use_proxy = use_proxy
     Pool.__init__(self, min_size, max_size)
Example #17
try:
    import yajl as json
except ImportError:
    import json

import socket
from urllib import urlencode

import httplib2
from eventlet.pools import Pool  # assumed import for Pool, as in the other examples

from heroshi import get_logger
log = get_logger("api")
from heroshi.conf import settings
from heroshi.error import ApiError


manager_connections = Pool(max_size=2)
manager_connections.create = lambda: httplib2.Http(timeout=20)


def request_manager(resource, method, data=None, headers=None):
    use_headers = {
        'User-Agent': settings.identity['user_agent'],
        'X-Heroshi-Auth': settings.api_key,
        'Expect': '',  # work around servers replying "100 Continue"
    }
    if headers is not None:
        use_headers.update(headers)

    # concat URIs carefully
    base_url = settings.manager_url
    url = base_url.strip('/') + resource
Example #18
from contextlib import contextmanager

@contextmanager
def PoolManager(pool):
    """
    Given a pool, handle item management (like stale items in the pool)
    while passing other errors to the calling method.
    """
    i = pool.get()
    try:
        yield i
    except StaleConnectionError as err:
        print err
    else:
        print i(), 'returned to pool'
        pool.put(i)


if __name__ == '__main__':
    pool_size = 5
    p = Pool(min_size=1, max_size=pool_size, create=new_connection)
    attempts = 1
    max_attempts = 5
    stale = True
    while attempts < max_attempts:
        with PoolManager(p) as i:
            try:
                attempts += 1
                print 'starting attempt %s' % attempts
                print i()
                print i(stale=stale)
                #print i(err=True)
            except ProcessingError as err:
                print err, 'as expected'
            else:
                break
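
The demo above depends on a few names the scraped snippet does not show: Pool, new_connection, and the two exception types. Hypothetical stand-ins that make it runnable (the original module defines its own versions):

from eventlet.pools import Pool  # assumed source of Pool, as in the other examples

class StaleConnectionError(Exception):
    pass

class ProcessingError(Exception):
    pass

def new_connection():
    # Returns a callable "connection" matching how the demo invokes i().
    def conn(stale=False, err=False):
        if stale:
            raise StaleConnectionError('stale item in pool')
        if err:
            raise ProcessingError('processing failed')
        return 'connection result'
    return conn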
Example #19
 def __init__(self, server, size, connect_timeout):
     Pool.__init__(self, max_size=size)
     self.server = server
     self._connect_timeout = connect_timeout
Example #20
 def __init__(self, mocks):
     Pool.__init__(self, max_size=2)
     self.mocks = mocks
     # setting this for the eventlet workaround in the MemcacheConnPool
     self._parent_class_getter = super(memcached.MemcacheConnPool, self).get
Example #21
class Service(ConsumerMixin):
    def __init__(
            self, controllercls, connection_factory, exchange, topic,
            pool=None, poolsize=1000):
        self.nodeid = UIDGEN()

        self.max_workers = poolsize
        if pool is None:
            self.procpool = GreenPool(size=poolsize)
        else:
            self.procpool = pool

        self.controller = controllercls()
        self.service = self.controller
        self.topic = topic
        self.greenlet = None
        self.consume_ready = Event()

        node_topic = "{}.{}".format(self.topic, self.nodeid)
        self.nova_queues = [
            entities.get_topic_queue(exchange, topic),
            entities.get_topic_queue(exchange, node_topic),
            entities.get_fanout_queue(topic), ]

        self._channel = None
        self._consumers = None

        self.connection = connection_factory()
        self.connection_factory = connection_factory

        inject_dependencies(self.controller, self)

        self._connection_pool = Pool(
            max_size=self.procpool.size,
            create=connection_factory
        )

        self.workers = set()
        self._pending_ack_messages = []
        self._pending_requeue_messages = []
        self._do_cancel_consumers = False
        self._consumers_cancelled = Event()

        self._timers = list(get_timers(self.controller))

    def start(self):
        self.start_timers()
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self.greenlet is not None and not self.greenlet.dead:
            raise RuntimeError()
        self.greenlet = eventlet.spawn(self.run)

    def start_timers(self):
        for timer in self._timers:
            timer.start()

    def get_consumers(self, Consumer, channel):
        nova_consumer = Consumer(
            self.nova_queues, callbacks=[self.on_nova_message, ])

        consume_consumers = get_consumers(
            Consumer, self, self.on_consume_message)

        consumers = [nova_consumer] + list(consume_consumers)

        prefetch_count = self.procpool.size
        for consumer in consumers:
            consumer.qos(prefetch_count=prefetch_count)

        return consumers

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        self._consumers = consumers
        self._channel = channel
        self.consume_ready.send(None)

    def on_consume_end(self, connection, channel):
        self.consume_ready.reset()

    def on_nova_message(self, body, message):
        _log.debug('spawning RPC worker (%d free)', self.procpool.free())

        gt = self.procpool.spawn(self.handle_rpc_message, body)

        gt.link(self.handle_rpc_message_processed, message)
        self.workers.add(gt)

    def on_consume_message(self, consumer_method_config, body, message):
        _log.debug('spawning consume worker (%d free)', self.procpool.free())

        gt = self.procpool.spawn(
            self.handle_consume_message, consumer_method_config, body, message)

        gt.link(self.handle_consume_message_processed)
        self.workers.add(gt)

    def handle_rpc_message(self, body):
        # item is patched on for python with ``with``, pylint can't find it
        # pylint: disable=E1102
        with self._connection_pool.item() as connection:
            process_rpc_message(connection, self.controller, body)

    def handle_rpc_message_processed(self, gt, message):
        self.workers.discard(gt)
        self._pending_ack_messages.append(message)

    def handle_consume_message(self, consumer_method_config, body, message):
        with log_time(_log.debug, 'processed consume message in %0.3fsec'):
            consumer_method, consumer_config = consumer_method_config

            try:
                consumer_method(body)
            except Exception as e:
                if consumer_config.requeue_on_error:
                    _log.exception(
                        'failed to consume message, requeueing message: '
                        '%s(): %s', consumer_method, e)
                    self._pending_requeue_messages.append(message)
                else:
                    _log.exception(
                        'failed to consume message, ignoring message: '
                        '%s(): %s', consumer_method, e)
                    self._pending_ack_messages.append(message)
            else:
                self._pending_ack_messages.append(message)

    def handle_consume_message_processed(self, gt):
        self.workers.discard(gt)

    def on_iteration(self):
        self.process_consumer_cancellation()
        # we need to make sure we process any pending messages before shutdown
        self.process_pending_message_acks()
        self.process_shutdown()

    def process_consumer_cancellation(self):
        if self._do_cancel_consumers:
            self._do_cancel_consumers = False
            if self._consumers:
                _log.debug('cancelling consumers')
                for consumer in self._consumers:
                    consumer.cancel()
            self._consumers_cancelled.send(True)

    def process_pending_message_acks(self):
        messages = self._pending_ack_messages
        if messages:
            _log.debug('ack() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.ack()
                eventlet.sleep()

        messages = self._pending_requeue_messages
        if messages:
            _log.debug('requeue() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.requeue()
                eventlet.sleep()

    def consume(self, limit=None, timeout=None, safety_interval=0.1, **kwargs):
        """ Lifted from kombu so we are able to break the loop immediately
            after a shutdown is triggered rather than waiting for the timeout.
        """
        elapsed = 0
        with self.Consumer() as (connection, channel, consumers):
            with self.extra_context(connection, channel):
                self.on_consume_ready(connection, channel, consumers, **kwargs)
                for i in limit and xrange(limit) or count():
                    # moved from after the following `should_stop` condition to
                    # avoid waiting on a drain_events timeout before breaking
                    # the loop.
                    self.on_iteration()
                    if self.should_stop:
                        break

                    try:
                        connection.drain_events(timeout=safety_interval)
                    except socket.timeout:
                        elapsed += safety_interval
                        # Excluding the following clause from coverage,
                        # as timeout never appears to be set - This method
                        # is a lift from kombu so will leave in place for now.
                        if timeout and elapsed >= timeout:  # pragma: no cover
                            raise socket.timeout()
                    except socket.error:
                        if not self.should_stop:
                            raise
                    else:
                        yield
                        elapsed = 0

    def process_shutdown(self):
        consumers_cancelled = self._consumers_cancelled.ready()

        no_active_timers = (len(self._timers) == 0)

        no_active_workers = (self.procpool.running() < 1)

        no_pending_message_acks = not (
            self._pending_ack_messages or
            self._pending_requeue_messages
        )

        ready_to_stop = (
            consumers_cancelled and
            no_active_timers and
            no_active_workers and
            no_pending_message_acks
        )

        if ready_to_stop:
            _log.debug('notifying service to stop')
            self.should_stop = True

    def cancel_consumers(self):
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self.greenlet is not None and not self.greenlet.dead:
            # since consumers were started in a separate thread,
            # we will just notify the thread to avoid getting
            # "Second simultaneous read" errors
            _log.debug('notifying consumers to be cancelled')
            self._do_cancel_consumers = True
            self._consumers_cancelled.wait()
        else:
            _log.debug('consumer thread already dead')

    def cancel_timers(self):
        if self._timers:
            _log.debug('stopping %d timers', len(self._timers))
            while self._timers:
                self._timers.pop().stop()

    def kill_workers(self):
        _log.debug('force killing %d workers', len(self.workers))
        while self.workers:
            self.workers.pop().kill()

    def wait_for_workers(self):
        pool = self.procpool
        _log.debug('waiting for %d workers to complete', pool.running())
        pool.waitall()

    def shut_down(self):
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self.greenlet is not None and not self.greenlet.dead:
            _log.debug('stopping service')
            self.greenlet.wait()

        # TODO: when is this ever not None?
        if self._channel is not None:
            _log.debug('closing channel')
            self._channel.close()

    def kill(self, force=False):
        _log.debug('killing service')

        self.cancel_consumers()

        self.cancel_timers()

        if force:
            self.kill_workers()
        else:
            self.wait_for_workers()

        self.shut_down()

    def link(self, *args, **kwargs):
        return self.greenlet.link(*args, **kwargs)