Example #1
class Pool(Group):
    def __init__(self, size=None, greenlet_class=None):
        if size is not None and size < 0:
            raise ValueError('size must not be negative: %r' % (size, ))
        Group.__init__(self)
        self.size = size
        if greenlet_class is not None:
            self.greenlet_class = greenlet_class
        if size is None:
            self._semaphore = DummySemaphore()
        else:
            self._semaphore = Semaphore(size)

    def wait_available(self):
        self._semaphore.wait()

    def full(self):
        return self.free_count() <= 0

    def free_count(self):
        if self.size is None:
            return 1
        return max(0, self.size - len(self))

    def add(self, greenlet):
        self._semaphore.acquire()
        try:
            Group.add(self, greenlet)
        except:
            self._semaphore.release()
            raise

    def _discard(self, greenlet):
        Group._discard(self, greenlet)
        self._semaphore.release()
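
The Pool above caps concurrency with its internal Semaphore: add()/spawn() blocks once *size* greenlets are active, and _discard() releases a slot when a greenlet finishes. A minimal usage sketch, not taken from any of the listed projects (the worker function and the loop are made up for illustration):

import gevent
from gevent.pool import Pool

def worker(n):
    # stand-in task; each call occupies one pool slot while it runs
    gevent.sleep(0.1)
    return n * n

pool = Pool(size=3)                                  # at most 3 greenlets run at once
jobs = [pool.spawn(worker, i) for i in range(10)]    # blocks whenever the pool is full
gevent.joinall(jobs)
print([job.value for job in jobs])                   # [0, 1, 4, ..., 81]
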
Example #2
File: pool.py Project: uschen/gevent3
class Pool(Group):
    def __init__(self, size=None, greenlet_class=None):
        if size is not None and size < 0:
            raise ValueError("size must not be negative: %r" % (size,))
        Group.__init__(self)
        self.size = size
        if greenlet_class is not None:
            self.greenlet_class = greenlet_class
        if size is None:
            self._semaphore = DummySemaphore()
        else:
            self._semaphore = Semaphore(size)

    def wait_available(self):
        self._semaphore.wait()

    def full(self):
        return self.free_count() <= 0

    def free_count(self):
        if self.size is None:
            return 1
        return max(0, self.size - len(self))

    def add(self, greenlet):
        self._semaphore.acquire()
        try:
            Group.add(self, greenlet)
        except:
            self._semaphore.release()
            raise

    def _discard(self, greenlet):
        Group._discard(self, greenlet)
        self._semaphore.release()
Example #3
def listen(self, callback):
    # XXX: should only spawn one monitor greenlet per thread
    condition = self._condition
    started = Semaphore(0)

    def _listen():
        with condition:
            while True:
                started.release()
                condition.wait()
                callback(self)

    gevent.spawn(_listen)
    started.wait()
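
Example #3 uses Semaphore(0) as a one-shot startup handshake: started.wait() blocks until the spawned greenlet calls release(), and because release() happens while the greenlet already holds the condition, notifications sent after listen() returns cannot slip past it. A standalone sketch of the same pattern (assuming gevent.lock.Semaphore; the names are illustrative, not from the original project):

import gevent
from gevent.lock import Semaphore

def start_background_worker(work):
    ready = Semaphore(0)        # counter starts at 0, so wait() blocks

    def _run():
        ready.release()         # signal that the greenlet is up
        work()

    greenlet = gevent.spawn(_run)
    ready.wait()                # returns only once _run() has started
    return greenlet

g = start_background_worker(lambda: gevent.sleep(1))
g.join()
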
Example #4
File: pool.py Project: dsuch/gevent
class Pool(Group):

    def __init__(self, size=None, greenlet_class=None):
        """
        Create a new pool.

        A pool is like a group, but the maximum number of members
        is governed by the *size* parameter.

        :keyword int size: If given, this non-negative integer is the
            maximum count of active greenlets that will be allowed in
            this pool. A few values have special significance:

            * ``None`` (the default) places no limit on the number of
              greenlets. This is useful when you need to track, but not limit,
              greenlets, as with :class:`gevent.pywsgi.WSGIServer`. A :class:`Group`
              may be a more efficient way to achieve the same effect.
            * ``0`` creates a pool that can never have any active greenlets. Attempting
              to spawn in this pool will block forever. This is only useful
              if an application uses :meth:`wait_available` with a timeout and checks
              :meth:`free_count` before attempting to spawn.
        """
        if size is not None and size < 0:
            raise ValueError('size must not be negative: %r' % (size, ))
        Group.__init__(self)
        self.size = size
        if greenlet_class is not None:
            self.greenlet_class = greenlet_class
        if size is None:
            self._semaphore = DummySemaphore()
        else:
            self._semaphore = Semaphore(size)

    def wait_available(self, timeout=None):
        """
        Wait until it's possible to spawn a greenlet in this pool.

        :param float timeout: If given, only wait the specified number
            of seconds.

        .. warning:: If the pool was initialized with a size of 0, this
           method will block forever unless a timeout is given.

        :return: A number indicating how many new greenlets can be put into
           the pool without blocking.

        .. versionchanged:: 1.1a3
            Added the ``timeout`` parameter.
        """
        return self._semaphore.wait(timeout=timeout)

    def full(self):
        """
        Return a boolean indicating whether this pool is full, i.e.,
        whether :meth:`add` would block. (True if there is no room for
        more members, False if there is.)
        """
        return self.free_count() <= 0

    def free_count(self):
        """
        Return a number indicating *approximately* how many more members
        can be added to this pool.
        """
        if self.size is None:
            return 1
        return max(0, self.size - len(self))

    def add(self, greenlet):
        """
        Begin tracking the given greenlet, blocking until space is available.

        .. seealso:: :meth:`Group.add`
        """
        self._semaphore.acquire()
        try:
            Group.add(self, greenlet)
        except:
            self._semaphore.release()
            raise

    def _discard(self, greenlet):
        Group._discard(self, greenlet)
        self._semaphore.release()
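
The docstring above suggests pairing wait_available() with a timeout and a free_count() check so that callers never block indefinitely on a full (or size-0) pool. A hedged sketch of that pattern (the task function and the timeout values are made up for illustration):

import gevent
from gevent.pool import Pool

def task():
    gevent.sleep(0.5)

pool = Pool(size=2)
for i in range(5):
    # wait_available() returns the number of free slots, which is 0 (falsy) on timeout
    if pool.wait_available(timeout=1) and pool.free_count() > 0:
        pool.spawn(task)        # a slot was free just now, so this does not block here
    else:
        print('pool still full after 1s, skipping task %d' % i)
pool.join()
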
Example #5
class VncKombuClientBase(object):
    def _update_sandesh_status(self, status, msg=''):
        ConnectionState.update(conn_type=ConnType.DATABASE,
            name='RabbitMQ', status=status, message=msg,
            server_addrs=self._server_addrs)
    # end _update_sandesh_status

    def publish(self, message):
        self._publish_queue.put(message)
    # end publish

    def sigterm_handler(self):
        self.shutdown()
        exit()

    def __init__(self, rabbit_ip, rabbit_port, rabbit_user, rabbit_password,
                 rabbit_vhost, rabbit_ha_mode, q_name, subscribe_cb, logger,
                 heartbeat_seconds=0, register_handler=True, **kwargs):
        self._rabbit_ip = rabbit_ip
        self._rabbit_port = rabbit_port
        self._rabbit_user = rabbit_user
        self._rabbit_password = rabbit_password
        self._rabbit_vhost = rabbit_vhost
        self._subscribe_cb = subscribe_cb
        self._logger = logger
        self._publish_queue = Queue()
        self._conn_lock = Semaphore()
        self._heartbeat_seconds = heartbeat_seconds

        self.obj_upd_exchange = kombu.Exchange('vnc_config.object-update', 'fanout',
                                               durable=False)
        self._ssl_params = self._fetch_ssl_params(**kwargs)

        # Register a handler for SIGTERM so that we can release the lock
        # Without it, it can take several minutes before new master is elected
        # If any app using this wants to register their own sigterm handler,
        # then we will have to modify this function to perhaps take an argument
        if register_handler:
            gevent.signal(signal.SIGTERM, self.sigterm_handler)

    def num_pending_messages(self):
        return self._publish_queue.qsize()
    # end num_pending_messages

    def prepare_to_consume(self):
        # override this method
        return

    def _reconnect(self, delete_old_q=False):
        if self._conn_lock.locked():
            # either connection-monitor or publisher should have taken
            # the lock. The one who acquired the lock would re-establish
            # the connection and releases the lock, so the other one can
            # just wait on the lock, till it gets released
            self._conn_lock.wait()
            if self._conn_state == ConnectionStatus.UP:
                return

        with self._conn_lock:
            msg = "RabbitMQ connection down"
            self._logger(msg, level=SandeshLevel.SYS_NOTICE)
            self._update_sandesh_status(ConnectionStatus.DOWN)
            self._conn_state = ConnectionStatus.DOWN

            self._conn.close()

            self._conn.ensure_connection()
            self._conn.connect()

            self._update_sandesh_status(ConnectionStatus.UP)
            self._conn_state = ConnectionStatus.UP
            msg = 'RabbitMQ connection ESTABLISHED %s' % repr(self._conn)
            self._logger(msg, level=SandeshLevel.SYS_NOTICE)

            self._channel = self._conn.channel()
            if self._subscribe_cb is not None:
                if delete_old_q:
                    # delete the old queue in first-connect context
                    # as db-resync would have caught up with history.
                    try:
                        bound_q = self._update_queue_obj(self._channel)
                        bound_q.delete()
                    except Exception as e:
                        msg = 'Unable to delete the old amqp queue: %s' %(str(e))
                        self._logger(msg, level=SandeshLevel.SYS_ERR)

                self._consumer = kombu.Consumer(self._channel,
                                               queues=self._update_queue_obj,
                                               callbacks=[self._subscribe])
            else: # only a producer
                self._consumer = None

            self._producer = kombu.Producer(self._channel, exchange=self.obj_upd_exchange)
    # end _reconnect

    def _delete_queue(self):
        # delete the queue
        try:
            bound_q = self._update_queue_obj(self._channel)
            if bound_q:
                bound_q.delete()
        except Exception as e:
            msg = 'Unable to delete the old amqp queue: %s' %(str(e))
            self._logger(msg, level=SandeshLevel.SYS_ERR)
    #end _delete_queue

    def _connection_watch(self, connected):
        if not connected:
            self._reconnect()

        self.prepare_to_consume()
        while True:
            try:
                self._consumer.consume()
                self._conn.drain_events()
            except self._conn.connection_errors + self._conn.channel_errors as e:
                self._reconnect()
    # end _connection_watch

    def _connection_watch_forever(self):
        connected = True
        while True:
            try:
                self._connection_watch(connected)
            except Exception as e:
                msg = 'Error in rabbitmq drainer greenlet: %s' %(str(e))
                self._logger(msg, level=SandeshLevel.SYS_ERR)
                # avoid 'reconnect()' here as that itself might cause exception
                connected = False
    # end _connection_watch_forever

    def _connection_heartbeat(self):
        while True:
            try:
                if self._conn.connected:
                    self._conn.heartbeat_check()
            except Exception as e:
                msg = 'Error in rabbitmq heartbeat greenlet: %s' %(str(e))
                self._logger(msg, level=SandeshLevel.SYS_ERR)
            finally:
                gevent.sleep(float(self._heartbeat_seconds/2))
    # end _connection_heartbeat

    def _publisher(self):
        message = None
        connected = True
        while True:
            try:
                if not connected:
                    self._reconnect()
                    connected = True

                if not message:
                    # earlier was sent fine, dequeue one more
                    message = self._publish_queue.get()

                while True:
                    try:
                        self._producer.publish(message)
                        message = None
                        break
                    except self._conn.connection_errors + self._conn.channel_errors as e:
                        self._reconnect()
            except Exception as e:
                log_str = "Error in rabbitmq publisher greenlet: %s" %(str(e))
                self._logger(log_str, level=SandeshLevel.SYS_ERR)
                # avoid 'reconnect()' here as that itself might cause exception
                connected = False
    # end _publisher

    def _subscribe(self, body, message):
        try:
            self._subscribe_cb(body)
        finally:
            message.ack()


    def _start(self, client_name):
        self._reconnect(delete_old_q=True)

        self._publisher_greenlet = vnc_greenlets.VncGreenlet(
                                               'Kombu ' + client_name,
                                               self._publisher)
        self._connection_monitor_greenlet = vnc_greenlets.VncGreenlet(
                                               'Kombu ' + client_name + '_ConnMon',
                                               self._connection_watch_forever)
        if self._heartbeat_seconds:
            self._connection_heartbeat_greenlet = vnc_greenlets.VncGreenlet(
                'Kombu ' + client_name + '_ConnHeartBeat',
                self._connection_heartbeat)
        else:
            self._connection_heartbeat_greenlet = None

    def greenlets(self):
        ret = [self._publisher_greenlet, self._connection_monitor_greenlet]
        if self._connection_heartbeat_greenlet:
            ret.append(self._connection_heartbeat_greenlet)
        return ret

    def shutdown(self):
        self._publisher_greenlet.kill()
        self._connection_monitor_greenlet.kill()
        if self._connection_heartbeat_greenlet:
            self._connection_heartbeat_greenlet.kill()
        if self._consumer:
            self._delete_queue()
        self._conn.close()

    def reset(self):
        self._publish_queue = Queue()

    _SSL_PROTOCOLS = {
        "tlsv1": ssl.PROTOCOL_TLSv1,
        "sslv23": ssl.PROTOCOL_SSLv23
    }

    @classmethod
    def validate_ssl_version(cls, version):
        version = version.lower()
        try:
            return cls._SSL_PROTOCOLS[version]
        except KeyError:
            raise RuntimeError('Invalid SSL version: {}'.format(version))

    def _fetch_ssl_params(self, **kwargs):
        if strtobool(str(kwargs.get('rabbit_use_ssl', False))):
            ssl_params = dict()
            ssl_version = kwargs.get('kombu_ssl_version', '')
            keyfile = kwargs.get('kombu_ssl_keyfile', '')
            certfile = kwargs.get('kombu_ssl_certfile', '')
            ca_certs = kwargs.get('kombu_ssl_ca_certs', '')
            if ssl_version:
                ssl_params.update({'ssl_version':
                    self.validate_ssl_version(ssl_version)})
            if keyfile:
                ssl_params.update({'keyfile': keyfile})
            if certfile:
                ssl_params.update({'certfile': certfile})
            if ca_certs:
                ssl_params.update({'ca_certs': ca_certs})
                ssl_params.update({'cert_reqs': ssl.CERT_REQUIRED})
            return ssl_params or True
        return False
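
_reconnect() above uses the Semaphore as a connection lock: whichever greenlet grabs it re-establishes the connection; any other caller just waits for the lock to be released and returns early if the connection is back up. A simplified standalone sketch of that pattern (the sleep stands in for close()/ensure_connection()/connect(); the names are illustrative):

import gevent
from gevent.lock import Semaphore

conn_lock = Semaphore()
state = {'up': False}

def reconnect(name):
    if conn_lock.locked():
        conn_lock.wait()                  # someone else is already reconnecting
        if state['up']:
            return
    with conn_lock:
        print('%s re-establishing connection' % name)
        gevent.sleep(0.2)                 # stands in for the real reconnect calls
        state['up'] = True

gevent.joinall([gevent.spawn(reconnect, 'publisher'),
                gevent.spawn(reconnect, 'connection-monitor')])
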
Example #6
class VncKombuClientBase(object):
    def _update_sandesh_status(self, status, msg=''):
        ConnectionState.update(conn_type=ConnType.DATABASE,
                               name='RabbitMQ',
                               status=status,
                               message=msg,
                               server_addrs=self._server_addrs)

    # end _update_sandesh_status

    def publish(self, message):
        self._publish_queue.put(message)

    # end publish

    def sigterm_handler(self):
        self.shutdown()
        exit()

    def __init__(self,
                 rabbit_ip,
                 rabbit_port,
                 rabbit_user,
                 rabbit_password,
                 rabbit_vhost,
                 rabbit_ha_mode,
                 q_name,
                 subscribe_cb,
                 logger,
                 heartbeat_seconds=0,
                 **kwargs):
        self._rabbit_ip = rabbit_ip
        self._rabbit_port = rabbit_port
        self._rabbit_user = rabbit_user
        self._rabbit_password = rabbit_password
        self._rabbit_vhost = rabbit_vhost
        self._subscribe_cb = subscribe_cb
        self._logger = logger
        self._publish_queue = Queue()
        self._conn_lock = Semaphore()
        self._heartbeat_seconds = heartbeat_seconds

        self.obj_upd_exchange = kombu.Exchange('vnc_config.object-update',
                                               'fanout',
                                               durable=False)
        self._ssl_params = self._fetch_ssl_params(**kwargs)

        # Register a handler for SIGTERM so that we can release the lock
        # Without it, it can take several minutes before new master is elected
        # If any app using this wants to register their own sigterm handler,
        # then we will have to modify this function to perhaps take an argument
        gevent.signal(signal.SIGTERM, self.sigterm_handler)

    def num_pending_messages(self):
        return self._publish_queue.qsize()

    # end num_pending_messages

    def prepare_to_consume(self):
        # override this method
        return

    def _reconnect(self, delete_old_q=False):
        if self._conn_lock.locked():
            # either connection-monitor or publisher should have taken
            # the lock. The one who acquired the lock would re-establish
            # the connection and releases the lock, so the other one can
            # just wait on the lock, till it gets released
            self._conn_lock.wait()
            if self._conn_state == ConnectionStatus.UP:
                return

        with self._conn_lock:
            msg = "RabbitMQ connection down"
            self._logger(msg, level=SandeshLevel.SYS_NOTICE)
            self._update_sandesh_status(ConnectionStatus.DOWN)
            self._conn_state = ConnectionStatus.DOWN

            self._conn.close()

            self._conn.ensure_connection()
            self._conn.connect()

            self._update_sandesh_status(ConnectionStatus.UP)
            self._conn_state = ConnectionStatus.UP
            msg = 'RabbitMQ connection ESTABLISHED %s' % repr(self._conn)
            self._logger(msg, level=SandeshLevel.SYS_NOTICE)

            self._channel = self._conn.channel()
            if self._subscribe_cb is not None:
                if delete_old_q:
                    # delete the old queue in first-connect context
                    # as db-resync would have caught up with history.
                    try:
                        bound_q = self._update_queue_obj(self._channel)
                        bound_q.delete()
                    except Exception as e:
                        msg = 'Unable to delete the old amqp queue: %s' % (
                            str(e))
                        self._logger(msg, level=SandeshLevel.SYS_ERR)

                self._consumer = kombu.Consumer(self._channel,
                                                queues=self._update_queue_obj,
                                                callbacks=[self._subscribe])
            else:  # only a producer
                self._consumer = None

            self._producer = kombu.Producer(self._channel,
                                            exchange=self.obj_upd_exchange)

    # end _reconnect

    def _delete_queue(self):
        # delete the queue
        try:
            bound_q = self._update_queue_obj(self._channel)
            if bound_q:
                bound_q.delete()
        except Exception as e:
            msg = 'Unable to delete the old amqp queue: %s' % (str(e))
            self._logger(msg, level=SandeshLevel.SYS_ERR)

    #end _delete_queue

    def _connection_watch(self, connected):
        if not connected:
            self._reconnect()

        self.prepare_to_consume()
        while True:
            try:
                self._consumer.consume()
                self._conn.drain_events()
            except self._conn.connection_errors + self._conn.channel_errors as e:
                self._reconnect()

    # end _connection_watch

    def _connection_watch_forever(self):
        connected = True
        while True:
            try:
                self._connection_watch(connected)
            except Exception as e:
                msg = 'Error in rabbitmq drainer greenlet: %s' % (str(e))
                self._logger(msg, level=SandeshLevel.SYS_ERR)
                # avoid 'reconnect()' here as that itself might cause exception
                connected = False

    # end _connection_watch_forever

    def _connection_heartbeat(self):
        while True:
            try:
                if self._conn.connected:
                    self._conn.heartbeat_check()
            except Exception as e:
                msg = 'Error in rabbitmq heartbeat greenlet: %s' % (str(e))
                self._logger(msg, level=SandeshLevel.SYS_ERR)
            finally:
                gevent.sleep(float(self._heartbeat_seconds / 2))

    # end _connection_heartbeat

    def _publisher(self):
        message = None
        connected = True
        while True:
            try:
                if not connected:
                    self._reconnect()
                    connected = True

                if not message:
                    # earlier was sent fine, dequeue one more
                    message = self._publish_queue.get()

                while True:
                    try:
                        self._producer.publish(message)
                        message = None
                        break
                    except self._conn.connection_errors + self._conn.channel_errors as e:
                        self._reconnect()
            except Exception as e:
                log_str = "Error in rabbitmq publisher greenlet: %s" % (str(e))
                self._logger(log_str, level=SandeshLevel.SYS_ERR)
                # avoid 'reconnect()' here as that itself might cause exception
                connected = False

    # end _publisher

    def _subscribe(self, body, message):
        try:
            self._subscribe_cb(body)
        finally:
            message.ack()

    def _start(self, client_name):
        self._reconnect(delete_old_q=True)

        self._publisher_greenlet = vnc_greenlets.VncGreenlet(
            'Kombu ' + client_name, self._publisher)
        self._connection_monitor_greenlet = vnc_greenlets.VncGreenlet(
            'Kombu ' + client_name + '_ConnMon',
            self._connection_watch_forever)
        if self._heartbeat_seconds:
            self._connection_heartbeat_greenlet = vnc_greenlets.VncGreenlet(
                'Kombu ' + client_name + '_ConnHeartBeat',
                self._connection_heartbeat)
        else:
            self._connection_heartbeat_greenlet = None

    def greenlets(self):
        ret = [self._publisher_greenlet, self._connection_monitor_greenlet]
        if self._connection_heartbeat_greenlet:
            ret.append(self._connection_heartbeat_greenlet)
        return ret

    def shutdown(self):
        self._publisher_greenlet.kill()
        self._connection_monitor_greenlet.kill()
        if self._connection_heartbeat_greenlet:
            self._connection_heartbeat_greenlet.kill()
        self._producer.close()
        if self._consumer:
            self._consumer.close()
            self._delete_queue()
        self._conn.close()

    def reset(self):
        self._publish_queue = Queue()

    _SSL_PROTOCOLS = {
        "tlsv1": ssl.PROTOCOL_TLSv1,
        "sslv23": ssl.PROTOCOL_SSLv23
    }

    @classmethod
    def validate_ssl_version(cls, version):
        version = version.lower()
        try:
            return cls._SSL_PROTOCOLS[version]
        except KeyError:
            raise RuntimeError('Invalid SSL version: {}'.format(version))

    def _fetch_ssl_params(self, **kwargs):
        if strtobool(str(kwargs.get('rabbit_use_ssl', False))):
            ssl_params = dict()
            ssl_version = kwargs.get('kombu_ssl_version', '')
            keyfile = kwargs.get('kombu_ssl_keyfile', '')
            certfile = kwargs.get('kombu_ssl_certfile', '')
            ca_certs = kwargs.get('kombu_ssl_ca_certs', '')
            if ssl_version:
                ssl_params.update(
                    {'ssl_version': self.validate_ssl_version(ssl_version)})
            if keyfile:
                ssl_params.update({'keyfile': keyfile})
            if certfile:
                ssl_params.update({'certfile': certfile})
            if ca_certs:
                ssl_params.update({'ca_certs': ca_certs})
                ssl_params.update({'cert_reqs': ssl.CERT_REQUIRED})
            return ssl_params or True
        return False
Example #7
class IronicKombuClient(object):

    def __init__(self, rabbit_server, rabbit_port, rabbit_user, rabbit_password,
                 notification_level, ironic_notif_mgr_obj, **kwargs):
        self._rabbit_port = rabbit_port
        self._rabbit_user = rabbit_user
        self._rabbit_password = rabbit_password
        self._rabbit_hosts = self._parse_rabbit_hosts(rabbit_server)
        self._rabbit_ip = self._rabbit_hosts[0]["host"]
        self._notification_level = notification_level
        self._ironic_notification_manager = ironic_notif_mgr_obj
        self._conn_lock = Semaphore()

        # Register a handler for SIGTERM so that we can release the lock
        # Without it, it can take several minutes before new master is elected
        # If any app using this wants to register their own sigterm handler,
        # then we will have to modify this function to perhaps take an argument
        # gevent.signal(signal.SIGTERM, self.sigterm_handler)

        self._url = "amqp://%s:%s@%s:%s/" % (self._rabbit_user, self._rabbit_password, 
                                             self._rabbit_ip, self._rabbit_port)
        msg = "Initializing RabbitMQ connection, urls %s" % self._url
        #self._conn_state = ConnectionStatus.INIT
        self._conn = kombu.Connection(self._url)
        self._exchange = self._set_up_exchange()
        self._queues = []
        self._queues = self._set_up_queues(self._notification_level)
        if not self._queues:
            exit()

    def _parse_rabbit_hosts(self, rabbit_servers):

        default_dict = {'user': self._rabbit_user,
                        'password': self._rabbit_password,
                        'port': self._rabbit_port}
        ret = []
        rabbit_hosts = re.compile(r'[,\s]+').split(rabbit_servers)
        for s in rabbit_hosts:
            match = re.match(r"(?:(?P<user>.*?)(?::(?P<password>.*?))*@)*(?P<host>.*?)(?::(?P<port>\d+))*$", s)
            if match:
                mdict = match.groupdict().copy()
                for key in ['user', 'password', 'port']:
                    if not mdict[key]:
                        mdict[key] = default_dict[key]

                ret.append(mdict)

        return ret

    def _set_up_exchange(self, exchange_name=None):
        if exchange_name:
            exchange = kombu.Exchange(str(exchange_name), type="topic", durable=False)
        else:
            exchange = kombu.Exchange("ironic", type="topic", durable=False)

    def _set_up_queues(self, notification_level):
        if notification_level not in ['info', 'debug', 'warning', 'error']:
            msg = "Unrecongized notification level: " + str(notification_level) + \
                  "\nPlease enter a valid notification level from: 'info', 'debug', 'warning', 'error'"
            return 0
        sub_queue_names = []
        sub_queues = []
        log_levels = []
        if notification_level == "debug":
            log_levels = ['debug', 'info', 'warning', 'error']
        elif notification_level == "info":
            log_levels = ['info', 'warning', 'error']
        elif notification_level == "warning":
            log_levels = ['warning', 'error']
        elif notification_level == "error":
            log_levels = ['error']

        for level in log_levels:
            sub_queue_names.append('ironic_versioned_notifications.'+str(level))

        for sub_queue_name in sub_queue_names:
            sub_queues.append(kombu.Queue(str(sub_queue_name), durable=False, exchange=self._exchange, routing_key=str(sub_queue_name)))

        return sub_queues

    def _reconnect(self, delete_old_q=False):
        if self._conn_lock.locked():
            # either connection-monitor or publisher should have taken
            # the lock. The one who acquired the lock would re-establish
            # the connection and releases the lock, so the other one can
            # just wait on the lock, till it gets released
            self._conn_lock.wait()
            #if self._conn_state == ConnectionStatus.UP:
            #    return

        with self._conn_lock:
            msg = "RabbitMQ connection down"
            #self._logger(msg, level=SandeshLevel.SYS_NOTICE)
            #self._update_sandesh_status(ConnectionStatus.DOWN)
            #self._conn_state = ConnectionStatus.DOWN

            self._conn.close()

            self._conn.ensure_connection()
            self._conn.connect()

            #self._update_sandesh_status(ConnectionStatus.UP)
            #self._conn_state = ConnectionStatus.UP
            msg = 'RabbitMQ connection ESTABLISHED %s' % repr(self._conn)
            #self._logger(msg, level=SandeshLevel.SYS_NOTICE)

            self._channel = self._conn.channel()
            self._consumer = kombu.Consumer(self._conn,
                                           queues=self._queues,
                                           callbacks=[self._subscriber],
                                           accept=["application/json"])
    # end _reconnect

    def _connection_watch(self, connected, timeout=10000):
        if not connected:
            self._reconnect()

        while True:
            try:
                self._consumer.consume()
                self._conn.drain_events()
            except self._conn.connection_errors + self._conn.channel_errors as e:
                self._reconnect()
    # end _connection_watch

    def _connection_watch_forever(self, timeout=10000):
        connected = True
        while True:
            try:
                self._connection_watch(connected, timeout)
            except Exception as e:
                msg = 'Error in rabbitmq drainer greenlet: %s' %(str(e))
                print(msg) 
                # avoid 'reconnect()' here as that itself might cause exception
                connected = False
    # end _connection_watch_forever

    def _process_message_dict(self, message_dict):
        return message_dict["event_type"]

    def _subscribe_cb(self, body):
        #print("The body is {}".format(body))
        message_dict = json.loads(str(body["oslo.message"]))
        #print("Event recorded: " + str(self._process_message_dict(message_dict)))
        #print("Message: \n" + str(message_dict))
        message_dict_payload = message_dict.pop("payload")
        ironic_object_data = message_dict_payload["ironic_object.data"]
        for k in message_dict:
            ironic_object_data[k] = message_dict[k]
        ironic_node_list = []
        ironic_node_list.append(ironic_object_data)
        self._ironic_notification_manager.process_ironic_node_info(ironic_node_list)

    def _subscriber(self, body, message):
        try:
            self._subscribe_cb(body)
            message.ack()
        except Exception as e:
            print("The error is " + str(e))

    def _start(self):
        self._reconnect()
        self._connection_watch_forever()

    def shutdown(self):
        self._conn.close()
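
The regular expression in _parse_rabbit_hosts() above accepts entries of the form user:password@host:port, with user, password and port each optional and filled in from the constructor defaults when missing. An illustrative standalone check (same regular expressions; the addresses and defaults are made up):

import re

def parse_rabbit_hosts(rabbit_servers, defaults):
    ret = []
    for s in re.compile(r'[,\s]+').split(rabbit_servers):
        match = re.match(r"(?:(?P<user>.*?)(?::(?P<password>.*?))*@)*"
                         r"(?P<host>.*?)(?::(?P<port>\d+))*$", s)
        if match:
            mdict = match.groupdict().copy()
            for key in ('user', 'password', 'port'):
                if not mdict[key]:
                    mdict[key] = defaults[key]
            ret.append(mdict)
    return ret

defaults = {'user': 'guest', 'password': 'guest', 'port': '5672'}
print(parse_rabbit_hosts('admin:secret@10.1.1.1:5673, 10.1.1.2', defaults))
# [{'user': 'admin', 'password': 'secret', 'host': '10.1.1.1', 'port': '5673'},
#  {'user': 'guest', 'password': 'guest', 'host': '10.1.1.2', 'port': '5672'}]
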
Example #8
class VncKombuClientBase(object):
    def _update_sandesh_status(self, status, msg=''):
        ConnectionState.update(conn_type=ConnectionType.DATABASE,
            name='RabbitMQ', status=status, message=msg,
            server_addrs=["%s:%s" % (self._rabbit_ip, self._rabbit_port)])
    # end _update_sandesh_status

    def publish(self, message):
        self._publish_queue.put(message)
    # end publish

    def __init__(self, rabbit_ip, rabbit_port, rabbit_user, rabbit_password,
                 rabbit_vhost, rabbit_ha_mode, q_name, subscribe_cb, logger):
        self._rabbit_ip = rabbit_ip
        self._rabbit_port = rabbit_port
        self._rabbit_user = rabbit_user
        self._rabbit_password = rabbit_password
        self._rabbit_vhost = rabbit_vhost
        self._subscribe_cb = subscribe_cb
        self._logger = logger
        self._publish_queue = Queue()
        self._conn_lock = Semaphore()

        self.obj_upd_exchange = kombu.Exchange('vnc_config.object-update', 'fanout',
                                               durable=False)

    def num_pending_messages(self):
        return self._publish_queue.qsize()
    # end num_pending_messages

    def prepare_to_consume(self):
        # override this method
        return

    def _reconnect(self, delete_old_q=False):
        if self._conn_lock.locked():
            # either connection-monitor or publisher should have taken
            # the lock. The one who acquired the lock would re-establish
            # the connection and releases the lock, so the other one can 
            # just wait on the lock, till it gets released
            self._conn_lock.wait()
            if self._conn_state == ConnectionStatus.UP:
                return

        with self._conn_lock:
            msg = "RabbitMQ connection down"
            self._logger(msg, level=SandeshLevel.SYS_ERR)
            self._update_sandesh_status(ConnectionStatus.DOWN)
            self._conn_state = ConnectionStatus.DOWN

            self._conn.close()

            self._conn.ensure_connection()
            self._conn.connect()

            self._update_sandesh_status(ConnectionStatus.UP)
            self._conn_state = ConnectionStatus.UP
            msg = 'RabbitMQ connection ESTABLISHED %s' % repr(self._conn)
            self._logger(msg, level=SandeshLevel.SYS_NOTICE)

            self._channel = self._conn.channel()
            if delete_old_q:
                # delete the old queue in first-connect context
                # as db-resync would have caught up with history.
                try:
                    bound_q = self._update_queue_obj(self._channel)
                    bound_q.delete()
                except Exception as e:
                    msg = 'Unable to delete the old amqp queue: %s' %(str(e))
                    self._logger(msg, level=SandeshLevel.SYS_ERR)

            self._consumer = kombu.Consumer(self._channel,
                                           queues=self._update_queue_obj,
                                           callbacks=[self._subscribe])
            self._producer = kombu.Producer(self._channel, exchange=self.obj_upd_exchange)
    # end _reconnect

    def _connection_watch(self, connected):
        if not connected:
            self._reconnect()

        self.prepare_to_consume()
        while True:
            try:
                self._consumer.consume()
                self._conn.drain_events()
            except self._conn.connection_errors + self._conn.channel_errors as e:
                self._reconnect()
    # end _connection_watch

    def _connection_watch_forever(self):
        connected = True
        while True:
            try:
                self._connection_watch(connected)
            except Exception as e:
                msg = 'Error in rabbitmq drainer greenlet: %s' %(str(e))
                self._logger(msg, level=SandeshLevel.SYS_ERR)
                # avoid 'reconnect()' here as that itself might cause exception
                connected = False
    # end _connection_watch_forever

    def _publisher(self):
        message = None
        connected = True
        while True:
            try:
                if not connected:
                    self._reconnect()
                    connected = True

                if not message:
                    # earlier was sent fine, dequeue one more
                    message = self._publish_queue.get()

                while True:
                    try:
                        self._producer.publish(message)
                        message = None
                        break
                    except self._conn.connection_errors + self._conn.channel_errors as e:
                        self._reconnect()
            except Exception as e:
                log_str = "Error in rabbitmq publisher greenlet: %s" %(str(e))
                self._logger(log_str, level=SandeshLevel.SYS_ERR)
                # avoid 'reconnect()' here as that itself might cause exception
                connected = False
    # end _publisher

    def _subscribe(self, body, message):
        try:
            self._subscribe_cb(body)
        finally:
            message.ack()


    def _start(self):
        self._reconnect(delete_old_q=True)

        self._publisher_greenlet = gevent.spawn(self._publisher)
        self._connection_monitor_greenlet = gevent.spawn(self._connection_watch_forever)

    def shutdown(self):
        self._publisher_greenlet.kill()
        self._connection_monitor_greenlet.kill()
        self._producer.close()
        self._consumer.close()
        self._conn.close()
Example #9
File: pool.py Project: DigDug101/gevent
class Pool(Group):
    def __init__(self, size=None, greenlet_class=None):
        """
        Create a new pool.

        A pool is like a group, but the maximum number of members
        is governed by the *size* parameter.

        :keyword int size: If given, this non-negative integer is the
            maximum count of active greenlets that will be allowed in
            this pool. A few values have special significance:

            * ``None`` (the default) places no limit on the number of
              greenlets. This is useful when you need to track, but not limit,
              greenlets, as with :class:`gevent.pywsgi.WSGIServer`
            * ``0`` creates a pool that can never have any active greenlets. Attempting
              to spawn in this pool will block forever. This is only useful
              if an application uses :meth:`wait_available` with a timeout and checks
              :meth:`free_count` before attempting to spawn.
        """
        if size is not None and size < 0:
            raise ValueError('size must not be negative: %r' % (size, ))
        Group.__init__(self)
        self.size = size
        if greenlet_class is not None:
            self.greenlet_class = greenlet_class
        if size is None:
            self._semaphore = DummySemaphore()
        else:
            self._semaphore = Semaphore(size)

    def wait_available(self, timeout=None):
        """
        Wait until it's possible to spawn a greenlet in this pool.

        :param float timeout: If given, only wait the specified number
            of seconds.

        .. warning:: If the pool was initialized with a size of 0, this
           method will block forever unless a timeout is given.

        :return: A number indicating how many new greenlets can be put into
           the pool without blocking.

        .. versionchanged:: 1.1a3
            Added the ``timeout`` parameter.
        """
        return self._semaphore.wait(timeout=timeout)

    def full(self):
        """
        Return a boolean indicating whether this pool is full, i.e.,
        whether :meth:`add` would block. (True if there is no room for
        more members, False if there is.)
        """
        return self.free_count() <= 0

    def free_count(self):
        """
        Return a number indicating approximately how many more members
        can be added to this pool.
        """
        if self.size is None:
            return 1
        return max(0, self.size - len(self))

    def add(self, greenlet):
        self._semaphore.acquire()
        try:
            Group.add(self, greenlet)
        except:
            self._semaphore.release()
            raise

    def _discard(self, greenlet):
        Group._discard(self, greenlet)
        self._semaphore.release()
Example #10
class IronicKombuClient(object):

    def __init__(self, rabbit_server, rabbit_port,
                 rabbit_user, rabbit_password,
                 notification_level, ironic_notif_mgr_obj, **kwargs):
        self._rabbit_port = rabbit_port
        self._rabbit_user = rabbit_user
        self._rabbit_password = rabbit_password
        self._rabbit_hosts = self._parse_rabbit_hosts(rabbit_server)
        self._rabbit_ip = self._rabbit_hosts[0]["host"]
        self._notification_level = notification_level
        self._ironic_notification_manager = ironic_notif_mgr_obj
        self._conn_lock = Semaphore()

        # Register a handler for SIGTERM so that we can release the lock
        # Without it, it can take several minutes before new master is elected
        # If any app using this wants to register their own sigterm handler,
        # then we will have to modify this function to perhaps take an argument
        # gevent.signal(signal.SIGTERM, self.sigterm_handler)

        self._url = "amqp://%s:%s@%s:%s/" % (self._rabbit_user,
                                             self._rabbit_password,
                                             self._rabbit_ip,
                                             self._rabbit_port)
        msg = "Initializing RabbitMQ connection, urls %s" % self._url
        # self._conn_state = ConnectionStatus.INIT
        self._conn = kombu.Connection(self._url)
        self._exchange = self._set_up_exchange()
        self._queues = []
        self._queues = self._set_up_queues(self._notification_level)
        if not self._queues:
            exit()

    def _parse_rabbit_hosts(self, rabbit_servers):

        default_dict = {'user': self._rabbit_user,
                        'password': self._rabbit_password,
                        'port': self._rabbit_port}
        ret = []
        rabbit_hosts = re.compile(r'[,\s]+').split(rabbit_servers)
        for s in rabbit_hosts:
            match = re.match(r"(?:(?P<user>.*?)"
                             r"(?::(?P<password>.*?))"
                             r"*@)*(?P<host>.*?)(?::(?P<port>\d+))*$", s)
            if match:
                mdict = match.groupdict().copy()
                for key in ['user', 'password', 'port']:
                    if not mdict[key]:
                        mdict[key] = default_dict[key]

                ret.append(mdict)

        return ret

    def _set_up_exchange(self, exchange_name=None):
        if exchange_name:
            exchange = kombu.Exchange(str(exchange_name),
                                      type="topic", durable=False)
        else:
            exchange = kombu.Exchange("ironic", type="topic", durable=False)

    def _set_up_queues(self, notification_level):
        if notification_level not in ['info', 'debug', 'warning', 'error']:
            msg = "Unrecongized notification level: " + \
                  str(notification_level) + \
                  "\nPlease enter a valid notification level from: " \
                  "'info', 'debug', 'warning', 'error'"
            return 0
        sub_queue_names = []
        sub_queues = []
        log_levels = []
        if notification_level == "debug":
            log_levels = ['debug', 'info', 'warning', 'error']
        elif notification_level == "info":
            log_levels = ['info', 'warning', 'error']
        elif notification_level == "warning":
            log_levels = ['warning', 'error']
        elif notification_level == "error":
            log_levels = ['error']

        for level in log_levels:
            sub_queue_names.append('ironic_versioned_notifications.' +
                                   str(level))

        for sub_queue_name in sub_queue_names:
            sub_queues.append(kombu.Queue(str(sub_queue_name),
                              durable=False, exchange=self._exchange,
                              routing_key=str(sub_queue_name)))

        return sub_queues

    def _reconnect(self, delete_old_q=False):
        if self._conn_lock.locked():
            # either connection-monitor or publisher should have taken
            # the lock. The one who acquired the lock would re-establish
            # the connection and releases the lock, so the other one can
            # just wait on the lock, till it gets released
            self._conn_lock.wait()
            # if self._conn_state == ConnectionStatus.UP:
            #    return

        with self._conn_lock:
            msg = "RabbitMQ connection down"
            # self._logger(msg, level=SandeshLevel.SYS_NOTICE)
            # self._update_sandesh_status(ConnectionStatus.DOWN)
            # self._conn_state = ConnectionStatus.DOWN

            self._conn.close()

            self._conn.ensure_connection()
            self._conn.connect()

            # self._update_sandesh_status(ConnectionStatus.UP)
            # self._conn_state = ConnectionStatus.UP
            msg = 'RabbitMQ connection ESTABLISHED %s' % repr(self._conn)
            # self._logger(msg, level=SandeshLevel.SYS_NOTICE)

            self._channel = self._conn.channel()
            self._consumer = kombu.Consumer(self._conn,
                                            queues=self._queues,
                                            callbacks=[self._subscriber],
                                            accept=["application/json"])
    # end _reconnect

    def _connection_watch(self, connected, timeout=10000):
        if not connected:
            self._reconnect()

        while True:
            try:
                self._consumer.consume()
                self._conn.drain_events()
            except self._conn.connection_errors + self._conn.channel_errors:
                self._reconnect()
    # end _connection_watch

    def _connection_watch_forever(self, timeout=10000):
        connected = True
        while True:
            try:
                self._connection_watch(connected, timeout)
            except Exception as e:
                msg = 'Error in rabbitmq drainer greenlet: %s' % (str(e))
                print(msg)
                # avoid 'reconnect()' here as that itself might cause exception
                connected = False
    # end _connection_watch_forever

    def _process_message_dict(self, message_dict):
        return message_dict["event_type"]

    def _subscribe_cb(self, body):
        # print("The body is {}".format(body))
        message_dict = json.loads(str(body["oslo.message"]))
        # print("Message: \n" + str(message_dict))
        message_dict_payload = message_dict.pop("payload")
        ironic_object_data = message_dict_payload["ironic_object.data"]
        for k in message_dict:
            ironic_object_data[k] = message_dict[k]
        ironic_node_list = []
        ironic_node_list.append(ironic_object_data)
        self._ironic_notification_manager.process_ironic_node_info(
            ironic_node_list)

    def _subscriber(self, body, message):
        try:
            self._subscribe_cb(body)
            message.ack()
        except Exception as e:
            print("The error is " + str(e))

    def _start(self):
        self._reconnect()
        self._connection_watch_forever()

    def shutdown(self):
        self._conn.close()
Example #11
class VncKombuClientBase(object):
    def _update_sandesh_status(self, status, msg=''):
        ConnectionState.update(conn_type=ConnectionType.DATABASE,
            name='RabbitMQ', status=status, message=msg,
            server_addrs=["%s:%s" % (self._rabbit_ip, self._rabbit_port)])
    # end _update_sandesh_status

    def publish(self, message):
        self._publish_queue.put(message)
    # end publish

    def __init__(self, rabbit_ip, rabbit_port, rabbit_user, rabbit_password,
                 rabbit_vhost, rabbit_ha_mode, q_name, subscribe_cb, logger):
        self._rabbit_ip = rabbit_ip
        self._rabbit_port = rabbit_port
        self._rabbit_user = rabbit_user
        self._rabbit_password = rabbit_password
        self._rabbit_vhost = rabbit_vhost
        self._subscribe_cb = subscribe_cb
        self._logger = logger
        self._publish_queue = Queue()
        self._conn_lock = Semaphore()

        self.obj_upd_exchange = kombu.Exchange('vnc_config.object-update', 'fanout',
                                               durable=False)

    def num_pending_messages(self):
        return self._publish_queue.qsize()
    # end num_pending_messages

    def prepare_to_consume(self):
        # override this method
        return

    def _reconnect(self):
        if self._conn_lock.locked():
            # either connection-monitor or publisher should have taken
            # the lock. The one who acquired the lock would re-establish
            # the connection and releases the lock, so the other one can 
            # just wait on the lock, till it gets released
            self._conn_lock.wait()
            return

        self._conn_lock.acquire()

        msg = "RabbitMQ connection down"
        self._logger(msg, level=SandeshLevel.SYS_ERR)
        self._update_sandesh_status(ConnectionStatus.DOWN)
        self._conn_state = ConnectionStatus.DOWN

        self._conn.close()

        self._conn.ensure_connection()
        self._conn.connect()

        self._update_sandesh_status(ConnectionStatus.UP)
        self._conn_state = ConnectionStatus.UP
        msg = 'RabbitMQ connection ESTABLISHED %s' % repr(self._conn)
        self._logger(msg, level=SandeshLevel.SYS_NOTICE)

        self._channel = self._conn.channel()
        self._consumer = kombu.Consumer(self._channel,
                                       queues=self._update_queue_obj,
                                       callbacks=[self._subscribe])
        if self._can_consume:
            self._consumer.consume()
        self._producer = kombu.Producer(self._channel, exchange=self.obj_upd_exchange)

        self._conn_lock.release()
    # end _reconnect

    def _connection_watch(self):
        self.prepare_to_consume()
        self._can_consume = True
        self._consumer.consume()
        while True:
            try:
                self._conn.drain_events()
            except self._conn.connection_errors + self._conn.channel_errors as e:
                self._reconnect()
    # end _connection_watch

    def _publisher(self):
        while True:
            try:
                message = self._publish_queue.get()
                while True:
                    try:
                        self._producer.publish(message)
                        break
                    except self._conn.connection_errors + self._conn.channel_errors as e:
                        self._reconnect()
            except Exception as e:
                log_str = "Unknown exception in _publisher greenlet" + str(e)
                self._logger(log_str, level=SandeshLevel.SYS_ERR)
    # end _publisher

    def _subscribe(self, body, message):
        try:
            self._subscribe_cb(body)
        finally:
            message.ack()


    def _start(self):
        self._can_consume = False
        self._reconnect()

        self._publisher_greenlet = gevent.spawn(self._publisher)
        self._connection_monitor_greenlet = gevent.spawn(self._connection_watch)

    def shutdown(self):
        self._publisher_greenlet.kill()
        self._connection_monitor_greenlet.kill()
        self._producer.close()
        self._consumer.close()
        self._conn.close()