def get_queue():
    """Return a (queue, Empty) pair matching the current threading model.

    :returns: a tuple of a fresh FIFO queue instance and the ``Empty``
        exception class callers should catch on a failed non-blocking get.
    """
    if eventletutils.is_monkey_patched('thread'):
        # Green queue cooperates with the eventlet hub.
        import eventlet
        return eventlet.queue.Queue(), eventlet.queue.Empty
    # six.moves.queue is simply the stdlib 'queue' module on Python 3;
    # import it directly and drop the legacy six indirection.
    import queue
    return queue.Queue(), queue.Empty
def get_executor(method):
    """Build an executor for *method* suited to the current threading model.

    :param method: callable the executor will run
    :returns: a green executor when 'thread' is monkey patched by
        eventlet, otherwise a native-thread based executor
    """
    if not eventletutils.is_monkey_patched('thread'):
        from oslo_messaging._drivers.zmq_driver.poller import threading_poller
        return threading_poller.ThreadingExecutor(method)
    from oslo_messaging._drivers.zmq_driver.poller import green_poller
    return green_poller.GreenExecutor(method)
def consume(self, timeout=None):
    """Receive messages.

    :param timeout: poll timeout in seconds
    """
    def _on_expired(exc):
        # Translate the driver timeout into the common Timeout type.
        raise driver_common.Timeout(str(exc))

    deadline = driver_common.DecayingTimer(duration=timeout)
    deadline.start()

    # Never poll longer than the configured consumer timeout per attempt.
    if timeout is None:
        poll_timeout = self.consumer_timeout
    else:
        poll_timeout = min(timeout, self.consumer_timeout)

    while not self._consume_loop_stopped:
        try:
            if eventletutils.is_monkey_patched('thread'):
                # Offload the blocking poll to a native thread via tpool
                # so the green hub keeps running.
                return tpool.execute(self._poll_messages, poll_timeout)
            return self._poll_messages(poll_timeout)
        except ConsumerTimeout as exc:
            # Shrink the next poll window; raises Timeout once the
            # overall deadline has decayed to zero.
            poll_timeout = deadline.check_return(
                _on_expired, exc, maximum=self.consumer_timeout)
        except Exception:
            LOG.exception("Failed to consume messages")
            return
def get_pool(size):
    """Create a futurist executor pool of the given size.

    :param size: maximum number of workers in the pool
    :returns: a green thread pool when 'thread' is monkey patched,
        a native thread pool otherwise
    """
    import futurist

    patched = eventletutils.is_monkey_patched('thread')
    pool_cls = (futurist.GreenThreadPoolExecutor if patched
                else futurist.ThreadPoolExecutor)
    return pool_cls(size)
def get_poller():
    """Return a poller implementation for the current threading model.

    :returns: a green poller under eventlet monkey patching, otherwise
        a native threading poller
    """
    if not eventletutils.is_monkey_patched('thread'):
        from oslo_messaging._drivers.zmq_driver.poller import threading_poller
        return threading_poller.ThreadingPoller()
    from oslo_messaging._drivers.zmq_driver.poller import green_poller
    return green_poller.GreenPoller()
def consume(self, timeout=None):
    """Receive messages.

    :param timeout: poll timeout in seconds
    """
    def _expired(exc):
        raise driver_common.Timeout(str(exc))

    timer = driver_common.DecayingTimer(duration=timeout)
    timer.start()

    # Cap each poll attempt at the configured consumer timeout.
    poll_timeout = self.consumer_timeout
    if timeout is not None:
        poll_timeout = min(timeout, self.consumer_timeout)

    while True:
        if self._consume_loop_stopped:
            return
        try:
            if not eventletutils.is_monkey_patched('thread'):
                return self._poll_messages(poll_timeout)
            # Under eventlet, run the blocking poll in a real OS thread.
            return tpool.execute(self._poll_messages, poll_timeout)
        except ConsumerTimeout as exc:
            # Re-poll with a decayed window; raises Timeout when the
            # overall deadline is exhausted.
            poll_timeout = timer.check_return(
                _expired, exc, maximum=self.consumer_timeout)
        except Exception:
            LOG.exception("Failed to consume messages")
            return
def _init_if_needed(self):
    """Lazily (re)initialize per-process connection machinery.

    Uses double-checked locking keyed on the current pid so that after
    a fork the child rebuilds its own connection factory and pools
    instead of reusing the parent's.
    """
    cur_pid = os.getpid()

    # Fast path: already initialized in this process.
    if self._pid == cur_pid:
        return

    with self._init_lock:
        # Re-check under the lock: another thread may have completed
        # initialization while we waited.
        if self._pid == cur_pid:
            return

        if self._pid:
            # _pid is set but differs: we are running in a forked child.
            LOG.warning("New pid is detected. Old: %s, new: %s. "
                        "Cleaning up...", self._pid, cur_pid)

        # Note(dukhlov): we need to force select poller usage in case
        # when 'thread' module is monkey patched because current
        # eventlet implementation does not support patching of
        # poll/epoll/kqueue
        if eventletutils.is_monkey_patched("thread"):
            from pika.adapters import select_connection
            select_connection.SELECT_TYPE = "select"

        # Load the configured connection factory plugin.
        mgr = driver.DriverManager(
            'oslo.messaging.pika.connection_factory',
            self._connection_factory_type
        )
        self._connection_factory = mgr.driver(self.url, self.conf)

        # initializing 2 connection pools: 1st for connections without
        # confirmations, 2nd - with confirmations
        self._connection_without_confirmation_pool = pika_pool.QueuedPool(
            create=self.create_connection,
            max_size=self.conf.oslo_messaging_pika.pool_max_size,
            max_overflow=self.conf.oslo_messaging_pika.pool_max_overflow,
            timeout=self.conf.oslo_messaging_pika.pool_timeout,
            recycle=self.conf.oslo_messaging_pika.pool_recycle,
            stale=self.conf.oslo_messaging_pika.pool_stale,
        )

        self._connection_with_confirmation_pool = pika_pool.QueuedPool(
            create=self.create_connection,
            max_size=self.conf.oslo_messaging_pika.pool_max_size,
            max_overflow=self.conf.oslo_messaging_pika.pool_max_overflow,
            timeout=self.conf.oslo_messaging_pika.pool_timeout,
            recycle=self.conf.oslo_messaging_pika.pool_recycle,
            stale=self.conf.oslo_messaging_pika.pool_stale,
        )

        self._connection_with_confirmation_pool.Connection = (
            _PooledConnectionWithConfirmations
        )

        # Publish the pid last so concurrent callers never observe a
        # half-initialized state through the fast path above.
        self._pid = cur_pid
def notify_send(self, topic, ctxt, msg, retry):
    """Send messages to Kafka broker.

    :param topic: String of the topic
    :param ctxt: context for the messages
    :param msg: messages for publishing
    :param retry: the number of retry
    """
    # Negative retry counts mean "retry forever" downstream.
    retry = None if retry < 0 else retry

    payload = jsonutils.dumps(pack_message(ctxt, msg)).encode('utf-8')

    try:
        self._ensure_producer()
        if eventletutils.is_monkey_patched('thread'):
            # Run the blocking produce call in a native thread so the
            # green hub stays responsive.
            return tpool.execute(self._produce_message, topic, payload)
        return self._produce_message(topic, payload)
    except Exception:
        # NOTE(sileht): if something goes wrong close the producer
        # connection
        self._close_producer()
        raise
import queue import socket import threading import time from oslo_config import cfg from oslo_log import log as logging from oslo_metrics import message_type from oslo_utils import eventletutils from oslo_utils import importutils LOG = logging.getLogger(__name__) eventlet = importutils.try_import('eventlet') if eventlet and eventletutils.is_monkey_patched("thread"): # Here we initialize module with the native python threading module # if it was already monkey patched by eventlet/greenlet. stdlib_threading = eventlet.patcher.original('threading') else: # Manage the case where we run this driver in a non patched environment # and where user even so configure the driver to run heartbeat through # a python thread, if we don't do that when the heartbeat will start # we will facing an issue by trying to override the threading module. stdlib_threading = threading oslo_messaging_metrics = [ cfg.BoolOpt('metrics_enabled', default=False, help='Boolean to send rpc metrics to oslo.metrics.'), cfg.IntOpt('metrics_buffer_size',
def import_zmq():
    """Import and return the zmq module, or None if unavailable.

    Prefers the eventlet-green flavour when 'thread' has been monkey
    patched.
    """
    if eventletutils.is_monkey_patched('thread'):
        module_name = 'eventlet.green.zmq'
    else:
        module_name = 'zmq'
    return importutils.try_import(module_name, default=None)
import tenacity from oslo_messaging._drivers import base from oslo_messaging._drivers import common as driver_common from oslo_messaging._drivers import kafka_options from oslo_messaging._drivers import pool as driver_pool from oslo_messaging._i18n import _LE from oslo_messaging._i18n import _LW from oslo_serialization import jsonutils import logging as l l.basicConfig(level=l.INFO) l.getLogger("kafka").setLevel(l.WARN) l.getLogger("stevedore").setLevel(l.WARN) if eventletutils.is_monkey_patched('select'): # monkeypatch the vendored SelectSelector._select like eventlet does # https://github.com/eventlet/eventlet/blob/master/eventlet/green/selectors.py#L32 from eventlet.green import select selectors.SelectSelector._select = staticmethod(select.select) # Force to use the select selectors KAFKA_SELECTOR = selectors.SelectSelector else: KAFKA_SELECTOR = selectors.DefaultSelector LOG = logging.getLogger(__name__) def unpack_message(msg): context = {}
def test_eventlet_is_patched(self, mock_patcher):
    """is_monkey_patched() mirrors whatever the patcher reports."""
    for value, check in ((True, self.assertTrue),
                         (False, self.assertFalse)):
        mock_patcher.is_monkey_patched.return_value = value
        check(eventletutils.is_monkey_patched('os'))
'''
monkey-patch
dynamically replace modules

green thread
http://eventlet.net/doc/modules/greenthread.html

Or

(Translated from Chinese:) If you have done any serious programming with
threads, you know how hard it is to write a correct program, because the
scheduler can interrupt a thread at any moment.  You must remember to hold
locks to protect the important parts of the program, preventing multi-step
operations from being interrupted part-way through and leaving data in an
invalid state.

Coroutines, by contrast, are protected against interruption by default: we
must explicitly yield for the rest of the program to run.  With coroutines
there is no need to hold locks to synchronize operations across threads —
coroutines synchronize themselves, because at any moment only one coroutine
is running.  When we want to give up control, we use yield or yield from to
hand control back to the scheduler.

This is why coroutines can be cancelled safely: by definition, a coroutine
can only be cancelled at a paused yield point, so we can handle the
CancelledError exception and perform cleanup.

In short, coroutines are lighter on resources, more efficient, and safer
than threads.
'''
from oslo_utils import eventletutils

# Show the local and global namespaces of this demo module.
print(locals())
print(globals())

# Print the callable eventletutils provides for identifying the current
# thread, then whether 'sys' is reported as monkey patched.
print(eventletutils.fetch_current_thread_functor())
print(eventletutils.is_monkey_patched('sys'))

# Warn if eventlet is present but the expected modules were not patched.
eventletutils.warn_eventlet_not_patched(expected_patched_modules=None,
                                        what='this library')
def test_eventlet_no_patcher(self):
    """Without eventlet's patcher, 'os' is not reported as patched."""
    patched = eventletutils.is_monkey_patched('os')
    self.assertFalse(patched)
def get_executor_with_context():
    """Return the executor type name for the current threading model.

    :returns: 'eventlet' when 'thread' is monkey patched, else 'threading'
    """
    executor = 'threading'
    message = "Using a threading executor"
    if eventletutils.is_monkey_patched('thread'):
        executor = 'eventlet'
        message = "Threading is patched, using an eventlet executor"
    LOG.debug(message)
    return executor
def import_zmq():
    """Return the zmq module (green variant under eventlet), or None."""
    name = 'zmq'
    if eventletutils.is_monkey_patched('thread'):
        # eventlet's green zmq cooperates with the green hub.
        name = 'eventlet.green.zmq'
    return importutils.try_import(name, default=None)
from oslo_messaging._drivers import base from oslo_messaging._drivers import common as driver_common from oslo_messaging._drivers import kafka_options from oslo_messaging._drivers import pool as driver_pool from oslo_messaging._i18n import _LE from oslo_messaging._i18n import _LW from oslo_serialization import jsonutils import logging as l l.basicConfig(level=l.INFO) l.getLogger("kafka").setLevel(l.WARN) l.getLogger("stevedore").setLevel(l.WARN) if eventletutils.is_monkey_patched('select'): # monkeypatch the vendored SelectSelector._select like eventlet does # https://github.com/eventlet/eventlet/blob/master/eventlet/green/selectors.py#L32 from eventlet.green import select selectors.SelectSelector._select = staticmethod(select.select) # Force to use the select selectors KAFKA_SELECTOR = selectors.SelectSelector else: KAFKA_SELECTOR = selectors.DefaultSelector LOG = logging.getLogger(__name__) def unpack_message(msg): context = {}
def _Event():
    """Return an event object matching the current threading model.

    Bug fix: the branches were inverted.  When 'thread' IS monkey
    patched by eventlet, the green event must be used; the native
    threading event is only correct in an unpatched environment.  This
    matches every other is_monkey_patched('thread') dispatch in this
    codebase (green poller/executor/queue under patching).
    """
    if eventletutils.is_monkey_patched('thread'):
        return _GreenEvent()
    else:
        return _ThreadingEvent()