def _init_if_needed(self):
    """Lazily (re)initialize per-process state: connection factory and pools.

    Safe to call from multiple threads: work is done at most once per
    process, guarded by ``self._init_lock`` and the stored ``self._pid``,
    and redone after a fork (when ``os.getpid()`` no longer matches).
    """
    cur_pid = os.getpid()
    if self._pid == cur_pid:
        return
    with self._init_lock:
        # Double-checked locking: another thread may have finished the
        # initialization while we were waiting for the lock.
        if self._pid == cur_pid:
            return
        if self._pid:
            LOG.warning("New pid is detected. Old: %s, new: %s. "
                        "Cleaning up...", self._pid, cur_pid)
        # Note(dukhlov): we need to force select poller usage in case
        # when 'thread' module is monkey patched because current
        # eventlet implementation does not support patching of
        # poll/epoll/kqueue
        if eventletutils.is_monkey_patched("thread"):
            from pika.adapters import select_connection
            select_connection.SELECT_TYPE = "select"
        # Load the configured connection factory via stevedore.
        mgr = driver.DriverManager(
            'oslo.messaging.pika.connection_factory',
            self._connection_factory_type
        )
        self._connection_factory = mgr.driver(self.url, self.conf)
        # initializing 2 connection pools: 1st for connections without
        # confirmations, 2nd - with confirmations
        self._connection_without_confirmation_pool = pika_pool.QueuedPool(
            create=self.create_connection,
            max_size=self.conf.oslo_messaging_pika.pool_max_size,
            max_overflow=self.conf.oslo_messaging_pika.pool_max_overflow,
            timeout=self.conf.oslo_messaging_pika.pool_timeout,
            recycle=self.conf.oslo_messaging_pika.pool_recycle,
            stale=self.conf.oslo_messaging_pika.pool_stale,
        )
        self._connection_with_confirmation_pool = pika_pool.QueuedPool(
            create=self.create_connection,
            max_size=self.conf.oslo_messaging_pika.pool_max_size,
            max_overflow=self.conf.oslo_messaging_pika.pool_max_overflow,
            timeout=self.conf.oslo_messaging_pika.pool_timeout,
            recycle=self.conf.oslo_messaging_pika.pool_recycle,
            stale=self.conf.oslo_messaging_pika.pool_stale,
        )
        # Connections handed out by the second pool use the
        # publisher-confirms connection class.
        self._connection_with_confirmation_pool.Connection = (
            _PooledConnectionWithConfirmations
        )
        # Mark this process as initialized only after everything succeeded.
        self._pid = cur_pid
def test_use_it():
    """Smoke test: publish one persistent JSON message via a pooled connection."""
    params = pika.URLParameters(
        'amqp://*****:*****@localhost:5672/?'
        'socket_timeout=10&'
        'connection_attempts=2'
    )

    def _open_connection():
        return pika.BlockingConnection(parameters=params)

    pool = pika_pool.QueuedPool(
        create=_open_connection,
        max_size=10,
        max_overflow=10,
        timeout=10,
        recycle=3600,
        stale=45,
    )

    payload = json.dumps({
        'type': 'banana',
        'description': 'they are yellow'
    })
    message_props = pika.BasicProperties(
        content_type='application/json',
        content_encoding='utf-8',
        delivery_mode=2,  # persistent delivery
    )

    with pool.acquire() as cxn:
        cxn.channel.basic_publish(
            body=payload,
            exchange='',
            routing_key='fruits',
            properties=message_props,
        )
def init_rabbitmq_connection(app):
    """Initialize the webserver rabbitmq connection.

    This initializes _rabbitmq as a connection pool from which new RabbitMQ
    connections can be acquired. Exits the process (after a short sleep)
    when RABBITMQ_HOST is missing from the app config.
    """
    global _rabbitmq

    if "RABBITMQ_HOST" not in app.config:
        app.logger.error(
            "RabbitMQ host:port not defined. Sleeping 2 seconds, and exiting.")
        sleep(2)
        sys.exit(-1)

    credentials = pika.PlainCredentials(app.config['RABBITMQ_USERNAME'],
                                        app.config['RABBITMQ_PASSWORD'])
    connection_parameters = pika.ConnectionParameters(
        host=app.config['RABBITMQ_HOST'],
        port=app.config['RABBITMQ_PORT'],
        virtual_host=app.config['RABBITMQ_VHOST'],
        credentials=credentials,
    )

    def _new_connection():
        # The pool calls this whenever it needs a fresh broker connection.
        return pika.BlockingConnection(connection_parameters)

    _rabbitmq = pika_pool.QueuedPool(
        create=_new_connection,
        max_size=100,
        max_overflow=10,
        timeout=10,
        recycle=3600,
        stale=45,
    )
def init_rabbitmq_connection(app):
    """Initialize the webserver rabbitmq connection.

    This initializes _rabbitmq as a connection pool from which new RabbitMQ
    connections can be acquired. Idempotent: returns immediately when the
    pool already exists. Raises ConnectionError when RABBITMQ_HOST is not
    configured.
    """
    global _rabbitmq

    # Already initialized — nothing to do.
    if _rabbitmq is not None:
        return

    if "RABBITMQ_HOST" not in app.config:
        raise ConnectionError(
            "Cannot connect to RabbitMQ: host and port not defined")

    credentials = pika.PlainCredentials(app.config['RABBITMQ_USERNAME'],
                                        app.config['RABBITMQ_PASSWORD'])
    connection_parameters = pika.ConnectionParameters(
        host=app.config['RABBITMQ_HOST'],
        port=app.config['RABBITMQ_PORT'],
        virtual_host=app.config['RABBITMQ_VHOST'],
        credentials=credentials,
    )

    def _new_connection():
        # Fresh broker connection for each pool slot.
        return pika.BlockingConnection(connection_parameters)

    _rabbitmq = pika_pool.QueuedPool(
        create=_new_connection,
        max_size=100,
        max_overflow=10,
        timeout=10,
        recycle=3600,
        stale=45,
    )
    app.logger.info('Connection to RabbitMQ established!')
def basic_publish(cls, exchange='', routing_key='', body='', server_node=None):
    """Publish a persistent message through a pooled RabbitMQ connection.

    :param exchange: target exchange name
    :param routing_key: routing key for the message
    :param body: message payload
    :param server_node: RabbitMQ server node to connect to; defaults to
        CURRENT_NODE when it is configured in RABBITMQ_SERVER, otherwise
        'default'
    """
    if server_node is None:
        if CURRENT_NODE in RABBITMQ_SERVER:
            server_node = CURRENT_NODE
        else:
            server_node = 'default'

    pool = cls.__pool_dict.get(server_node)
    if pool is None:
        # First use of this node: build and cache its connection pool.
        node_params = RABBITMQ_SERVER[server_node]
        pool = pika_pool.QueuedPool(
            create=lambda: pika.BlockingConnection(
                pika.ConnectionParameters(**node_params)),
            max_size=10,
            max_overflow=10,
            timeout=5,
            recycle=3600,
            stale=60,
        )
        cls.__pool_dict[server_node] = pool
        print("Connecting to rabbitmq server @%s" % node_params)

    with pool.acquire() as connection:
        connection.channel.basic_publish(
            exchange=exchange,
            routing_key=routing_key,
            body=body,
            properties=pika.BasicProperties(
                delivery_mode=2,  # make message persistent
            ))
def init_rabbitmq_connection(app):
    """Create a connection pool for the RabbitMQ server.

    Sets the module-level ``_rabbitmq`` to a ``pika_pool.QueuedPool`` whose
    factory opens a *new* broker connection on demand. Exits the process
    (after a short sleep) when RABBITMQ_HOST is missing from the app config.
    """
    global _rabbitmq

    if "RABBITMQ_HOST" not in app.config:
        app.logger.error(
            "RabbitMQ host:port not defined. Sleeping 2 seconds, and exiting.")
        sleep(2)
        sys.exit(-1)

    connection_config = {
        'username': app.config['RABBITMQ_USERNAME'],
        'password': app.config['RABBITMQ_PASSWORD'],
        'host': app.config['RABBITMQ_HOST'],
        'port': app.config['RABBITMQ_PORT'],
        'virtual_host': app.config['RABBITMQ_VHOST']
    }

    # Bug fix: previously one connection was opened eagerly and the pool's
    # `create` callable returned that same object every time, so all pool
    # slots shared a single (eventually closed/stale) connection and
    # recycle/stale handling could never obtain a replacement. The factory
    # must open a fresh connection on each invocation.
    _rabbitmq = pika_pool.QueuedPool(
        create=lambda: utils.connect_to_rabbitmq(
            **connection_config,
            error_logger=app.logger.error,
            error_retry_delay=2),
        max_size=100,
        max_overflow=10,
        timeout=10,
        recycle=3600,
        stale=45,
    )
def init_rabbitmq_connection(app):
    """Create a connection pool for the RabbitMQ server, retrying forever.

    Sets the module-level ``_rabbitmq`` once the broker is reachable. Exits
    the process (after a short sleep) when RABBITMQ_HOST is missing from
    the app config.
    """
    global _rabbitmq

    if "RABBITMQ_HOST" not in app.config:
        app.logger.error("RabbitMQ host:port not defined. Sleeping 2 seconds, and exiting.")
        sleep(2)
        sys.exit(-1)

    params = pika.URLParameters(
        'amqp://*****:*****@%s:%d/?socket_timeout=10&connection_attempts=2'
        % (app.config['RABBITMQ_HOST'], app.config['RABBITMQ_PORT'])
    )

    while True:
        try:
            pool = pika_pool.QueuedPool(
                create=lambda: pika.BlockingConnection(parameters=params),
                max_size=100,
                max_overflow=10,
                timeout=10,
                recycle=3600,
                stale=45,
            )
            # Bug fix: QueuedPool opens connections lazily, so merely
            # constructing the pool never raised and this retry loop was
            # dead code. Acquire one connection so an unreachable broker
            # is actually detected and retried here.
            with pool.acquire():
                pass
            _rabbitmq = pool
            return
        except Exception:
            app.logger.error("Cannot connect to rabbitmq, sleeping 2 seconds")
            sleep(2)
def queued_pool(params):
    """Return a QueuedPool of blocking pika connections built from *params*."""
    def _connect():
        return pika.BlockingConnection(params)

    return pika_pool.QueuedPool(
        create=_connect,
        recycle=10,
        stale=10,
        max_size=10,
        max_overflow=10,
        timeout=10,
    )
def __init__(self, mq_server01=conf.mq_server01, mq_server02=conf.mq_server02,
             mq_port=conf.mq_port):
    """Create a pika connection pool, failing over from server01 to server02.

    Fixes applied:
    * ``except Exception, e`` (Python-2-only syntax, removed in Python 3)
      replaced with ``except Exception as e``.
    * ``QueuedPool`` opens connections lazily, so constructing the pool
      never raised and the failover branch was unreachable; each candidate
      pool is now probed with one real connection before being accepted.

    :param mq_server01: primary RabbitMQ host
    :param mq_server02: fallback RabbitMQ host
    :param mq_port: broker port for both hosts
    :raises Exception: when neither server is reachable
    """
    log.debug('Connecting to rabbitmq server, server01=%s, server02=%s' %
              (mq_server01, mq_server02))
    self.params01 = pika.ConnectionParameters(host=mq_server01, port=mq_port)
    self.params02 = pika.ConnectionParameters(host=mq_server02, port=mq_port)
    try:
        self.pool = self._open_pool(self.params01)
    except Exception as e:
        log.error('rabbitmq server %s connection error: reason=%s' %
                  (mq_server01, e))
        try:
            self.pool = self._open_pool(self.params02)
        except Exception as e:
            log.error('rabbitmq server %s connection error: reason=%s' %
                      (mq_server02, e))
            raise

def _open_pool(self, params):
    """Build a QueuedPool for *params* and verify the broker is reachable."""
    pool = pika_pool.QueuedPool(
        create=lambda: pika.BlockingConnection(parameters=params),
        max_size=10,
        max_overflow=10,
        timeout=10,
        recycle=3600,
        stale=45)
    # Probe once so connection failures surface here instead of later.
    with pool.acquire():
        pass
    return pool
def connect(authenticator):
    """Build a pika connection pool from the authenticator's configuration."""
    cfg = authenticator.config
    params = pika.URLParameters(cfg['RABBIT_MQ_URL_PARAMETER_PIKA_POOL'])

    def _open():
        return pika.BlockingConnection(parameters=params)

    return pika_pool.QueuedPool(
        create=_open,
        max_size=cfg['POOL_MAXIMUM_CONNECTION_LIMIT'],
        max_overflow=cfg['POOL_MAXIMUM_CONNECTION_OVERFLOW_LIMIT'],
        timeout=cfg['POOL_MAXIMUM_CONNECTION_ACQUIRE_TIME'],
        stale=cfg['POOL_CONNECTION_STATE_TIME_DURATION'])
def pika_pool(self):
    """Return the cached pika connection pool, creating it on first use."""
    if not self._pika_pool:
        broker_params = pika.URLParameters(settings.BROKER_URL)
        broker_params.socket_timeout = 5

        def _connect():
            return pika.BlockingConnection(parameters=broker_params)

        self._pika_pool = pika_pool.QueuedPool(
            create=_connect,
            max_size=10,
            max_overflow=10,
            timeout=10,
            recycle=3600,
            stale=45,
        )
    return self._pika_pool
def __init__(self, amqp_url, routing_key):
    """Store publishing settings and build the connection pool.

    :param amqp_url: AMQP broker URL (must be non-empty)
    :param routing_key: routing key for published messages (must be
        non-empty)
    :raises ValueError: when either argument is empty
    """
    # Guard clause first; ValueError (a subclass of Exception, so existing
    # `except Exception` callers still work) with a corrected message —
    # the old one read "much not empty".
    if not (amqp_url and routing_key):
        raise ValueError("amqp_url and routing_key must not be empty!")

    self.amqp_url = amqp_url
    self.routing_key = routing_key
    self.params = pika.URLParameters(amqp_url)
    self.pool = pika_pool.QueuedPool(
        create=lambda: self.create_connection(),
        max_size=100,
        max_overflow=10,
        timeout=10,
        recycle=3600,
        stale=45,
    )
def pool(self):
    """Return the per-app-context pika pool, creating it on first access.

    Also (re)declares the durable 'geokrety' topic exchange on every call,
    as the original did. Returns None when there is no application context.
    """
    ctx = stack.top
    if ctx is None:
        return None

    params = pika.URLParameters(current_app.config['RABBITMQ_URL'])
    if not hasattr(ctx, 'pika_flask'):
        ctx.pika_flask = pika_pool.QueuedPool(
            create=lambda: pika.BlockingConnection(parameters=params),
            max_size=10,
            max_overflow=10,
            timeout=10,
            recycle=3600,
            stale=45,
        )

    with ctx.pika_flask.acquire() as cxn:
        cxn.channel.exchange_declare(exchange='geokrety',
                                     exchange_type='topic',
                                     durable=True)
    return ctx.pika_flask
def get_rabbit_connection():
    '''Connect to Rabbit MQ

    Builds a QueuedPool whose factory opens a blocking connection using the
    RABBIT_CRED environment variable (an AMQP connection string).
    '''
    import pika
    import pika_pool

    def _open():
        # Re-read the env var per connection, as the original lambda did.
        return pika.BlockingConnection(
            parameters=pika.URLParameters(os.environ['RABBIT_CRED']))

    return pika_pool.QueuedPool(
        create=_open,
        max_size=10,
        max_overflow=10,
        timeout=10,
        recycle=3600,
        stale=45,
    )
def get_pool(queue_url, **kwargs):
    """Get an instance of Pika pool.

    This is a pool of Pika connections which can be used to write messages
    to RabbitMQ.

    Arguments:
        queue_url : rabbit MQ URI

    Options passed as named args include:
        socket_timeout
        connection_attempts
        max_size
        max_overflow
        timeout
        recycle
        stale
    """
    options = {
        'socket_timeout': 10,
        'connection_attempts': 2,
        'max_size': 10,
        'max_overflow': 10,
        'timeout': 10,
        'recycle': 3600,
        'stale': 45
    }
    options.update(kwargs)

    # Pika is not thread safe, but one connection per thread is wasteful,
    # so use a pool instead.
    # Robustness fix: the old code always appended '?', which produced an
    # invalid URL when queue_url already carried a query string; join with
    # '&' in that case.
    separator = '&' if '?' in queue_url else '?'
    params = pika.URLParameters(
        '%s%ssocket_timeout=%s&connection_attempts=%s'
        % (queue_url, separator,
           options['socket_timeout'], options['connection_attempts']))

    return pika_pool.QueuedPool(
        create=lambda: pika.BlockingConnection(parameters=params),
        max_size=options['max_size'],
        max_overflow=options['max_overflow'],
        timeout=options['timeout'],
        recycle=options['recycle'],
        stale=options['stale'])
from elasticsearch_dsl.connections import connections

import settings

# Process-wide Redis connection pool built from the configured URI.
REDIS_POOL = redis.ConnectionPool.from_url(settings.REDIS_URI)

# Default Elasticsearch client registered with elasticsearch_dsl.
es = connections.create_connection(hosts=[settings.ELASTIC_URI])

pika_params = pika.URLParameters(settings.AMQP_URI + '?'
                                 'socket_timeout=1&'
                                 'connection_attempts=2')

# Shared RabbitMQ connection pool; connections are acquired from here
# per use rather than being shared across threads.
rmq_pool = pika_pool.QueuedPool(
    create=lambda: pika.BlockingConnection(parameters=pika_params),
    max_size=10,
    max_overflow=10,
    timeout=10,
    recycle=3600,
    stale=45,
)

# Stdout logger for this service at the configured level.
logger = logging.getLogger('Campaigns')
logger.setLevel(getattr(logging, settings.LOG_LEVEL))
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(getattr(logging, settings.LOG_LEVEL))
logger.addHandler(ch)

# NOTE(review): these actor imports sit at the bottom, presumably because
# the actors depend on the globals defined above — confirm before moving.
from actors.ssp import SSP  # noqa
ssp_ref = SSP.start()

from actors.ssp_py import SspPy  # noqa
def __init__(self, conf, url, default_exchange=None,
             allowed_remote_exmods=None):
    """Read driver options from *conf*, validate them, and build the
    per-host pika connection parameters and the two connection pools
    (plain and publisher-confirms).

    :param conf: oslo config object exposing the oslo_messaging_pika group
    :param url: transport URL; must carry at least one host
    :param default_exchange: unused here beyond the signature (kept for
        callers)
    :param allowed_remote_exmods: extra exception modules allowed for
        deserialization, appended to the driver's own module
    :raises ValueError: for negative/missing retry delays or attempts, or
        when *url* has no hosts
    """
    self.conf = conf

    self._force_select_poller_use = (
        pika_drv_cmns.is_eventlet_monkey_patched('select'))

    # processing rpc options
    self.default_rpc_exchange = (
        conf.oslo_messaging_pika.default_rpc_exchange)
    self.rpc_reply_exchange = (conf.oslo_messaging_pika.rpc_reply_exchange)

    self.allowed_remote_exmods = [pika_drv_cmns.EXCEPTIONS_MODULE]
    if allowed_remote_exmods:
        self.allowed_remote_exmods.extend(allowed_remote_exmods)

    self.rpc_listener_prefetch_count = (
        conf.oslo_messaging_pika.rpc_listener_prefetch_count)

    self.default_rpc_retry_attempts = (
        conf.oslo_messaging_pika.default_rpc_retry_attempts)

    self.rpc_retry_delay = (conf.oslo_messaging_pika.rpc_retry_delay)
    if self.rpc_retry_delay < 0:
        raise ValueError("rpc_retry_delay should be non-negative integer")

    # NOTE(review): reply prefetch is read from rpc_listener_prefetch_count
    # (not a reply-specific option) — looks intentional, but confirm.
    self.rpc_reply_listener_prefetch_count = (
        conf.oslo_messaging_pika.rpc_listener_prefetch_count)

    self.rpc_reply_retry_attempts = (
        conf.oslo_messaging_pika.rpc_reply_retry_attempts)
    self.rpc_reply_retry_delay = (
        conf.oslo_messaging_pika.rpc_reply_retry_delay)
    if self.rpc_reply_retry_delay < 0:
        raise ValueError("rpc_reply_retry_delay should be non-negative "
                         "integer")

    self.rpc_queue_expiration = (
        self.conf.oslo_messaging_pika.rpc_queue_expiration)

    # processing notification options
    self.default_notification_exchange = (
        conf.oslo_messaging_pika.default_notification_exchange)
    self.notification_persistence = (
        conf.oslo_messaging_pika.notification_persistence)
    self.notification_listener_prefetch_count = (
        conf.oslo_messaging_pika.notification_listener_prefetch_count)
    self.default_notification_retry_attempts = (
        conf.oslo_messaging_pika.default_notification_retry_attempts)
    if self.default_notification_retry_attempts is None:
        raise ValueError("default_notification_retry_attempts should be "
                         "an integer")
    self.notification_retry_delay = (
        conf.oslo_messaging_pika.notification_retry_delay)
    if (self.notification_retry_delay is None or
            self.notification_retry_delay < 0):
        raise ValueError("notification_retry_delay should be non-negative "
                         "integer")

    self._tcp_user_timeout = self.conf.oslo_messaging_pika.tcp_user_timeout
    self.host_connection_reconnect_delay = (
        self.conf.oslo_messaging_pika.host_connection_reconnect_delay)
    self._heartbeat_interval = (
        self.conf.oslo_messaging_pika.heartbeat_interval)

    # initializing connection parameters for configured RabbitMQ hosts
    common_pika_params = {
        'virtual_host': url.virtual_host,
        'channel_max': self.conf.oslo_messaging_pika.channel_max,
        'frame_max': self.conf.oslo_messaging_pika.frame_max,
        'ssl': self.conf.oslo_messaging_pika.ssl,
        'ssl_options': self.conf.oslo_messaging_pika.ssl_options,
        'socket_timeout': self.conf.oslo_messaging_pika.socket_timeout,
    }

    self._connection_lock = threading.Lock()

    self._connection_host_param_list = []
    self._connection_host_status_list = []

    if not url.hosts:
        raise ValueError("You should provide at least one RabbitMQ host")

    # One parameter dict and one status record per configured host.
    for transport_host in url.hosts:
        pika_params = common_pika_params.copy()
        pika_params.update(
            host=transport_host.hostname,
            port=transport_host.port,
            credentials=pika_credentials.PlainCredentials(
                transport_host.username, transport_host.password),
        )
        self._connection_host_param_list.append(pika_params)
        self._connection_host_status_list.append({
            self.HOST_CONNECTION_LAST_TRY_TIME: 0,
            self.HOST_CONNECTION_LAST_SUCCESS_TRY_TIME: 0
        })

    # Start host selection at a random index to spread load across hosts.
    self._next_connection_host_num = random.randint(
        0, len(self._connection_host_param_list) - 1)

    # initializing 2 connection pools: 1st for connections without
    # confirmations, 2nd - with confirmations
    self.connection_without_confirmation_pool = pika_pool.QueuedPool(
        create=self.create_connection,
        max_size=self.conf.oslo_messaging_pika.pool_max_size,
        max_overflow=self.conf.oslo_messaging_pika.pool_max_overflow,
        timeout=self.conf.oslo_messaging_pika.pool_timeout,
        recycle=self.conf.oslo_messaging_pika.pool_recycle,
        stale=self.conf.oslo_messaging_pika.pool_stale,
    )

    self.connection_with_confirmation_pool = pika_pool.QueuedPool(
        create=self.create_connection,
        max_size=self.conf.oslo_messaging_pika.pool_max_size,
        max_overflow=self.conf.oslo_messaging_pika.pool_max_overflow,
        timeout=self.conf.oslo_messaging_pika.pool_timeout,
        recycle=self.conf.oslo_messaging_pika.pool_recycle,
        stale=self.conf.oslo_messaging_pika.pool_stale,
    )

    # Connections from the second pool use the publisher-confirms class.
    self.connection_with_confirmation_pool.Connection = (
        _PooledConnectionWithConfirmations)
channel=channel, routing_key="notification") import pika import pika_pool rmq_usr = str(parser.get('RABBITMQ_SECTION', 'USER')) rmq_pass = str(parser.get('RABBITMQ_SECTION', 'PASSWORD')) credentials = pika.PlainCredentials(rmq_usr, rmq_pass) pool = pika_pool.QueuedPool( create=lambda: pika.BlockingConnection( pika.ConnectionParameters( parser.get('RABBITMQ_SECTION', 'RABBITMQ_HOST'), 5672, '/', credentials)), max_size=10, max_overflow=10, timeout=10, recycle=3600, stale=45, ) orchestrator_exchange = Exchange("orchestrator", type="direct") orchestrator_producer = Producer(exchange=orchestrator_exchange, channel=channel, routing_key="orchestrator") job_manager_exchange = Exchange("job_manager", type="direct") job_manager_producer = Producer(exchange=job_manager_exchange, channel=channel, routing_key="job_manager")
} REDIS_TRIGGER_EVENT_CHANNEL = 'tragger_events' RABBITMQ_PARAMS = pika.URLParameters( 'amqp://*****:*****@localhost:5672/?' 'socket_timeout=10&' 'connection_attempts=2' ) RABBIT_POOL = pika_pool.QueuedPool( create=lambda: pika.BlockingConnection(parameters=RABBITMQ_PARAMS), max_size=20, max_overflow=10, timeout=10, recycle=3600, stale=45, ) RABBIT_TRIGGER_NOTIFY_QUEUE = 'trigger_notify_queue' RABBIT_CHECK_SERVICE_ALIVE_QUEUE = 'check_service_alive_queue' SALT_CONFIG_FILES_DIR=os.path.join(os.path.join(BASE_DIR,'saltapp'),'salt_configs') # #任务定义所在的模块 # CELERY_IMPORTS = ('monitor.tasks', ) # # 使用和Django一样的时区 # CELERY_TIMEZONE = TIME_ZONE # # #以上为基本配置,以下为周期性任务定义,以celerybeat_开头的