Example 1
 def test_setitem_adds_default_exchange(self):
     q = Queues(default_exchange=Exchange('bar'))
     assert q.default_exchange
     queue = Queue('foo', exchange=None)
     queue.exchange = None
     q['foo'] = queue
     assert q['foo'].exchange == q.default_exchange
Example 2
def init_plugin(plugin):
    from sentry.plugins import bindings
    plugin.setup(bindings)

    # Register contexts from plugins if necessary
    if hasattr(plugin, 'get_custom_contexts'):
        from sentry.interfaces.contexts import contexttype
        for cls in plugin.get_custom_contexts() or ():
            contexttype(cls)

    if (hasattr(plugin, 'get_cron_schedule') and plugin.is_enabled()):
        schedules = plugin.get_cron_schedule()
        if schedules:
            settings.CELERYBEAT_SCHEDULE.update(schedules)

    if (hasattr(plugin, 'get_worker_imports') and plugin.is_enabled()):
        imports = plugin.get_worker_imports()
        if imports:
            settings.CELERY_IMPORTS += tuple(imports)

    if (hasattr(plugin, 'get_worker_queues') and plugin.is_enabled()):
        from kombu import Queue
        for queue in plugin.get_worker_queues():
            try:
                name, routing_key = queue
            except ValueError:
                name = routing_key = queue
            q = Queue(name, routing_key=routing_key)
            q.durable = False
            settings.CELERY_QUEUES.append(q)
Example 3
 def queue_delete(self, queue_name):
     # NOTE(gtt): We can omit exchange and routing_key argument here
     # queue = Queue(queue_name, exchange=exchange,
     #      routing_key=routing_key, channel=conn.channel())
     queue = Queue(queue_name, channel=self._channel())
     print "Deleting queue %s" % queue
     return queue.delete()
Example 4
 def listen_events(cls, routing_key, exchange=BUS_EXCHANGE_NAME):
     exchange = Exchange(exchange, type=BUS_EXCHANGE_TYPE)
     with Connection(BUS_URL) as conn:
         queue = Queue(BUS_QUEUE_NAME, exchange=exchange, routing_key=routing_key, channel=conn.channel())
         queue.declare()
         queue.purge()
         cls.bus_queue = queue
Example 5
    def get_consumers(self, Consumer, channel):
        assert self.mq_config

        # Declaring ourselves rather than use auto-declare.
        log.debug("Declaring %s exchange", self.mq_config.exchange)
        self.exchange(channel).declare()

        queues = []
        for queue_name, routing_keys in self.mq_config.queues.items():
            queue = Queue(name=queue_name,
                          exchange=self.exchange,
                          channel=channel,
                          durable=True)
            log.debug("Declaring queue %s", queue_name)
            queue.declare()
            for routing_key in routing_keys:
                log.debug("Binding queue %s to %s", queue_name, routing_key)
                queue.bind_to(exchange=self.exchange,
                              routing_key=routing_key)
            queues.append(queue)

        consumer = Consumer(queues=queues,
                            callbacks=[self._callback],
                            auto_declare=False)
        consumer.qos(prefetch_count=1, apply_global=True)
        return [consumer]
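A minimal sketch of the same manual declare-and-bind pattern in isolation, runnable against kombu's in-memory transport; the 'events'/'audit' names and routing patterns are illustrative, not taken from the example above.

from kombu import Connection, Exchange, Queue

with Connection('memory://') as conn:
    channel = conn.channel()
    # declare the exchange ourselves rather than relying on auto_declare
    exchange = Exchange('events', type='topic', channel=channel)
    exchange.declare()
    queue = Queue('audit', exchange=exchange, routing_key='user.#',
                  channel=channel, durable=True)
    queue.declare()
    # extra bindings, one per routing key, as get_consumers() does above
    queue.bind_to(exchange=exchange, routing_key='order.#')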
Example 6
 def test_declare(self):
     chan = get_conn().channel()
     b = Queue('foo', self.exchange, 'foo', channel=chan)
     self.assertTrue(b.is_bound)
     b.declare()
     self.assertIn('exchange_declare', chan)
     self.assertIn('queue_declare', chan)
     self.assertIn('queue_bind', chan)
Example 7
 def queue_get(self, queue_name, ack=True):
     queue = Queue(queue_name, channel=self._channel())
     msg = queue.get()
     if not msg:
         return None
     if ack:
         msg.ack()
     return msg
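For context, a hedged end-to-end sketch of the declare/publish/get/ack cycle that queue_get() above relies on, runnable against kombu's in-memory transport (all names are placeholders).

from kombu import Connection, Exchange, Queue

with Connection('memory://') as conn:
    channel = conn.channel()
    exchange = Exchange('demo_exchange', type='direct')
    queue = Queue('demo_queue', exchange, routing_key='demo', channel=channel)
    queue.declare()

    producer = conn.Producer(channel)
    producer.publish({'hello': 'world'}, exchange=exchange,
                     routing_key='demo', serializer='json')

    message = queue.get()  # basic.get: returns a Message or None
    if message is not None:
        print(message.payload)
        message.ack()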
Example 8
    def test_eq(self):
        q1 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx')
        q2 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx')
        self.assertEqual(q1, q2)
        self.assertEqual(q1.__eq__(True), NotImplemented)

        q3 = Queue('yyy', Exchange('xxx', 'direct'), 'xxx')
        self.assertNotEqual(q1, q3)
Example 9
    def test_eq(self):
        q1 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx')
        q2 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx')
        assert q1 == q2
        assert q1.__eq__(True) == NotImplemented

        q3 = Queue('yyy', Exchange('xxx', 'direct'), 'xxx')
        assert q1 != q3
Example 10
 def test_declare(self):
     chan = get_conn().channel()
     b = Queue('foo', self.exchange, 'foo', channel=chan)
     assert b.is_bound
     b.declare()
     assert 'exchange_declare' in chan
     assert 'queue_declare' in chan
     assert 'queue_bind' in chan
Example 11
def add_binding(queue_name, routing_key, exchange_name=None):
    exchange_name = exchange_name or world.config['bus']['exchange_name']
    exchange = Exchange(exchange_name, type=world.config['bus']['exchange_type'])
    with Connection(world.config['bus_url']) as conn:
        queue = Queue(queue_name, exchange=exchange, routing_key=routing_key,
                      channel=conn.channel())
        queue.declare()
        queue.purge()
        _queues[queue_name] = queue
Example 12
def _declare_queue(glance_api_cfg, routing_key, conn, exchange):
    queue = Queue(name=routing_key,
                  routing_key=routing_key,
                  exchange=exchange,
                  channel=conn.channel(),
                  durable=False)
    queue.declare()

    return queue
Example 13
 def test_also_binds_exchange(self):
     chan = get_conn().channel()
     b = Queue('foo', self.exchange)
     assert not b.is_bound
     assert not b.exchange.is_bound
     b = b.bind(chan)
     assert b.is_bound
     assert b.exchange.is_bound
     assert b.channel is b.exchange.channel
     assert b.exchange is not self.exchange
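bind() returns a bound copy and leaves the original untouched, which is exactly what the assertions above verify; a short sketch of the same behavior on the in-memory transport:

from kombu import Connection, Exchange, Queue

with Connection('memory://') as conn:
    template = Queue('foo', Exchange('bar'))
    bound = template.bind(conn.channel())
    assert bound is not template
    assert bound.is_bound and not template.is_bound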
Example 14
 def test_also_binds_exchange(self):
     chan = get_conn().channel()
     b = Queue('foo', self.exchange)
     self.assertFalse(b.is_bound)
     self.assertFalse(b.exchange.is_bound)
     b = b.bind(chan)
     self.assertTrue(b.is_bound)
     self.assertTrue(b.exchange.is_bound)
     self.assertIs(b.channel, b.exchange.channel)
     self.assertIsNot(b.exchange, self.exchange)
Example 15
        def declare_dead_queue():
            channel = connection.channel()
            dead_exchange = Exchange(name=config.rabbitmq_dead_exchange(),
                                     type='direct',
                                     channel=channel)
            dead_queue = Queue(name=config.rabbitmq_dead_queue(),
                               routing_key=config.rabbitmq_routing_key(),
                               exchange=dead_exchange,
                               channel=channel)

            dead_queue.declare()

            return dead_exchange
Example 16
        def declare_queue(self, connection,
                          name='',
                          auto_delete=False, durable=False,
                          **kwargs):
            queue_args = kwargs.pop('queue_arguments', {})
            queue_args['x-ha-policy'] = 'all'

            queue = Queue(name,
                          durable=durable, auto_delete=auto_delete,
                          queue_arguments=queue_args,
                          **kwargs)

            queue.maybe_bind(connection.default_channel)
            queue.queue_declare()
            return queue
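The same idea in isolation: queue_arguments carries broker-specific arguments at declaration time ('x-ha-policy' is a legacy RabbitMQ mirroring argument). A sketch assuming an in-memory broker and a placeholder queue name:

from kombu import Connection, Queue

with Connection('memory://') as conn:
    # placeholder queue name; the argument is simply passed through to the broker
    queue = Queue('ha_queue', queue_arguments={'x-ha-policy': 'all'})
    queue.maybe_bind(conn.default_channel)
    queue.queue_declare()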
Example 17
class MqServer(object):
    """
    exchange='E_X7_W2S', queue='Q_X7_W2S',routing_key = 'RK_X7_W2S'
    """

    def __init__(self, callback, kwargs):
        self.callback = callback
        if kwargs:
            self.kwargs = kwargs
        else:
            self.kwargs = MqDict

    def connect(self, hostname="localhost", userid="guest", password="******", virtual_host="/"):
        self.conn = BrokerConnection(hostname, userid, password, virtual_host)
        # define Web2Server exchange
        exchange = Exchange(self.kwargs["X7_E"], type="direct")
        self.queue = Queue(self.kwargs["X7_Q"], exchange, routing_key=self.kwargs["X7_RK"])
        channel = self.conn.channel()

        consumer = Consumer(channel, self.queue, callbacks=[self.callback])
        consumer.consume()

    def run(self, once=False):
        if once:
            self.conn.drain_events()
        else:
            while True:
                self.conn.drain_events()

    def get(self):
        message = self.queue.get(block=True)
        message.ack()
        return message
Example 18
    def test_basic_get(self):
        chan1 = self.connection.channel()
        producer = Producer(chan1, self.exchange)
        producer.publish({"basic.get": "this"}, routing_key="basic_get")
        chan1.close()

        chan2 = self.connection.channel()
        queue = Queue("amqplib_basic_get", self.exchange, "basic_get")
        queue = queue(chan2)
        queue.declare()
        for i in range(50):
            m = queue.get()
            if m:
                break
            time.sleep(0.1)
        self.assertEqual(m.payload, {"basic.get": "this"})
        chan2.close()
Example 19
    def __init__(self, name=None, exchange_name=None, exchange=None,
                 durable=False, auto_delete=True, callback=None,
                 pulse_config=None, **kwargs):
        self.callback = callback
        self.pulse_config = pulse_config or {}

        self.data = None
        self.logger = logging.getLogger('mozmill-ci')

        durable = durable or self.pulse_config.get('durable', False)

        if exchange_name:
            # Using passive mode is important, otherwise pulse returns 403
            exchange = Exchange(exchange_name, type='topic', passive=True)

        Queue.__init__(self, name=name, exchange=exchange, durable=durable,
                       auto_delete=not durable, **kwargs)
Example 20
    def pre_declare_queues(self, queues):
        """Pre-declare any queues that will be used in tests.

        :queues: list of names of queues

        """

        declared_queues = []
        for queue_name in queues:
            q = Queue(queue_name,
                      self._exchange,
                      channel=self._connection,
                      durable=self._exchange.durable,
                      routing_key=queue_name)
            q.declare()
            declared_queues.append(q)

        self.queues = declared_queues
Example 21
 def add_compat(self, name, **options):
     # docs used to use binding_key as routing key
     options.setdefault('routing_key', options.get('binding_key'))
     if options['routing_key'] is None:
         options['routing_key'] = name
     if self.ha_policy is not None:
         self._set_ha_policy(options.setdefault('queue_arguments', {}))
     q = self[name] = Queue.from_dict(name, **options)
     return q
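Queue.from_dict, used above, builds a Queue from keyword options; a small sketch with illustrative values:

from kombu import Queue

q = Queue.from_dict('images', routing_key='images.resize', durable=True)
assert q.name == 'images'
assert q.routing_key == 'images.resize'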
Example 22
    def connect(self, hostname="localhost", userid="guest", password="******", virtual_host="/"):
        self.conn = BrokerConnection(hostname, userid, password, virtual_host)
        # define Web2Server exchange
        exchange = Exchange(self.kwargs["X7_E"], type="direct")
        self.queue = Queue(self.kwargs["X7_Q"], exchange, routing_key=self.kwargs["X7_RK"])
        channel = self.conn.channel()

        consumer = Consumer(channel, self.queue, callbacks=[self.callback])
        consumer.consume()
Example 23
    def test_basic_get(self):
        if not self.verify_alive():
            return
        chan1 = self.connection.channel()
        producer = Producer(chan1, self.exchange)
        chan2 = self.connection.channel()
        queue = Queue(self.P("basic_get"), self.exchange, "basic_get")
        queue = queue(chan2)
        queue.declare()
        producer.publish({"basic.get": "this"}, routing_key="basic_get")
        chan1.close()

        for i in range(self.event_loop_max):
            m = queue.get()
            if m:
                break
            time.sleep(0.1)
        self.assertEqual(m.payload, {"basic.get": "this"})
        chan2.close()
Example 24
def get_message():
    exchange = Exchange(name=exchange_name, type='topic',
                        exclusive=False, durable=False, auto_delete=False)

    queue_name = 'neutron_notifications.info'
    q = Queue(queue_name, exchange=exchange,
              routing_key=routing_key, channel=conn.channel())
    msg = q.get()

    if msg is None:
        print('No messages')
        return

    try:
        pprint.pprint(json.loads(msg.body), indent=2)
    except ValueError:
        print(msg.body)
    finally:
        msg.ack()
Example 25
 def add_compat(self, name, **options):
     # docs used to use binding_key as routing key
     options.setdefault("routing_key", options.get("binding_key"))
     if options["routing_key"] is None:
         options["routing_key"] = name
     if self.ha_policy is not None:
         self._set_ha_policy(options.setdefault("queue_arguments", {}))
     if self.max_priority is not None:
         self._set_max_priority(options.setdefault("queue_arguments", {}))
     q = self[name] = Queue.from_dict(name, **options)
     return q
Example 26
 def push_sync(self,upload=True,delay=0,dryrun=False,timeout=None): # pragma: no cover
     "wait for push messages"
     from kombu import Connection, Exchange, Queue, Consumer
     import socket, ssl
     url, opts, exchange, queue = self.get_broker()
     def callback(body, message):
         self.process_update(body)
         message.ack()
     with Connection(url,**opts) as conn:
         self.connection = conn
         queue = Queue(queue, channel=conn)
         queue.queue_declare()
         queue.bind_to(exchange)
         try: excpt = (socket.error, ssl.SSLZeroReturnError)
         except AttributeError: excpt = socket.error
         with conn.Consumer(queue, accept=['json'], callbacks=[callback]) as consumer:
             while True:
                 try: conn.drain_events(timeout=timeout)
                 except socket.timeout: pass
                 except excpt: break
Example 27
    def test_basic_get(self):
        if not self.verify_alive():
            return
        chan1 = self.connection.channel()
        producer = chan1.Producer(self.exchange)
        chan2 = self.connection.channel()
        queue = Queue(self.P('basic_get'), self.exchange, 'basic_get')
        queue = queue(chan2)
        queue.declare()
        producer.publish({'basic.get': 'this'}, routing_key='basic_get')
        chan1.close()

        for i in range(self.event_loop_max):
            m = queue.get()
            if m:
                break
            time.sleep(0.1)
        self.assertEqual(m.payload, {'basic.get': 'this'})
        self.purge([queue.name])
        chan2.close()
Example 28
    def test_declare_but_no_exchange(self):
        q = Queue('a')
        q.queue_declare = Mock()
        q.queue_bind = Mock()
        q.exchange = None

        q.declare()
        q.queue_declare.assert_called_with(False, passive=False)
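As the test asserts, declare() on a queue with no exchange issues only queue_declare, with no exchange_declare or queue_bind; a sketch of the same situation on the in-memory transport:

from kombu import Connection, Queue

with Connection('memory://') as conn:
    q = Queue('standalone', channel=conn.channel())
    q.exchange = None
    q.declare()  # declares the queue only; there is nothing to bind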
Example 29
    def send(self):

        try:
            # Connection
            conn = Connection(self.broker)

            # Channel
            channel = conn.channel()

            # Exchange
            task_exchange = Exchange(self._exchange_name,
                                     type=self._queue_type)

            # Queues
            if self._queue_name:
                queue = Queue(name=self._queue_name, channel=channel,
                              exchange=task_exchange,
                              routing_key=self._routing_key)
                queue.declare()

            # Producer
            producer = Producer(exchange=task_exchange, channel=channel,
                                routing_key=self._routing_key)

            # Send message
            for message in self._msgs:
                serialized_message = json.dumps(message, ensure_ascii=False)
                producer.publish(serialized_message)

            conn.close()

        except Exception as e:

            self.log.error(
                u'QueueManagerError - Error on sending objects from queue.')
            self.log.debug(e)
            raise Exception(
                'QueueManagerError - Error on sending objects to queue.')
Example 30
    def test_declare__no_declare(self):
        q = Queue('a', no_declare=True)
        q.queue_declare = Mock()
        q.queue_bind = Mock()
        q.exchange = None

        q.declare()
        self.assertFalse(q.queue_declare.called)
        self.assertFalse(q.queue_bind.called)
Example 31
from kombu import Queue
## Broker settings.
BROKER_URL = 'amqp://*****:*****@rabbit_mq:5672/'

CELERY_IGNORE_RESULT = False

CELERY_RESULT_BACKEND = "mongodb"
CELERY_MONGODB_BACKEND_SETTINGS = {
    "host": "mongo_result_backend",
    "port": 27017,
    "database": "TMS_DB",
    "taskmeta_collection": "taskmeta_collection",
    "user": '******',
    "password": '******'
}

CELERYD_MAX_TASKS_PER_CHILD = 50

CELERY_QUEUES = {Queue('sample_region_1'), Queue('sample_region_2')}
Example 32
# -*- coding:utf-8 -*-
from __future__ import absolute_import
import os
from django.conf import settings
from kombu import Queue, Exchange
from celery.schedules import crontab

# set the default Django settings module for the 'celery' program.
# see reference: http://docs.jinkan.org/docs/celery/django/first-steps-with-django.html

BROKER_URL = 'redis://'+settings.CACHE['redis']["celery"]["host"]+\
                   ':'+str(settings.CACHE['redis']["celery"]["port"])+'/'+\
                   str(settings.CACHE['redis']["celery"]["db"])
CELERY_RESULT_BACKEND = BROKER_URL

CELERY_QUEUES = (Queue('default', Exchange('default'), routing_key='default'),
                 Queue('low', Exchange('low'), routing_key='low_celery'),
                 Queue('middle',
                       Exchange('middle'),
                       routing_key='middle_celery'),
                 Queue('high', Exchange('high'), routing_key='high_celery'))
CELERY_DEFAULT_QUEUE = 'default'
CELERY_DEFAULT_EXCHANGE = 'default'
CELERY_DEFAULT_ROUTING_KEY = 'default'

CELERY_TIMEZONE = 'Asia/Shanghai'

CELERYBEAT_SCHEDULE = {
    'add': {
        'task': 'console.tasks.auto_update_database',
        'schedule': crontab(minute="*/10"),
Example 33

if __name__ == "__main__":
    run(host="localhost", port=8080)
else:
    #get config info:
    parser = OptionParser()
    parser.add_option("-c",
                      dest='configfile',
                      default=sys.argv[0].replace('.py', '.conf'),
                      help="configuration file to use")
    (options, args) = parser.parse_args()
    initConfig()

    #connect and declare the message queue/kombu objects.
    connString = 'amqp://{0}:{1}@{2}:{3}//'.format(options.mquser,
                                                   options.mqpassword,
                                                   options.mqserver,
                                                   options.mqport)
    mqConn = Connection(connString)

    eventTaskExchange = Exchange(name=options.taskexchange,
                                 type='direct',
                                 durable=True)
    eventTaskExchange(mqConn).declare()
    eventTaskQueue = Queue(options.taskexchange, exchange=eventTaskExchange)
    eventTaskQueue(mqConn).declare()
    mqproducer = mqConn.Producer(serializer='json')

    application = default_app()
Example 34
content_encoding = 'utf-8'
timezone = "europe/rome"
broker_url = "redis://redis:6379"
result_backend = "redis://redis:6379"
accept_content = ['application/json']
result_serializer = 'json'
task_serializer = 'json'
broker_pool_limit = 120
# celery queues setup
task_default_queue = 'extract'
task_default_routing_key = 'workflow.extract'

worker_enable_remote_control = False
worker_send_task_events = False

task_queues = (Queue('extract', routing_key='workflow.extract'),
               Queue('transform', routing_key='workflow.transform'),
               Queue('load', routing_key='workflow.load'))

# celery queue routing
task_routes = {
    'workflow.tasks.download_file': {
        'queue': 'extract',
        'routing_key': 'workflow.extract',
    },
    'workflow.tasks.image_to_gray': {
        'queue': 'transform',
        'routing_key': 'workflow.transform',
    },
    'workflow.tasks.image_to_md5': {
        'queue': 'transform',
Example 35
 def test__repr__(self):
     channel = self.connection.channel()
     b1 = Queue('qname1', self.exchange, 'rkey')
     self.assertTrue(repr(Consumer(channel, [b1])))
Example 36
__author__ = 'marc'
"""
from kombu example
http://kombu.readthedocs.org/en/latest/userguide/examples.html
"""
from kombu import Exchange, Queue

task_exchange = Exchange('tasks', type='direct')
task_queues = [
    Queue('hipri', task_exchange, routing_key='hipri'),
    Queue('midpri', task_exchange, routing_key='midpri'),
    Queue('lopri', task_exchange, routing_key='lopri')
]
Example 37
    'task_default_exchange':
    CELERY_APP_NAME,
    'task_default_exchange_type':
    'topic',
    'task_default_queue':
    CELERY_APP_NAME,
    'worker_prefetch_multiplier':
    1,
    'worker_concurrency':
    e('VIDEO_TRANSCODING_CELERY_CONCURRENCY'),
    'task_acks_late':
    True,
    'task_reject_on_worker_lost':
    True,
    'task_queues': [
        Queue(CELERY_APP_NAME, routing_key=CELERY_APP_NAME),
    ]
}

# Directory for large output files
VIDEO_TEMP_DIR = '/tmp'

# Download source before processing
VIDEO_DOWNLOAD_SOURCE = bool(int(e('VIDEO_DOWNLOAD_SOURCE', 0)))

# A list of WebDAV endpoints for storing video results
VIDEO_ORIGINS = e('VIDEO_ORIGINS',
                  'http://storage.localhost:8080/videos/').split(',')

# Video streamer public urls (comma-separated)
VIDEO_EDGES = e('VIDEO_EDGES', 'http://storage.localhost:8080/').split(',')
Example 38
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_TASK_SOFT_TIME_LIMIT = 300
CELERYD_LOG_FORMAT = '%(message)s level=%(levelname)s process=%(processName)s'
CELERYD_TASK_LOG_FORMAT = ' '.join(
    [CELERYD_LOG_FORMAT, 'task=%(task_name)s task_id=%(task_id)s'])

CELERYBEAT_SCHEDULE_FILENAME = env('CELERYBEAT_SCHEDULE_FILENAME',
                                   './celerybeatschedule.db')
CELERY_DEFAULT_QUEUE = 'default'
CELERY_DEFAULT_EXCHANGE = 'default'
CELERY_DEFAULT_ROUTING_KEY = 'default'

CELERY_QUEUES = (
    Queue('default',
          Exchange(CELERY_DEFAULT_EXCHANGE),
          routing_key=CELERY_DEFAULT_ROUTING_KEY),
    Queue('expiry', Exchange('expiry', type='topic'), routing_key='expiry.#'),
    Queue('legal', Exchange('legal', type='topic'), routing_key='legal.#'),
    Queue('publish',
          Exchange('publish', type='topic'),
          routing_key='publish.#'),
)

CELERY_ROUTES = {
    'apps.archive.content_expiry': {
        'queue': 'expiry',
        'routing_key': 'expiry.content'
    },
    'superdesk.io.gc_ingest': {
        'queue': 'expiry',
Example 39
# I use these to debug kombu crashes; we get a more informative message.
CELERY_TASK_SERIALIZER = 'json'
#CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json', 'pickle']

# Set Tasks Queues
# CELERY_TASK_DEFAULT_QUEUE = "default"
# CELERY_TASK_DEFAULT_EXCHANGE = "default"
# CELERY_TASK_DEFAULT_EXCHANGE_TYPE = "direct"
# CELERY_TASK_DEFAULT_ROUTING_KEY = "default"
CELERY_TASK_CREATE_MISSING_QUEUES = True
GEONODE_EXCHANGE = Exchange("default", type="direct", durable=True)
GEOSERVER_EXCHANGE = Exchange("geonode", type="topic", durable=False)
CELERY_TASK_QUEUES = (
    Queue('default', GEONODE_EXCHANGE, routing_key='default'),
    Queue('geonode', GEONODE_EXCHANGE, routing_key='geonode'),
    Queue('update', GEONODE_EXCHANGE, routing_key='update'),
    Queue('cleanup', GEONODE_EXCHANGE, routing_key='cleanup'),
    Queue('email', GEONODE_EXCHANGE, routing_key='email'),
)

if USE_GEOSERVER and ASYNC_SIGNALS:
    from geonode.messaging.queues import QUEUES
    CELERY_TASK_QUEUES += QUEUES

# CELERYBEAT_SCHEDULE = {
#     ...
#     'update_feeds': {
#         'task': 'arena.social.tasks.Update',
#         'schedule': crontab(minute='*/6'),
Example 40
 def test_flow(self):
     channel = self.connection.channel()
     queue = Queue('qname', self.exchange, 'rkey')
     consumer = Consumer(channel, queue, auto_declare=True)
     consumer.flow(False)
     self.assertIn('flow', channel)
Example 41
 def test_qos(self):
     channel = self.connection.channel()
     queue = Queue('qname', self.exchange, 'rkey')
     consumer = Consumer(channel, queue, auto_declare=True)
     consumer.qos(30, 10, False)
     self.assertIn('basic_qos', channel)
Example 42
 def test_maybe_declare(self, maybe_declare):
     p = self.connection.Producer()
     q = Queue('foo')
     p.maybe_declare(q)
     maybe_declare.assert_called_with(q, p.channel, False)
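Producer.maybe_declare delegates to kombu.common.maybe_declare, which caches declarations per channel so repeated calls are no-ops; a short sketch (placeholder queue name):

from kombu import Connection, Queue
from kombu.common import maybe_declare

with Connection('memory://') as conn:
    channel = conn.channel()
    q = Queue('cached', channel=channel)
    maybe_declare(q, channel)
    maybe_declare(q, channel)  # cached: no second queue_declare is issued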
Example 43
                'propagate': True,
            },
            'hawkrest': {
                'handlers': ['console'],
                'level': 'WARNING',
            },
            'treeherder': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': False,
            }
        }
    }

CELERY_QUEUES = [
    Queue('default', Exchange('default'), routing_key='default'),
    # queue for failed jobs/logs
    Queue('log_parser', Exchange('default'), routing_key='log_parser.normal'),
    Queue('log_parser_fail',
          Exchange('default'),
          routing_key='log_parser.failures'),
    Queue('log_store_failure_lines',
          Exchange('default'),
          routing_key='store_failure_lines.normal'),
    Queue('log_store_failure_lines_fail',
          Exchange('default'),
          routing_key='store_failure_lines.failures'),
    Queue('log_crossreference_error_lines',
          Exchange('default'),
          routing_key='crossreference_error_lines.normal'),
    Queue('log_crossreference_error_lines_fail',
Example 44
# CELERY_BROKER_URL = 'redis://*****:*****@rabbitmq:5672'

# Set result backend
CELERY_RESULT_BACKEND = 'redis://redis:6379'
CELERY_REDIS_MAX_CONNECTIONS = 1

# Set celery serializer
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['application/json']

# Set celery Queues
CELERY_TASK_QUEUES = (
    Queue('default', Exchange('default'), routing_key='default'),
    Queue('high', Exchange('high'), routing_key='high'),
    Queue('low', Exchange('low'), routing_key='low'),
)
CELERY_TASK_DEFAULT_QUEUE = 'default'
CELERY_TASK_DEFAULT_EXCHANGE = 'default'
CELERY_TASK_DEFAULT_ROUTING_KEY = 'default'
CELERY_TASK_ROUTES = {
    # -- HIGH PRIORITY QUEUE -- #
    'web.tasks.task_google_trends_parser': {
        'queue': 'high'
    },
    'web.tasks.task_cnn_news_parser': {
        'queue': 'high'
    },
    # -- LOW PRIORITY QUEUE -- #
Example 45
default_handler.addFilter(ReqIDLogFilter())
logging.basicConfig(
    format=
    "[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s][%(req_id)s]%(message)s",
    datefmt="%y-%m-%d %H:%M:%S",
    level=logging.INFO,
    handlers=[default_handler],
)
log: logging.Logger = logging.getLogger()

# Declare the exchange
DEMO_EXCHANGE: Exchange = Exchange("DEMO_E", type="direct", delivery_mode=2)

# Declare the list of queues
DEMO_QUEUE_LIST: List[Queue] = [
    Queue("DEMO_Q", DEMO_EXCHANGE, routing_key="demo_queue")
]


def demo_parse_body(body_dict: Dict[str, Any]) -> None:
    try:
        1 / 0
    except ZeroDivisionError as e:
        raise SkipException from e
    except Exception:
        pass


class Worker(ConsumerMixin):
    """继承kombu的消费者类"""
    def __init__(self,
Example 46
STATICSITEMAPS_ROOT_SITEMAP = 'firecares.urls.sitemaps'

CELERY_DEFAULT_QUEUE = "default"
CELERY_DEFAULT_EXCHANGE = "default"
CELERY_DEFAULT_EXCHANGE_TYPE = "direct"
CELERY_DEFAULT_ROUTING_KEY = "default"
CELERY_CREATE_MISSING_QUEUES = True

CELERY_IMPORTS = ('firecares.tasks.cache', 'firecares.tasks.update',
                  'firecares.tasks.email', 'firecares.tasks.cleanup',
                  'firecares.tasks.quality_control', 'firecares.tasks.slack',
                  'firecares.tasks.weather_task',
                  'firecares.tasks.predictions')

CELERY_QUEUES = [
    Queue('default', routing_key='default'),
    Queue('cache', routing_key='cache'),
    Queue('update', routing_key='update'),
    Queue('email', routing_key='email'),
    Queue('cleanup', routing_key='cleanup'),
    Queue('quality-control', routing_key='quality-control'),
    Queue('slack', routing_key='slack'),
    Queue('servicearea', routing_key='servicearea'),
    Queue('weather-task', routing_key='weather-task'),
    Queue('singlenode', routing_key='singlenode'),
]

ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_OPEN = False
REGISTRATION_FORM = 'firecares.firecares_core.ext.registration.forms.LimitedRegistrationForm'
INVITATIONS_SIGNUP_REDIRECT = 'registration_register'
Example 47
 def test_set_no_ack(self):
     channel = self.connection.channel()
     queue = Queue('qname', self.exchange, 'rkey')
     consumer = Consumer(channel, queue, auto_declare=True, no_ack=True)
     self.assertTrue(consumer.no_ack)
Example 48
    'sentry.tasks.clear_expired_snoozes',
    'sentry.tasks.check_auth',
    'sentry.tasks.collect_project_platforms',
    'sentry.tasks.deletion',
    'sentry.tasks.digests',
    'sentry.tasks.dsymcache',
    'sentry.tasks.email',
    'sentry.tasks.merge',
    'sentry.tasks.store',
    'sentry.tasks.options',
    'sentry.tasks.ping',
    'sentry.tasks.post_process',
    'sentry.tasks.process_buffer',
)
CELERY_QUEUES = [
    Queue('default', routing_key='default'),
    Queue('alerts', routing_key='alerts'),
    Queue('auth', routing_key='auth'),
    Queue('cleanup', routing_key='cleanup'),
    Queue('merge', routing_key='merge'),
    Queue('search', routing_key='search'),
    Queue('events', routing_key='events'),
    Queue('update', routing_key='update'),
    Queue('email', routing_key='email'),
    Queue('options', routing_key='options'),
    Queue('digests.delivery', routing_key='digests.delivery'),
    Queue('digests.scheduling', routing_key='digests.scheduling'),
    Queue('stats', routing_key='stats'),
]

for queue in CELERY_QUEUES:
Example 49
def create_app(_read_config=True, **config):
    app = flask.Flask(__name__,
                      static_folder=None,
                      template_folder=os.path.join(PROJECT_ROOT, 'templates'))

    # app.wsgi_app = TracerMiddleware(app.wsgi_app, app)

    # This key is insecure and you should override it on the server
    app.config['SECRET_KEY'] = 't\xad\xe7\xff%\xd2.\xfe\x03\x02=\xec\xaf\\2+\xb8=\xf7\x8a\x9aLD\xb1'

    app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
    app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///changes'
    app.config['SQLALCHEMY_POOL_SIZE'] = 60
    app.config['SQLALCHEMY_MAX_OVERFLOW'] = 20
    # required for flask-debugtoolbar and the db perf metrics we record
    app.config['SQLALCHEMY_RECORD_QUERIES'] = True

    app.config['REDIS_URL'] = 'redis://localhost/0'
    app.config['GROUPER_API_URL'] = 'https://localhost/'
    app.config['GROUPER_PERMISSIONS_ADMIN'] = 'changes.prod.admin'
    app.config['GROUPER_PERMISSIONS_PROJECT_ADMIN'] = 'changes.prod.project.admin'
    app.config['GROUPER_EXCLUDED_ROLES'] = ['np-owner']
    app.config['DEBUG'] = True
    app.config['HTTP_PORT'] = 5000
    app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0

    app.config['BAZEL_ARTIFACT_SUFFIX'] = '.bazel'

    app.config['BAZEL_TEST_OUTPUT_RELATIVE_PATH'] = 'bazel-testlogs/'

    app.config['API_TRACEBACKS'] = True

    # Expiration delay between when a snapshot image becomes superseded and when
    # it becomes truly expired (and thus no longer included in the sync information
    # for any cluster that runs that particular image's plan)
    app.config['CACHED_SNAPSHOT_EXPIRATION_DELTA'] = timedelta(hours=1)

    # default snapshot ID to use when no project-specific active image available
    app.config['DEFAULT_SNAPSHOT'] = None
    app.config['SNAPSHOT_S3_BUCKET'] = None
    app.config['LXC_PRE_LAUNCH'] = None
    app.config['LXC_POST_LAUNCH'] = None

    # APT mirror URLs to use for new LXC containers created by changes-client.
    # NB: these aren't currently supported in the public changes-client repo.
    app.config['LXC_APT_MIRROR'] = None
    app.config['LXC_APT_SECURITY_MIRROR'] = None

    # name of the template to use for LXC (usually the name of a particular
    # Linux distro). Defaults to ubuntu.
    app.config['LXC_TEMPLATE'] = 'ubuntu'

    # Location of artifacts server that is passed to changes-client
    # (include http:// or https://)
    #
    # The default artifact server url uses a random uri which is expected to fail
    # without being overridden. This value is referenced in test code.
    app.config['ARTIFACTS_SERVER'] = 'http://localhost:1234'

    # The default max artifact size handlers should be capable of processing.
    app.config['MAX_ARTIFACT_BYTES'] = 200 * 1024 * 1024
    # The max artifact size the analytics json handler should be capable of processing.
    app.config['MAX_ARTIFACT_BYTES_ANALYTICS_JSON'] = 70 * 1024 * 1024

    # the binary to use for running changes-client. Default is just
    # "changes-client", but can also be specified as e.g. a full path.
    app.config['CHANGES_CLIENT_BINARY'] = 'changes-client'

    app.config['CHANGES_CLIENT_DEFAULT_BUILD_TYPE'] = 'legacy'

    # Base URI to use for git repos that we want to clone (currently only used
    # for the "other_repos" buildstep config). The repo name is appended
    # directly to this, so it should already contain necessary colons and
    # slashes, etc. For example, if GIT_DEFAULT_BASE_URI is `[email protected]:`
    # and a repo is specified as `changes.git`, the clone url will be
    # `[email protected]:changes.git`
    app.config['GIT_DEFAULT_BASE_URI'] = None
    # Same as GIT_DEFAULT_BASE_URI but used for mercurial repos.
    app.config['MERCURIAL_DEFAULT_BASE_URI'] = None

    # This is a hash from each build type (string identifiers used in
    # build step configuration) to a "build spec", a definition of
    # how to use changes-client to build. To use changes-client, the key
    # 'uses_client' must be set to True.
    #
    # Required build spec keys for client:
    #   adapter -> basic or lxc
    #   jenkins-command -> command to run from jenkins directly ($JENKINS_COMMAND)
    #   commands -> array of hash from script -> string that represents a script
    #
    # Optional keys (lxc-only)
    #   pre-launch -> lxc pre-launch script
    #   post-launch -> lxc post-launch script
    #   release -> lxc release
    app.config['CHANGES_CLIENT_BUILD_TYPES'] = {
        'legacy': {'uses_client': False},
    }

    app.config['CELERY_ACCEPT_CONTENT'] = ['changes_json']
    app.config['CELERY_ACKS_LATE'] = True
    app.config['CELERY_BROKER_URL'] = 'redis://localhost/0'
    app.config['CELERY_DEFAULT_QUEUE'] = "default"
    app.config['CELERY_DEFAULT_EXCHANGE'] = "default"
    app.config['CELERY_DEFAULT_EXCHANGE_TYPE'] = "direct"
    app.config['CELERY_DEFAULT_ROUTING_KEY'] = "default"
    app.config['CELERY_DISABLE_RATE_LIMITS'] = True
    app.config['CELERY_IGNORE_RESULT'] = True
    app.config['CELERY_RESULT_BACKEND'] = None
    app.config['CELERY_RESULT_SERIALIZER'] = 'changes_json'
    app.config['CELERY_SEND_EVENTS'] = False
    app.config['CELERY_TASK_RESULT_EXPIRES'] = 1
    app.config['CELERY_TASK_SERIALIZER'] = 'changes_json'
    app.config['CELERYD_PREFETCH_MULTIPLIER'] = 1
    app.config['CELERYD_MAX_TASKS_PER_CHILD'] = 10000

    # By default, Celery logs writes to stdout/stderr as WARNING, which
    # is a bit harsh considering that some of the code is code we don't
    # own calling 'print'. This flips the default back to INFO, which seems
    # more appropriate. Can be overridden by the Changes config.
    app.config['CELERY_REDIRECT_STDOUTS_LEVEL'] = 'INFO'

    app.config['CELERY_QUEUES'] = (
        Queue('job.sync', routing_key='job.sync'),
        Queue('job.create', routing_key='job.create'),
        Queue('celery', routing_key='celery'),
        Queue('events', routing_key='events'),
        Queue('default', routing_key='default'),
        Queue('delete', routing_key='delete'),
        Queue('repo.sync', Exchange('fanout', 'fanout'), routing_key='repo.sync'),
        Queue('grouper.sync', routing_key='grouper.sync'),
        Broadcast('repo.update'),
    )
    app.config['CELERY_ROUTES'] = {
        'create_job': {
            'queue': 'job.create',
            'routing_key': 'job.create',
        },
        'sync_job': {
            'queue': 'job.sync',
            'routing_key': 'job.sync',
        },
        'sync_job_step': {
            'queue': 'job.sync',
            'routing_key': 'job.sync',
        },
        'sync_build': {
            'queue': 'job.sync',
            'routing_key': 'job.sync',
        },
        'check_repos': {
            'queue': 'repo.sync',
            'routing_key': 'repo.sync',
        },
        'sync_grouper': {
            'queue': 'grouper.sync',
            'routing_key': 'grouper.sync',
        },
        'sync_repo': {
            'queue': 'repo.sync',
            'routing_key': 'repo.sync',
        },
        'run_event_listener': {
            'queue': 'events',
            'routing_key': 'events',
        },
        'fire_signal': {
            'queue': 'events',
            'routing_key': 'events',
        },
        'update_local_repos': {
            'queue': 'repo.update',
        },
        'delete_old_data': {
            'queue': 'delete',
            'routing_key': 'delete',
        },
        'delete_old_data_10m': {
            'queue': 'delete',
            'routing_key': 'delete',
        },
        'delete_old_data_5h_delayed': {
            'queue': 'delete',
            'routing_key': 'delete',
        },
    }

    app.config['EVENT_LISTENERS'] = (
        ('changes.listeners.mail.build_finished_handler', 'build.finished'),
        ('changes.listeners.green_build.revision_result_updated_handler', 'revision_result.updated'),
        ('changes.listeners.build_revision.revision_created_handler', 'revision.created'),
        ('changes.listeners.build_finished_notifier.build_finished_handler', 'build.finished'),
        ('changes.listeners.phabricator_listener.build_finished_handler', 'build.finished'),
        ('changes.listeners.analytics_notifier.build_finished_handler', 'build.finished'),
        ('changes.listeners.analytics_notifier.job_finished_handler', 'job.finished'),
        ('changes.listeners.revision_result.revision_result_build_finished_handler', 'build.finished'),
        ('changes.listeners.stats_notifier.build_finished_handler', 'build.finished'),
        ('changes.listeners.snapshot_build.build_finished_handler', 'build.finished'),
    )

    # restrict outbound notifications to the given domains
    app.config['MAIL_DOMAIN_WHITELIST'] = ()

    app.config['DEBUG_TB_ENABLED'] = True

    app.config['DEBUG_TB_PANELS'] = ('flask_debugtoolbar.panels.versions.VersionDebugPanel',
                                     'flask_debugtoolbar.panels.timer.TimerDebugPanel',
                                     'flask_debugtoolbar.panels.headers.HeaderDebugPanel',
                                     'flask_debugtoolbar.panels.request_vars.RequestVarsDebugPanel',
                                     # Disable the config vars panel by default; it can contain sensitive information.
                                     # 'flask_debugtoolbar.panels.config_vars.ConfigVarsDebugPanel',
                                     'flask_debugtoolbar.panels.template.TemplateDebugPanel',
                                     'flask_debugtoolbar.panels.sqlalchemy.SQLAlchemyDebugPanel',
                                     'flask_debugtoolbar.panels.logger.LoggingPanel',
                                     'flask_debugtoolbar.panels.profiler.ProfilerDebugPanel')

    # celerybeat must be running for our cleanup tasks to execute
    # e.g. celery worker -B
    app.config['CELERYBEAT_SCHEDULE'] = {
        'cleanup-tasks': {
            'task': 'cleanup_tasks',
            'schedule': timedelta(minutes=1),
        },
        'check-repos': {
            'task': 'check_repos',
            'schedule': timedelta(minutes=2),
        },
        # XXX(dcramer): Grouper is dropbox specific. Probably should delete it.
        # 'sync-grouper': {
        #     'task': 'sync_grouper',
        #     'schedule': timedelta(minutes=1),
        # },
        'aggregate-flaky-tests': {
            'task': 'aggregate_flaky_tests',
            # Hour 7 GMT is midnight PST, hopefully a time of low load
            'schedule': crontab(hour=7, minute=0),
        },
        'delete-old-data-10m': {
            'task': 'delete_old_data_10m',
            'schedule': timedelta(minutes=10),
        },
        'delete-old-data-5h-delayed': {
            'task': 'delete_old_data_5h_delayed',
            # This task runs every 4 hours but looks at 5 hours worth of tests
            # so consecutive runs will look at sets of tests that will overlap.
            # This is to make it unlikely to miss tests in between.
            #
            # While this is looking at 5 hours worth of tests, this should not be long running
            # as the shorter delete tasks will catch most cases and this checks
            # a time frame that should've been cleaned by them already.
            'schedule': crontab(hour='*/4'),
        },
        'update-local-repos': {
            'task': 'update_local_repos',
            'schedule': timedelta(minutes=1),
        }
    }
    app.config['CELERY_TIMEZONE'] = 'UTC'

    app.config['SENTRY_DSN'] = None
    app.config['SENTRY_INCLUDE_PATHS'] = [
        'changes',
    ]

    app.config['KOALITY_URL'] = None
    app.config['KOALITY_API_KEY'] = None

    app.config['GOOGLE_CLIENT_ID'] = None
    app.config['GOOGLE_CLIENT_SECRET'] = None
    app.config['GOOGLE_DOMAIN'] = None

    # must be a URL-safe base64-encoded 32-byte key
    app.config['COOKIE_ENCRYPTION_KEY'] = 'theDefaultKeyIs32BytesLongAndTotallyURLSafe='

    app.config['REPO_ROOT'] = None

    app.config['DEFAULT_FILE_STORAGE'] = 'changes.storage.s3.S3FileStorage'
    app.config['S3_ACCESS_KEY'] = None
    app.config['S3_SECRET_KEY'] = None
    app.config['S3_BUCKET'] = None

    app.config['PHABRICATOR_LINK_HOST'] = None
    app.config['PHABRICATOR_API_HOST'] = None
    app.config['PHABRICATOR_USERNAME'] = None
    app.config['PHABRICATOR_CERT'] = None

    # Configuration to access Zookeeper - currently used to discover mesos master leader instance
    # E.g., if mesos master is configured to talk to zk://zk1:2181,zk2:2181/mesos,
    # set ZOOKEEPER_HOSTS = 'zk1:2181,zk2:2181'
    #     ZOOKEEPER_MESOS_MASTER_PATH = '/mesos'
    #
    # This is only used to control mesos slave offline/online status from within Changes

    # Comma-separated list of host:port (or ip:port) to Zookeeper instances.
    app.config['ZOOKEEPER_HOSTS'] = 'zk:2181'
    # Namespace within zookeeper where mesos master election is performed.
    app.config['ZOOKEEPER_MESOS_MASTER_PATH'] = '/mesos'

    # List of valid tables to be written to when reporting project analytics.
    # Analytics artifacts targeting tables not listed here will be considered invalid.
    app.config['ANALYTICS_PROJECT_TABLES'] = []
    # URL any project analytics JSON entries will be posted to.
    # Entries will be posted as JSON, with the intended table specified as 'source' in the URL params.
    app.config['ANALYTICS_PROJECT_POST_URL'] = None

    app.config['SUPPORT_CONTACT'] = 'support'

    app.config['MAIL_DEFAULT_SENDER'] = 'changes@localhost'
    app.config['BASE_URI'] = 'http://localhost:5000'

    # if set to a string, most (all?) of the frontend js will make API calls
    # to the host this string is set to (e.g. http://changes.bigcompany.com)
    # THIS IS JUST FOR EASIER TESTING IN DEVELOPMENT. Although it won't even
    # work in prod: you'll have to start chrome with --disable-web-security to
    # make this work. Override this in your changes.conf.py file
    app.config['WEBAPP_USE_ANOTHER_HOST'] = None

    # Custom changes content unique to your deployment. This is intended to
    # customize the look and feel, provide contextual help and add custom links
    # to other internal tools. You should put your files in webapp/custom and
    # link them here.
    #
    # e.g. /acmecorp-changes/changes.js
    #
    # Some of the custom_content hooks can show images. Assume that the webserver
    # is willing to serve any file within the directory of the js file
    app.config['WEBAPP_CUSTOM_JS'] = None
    # This can be a .less file. We import it after the variables.less,
    # so you can override them in your file
    # Note: if you change this and nothing seems to happen, try deleting
    # webapp/.webassets-cache and bundled.css. This probably won't happen, though
    # If not specified, we will search for CUSTOM_CSS_FILE in the custom dir.
    app.config['WEBAPP_CUSTOM_CSS'] = None

    # In minutes, the timeout applied to jobs without a timeout specified at build time.
    # A timeout should nearly always be specified; this is just a safeguard so that
    # unspecified timeout doesn't mean "is allowed to run indefinitely".
    app.config['DEFAULT_JOB_TIMEOUT_MIN'] = 60

    # Number of milliseconds a transaction can run before triggering a warning.
    app.config['TRANSACTION_MS_WARNING_THRESHOLD'] = 2500

    # Hard maximum number of jobsteps to retry for a given job
    app.config['JOBSTEP_RETRY_MAX'] = 6
    # Maximum number of machines that we'll retry jobsteps for. This allows us
    # to retry more jobsteps if it's always the same machine failing.
    app.config['JOBSTEP_MACHINE_RETRY_MAX'] = 2

    # the PHID of the user creating quarantine tasks. We can use this to show
    # the list of open quarantine tasks inline
    app.config['QUARANTINE_PHID'] = None

    # The max length of a test's output to be stored. If it is longer, it will
    # be truncated.
    app.config['TEST_MESSAGE_MAX_LEN'] = 64 * 1024

    # List of packages needed to install bazel and any environment.
    app.config['BAZEL_APT_PKGS'] = ['bazel']

    # rsync source for encap
    # Example: rsync://example.com/encap/
    app.config['ENCAP_RSYNC_URL'] = None

    # In some configurations, build slaves might not have access to the Changes API via the
    # normal address; if PATCH_BASE_URI is specified, it'll be used as the base URI for
    # PATCH_URI variables provided to build slaves.
    app.config['PATCH_BASE_URI'] = None

    # name of default cluster to use for autogenerated jobs
    app.config['DEFAULT_CLUSTER'] = None

    # Maximum number of cpus allowed for a bazel executor. Since we expose `bazel.cpus` to
    # the user, this number needs to be bounded to avoid runaway resource allocation (by always
    # allocating large chunks of resources, like 12-16 cores), and to avoid invalid configuration
    # (like, requesting more cpus than available on a single slave, typically 32)
    app.config['MAX_CPUS_PER_EXECUTOR'] = 16

    # Minimum memory allowed per executor (in MB)
    app.config['MIN_MEM_MB_PER_EXECUTOR'] = 1024

    # Maximum memory allowed per executor (in MB)
    app.config['MAX_MEM_MB_PER_EXECUTOR'] = 16384

    # Maximum number of bazel executors allowed.
    app.config['MAX_EXECUTORS'] = 10

    # Absolute path to Bazel root (passed via --output_root to Bazel)
    # Storing bazel cache in tmpfs could be a bad idea because:
    #  - tmpfs means any files stored here will be stored purely in RAM and will eat into container limits
    #  - these containers are not persisted from the snapshot
    #
    # Bazel will create parent directories (if the user has appropriate permissions), if missing.
    app.config['BAZEL_ROOT_PATH'] = '/tmp/bazel_changes'

    # List of mandatory flags to be passed to `bazel test`
    app.config['BAZEL_MANDATORY_TEST_FLAGS'] = [
        '--spawn_strategy=sandboxed',
        '--genrule_strategy=sandboxed',
        '--keep_going',
    ]

    app.config['BAZEL_ADDITIONAL_TEST_FLAGS_WHITELIST_REGEX'] = [
        r'^--test_env=[A-Za-z0-9=]+',
        r'^--test_arg=[A-Za-z0-9=]+',
        r'^--define=[A-Za-z0-9=]+',
    ]

    app.config['SELECTIVE_TESTING_PROPAGATION_LIMIT'] = 30

    app.config['SELECTIVE_TESTING_ENABLED'] = False

    # Debug config entries passed to every autobazel jobstep
    app.config['BAZEL_DEBUG_CONFIG'] = {}

    # Extra test setup commands to be executed before collect-targets or `bazel test` invocations.
    app.config['BAZEL_EXTRA_SETUP_CMD'] = ['exit 0']

    # Jobsteps go from 'pending_allocation' to 'allocated' once an external scheduler claims them, and
    # once they begin running they're updated to 'in_progress'. If the scheduler somehow fails or drops
    # the task, this value is used to time out the 'allocated' status and revert back to 'pending_allocation'.
    # For current and expected schedulers, we don't allocate unless we think we can execute immediately, so
    # a 3 minute timeout is conservative and should be safe.
    app.config['JOBSTEP_ALLOCATION_TIMEOUT_SECONDS'] = 3 * 60

    app.config.update(config)

    if _read_config:
        if os.environ.get('CHANGES_CONF'):
            # CHANGES_CONF=/etc/changes.conf.py
            app.config.from_envvar('CHANGES_CONF')
        else:
            # Look for ~/.changes/changes.conf.py
            path = os.path.normpath(os.path.expanduser('~/.changes/changes.conf.py'))
            app.config.from_pyfile(path, silent=True)

    # default the DSN for changes-client to the server's DSN
    app.config.setdefault('CLIENT_SENTRY_DSN', app.config['SENTRY_DSN'])

    # Backwards compatibility with old configs containing BASE_URI
    if 'WEB_BASE_URI' not in app.config and 'BASE_URI' in app.config:
        app.config['WEB_BASE_URI'] = app.config['BASE_URI']
    if 'INTERNAL_BASE_URI' not in app.config and 'BASE_URI' in app.config:
        app.config['INTERNAL_BASE_URI'] = app.config['BASE_URI']

    parsed_url = urlparse(app.config['WEB_BASE_URI'])
    app.config.setdefault('PREFERRED_URL_SCHEME', 'https')

    if app.debug:
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
    else:
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 30

    app.url_map.converters['uuid'] = UUIDConverter

    # now that config is set up, let's ensure the CUSTOM_JS / CUSTOM_CSS
    # variables are safe (within the changes directory) and convert them to
    # absolute paths
    if app.config['WEBAPP_CUSTOM_CSS']:
        app.config['WEBAPP_CUSTOM_CSS'] = os.path.join(
            PROJECT_ROOT, 'webapp/custom/', app.config['WEBAPP_CUSTOM_CSS'])

        enforce_is_subdir(
            app.config['WEBAPP_CUSTOM_CSS'],
            os.path.join(PROJECT_ROOT, 'webapp/custom'))
    else:
        app.config['WEBAPP_CUSTOM_CSS'] = _find_custom_css()

    if app.config['WEBAPP_CUSTOM_JS']:
        app.config['WEBAPP_CUSTOM_JS'] = os.path.join(
            PROJECT_ROOT, 'webapp/custom/', app.config['WEBAPP_CUSTOM_JS'])

        enforce_is_subdir(
            app.config['WEBAPP_CUSTOM_JS'],
            os.path.join(PROJECT_ROOT, 'webapp/custom'))

    # init sentry first
    sentry.init_app(app)

    @app.before_request
    def capture_user(*args, **kwargs):
        from changes.api.auth import get_current_user
        user = get_current_user()
        if user is not None:
            sentry.client.user_context({
                'id': user.id,
                'email': user.email,
            })

    api.init_app(app)
    db.init_app(app)
    mail.init_app(app)
    queue.init_app(app)
    redis.init_app(app)
    statsreporter.init_app(app)

    configure_debug_toolbar(app)

    from raven.contrib.celery import register_signal, register_logger_signal
    register_signal(sentry.client)
    register_logger_signal(sentry.client, loglevel=logging.WARNING)

    # configure debug routes first
    if app.debug:
        configure_debug_routes(app)

    configure_templates(app)

    # TODO: these can be moved to wsgi app entrypoints
    configure_api_routes(app)
    configure_web_routes(app)

    configure_jobs(app)
    configure_transaction_logging(app)

    rules_file = app.config.get('CATEGORIZE_RULES_FILE')
    if rules_file:
        # Fail at startup if we have a bad rules file.
        categorize.load_rules(rules_file)

    import jinja2
    webapp_template_folder = os.path.join(PROJECT_ROOT, 'webapp/html')
    template_folder = os.path.join(PROJECT_ROOT, 'templates')
    template_loader = jinja2.ChoiceLoader([
                app.jinja_loader,
                jinja2.FileSystemLoader([webapp_template_folder, template_folder])
                ])
    app.jinja_loader = template_loader

    return app
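One detail worth noting from the CELERY_QUEUES tuple above: kombu.common.Broadcast declares a fanout-bound queue whose actual queue name is auto-generated per process, so every worker receives its own copy of each message. A minimal sketch:

from kombu.common import Broadcast

bcast = Broadcast('repo.update')  # mirrors the config entry above
assert bcast.exchange.type == 'fanout'
print(bcast.name)  # unique per process, e.g. 'bcast.<uuid>'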
Example 50
 def test_interface_after_reply_message_received(self):
     self.assertIsNone(StdChannel().after_reply_message_received(
         Queue('foo')))
Example 51
    # Load the Celery config
    CONFIG = load_named_config(CONFIG_NAME, DEFAULT_CONFIG)

CONFIG.setdefault("task_modules", [])
# load tasks modules declared as plugin entry points
for entrypoint in pkg_resources.iter_entry_points("swh.workers"):
    worker_register_fn = entrypoint.load()
    # The registry function is expected to return a dict whose 'task_modules'
    # key is a string (or a list of strings) naming the python modules in
    # which celery tasks are defined.
    task_modules = worker_register_fn().get("task_modules", [])
    CONFIG["task_modules"].extend(task_modules)

# Celery Queues
CELERY_QUEUES = [Queue("celery", Exchange("celery"), routing_key="celery")]

CELERY_DEFAULT_CONFIG = dict(
    # Timezone configuration: all in UTC
    enable_utc=True,
    timezone="UTC",
    # Imported modules
    imports=CONFIG.get("task_modules", []),
    # Time (in seconds, or a timedelta object) for when after stored task
    # tombstones will be deleted. None means to never expire results.
    result_expires=None,
    # A string identifying the default serialization method to use. Can
    # be json (default), pickle, yaml, msgpack, or any custom
    # serialization methods that have been registered with
    # kombu.serialization.registry.
    task_serializer="msgpack",
    # Result serialization format
Example 52
import time

from kombu import Connection, Consumer, Exchange, Queue, exceptions

BROKER_CLOUD = "localhost"

producer_connection = Connection(BROKER_CLOUD)
consumer_connection = Connection(BROKER_CLOUD)
MODE = "PUSH" # PUSH or PULL

exchange = Exchange("IOT", type="direct")

def handle_notification(body, message):
    # receive list items
    print('Receive message from Alert')
    print (body)
    time.sleep(3)

test = Queue(name='alert_out', exchange=exchange,
             routing_key='alert_out')

while 1:
    try:
        consumer_connection.ensure_connection(max_retries=1)
        with Consumer(consumer_connection, queues=test,
                      callbacks=[handle_notification], no_ack=True):
            while True:
                consumer_connection.drain_events()
    except (ConnectionRefusedError, exceptions.OperationalError):
        print('Connection lost')
    except consumer_connection.connection_errors:
        print('Connection error')
Example 53
        if task == 'Sleep':
            return {
                'exchange': 'scapl',
                'exchange_type': 'topic',
                'routing_key': 'as.task'
            }
        return None


default_exchange = Exchange('default', type='direct')
scapl_exchange = Exchange('scapl', type='topic')

CELERY_TIMEZONE = 'Europe/Paris'
CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
    Queue('default', default_exchange, routing_key='default'),
    Queue('mq_as', scapl_exchange, routing_key='as.#'),
)
CELERY_IGNORE_RESULT = False
BROKER_URL = 'amqp://*****:*****@localhost:5672/vScapl'
#BROKER_URL = 'redis://localhost:6379/10'

CELERY_DEFAULT_EXCHANGE = 'scapl'
CELERY_DEFAULT_EXCHANGE_TYPE = 'topic'
CELERY_ROUTES = (ScaplRouter(), )
CELERY_ACCEPT_CONTENT = ['application/json', 'application/x-python-serialize']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_IMPORTS = ('tasks', )
CELERY_RESULT_BACKEND = 'amqp'
#CELERY_RESULT_BACKEND = 'redis://localhost:6379/11'
Example 54
 def test_recover(self):
     channel = self.connection.channel()
     b1 = Queue('qname1', self.exchange, 'rkey')
     consumer = Consumer(channel, [b1])
     consumer.recover()
     self.assertIn('basic_recover', channel)
Example 55

from kombu import Queue

broker_url = 'amqp://*****:*****@localhost:5672//'
result_backend = 'redis://localhost:6379/0'

accept_content = ['pickle',] # Values are 'pickle', 'json', 'msgpack' and 'yaml'
task_serializer = "pickle"
result_serializer = "pickle"

timezone = 'Europe/Madrid'

task_default_queue = 'tasks'
task_queues = (
    Queue('tasks', routing_key='task.#'),
    Queue('transient', routing_key='transient.#', delivery_mode=1)
)
task_default_exchange = 'tasks'
task_default_exchange_type = 'topic'
task_default_routing_key = 'task.default'
Example 56
 def test_Consumer(self):
     q = Queue('foo')
     print(self.channel.queues)
     cons = self.channel.Consumer(q)
     self.assertIsInstance(cons, Consumer)
     self.assertIs(cons.channel, self.channel)
Example 57
    'sentry.tasks.process_buffer',
    'sentry.tasks.reports',
    'sentry.tasks.reprocessing',
    'sentry.tasks.scheduler',
    'sentry.tasks.signals',
    'sentry.tasks.store',
    'sentry.tasks.unmerge',
    'sentry.tasks.symcache_update',
    'sentry.tasks.servicehooks',
    'sentry.tagstore.tasks',
    'sentry.tasks.assemble',
    'sentry.tasks.integrations',
    'sentry.tasks.files',
)
CELERY_QUEUES = [
    Queue('activity.notify', routing_key='activity.notify'),
    Queue('alerts', routing_key='alerts'),
    Queue('auth', routing_key='auth'),
    Queue('assemble', routing_key='assemble'),
    Queue('buffers.process_pending', routing_key='buffers.process_pending'),
    Queue('commits', routing_key='commits'),
    Queue('cleanup', routing_key='cleanup'),
    Queue('default', routing_key='default'),
    Queue('digests.delivery', routing_key='digests.delivery'),
    Queue('digests.scheduling', routing_key='digests.scheduling'),
    Queue('email', routing_key='email'),
    Queue('events.index_event_tags', routing_key='events.index_event_tags'),
    Queue('events.preprocess_event', routing_key='events.preprocess_event'),
    Queue('events.reprocessing.preprocess_event',
          routing_key='events.reprocessing.preprocess_event'),
    Queue('events.process_event', routing_key='events.process_event'),
Example 58
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
CELERY_IGNORE_RESULT = True
CELERY_SEND_EVENTS = False
CELERY_RESULT_BACKEND = None
CELERY_TASK_RESULT_EXPIRES = 1
CELERY_DISABLE_RATE_LIMITS = True
CELERY_DEFAULT_QUEUE = "default"
CELERY_DEFAULT_EXCHANGE = "default"
CELERY_DEFAULT_EXCHANGE_TYPE = "direct"
CELERY_DEFAULT_ROUTING_KEY = "default"
CELERY_CREATE_MISSING_QUEUES = True
CELERY_IMPORTS = ('geonode.tasks.deletion', 'geonode.tasks.update',
                  'geonode.tasks.email')

CELERY_QUEUES = [
    Queue('default', routing_key='default'),
    Queue('cleanup', routing_key='cleanup'),
    Queue('update', routing_key='update'),
    Queue('email', routing_key='email'),
]

import djcelery

djcelery.setup_loader()

# Load more settings from a file called local_settings.py if it exists
try:
    from local_settings import *  # noqa
except ImportError:
    pass
Example 59
from kombu import Exchange, Queue

from settings import get_amcat_config

amcat_config = get_amcat_config()

CELERY_RESULT_BACKEND = 'amqp'
CELERY_TASK_RESULT_EXPIRES = 3600

_qname = amcat_config["celery"].get('queue')
CELERY_QUEUES = (Queue(_qname, Exchange('default'), routing_key=_qname), )
CELERY_DEFAULT_QUEUE = _qname
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_DEFAULT_ROUTING_KEY = _qname

CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']

CELERY_IGNORE_RESULT = False
Example 60
 def test_receive_without_callbacks_raises(self):
     channel = self.connection.channel()
     b1 = Queue('qname1', self.exchange, 'rkey')
     consumer = Consumer(channel, [b1])
     with self.assertRaises(NotImplementedError):
         consumer.receive(1, 2)