Example 1
    def delete_queues(self, queue_name_root=None):
        '''
        The opposite of create_queues: deletes the queues that are specific to
        the running instance of the task and removes its management data from
        the management queue.

        :param queue_name_root: Optionally a queue_name_root to use. This is useful
                                primarily for cleaning up zombie queues.
        '''
        with Connection(current_app.conf.broker_url) as conn:
            try:
                # Connection is lazy. Force a connection now.
                conn.connect()
                c = conn.connection
                log.debug(f'\tConnection: {self.connection_names(c)[1]}')

                # Create a channel on the connection and log it in the RabbitMQ webmonitor format
                ch = c.channel()
                log.debug(f'\tChannel: {self.channel_names(ch)[1]}')

                # Create or get the exchange
                x = Exchange(self.exchange_name, channel=ch)
                log.debug(f'\tExchange: {x.name}')

                # Get the queue name root from management data
                if not queue_name_root:
                    queue_name_root = getattr(self, 'queue_name_root', None)
                    if not queue_name_root:
                        queue_name_root = self.get_management_data()
                        if not queue_name_root:
                            log.debug(
                                'Delete Queues: Request to delete queues cannot be fulfilled for lack of a queue root name.'
                            )

                if queue_name_root:
                    # And the queue names
                    qnames = self.queue_names(queue_name_root)

                    # Delete the queues
                    for k, qname in qnames.items():
                        # We keep the management queue; it is never deleted
                        if k != self.management_key():
                            q = Queue(qname, channel=ch, no_declare=True)
                            q.delete()
                            log.debug(f'Deleted Queue: {q.name}')

            except Exception as e:
                log.error(f'QUEUE DELETION ERROR: {e}')
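The method above reduces to a small kombu pattern: bind a Queue to a channel and call delete(). A minimal sketch, assuming a placeholder broker URL and queue name:

from kombu import Connection, Queue

with Connection('amqp://guest:guest@localhost:5672//') as conn:
    channel = conn.channel()
    # no_declare=True avoids re-declaring the queue before deleting it.
    queue = Queue('my_task_queue', channel=channel, no_declare=True)
    queue.delete()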
Example 2
 def queue_delete(self, queue_name):
     # NOTE(gtt): We can omit exchange and routing_key argument here
     # queue = Queue(queue_name, exchange=exchange,
     #      routing_key=routing_key, channel=conn.channel())
     queue = Queue(queue_name, channel=self._channel())
     print "Deleting queue %s" % queue
     return queue.delete()
Example 3
 def test_delete(self):
     b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
     b.delete()
     assert 'queue_delete' in b.channel
Example 4
    def kill_zombies(self):
        '''
        Attempts to find zombie queues using the persistent and durable 
        management data, that is queues that have no active process using 
        them, and delete them. Simple housekeeping.
        
        Technically zombies should not arise, as only the management queue is
        durable; the others should die with a server restart.
        '''

        log.debug(f'KILL ZOMBIES:')
        log.debug(f'\tZombie Search, getting management data...')
        mgmt_data = self.get_management_data(all=True)

        log.debug(f'\tZombie Search, getting active ids ...')
        try:
            # inspect().active() returns None when no workers respond.
            active = current_app.control.inspect().active() or {}
        except Exception as e:
            log.error(f'ZOMBIE SEARCH ERROR: {e}')
            active = {}

        active_ids = set()
        for node_id in active:
            for task_info in active[node_id]:
                if task_info['name'] == self.name:
                    active_ids.add(task_info['id'])

        managed_ids = set(mgmt_data.keys()) if mgmt_data else set()

        log.debug(f'\tZombie Search, active ids: {active_ids}')
        log.debug(f'\tZombie Search, managed ids: {managed_ids}')
        log.debug(f'\tZombie Search, happy with: {active_ids & managed_ids}')
        log.debug(
            f'\tZombie Search, will delete from management data: {managed_ids - active_ids}'
        )
        log.debug(
            f'\tZombie Search, will cull from active tasks: {active_ids - managed_ids}'
        )

        # Any tasks being managed but not active are not needed in management data.
        # But they may have left zombie queues lying around. So we look for and delete the
        # queues and then remove the task from the management data.
        del_ids = managed_ids - active_ids
        if del_ids:
            with InteractiveExchange(self) as x:
                for task_id in del_ids:
                    queue_name_root = mgmt_data[task_id]
                    qnames = self.queue_names(queue_name_root,
                                              except_management=True)
                    for qname in qnames:
                        q = Queue(
                            qname, channel=x.channel,
                            no_declare=True)  # TODO: need channel at least
                        q.delete()

            self.del_management_data(del_ids)

        # Then, any active tasks that don't have management data probably got
        # lost along the way somewhere. If we're feeling aggressive we can ask
        # them to abort.
        #
        # We don't know the names of these queues as we don't have their
        # queue_name_root (which we stored in management data which they are
        # lacking). But we can send a message via the exchange and use their
        # instruction routing key.
        ids_to_cull = active_ids - managed_ids
        if ids_to_cull and self.cull_forgotten_tasks:
            try:
                with InteractiveExchange(self) as x:
                    for task_id in ids_to_cull:
                        # TODO: This is currently broken because of a Kombu bug:
                        # See:
                        #    https://github.com/celery/kombu/issues/1174
                        #
                        # In the meantime we must specify the content encoding explicitly
                        x.publish(self.DIE_CLEANLY,
                                  routing_key=self.instruction_key(task_id),
                                  content_encoding='utf-8')
            except Exception as e:
                log.error(f'\tZOMBIE KILL ERROR: {e}')

        log.debug(f'\tZombie Kill Done.')
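For reference, current_app.control.inspect().active() replies with a mapping of worker node names to lists of task-info dicts, or None when no workers respond. A hypothetical reply shape with placeholder values, showing only the keys the code above reads:

active = {
    'celery@worker-1': [
        # Each entry describes one task currently executing on that worker.
        {'id': 'a1b2c3d4', 'name': 'my_app.my_task', 'args': [], 'kwargs': {}},
    ],
}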
Example 5
 def test_delete(self):
     b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
     b.delete()
     self.assertIn('queue_delete', b.channel)
Example 6
from kombu import Connection, Producer, Exchange, Queue

server = "localhost"
connection = Connection('amqp://*****:*****@%s:5672//' % server)
channel = connection.channel()

for each in range(1, 1024):
    exchange = Exchange("BB_exchange_%s" % each, 'direct', channel)
    exchange.delete()

    queue = Queue("BB_queue_%s" % each,
                  exchange=exchange,
                  routing_key="BBBBBB",
                  channel=channel)
    queue.delete()
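The script above never closes its connection. The same cleanup can be written with the connection as a context manager so it is released even if a delete fails; a sketch assuming the same placeholder naming scheme:

from kombu import Connection, Exchange, Queue

with Connection('amqp://user:password@localhost:5672//') as connection:
    channel = connection.channel()
    for each in range(1, 1024):
        # Delete each exchange, then the queue that was bound to it.
        Exchange("BB_exchange_%s" % each, 'direct', channel).delete()
        Queue("BB_queue_%s" % each, channel=channel).delete()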
Example 7
 def delete_queue(self, connection, name, *args, **kwargs):
     queue = Queue(name=name)
     queue.maybe_bind(connection.default_channel)
     queue.delete(*args, **kwargs)
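maybe_bind binds the queue to the channel only if it is not already bound, which is why this helper works with a freshly constructed Queue. A minimal usage sketch with a placeholder broker URL and queue name:

from kombu import Connection, Queue

with Connection('amqp://guest:guest@localhost:5672//') as connection:
    queue = Queue(name='example_queue')
    queue.maybe_bind(connection.default_channel)
    # if_unused/if_empty guard against deleting a queue that is in use or non-empty.
    queue.delete(if_unused=False, if_empty=False)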
Example 8
from kombu import Connection, Queue
import config
connection = Connection(hostname=config.Queue_Server,
                        port=config.Queue_Port,
                        userid=config.Queue_User,
                        password=config.Queue_PassWord,
                        virtual_host=config.Queue_Path)
channel = connection.channel()
task_queue = Queue('net_request', durable=True, channel=channel)
task_queue.delete()

task_queue = Queue('weibo_request', durable=True, channel=channel)
task_queue.delete()

connection.close()
Example 9
import json
import subprocess

from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin

# Assumed context: RabbitMQ's firehose tracer publishes to the
# amq.rabbitmq.trace topic exchange, which matches the "publish.*"
# routing key used below.
task_exchange = Exchange('amq.rabbitmq.trace', type='topic')
task_queues = []


class Worker(ConsumerMixin):
    def __init__(self, connection):
        self.connection = connection

    def get_consumers(self, Consumer, channel):
        return [Consumer(queues=task_queues,
                accept=['pickle', 'json'],
                callbacks=[self.process_task])]

    # TODO get req if hint in resp
    def process_task(self, body, message):
        rpc_body = json.loads(body)
        if "oslo.message" in rpc_body:
            print(json.loads(rpc_body["oslo.message"]))
        else:
            print(rpc_body)
        # Acknowledge so the broker does not redeliver the message.
        message.ack()

with Connection('amqp://*****:*****@localhost:5672//') as conn:
    chan = conn.channel()
    queue = Queue("trace_", task_exchange, routing_key="publish.*", channel=chan)
    task_queues.append(queue)
    try:
        # Don't need a check here; if the command fails, it raises CalledProcessError
        subprocess.check_output("sudo rabbitmqctl trace_on", shell=True)
        worker = Worker(conn)
        worker.run()
    except KeyboardInterrupt:
        subprocess.check_output("sudo rabbitmqctl trace_off", shell=True)
        queue.delete()
Example 10
class DistLockClient(object):

    producers = dict()

    def __init__(self, name):
        self.name = name
        self.exchange = Exchange(self.name)
        self.routing_key = 'lock_routing_' + self.name
        self.requester = Producer(
            Connection(),
            exchange=self.exchange,
            auto_declare=True,
        )
        self.id = uuid()

        self.lock_client_q = Queue(name=self.id,
                                   exchange=self.exchange,
                                   routing_key=self.id)
        self.lock_client = Consumer(
            Connection(),
            on_message=self.read_response,
            queues=[self.lock_client_q],
        )

        self.red_connection = redis.StrictRedis()
        self.lock_client_listen_thread = threading.Thread(target=self.listener)
        self.lock_client_listen_thread.daemon = True
        self.hold_lock = threading.Event()
        self.lock_client_listen_thread.start()
        DistLockClient.producers[self.id] = (self.requester, self.lock_client)

    def acquire(self):
        self.requester.publish(
            dict(request='ACQUIRE', id=self.id),
            retry=True,
            exchange=self.exchange,
            routing_key=self.routing_key,
        )
        # Block until acknowledgement from broker
        self.hold_lock.wait()

    def release(self):
        self.requester.publish(
            dict(request='RELEASE', id=self.id),
            retry=True,
            exchange=self.exchange,
            routing_key=self.routing_key,
        )
        self.red_connection.delete('current_lock_owner_' + self.name)
        # clear hold event
        self.hold_lock.clear()

    def listener(self):
        self.lock_client.add_queue(self.lock_client_q)
        self.lock_client.consume(no_ack=True)
        for _ in eventloop(self.lock_client.connection):
            pass

    def read_response(self, message):
        print(message.payload)
        self.hold_lock.set()

    def __enter__(self):
        self.acquire()

    def __exit__(self, e_type, e_value, traceback):
        self.release()

    def __del__(self):
        self.requester.connection.release()
        self.lock_client.connection.release()
        self.lock_client_listen_thread.join()
        self.lock_client_q.delete()
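Because the class defines __enter__ and __exit__, a lock can guard a critical section directly. A hypothetical usage sketch ('my_resource' is a placeholder lock name; a broker and the matching lock server are assumed to be running):

lock = DistLockClient('my_resource')
with lock:
    # Only one holder of 'my_resource' executes this section at a time.
    print('lock held')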
Example 11
class TestVerifyTask(unittest.TestCase):
    def setUp(self):
        # Open connection to RabbitMQ
        self.conn = Connection(config['broker_url'])
        self.channel = self.conn.channel()

        # Declare Verify queue
        q = config['queues']['verify']
        self.verifyQ = Queue(q['name'],
                             channel=self.channel,
                             exchange=Exchange(q['name']),
                             routing_key=q['name'],
                             max_priority=q['max_task_priority'])
        self.verifyQ.declare()

        # Declare API queue
        q = config['queues']['api']
        self.apiQ = Queue(q['name'],
                          channel=self.channel,
                          exchange=Exchange(q['name']),
                          routing_key=q['name'],
                          max_priority=q['max_task_priority'])
        self.apiQ.declare()

    def tearDown(self):
        # Delete API queue
        self.apiQ.delete()
        # Delete Verify queue
        self.verifyQ.delete()
        # Close connection
        self.conn.close()

    def test_verify(self):

        data = [{
            'filename': '/var/store/15525119098910.pdf',
            'algorithm': 'md5',
            'checksum': 'ec4e3b91d2e03fdb17db55ff46da43b2'
        }, {
            'filename': '/var/store/15525119098910.pdf',
            'algorithm': 'sha512',
            'checksum': 'bc803d8abccf18d89765d6ae9fb7d490ad07f57a48e4987acc1'
                        '73af4e65f143a4d215ffb59e9eebeb03849baab5a6e016e2806'
                        'a2cd0e84b14c778bdb84afbbf4'
        }]

        for i in data:
            self.assertTrue(path.exists(i['filename']))

            # Queues cleanup
            self.verifyQ.purge()
            self.apiQ.purge()

            # Random DFO ID
            dfo_id = randint(1, 2147483647)

            # Send task
            q = config['queues']['verify']
            producer = self.conn.Producer()
            producer.publish(
                routing_key=q['name'],
                body=[[dfo_id, i['filename'], 'test', i['algorithm']], {}, {}],
                headers={
                    'task': 'verify_dfo',
                    'id': str(uuid.uuid1())
                })

            # Wait for result message for max 5 seconds
            msg = None
            wait = 0
            while wait <= 5 and msg is None:
                msg = self.apiQ.get(no_ack=False)
                if msg is None:
                    sleep(1)
                    wait += 1

            # Tests
            self.assertIsNotNone(msg)
            self.assertEqual(msg.payload[0][0], dfo_id)
            self.assertEqual(msg.payload[0][1], i['algorithm'])
            self.assertEqual(msg.payload[0][2], i['checksum'])
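The publish call above hand-crafts a message in Celery's version-2 protocol: the body is the (args, kwargs, embed) triple and the task name and id travel in the headers. A minimal sketch with placeholder values, assuming a bound producer:

import uuid

producer.publish(
    body=[[42, '/tmp/example.pdf', 'test', 'md5'], {}, {}],  # (args, kwargs, embed)
    routing_key='verify',  # placeholder routing key
    headers={'task': 'verify_dfo', 'id': str(uuid.uuid1())},
)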