Example #1
    def monitor(self, workers):
        finished_workers = []
        failed_workers = []

        for worker in workers:
            exitcode = worker.exitcode

            if exitcode is None:  # the process is running
                continue
            elif exitcode >= 1:  # the process failed
                failed_workers.append(worker)
            else:  # the process has finished (0) or was interrupted (<0)
                finished_workers.append(worker)

        # remove finished workers
        for worker in finished_workers:
            log.info(' [{}] Worker stopped'.format(worker.id))
            workers.remove(worker)

        # restart failed workers
        for worker in failed_workers:
            log.info(' [{}] Worker failed'.format(worker.id))
            workers.remove(worker)

            new_worker = Worker(queue_name=worker.queue_name,
                                worker_url=worker.worker_url)
            workers.append(new_worker)

            log.info(' [{}] Starting worker'.format(new_worker.id))
            new_worker.start()
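The exitcode checks above follow the standard multiprocessing.Process semantics, which suggests (although the examples do not show it) that Worker is a Process subclass: exitcode is None while the child is still running, 0 on a clean exit, positive when the process failed, and negative when it was killed by a signal. A minimal, self-contained illustration of those values (not taken from the project code):

import multiprocessing
import sys

def fail():
    sys.exit(1)  # a non-zero exit code is what monitor() treats as a failure

if __name__ == '__main__':
    p = multiprocessing.Process(target=fail)
    print(p.exitcode)  # None - the process has not finished (or even started) yet
    p.start()
    p.join()
    print(p.exitcode)  # 1 - such a worker would be removed and restarted above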
Example #2
    def start_workers_for_grader_url(cls,
                                     queue_name,
                                     destination_url,
                                     num_workers=1):
        """We need to start workers (consumers) to pull messages
        from the queue and pass them to our passive grader.

        `queue_name`: The name of the queue to pull messages from (string)

        `destination_url`: The url to forward responses to.

        `num_workers`: The number of workers to start for this queue (int)

        Raises an `AssertionError` if trying to start workers before
        stopping the current workers."""
        if hasattr(cls, 'worker_list'):
            assert (len(cls.worker_list) > 0)

        else:
            cls.worker_list = []

        for i in range(num_workers):
            worker = Worker(queue_name=queue_name, worker_url=destination_url)
            worker.start()
            cls.worker_list.append(worker)
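A hypothetical call site for this classmethod might look like the sketch below; the owning class name GraderWorkerManager and both argument values are invented for illustration, since the examples do not show them:

# Illustrative only: the class name, queue name and URL are assumptions.
GraderWorkerManager.start_workers_for_grader_url(
    queue_name='test_queue',
    destination_url='http://localhost:8000/grade',
    num_workers=2,
)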
Example #3
    def start_workers_for_grader_url(cls, queue_name,
                                     destination_url, num_workers=1):
        """We need to start workers (consumers) to pull messages
        from the queue and pass them to our passive grader.

        `queue_name`: The name of the queue to pull messages from (string)

        `destination_url`: The url to forward responses to.

        `num_workers`: The number of workers to start for this queue (int)

        Raises an `AssertionError` if trying to start workers before
        stopping the current workers."""
        if hasattr(cls, 'worker_list'):
            assert(len(cls.worker_list) > 0)

        else:
            cls.worker_list = []

        for i in range(num_workers):
            worker = Worker(queue_name=queue_name, worker_url=destination_url)

            # There is a bug in pika on Mac OS X in which using
            # multiprocessing.Process with pika's ioloop causes an
            # IncompatibleProtocolError to be raised.
            # The workaround for now is to run each worker
            # as a separate thread.
            worker_thread = threading.Thread(target=worker.run)
            worker_thread.daemon = True
            worker_thread.start()

            cls.worker_list.append(worker)
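Note that the worker threads are daemonic, so they are terminated as soon as the main thread exits; after calling this method, the caller has to keep the main thread alive for as long as the consumers should keep running. A minimal sketch of such a keep-alive loop (not part of the original code):

import time

# Park the main thread after the daemon worker threads have been started;
# when it exits (for example on Ctrl-C), the daemon threads die with it.
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    pass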
Example #4
class TestWorkerConnection(TestCase):

    QUEUE_NAME = 'test_queue_%s' % uuid4().hex

    def setUp(self):
        self.worker = Worker(queue_name=self.QUEUE_NAME,
                             worker_url='some_test_url')

    @mock.patch('time.sleep', mock.Mock(return_value=None))
    @mock.patch('queue.consumer.log')
    @mock.patch('queue.consumer.Worker.connect')
    def test_connection_retries_on_exception(self, mock_worker_connect,
                                             mock_log):
        """
        Tests worker's connection on 'AMQPConnectionError' exception.
        """
        mock_worker_connect.side_effect = AMQPConnectionError

        self.assertEquals(self.worker.retries, 0)

        with self.assertRaises(AMQPConnectionError):
            self.worker.run()

        self.assertEquals(self.worker.retries, settings.RETRY_MAX_ATTEMPTS)

        # Asserts connection retry logging.
        for attempt in xrange(1, settings.RETRY_MAX_ATTEMPTS):
            mock_log.info.assert_any_call(
                "[{id}] - Retrying connection, attempt # {attempt} of {max_attempts} of MAX"
                .format(id=self.worker.id,
                        attempt=attempt,
                        max_attempts=settings.RETRY_MAX_ATTEMPTS))

        # Asserts that the error was logged on crossing max retry attempts.
        mock_log.error.assert_called_with(
            "[{id}] Consumer for queue {queue} connection error: ".format(
                id=self.worker.id,
                queue=self.worker.queue_name,
            ))
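Taken together, the patches and assertions pin down how Worker.run() is expected to behave: it retries connect() up to settings.RETRY_MAX_ATTEMPTS times, logs each retry, sleeps between attempts, and finally logs an error and re-raises. The sketch below is only inferred from the test, not the project's actual implementation; log, settings and AMQPConnectionError are assumed to be importable in the consumer module.

    def run(self):
        # Inferred sketch, not the real Worker.run(): retry connect() until
        # settings.RETRY_MAX_ATTEMPTS is reached, then log an error and
        # re-raise the last AMQPConnectionError.
        while True:
            try:
                self.connect()
                return
            except AMQPConnectionError:
                self.retries += 1
                if self.retries >= settings.RETRY_MAX_ATTEMPTS:
                    log.error(
                        "[{id}] Consumer for queue {queue} connection error: ".format(
                            id=self.id, queue=self.queue_name))
                    raise
                log.info(
                    "[{id}] - Retrying connection, attempt # {attempt} of {max_attempts} of MAX".format(
                        id=self.id,
                        attempt=self.retries,
                        max_attempts=settings.RETRY_MAX_ATTEMPTS))
                time.sleep(1)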
Example #5
    def handle(self, *args, **options):
        log.info(' [*] Starting queue workers...')

        workers = []
        queues = settings.XQUEUES.items()

        # Assign one worker per queue
        for name, url in queues:
            if url is not None:
                worker = Worker(queue_name=name, worker_url=url)
                workers.append(worker)

        # Start workers
        for worker in workers:
            log.info(' [{}] Starting worker'.format(worker.id))
            worker.start()

        # Monitor workers
        while workers:
            self.monitor(workers)
            time.sleep(MONITOR_SLEEPTIME)

        log.info(' [*] All workers finished. Exiting')
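The loop above skips queues whose URL is None, so settings.XQUEUES is evidently a mapping from queue name to the grader URL that responses are forwarded to, with None marking queues that get no worker. An illustrative fragment (queue names and URLs invented):

# Illustrative only: queue names and URLs are made up.
XQUEUES = {
    'test-pull-queue': None,                        # skipped, no worker started
    'certificates': 'http://localhost:8000/grade',  # gets one Worker
}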
Example #6
    def handle(self, *args, **options):
        log.info(' [*] Starting queue consumers...')

        workers = []
        queues = settings.XQUEUES.items()

        # Assign one worker per queue
        for name, url in queues:
            if url is not None:
                worker = Worker(queue_name=name, worker_url=url)
                worker.start()
                workers.append(worker)

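        # Block until every worker has exited; unlike the monitor() loop in
        # Example #1, failed workers are not restarted here.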
        for worker in workers:
            worker.join()

        log.info(' [*] All workers finished. Exiting')
Example #7
 def setUp(self):
     self.worker = Worker(queue_name=self.QUEUE_NAME,
                          worker_url='some_test_url')