Example #1
    def create(self, w):
        # Reuse the process-global event loop if one is already installed.
        w.hub = get_event_loop()
        if w.hub is None:
            # Otherwise build a hub (the connection may request a specific
            # hub class via `requires_hub`) and install it as the global loop.
            required_hub = getattr(w._conninfo, 'requires_hub', None)
            w.hub = set_event_loop(
                (required_hub if required_hub else _Hub)(w.timer))
        self._patch_thread_primitives(w)
        return self
Example #2
def get_client(hub=None, **kwargs):
    """Get or create HTTP client bound to the current event loop."""
    hub = hub or get_event_loop()
    try:
        return hub._current_http_client
    except AttributeError:
        client = hub._current_http_client = Client(hub, **kwargs)
        return client
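
A minimal usage sketch (not from the source) of the caching behaviour above, assuming the get_client() definition shown and a working HTTP client backend for kombu: repeated calls with the same hub return the same cached Client instance.

# Usage sketch: the first call attaches a client to the hub,
# later calls with that hub return the cached instance.
from kombu.asynchronous import Hub

hub = Hub()
first = get_client(hub)
second = get_client(hub)
assert first is second  # cached as hub._current_http_client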
Example #5
import pytest


@pytest.fixture
def hub(request):
    from kombu.asynchronous import Hub, get_event_loop, set_event_loop
    _prev_hub = get_event_loop()
    hub = Hub()
    set_event_loop(hub)

    yield hub

    # Restore whichever loop (if any) was installed before the test ran.
    if _prev_hub is not None:
        set_event_loop(_prev_hub)
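
A hypothetical test (not part of the source) sketching how the fixture above would be consumed: pytest injects the Hub by parameter name, and get_event_loop() returns that same instance for the duration of the test.

# Hypothetical usage sketch for the fixture above.
def test_fixture_installs_global_loop(hub):
    from kombu.asynchronous import get_event_loop
    assert get_event_loop() is hub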
Example #7
    def test_with_file_descriptor_safety(self):
        # Given: a test celery worker instance
        worker = self.create_worker(
            autoscale=[10, 5],
            use_eventloop=True,
            timer_cls='celery.utils.timer2.Timer',
            threads=False,
        )

        # Given: This test requires a QoS defined on the worker consumer
        worker.consumer.qos = qos = QoS(lambda prefetch_count: prefetch_count,
                                        2)
        qos.update()

        # Given: We have started the worker pool
        worker.pool.start()

        # Given: Utilize kombu to get the global hub state
        hub = get_event_loop()
        # Given: The initial call to the Async Pool to register events works fine
        worker.pool.register_with_event_loop(hub)

        # Given: Mock the Hub so add and remove raise OSError
        def throw_file_not_found_error(*args, **kwargs):
            raise OSError()

        hub.add = throw_file_not_found_error
        hub.add_reader = throw_file_not_found_error
        hub.remove = throw_file_not_found_error

        # When: Calling again to register with event loop ...
        worker.pool.register_with_event_loop(hub)
        worker.pool._pool.register_with_event_loop(hub)
        # Then: test did not raise OSError
        # Note: worker.pool is prefork.TaskPool whereas
        # worker.pool._pool is the asynpool.AsynPool class.

        # When: Calling the tick method on_poll_start
        worker.pool._pool.on_poll_start()
        # Then: test did not raise OSError

        # Given: a mock object that fakes what's required for the next step
        proc = Mock(_sentinel_poll=42)

        # When: Calling again to register with event loop ...
        worker.pool._pool._track_child_process(proc, hub)
        # Then: test did not raise OSError

        # Given: _flush_outqueue is patched to raise OSError as well
        worker.pool._pool._flush_outqueue = throw_file_not_found_error

        # Finally:  Clean up so the threads before/after fixture passes
        worker.terminate()
        worker.pool.terminate()
Example #8
    def __init__(self, *args, **kwargs):
        if boto3 is None:
            raise ImportError('boto3 is not installed')
        super(Channel, self).__init__(*args, **kwargs)

        # SQS blows up if you try to create a new queue when one already
        # exists but with a different visibility_timeout.  This prepopulates
        # the queue_cache to protect us from recreating
        # queues that are known to already exist.
        self._update_queue_cache(self.queue_name_prefix)

        self.hub = kwargs.get('hub') or get_event_loop()
Example #9
    def __init__(self, *args, **kwargs):
        if boto3 is None:
            raise ImportError('boto3 is not installed')
        super().__init__(*args, **kwargs)

        # SQS blows up if you try to create a new queue when one already
        # exists but with a different visibility_timeout.  This prepopulates
        # the queue_cache to protect us from recreating
        # queues that are known to already exist.
        self._update_queue_cache(self.queue_name_prefix)

        self.hub = kwargs.get('hub') or get_event_loop()
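
A minimal sketch (hypothetical LoopBound class, not part of kombu) of the fallback in the last line above: prefer an explicitly passed hub, otherwise bind to the process-global event loop.

# Hypothetical class illustrating the `kwargs.get('hub') or get_event_loop()` pattern.
from kombu.asynchronous import Hub, get_event_loop, set_event_loop


class LoopBound:
    def __init__(self, **kwargs):
        self.hub = kwargs.get('hub') or get_event_loop()


set_event_loop(Hub())
assert LoopBound().hub is get_event_loop()               # falls back to the global loop
assert LoopBound(hub=Hub()).hub is not get_event_loop()  # an explicit hub wins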
Example #10
    def test_with_autoscaler_file_descriptor_safety(self):
        # Given: a test celery worker instance with auto scaling
        worker = self.create_worker(
            autoscale=[10, 5],
            use_eventloop=True,
            timer_cls='celery.utils.timer2.Timer',
            threads=False,
        )
        # Given: This test requires a QoS defined on the worker consumer
        worker.consumer.qos = qos = QoS(lambda prefetch_count: prefetch_count,
                                        2)
        qos.update()

        # Given: We have started the worker pool
        worker.pool.start()

        # Then: the worker pool is the same as the autoscaler pool
        auto_scaler = worker.autoscaler
        assert worker.pool == auto_scaler.pool

        # Given: Utilize kombu to get the global hub state
        hub = get_event_loop()
        # Given: The initial call to the Async Pool to register events works fine
        worker.pool.register_with_event_loop(hub)

        # Given: Create some mock queue messages and reserve them as tasks
        _keep = [Mock(name=f'req{i}') for i in range(20)]
        for m in _keep:
            state.task_reserved(m)
        auto_scaler.body()

        # Simulate a file descriptor from the list being closed by the OS
        # auto_scaler.force_scale_down(5)
        # This actually works -- it releases the semaphore properly
        # Same with calling .terminate() on the process directly
        for fd, proc in worker.pool._pool._fileno_to_outq.items():
            # however opening this fd as a file and closing it will do it
            queue_worker_socket = open(str(fd), "w")
            queue_worker_socket.close()
            break  # Only need to do this once

        # When: Calling again to register with event loop ...
        worker.pool.register_with_event_loop(hub)

        # Then: test did not raise "OSError: [Errno 9] Bad file descriptor!"

        # Finally:  Clean up so the threads before/after fixture passes
        worker.terminate()
        worker.pool.terminate()
Example #11
    def create(self, w):
        # Reuse the process-global event loop if one is installed,
        # otherwise create a new hub and make it the global loop.
        w.hub = get_event_loop()
        if w.hub is None:
            w.hub = set_event_loop(_Hub(w.timer))
        self._patch_thread_primitives(w)
        return self
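
A minimal sketch (reusing the kombu.asynchronous imports shown in the fixture example above) of the get/set round trip that create() relies on: set_event_loop() installs the hub as the process-global loop and returns it, so a later get_event_loop() yields that same instance.

# Sketch of the global-loop round trip used by create() above.
from kombu.asynchronous import Hub, get_event_loop, set_event_loop

loop = set_event_loop(Hub())
assert get_event_loop() is loop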