Ejemplo n.º 1
0
def test_container_doesnt_exhaust_max_workers(container):
    # With max_workers=1, a second spawn_worker call must queue behind the
    # first worker instead of running concurrently with it.
    # NOTE(review): the `container` fixture argument is immediately shadowed
    # by the locally built ServiceContainer below -- confirm the fixture is
    # still required.
    spam_called = Event()
    spam_continue = Event()

    class Service(object):
        name = 'max-workers'

        @foobar
        def spam(self, a):
            # report our argument, then block until the test releases us
            spam_called.send(a)
            spam_continue.wait()

    container = ServiceContainer(Service, config={MAX_WORKERS_CONFIG_KEY: 1})

    dep = get_extension(container, Entrypoint)

    # start the first worker, which should wait for spam_continue
    container.spawn_worker(dep, ['ham'], {})

    # start the next worker in a separate thread,
    # because it should block until the first one completed
    gt = spawn(container.spawn_worker, dep, ['eggs'], {})

    with Timeout(1):
        assert spam_called.wait() == 'ham'
        # if the container had spawned the second worker, we would see
        # an error indicating that spam_called was fired twice, and the
        # greenthread would now be dead.
        assert not gt.dead
        # reset the calls and allow the waiting worker to complete.
        spam_called.reset()
        spam_continue.send(None)
        # the second worker should now run and complete
        assert spam_called.wait() == 'eggs'
        assert gt.dead
Ejemplo n.º 2
0
    def __init__(self,
                 schedule: str,
                 tz: str = None,
                 concurrency: str = ConcurrencyPolicy.WAIT,
                 **kwargs):
        """
        Cron entrypoint. Fires according to a (possibly timezone-aware)
        cron schedule. If no timezone info is passed, the default is UTC.
        Set ``concurrency`` to ``ConcurrencyPolicy.ALLOW`` to allow multiple workers
        to run simultaneously. Set ``concurrency`` to ``ConcurrencyPolicy.SKIP`` to
        skip lapsed scheduled runs. The default behavior (``ConcurrencyPolicy.WAIT``)
        is to wait until the running worker completes and immediately spawn another
        if the schedule has lapsed.

        :param schedule: cron expression, e.g. ``'0 12 * * *'``.
        :param tz: timezone name; ``None`` means UTC.
        :param concurrency: one of the ``ConcurrencyPolicy`` values described above.

        Example::

            class Service(object):
                name = "service"

                @cron(schedule='0 12 * * *', tz='America/Chicago')
                def ping(self):
                    # method executes every day at noon America/Chicago time
                    print("pong")

        """
        self.schedule = schedule
        self.tz = tz
        self.concurrency = concurrency
        # signalled to ask the scheduling greenthread to stop
        self.should_stop = Event()
        # signalled when a running worker finishes -- presumably fired by a
        # result handler defined elsewhere (not shown here)
        self.worker_complete = Event()
        # greenthread running the schedule loop; assigned when started
        self.gt = None
        super().__init__(**kwargs)
Ejemplo n.º 3
0
def test_kill_container_with_active_workers(container_factory):
    # Kill a container while one worker is still blocked, and verify the
    # kill path logs warnings for the active worker it terminates.
    waiting = Event()
    wait_forever = Event()

    class Service(object):
        name = 'kill-with-active-workers'

        @foobar
        def spam(self):
            waiting.send(None)
            wait_forever.wait()  # never fired: the worker stays active

    container = container_factory(Service, {})
    dep = get_extension(container, Entrypoint)

    # start a worker, which will block forever on wait_forever
    container.spawn_worker(dep, (), {})

    waiting.wait()  # make sure the worker is running before killing

    with patch('nameko.containers._log') as logger:
        container.kill()

    assert logger.warning.call_args_list == [
        call('killing %s active workers(s)', 1),
        call('killing active worker for %s', ANY)
    ]
Ejemplo n.º 4
0
 def __init__(self):
     # Events coordinating shutdown and end-of-results signalling.
     self.stop = Event()
     self.no_more_results = Event()
     # Accumulated results, plus the first failure observed (if any).
     self.results = []
     self.failure = None
     # Lease-id counter and the set of keys written so far.
     self.next_lease_id = 100000
     self.keys_written = set()
Ejemplo n.º 5
0
    def __init__(self, interval, eager=False, **kwargs):
        """
        Timer entrypoint.

        Fires every `interval` seconds, or as soon as the previous worker
        completes when that worker took longer than the interval.

        By default the first firing happens `interval` seconds after the
        service starts; pass `eager=True` to fire immediately on start.

        Example::

            timer = Timer.decorator

            class Service(object):
                name = "service"

                @timer(interval=5)
                def tick(self):
                    pass

        """
        self.eager = eager
        self.interval = interval
        # events coordinating shutdown and worker completion
        self.worker_complete = Event()
        self.should_stop = Event()
        # greenthread running the timer loop; assigned when started
        self.gt = None
        super(Timer, self).__init__(**kwargs)
Ejemplo n.º 6
0
    def test_exceptionpreservation(self):
        # Verify sys.exc_info() is preserved per-greenthread: an exception
        # being handled in one greenthread must not leak into, nor be
        # clobbered by, another greenthread's exception state.
        # events for controlling execution order
        gt1event = Event()
        gt2event = Event()

        def test_gt1():
            try:
                raise KeyError()
            except KeyError:
                # yield to gt2 while still inside this handler
                gt1event.send('exception')
                gt2event.wait()
                # gt2 raised and handled ValueError in the meantime; our
                # exception context must still be the KeyError
                assert sys.exc_info()[0] is KeyError
                gt1event.send('test passed')

        def test_gt2():
            gt1event.wait()
            gt1event.reset()
            # no exception context should be inherited from gt1
            assert sys.exc_info()[0] is None
            try:
                raise ValueError()
            except ValueError:
                gt2event.send('exception')
                gt1event.wait()
                assert sys.exc_info()[0] is ValueError

        g1 = eventlet.spawn(test_gt1)
        g2 = eventlet.spawn(test_gt2)
        try:
            g1.wait()
            g2.wait()
        finally:
            # ensure neither greenthread outlives the test
            g1.kill()
            g2.kill()
Ejemplo n.º 7
0
def test_kill_container_with_active_workers(container_factory):
    # Kill a container while one worker is still blocked, and verify the
    # warning logged for the killed active thread names the entrypoint.
    waiting = Event()
    wait_forever = Event()

    class Service(object):
        name = 'kill-with-active-workers'

        @foobar
        def spam(self):
            waiting.send(None)
            wait_forever.wait()  # never fired: the worker stays active

    container = container_factory(Service, {})
    dep = get_dependency(container, EntrypointProvider)

    # start a worker, which will block forever on wait_forever
    container.spawn_worker(dep, (), {})

    waiting.wait()

    with patch('nameko.containers._log') as logger:
        container.kill()
    calls = logger.warning.call_args_list
    assert call(
        'killing active thread for %s', 'kill-with-active-workers.spam'
    ) in calls
Ejemplo n.º 8
0
 def __init__(self, interval, eager=True, **kwargs):
     # How often to fire, and whether to fire immediately on start.
     self.interval = interval
     self.eager = eager
     # Greenthread running the timer loop; created when the timer starts.
     self.gt = None
     # Events coordinating shutdown and completion.
     self.stopping_event = Event()
     self.finished_event = Event()
     super(Timer, self).__init__(**kwargs)
Ejemplo n.º 9
0
def test_handlers_do_not_block(SlackClient, container_factory, config,
                               tracker):
    # Two entrypoints handle the same event, each blocking on its own
    # Event. Releasing them out of order (handler 2 first) proves the
    # handlers run concurrently rather than serially blocking each other.

    work_1 = Event()
    work_2 = Event()

    class Service:

        name = 'sample'

        @rtm.handle_event
        def handle_1(self, event):
            work_1.wait()
            tracker.handle_1(event)

        @rtm.handle_event
        def handle_2(self, event):
            work_2.wait()
            tracker.handle_2(event)

    events = [{'spam': 'ham'}]

    def rtm_read():
        # deliver the single event once, then report empty reads
        if events:
            return [events.pop(0)]
        else:
            return []

    SlackClient.return_value.rtm_read.side_effect = rtm_read
    container = container_factory(Service, config)
    container.start()

    try:
        # both handlers are still working
        assert (tracker.handle_1.call_args_list == [])
        assert (tracker.handle_2.call_args_list == [])

        # finish work of the second handler
        work_2.send()
        sleep(0.1)

        # second handler is done
        assert (tracker.handle_1.call_args_list == [])
        assert (tracker.handle_2.call_args_list == [call({'spam': 'ham'})])

        # finish work of the first handler
        work_1.send()
        sleep(0.1)

        # first handler is done
        assert (tracker.handle_1.call_args_list == [call({'spam': 'ham'})])
        assert (tracker.handle_2.call_args_list == [call({'spam': 'ham'})])
    finally:
        # unblock any handler still waiting so the container can shut down
        if not work_1.ready():
            work_1.send()
        if not work_2.ready():
            work_2.send()
Ejemplo n.º 10
0
    def test_send(self):
        # A wait() interrupted by an eventlet.Timeout must not corrupt
        # event delivery: event2, to which nobody sends, must still time
        # out and return the timeout_value sentinel rather than a value.
        event1 = Event()
        event2 = Event()

        spawn(event1.send, 'hello event1')
        # a zero-second timeout fires as soon as we block in wait()
        eventlet.Timeout(0, ValueError('interrupted'))
        try:
            result = event1.wait()
        except ValueError:
            X = object()
            result = with_timeout(DELAY, event2.wait, timeout_value=X)
            assert result is X, 'Nobody sent anything to event2 yet it received %r' % (result, )
Ejemplo n.º 11
0
    def handle_request(self, request):
        """
        Validate *request* against the OpenAPI spec, run the entrypoint in
        a service worker, and turn the worker result into a response.

        Errors raised during validation or by the worker are converted to
        an error response rather than propagated to the caller.
        """
        log.info('   ###   handle_request: %s' % request)
        request.shallow = False
        # keep a handle for the except path; validation may fail before
        # openapi_request is built
        openapi_request = None
        try:
            context_data = self.server.context_data_from_headers(request)
            openapi_request = OpenAPIRequest(request, self.operation)
            log.info('openapi_request: %r' % openapi_request)
            openapi_request_result = self.spec_manager.validate_request(
                openapi_request)
            args, kwargs = self.get_entrypoint_parameters(
                openapi_request_result)

            # run the entrypoint in a worker and block until it delivers
            # its result through the event
            event = Event()
            self.container.spawn_worker(self,
                                        args,
                                        kwargs,
                                        context_data=context_data,
                                        handle_result=partial(
                                            self.handle_result, event))
            result = event.wait()

            log.info('handle_request: result: %s', result)
            response = self.response_from_result(result, openapi_request)
            log.info('handle_request: %r' % response)

        except Exception as exc:
            # BUG FIX: a stray `raise exc` here re-raised immediately and
            # made the error-response line below unreachable dead code.
            response = self.response_from_exception(exc, openapi_request)
        return response
Ejemplo n.º 12
0
    def test_create_shutdown_race(self):
        """
        Test the race condition where the pipeline shuts down while
        `create` is still executing.
        """
        created = []
        destroyed = []

        counter = itertools.count()
        creating = Event()

        def create():
            # announce that creation started, then yield so the pipeline
            # can shut down while this create is still mid-flight
            creating.send(True)
            eventlet.sleep()
            obj = next(counter)
            created.append(obj)
            return obj

        def destroy(obj):
            destroyed.append(obj)

        with ResourcePipeline(create, destroy).run():
            creating.wait()
            # create() has started but has not finished yet
            assert created == []

        # shutdown must wait for the in-flight create and then destroy
        # the single object it produced
        assert created == destroyed == list(range(1))
Ejemplo n.º 13
0
    def __init__(self,
                 controllercls,
                 connection,
                 exchange,
                 topic,
                 pool=None,
                 poolsize=1000):
        """
        Set up a consumer node: its worker pool, controller instance, and
        the queues (shared topic, node-specific topic, fanout) it serves.
        """
        self.nodeid = UIDGEN()

        # use the caller-supplied pool, or build one of the requested size
        self.procpool = GreenPool(size=poolsize) if pool is None else pool

        self.topic = topic
        self.connection = connection
        self.controller = controllercls()
        self.greenlet = None
        self.messagesem = Semaphore()
        self.consume_ready = Event()

        # consume from the shared topic queue, a queue unique to this
        # node, and the fanout (broadcast) queue
        node_topic = "{}.{}".format(self.topic, self.nodeid)
        self.queues = [
            entities.get_topic_queue(exchange, topic),
            entities.get_topic_queue(exchange, node_topic),
            entities.get_fanout_queue(topic),
        ]
        self._channel = None
        self._consumers = None
Ejemplo n.º 14
0
def test_debounce(redis_):
    # While the debounced call is in flight the redis lock counter is 1
    # and a concurrent call returns None without invoking the function;
    # after release the counter drops to 0 and exactly one call happened.

    lock = Lock(redis_)

    tracker = Mock()
    release = Event()

    @lock.debounce
    def func(*args, **kwargs):
        release.wait()
        tracker(*args, **kwargs)
        return tracker

    def coroutine():
        return func("egg", spam="ham")

    thread = eventlet.spawn(coroutine)
    eventlet.sleep(0.1)

    # the first call holds the lock (keyed on the positional arg)
    assert b"1" == redis_.get("lock:func(egg)")

    # a second concurrent call is debounced away
    another_thread = eventlet.spawn(coroutine)
    assert another_thread.wait() is None

    assert tracker.call_count == 0

    release.send()
    eventlet.sleep(0.1)

    # lock released
    assert b"0" == redis_.get("lock:func(egg)")

    assert tracker == thread.wait()

    # only the first call actually executed
    assert 1 == tracker.call_count
    assert call("egg", spam="ham") == tracker.call_args
Ejemplo n.º 15
0
def test_debounce_with_repeat(redis_):
    # With repeat=True, a locking attempt arriving while the function is
    # running causes it to be executed again with the same arguments once
    # the first run completes.

    lock = Lock(redis_)

    tracker = Mock()
    release = Event()

    @lock.debounce(repeat=True)
    def func(*args, **kwargs):
        tracker(*args, **kwargs)
        release.wait()
        return tracker

    def coroutine():
        return func("egg", spam="ham")

    thread = eventlet.spawn(coroutine)
    eventlet.sleep(0.1)

    # the first call holds the lock
    assert b"1" == redis_.get("lock:func(egg)")

    # simulate locking attempt
    redis_.incr("lock:func(egg)")

    release.send()
    eventlet.sleep(0.1)

    # lock fully released after the repeat run
    assert b"0" == redis_.get("lock:func(egg)")

    assert tracker == thread.wait()

    # must be called twice with the same args
    assert 2 == tracker.call_count
    assert [call("egg", spam="ham"),
            call("egg", spam="ham")] == tracker.call_args_list
Ejemplo n.º 16
0
    def __init__(self, zmq_ctx, ctrl_endpoint):
        """Initialise a stem cell: identity, control sockets and state."""
        self.zmq_ctx = zmq_ctx

        # Unique identity for cell.
        self.id = uuid.uuid4().hex

        # Request / Reply for stem cell querying against ctrl api.
        self.ctrl_endpoint = ctrl_endpoint
        self.ctrl_server = None

        # Pub / Sub for mass communication from controller (ie. shutdown).
        self.ctrl_bus = None
        self.ctrl_bus_endpoint = None

        # Directed / msg by role (load balanced)
        self.ctrl_role_msg = None
        self.ctrl_msg_endpoint = None

        # Role this stem cell will take within the simulation, and the
        # registration data returned by the control server (both set later).
        self.role = None
        self.registration = None

        # Worker threads plus the event used to ask them to stop.
        self.threads = []
        self.stop_evt = Event()
Ejemplo n.º 17
0
    def handle_request(self, request):
        """
        Run the entrypoint for an incoming HTTP request and build the
        response. Expected exceptions and malformed requests become a 400;
        any other failure becomes a 500 with a serialized error payload.
        """
        request.shallow = False
        try:
            context_data = self.server.context_data_from_headers(request)
            args, kwargs = self.get_entrypoint_parameters(request)
            self.check_signature(args, kwargs)

            # spawn a worker and block until it hands back its result
            event = Event()
            on_result = partial(self.handle_result, event)
            self.container.spawn_worker(self,
                                        args,
                                        kwargs,
                                        context_data=context_data,
                                        handle_result=on_result)
            result = event.wait()

            response = response_from_result(result)

        except Exception as exc:
            # expected application errors and bad requests are client
            # errors; everything else is a server error
            client_error = (isinstance(exc, self.expected_exceptions)
                            or isinstance(exc, BadRequest))
            status_code = 400 if client_error else 500
            error_dict = serialize(exc)
            payload = u'Error: {exc_type}: {value}\n'.format(**error_dict)

            response = Response(
                payload,
                status=status_code,
            )
        return response
Ejemplo n.º 18
0
    def __init__(self, func, *args, **kwargs):
        # Wrap *func* in a cooperatively scheduled pseudo-thread: the
        # greenthread starts parked on my_sem and only proceeds once the
        # semaphore is released by a scheduler elsewhere.
        self.my_sem = Semaphore(0)  # This is held by the thread as it runs.
        self.caller_sem = None
        self.dead = False
        started = Event()
        self.id = 5  # placeholder; replaced by the real ident inside go()
        self.ALL.append(self)  # NOTE(review): class-level registry -- confirm

        def go():
            # record the real coroutine identity, then tell the
            # constructor we have started before parking on my_sem
            self.id = eventlet.corolocal.get_ident()
            started.send(True)
            self.my_sem.acquire(blocking=True, timeout=None)
            try:
                func(*args, **kwargs)
            # except Exception as e:
            #     print("Exception in coroutine! %s" % e)
            finally:
                self.dead = True
                self.caller_sem.release()  # Relinquish control back to caller.
                # remove ourselves from the registry by identity
                for i in range(len(self.ALL)):
                    if self.ALL[i].id == self.id:
                        del self.ALL[i]
                        break

        true_spawn(go)
        started.wait()  # do not return until the greenthread is running
Ejemplo n.º 19
0
 def __init__(self, *args, **kwargs):
     # Consumer state: broker connection, started flag, greenthread.
     self.connection = None
     self.started = False
     self.gt = None
     # Fired once all consumers are ready; channels currently in use.
     self.consumers_ready = Event()
     self.consumers_channels = set()
     super(BaseAMQPConsumer, self).__init__(*args, **kwargs)
Ejemplo n.º 20
0
def test_debounce_with_custom_key(redis_):
    # A custom key function derives the lock name from the call arguments
    # (here the upper-cased 'spam' kwarg) instead of the default
    # "func(args)" representation.

    lock = Lock(redis_)

    tracker = Mock()
    release = Event()

    @lock.debounce(key=lambda _, spam: "yo:{}".format(spam.upper()))
    def func(*args, **kwargs):
        tracker(*args, **kwargs)
        release.wait()
        return tracker

    def coroutine():
        return func("egg", spam="ham")

    thread = eventlet.spawn(coroutine)
    eventlet.sleep(0.1)

    # the lock is held under the custom key
    assert b"1" == redis_.get("lock:yo:HAM")

    release.send()
    eventlet.sleep(0.1)

    # released
    assert b"0" == redis_.get("lock:yo:HAM")

    assert tracker == thread.wait()

    assert 1 == tracker.call_count
    assert call("egg", spam="ham") == tracker.call_args
Ejemplo n.º 21
0
 def _poll(self, sockets, timeout=None):
     # Return the subset of *sockets* that becomes readable, blocking at
     # most *timeout* seconds (None = indefinitely). Spawns one helper
     # greenthread per socket and waits on a shared event; helpers are
     # killed and reaped on the way out.
     #  Don't bother trampolining if there's data available immediately.
     #  This also avoids calling into the eventlet hub with a timeout of
     #  zero, which doesn't work right (it still switches the greenthread)
     (r, _, _) = zmq_poll.select(sockets, [], [], timeout=0)
     if r:
         return r
     if timeout == 0:
         return []
     #  Looks like we'll have to block :-(
     ready = []
     threads = []
     res = Event()
     for sock in sockets:
         # _do_poll presumably appends readable sockets to `ready` and
         # fires `res` -- defined elsewhere, confirm.
         threads.append(
             eventlet.spawn(self._do_poll, sock, ready, res, timeout))
     # track this poll so it can be found/interrupted externally
     self.poll_threads.append((res, threads))
     try:
         res.wait()
     finally:
         self.poll_threads.remove((res, threads))
         # kill and reap every helper, swallowing the expected exit
         for t in threads:
             t.kill()
             try:
                 t.wait()
             except GreenletExit:
                 pass
     return ready
Ejemplo n.º 22
0
def test_debounce_failing_on_execution(redis_):
    # Even when the debounced function raises, the redis lock must be
    # released (counter back to 0) and the exception propagated to the
    # caller.

    lock = Lock(redis_)

    tracker = Mock()
    release = Event()

    class Whoops(Exception):
        pass

    tracker.side_effect = Whoops("Yo!")

    @lock.debounce()
    def func(*args, **kwargs):
        release.wait()
        tracker(*args, **kwargs)

    def coroutine():
        with pytest.raises(Whoops):
            func("egg", spam="ham")

    thread = eventlet.spawn(coroutine)
    eventlet.sleep(0.1)

    # lock held while func waits
    assert b"1" == redis_.get("lock:func(egg)")

    release.send()
    eventlet.sleep(0.1)

    # lock released despite the failure
    assert b"0" == redis_.get("lock:func(egg)")

    thread.wait()

    assert 1 == tracker.call_count
    assert call("egg", spam="ham") == tracker.call_args
Ejemplo n.º 23
0
    def __init__(self, zmq_ctx, ctrl_endpoint):
        """Set up control-plane endpoints and load the simulation roles."""
        self.zmq_ctx = zmq_ctx

        # Request / Reply for stem cell querying against ctrl api.
        self.ctrl_endpoint = ctrl_endpoint
        self.ctrl_server = None

        # Pub / Sub for mass communication from controller (ie. shutdown).
        self.ctrl_bus = None
        self.ctrl_bus_endpoint = None

        # Directed / msg by role (load balanced)
        self.ctrl_role_msg = None
        self.role_endpoints = {}

        # Worker threads plus the event used to ask them to stop.
        self.threads = []
        self.stop_evt = Event()

        self.sim = Simulation()
        # the role map ships alongside the workflows package
        role_map_path = os.path.join(
            os.path.dirname(os.path.abspath(workflows.__file__)),
            'role-map.yml')
        with open(role_map_path) as fh:
            self.role_data = yaml.safe_load(fh.read())

        self.sim.load_roles(self.role_data['roles'])
Ejemplo n.º 24
0
def test_fail_fast_imap():
    # fail_fast_imap must surface the first failure immediately and kill
    # the remaining in-flight calls, leaving the pool fully free.
    # A failing call...
    failing_exception = Exception()

    def failing_call():
        raise failing_exception

    # ...and an eventually successful call.
    slow_call_returned = Event()

    def slow_call():
        sleep(5)
        slow_call_returned.send()  # pragma: no cover

    def identity_fn(fn):
        return fn()

    calls = [slow_call, failing_call]

    pool = GreenPool(2)

    # fail_fast_imap fails as soon as the exception is raised
    with pytest.raises(Exception) as raised_exc:
        list(fail_fast_imap(pool, identity_fn, calls))
    assert raised_exc.value == failing_exception

    # The slow call won't go past the sleep as it was killed
    assert not slow_call_returned.ready()
    # both pool slots were released
    assert pool.free() == 2
Ejemplo n.º 25
0
    def rengine_side(self, appid, token, uri):
        """ Handle rengine (client) GET requests """
        if not self.rengine_authorization_ok(appid, token):
            LOGGER.info('Rengine content request authorization fails')
            abort(401, 'Authorization failed')

        # register an event under a fresh request id; a responder elsewhere
        # is expected to fire it with the content for this request
        evt = Event()
        request_id = str(uuid4())
        self.request_id_events[request_id] = evt

        # forward the original request headers verbatim
        headers = [
            "%s: %s" % (header, val)
            for (header, val) in request.headers.items()
        ]
        packet = ScpPacket.make_sfkcontent(uri, request_id, headers)
        try:
            self._send(packet, appid)
        except Exception as e:
            abort(500, str(e))

        LOGGER.debug("uri %s expected" % uri)
        # block until the response arrives or TIMEOUT seconds elapse
        timeout = Timeout(TIMEOUT)
        try:
            resp = evt.wait()
        except Timeout:
            del self.request_id_events[request_id]
            abort(504, 'Gateway Timeout')
        finally:
            timeout.cancel()
        # NOTE(review): on success the request_id_events entry is not
        # removed here -- confirm the responder deletes it.

        LOGGER.debug("uri %s got" % uri)

        return resp
Ejemplo n.º 26
0
    def check_container(self, target, recurse=False):
        """
        Check one container: verify it appears in its account listing and
        enumerate its objects. Returns ``(container_listing, ct_meta)``.
        Results are cached; concurrent checks of the same container wait
        on an Event instead of duplicating the work.
        """
        account = target.account
        container = target.container

        # another greenthread is already checking this container: wait
        # for it, then reuse its cached result
        if (account, container) in self.running:
            self.running[(account, container)].wait()
        if (account, container) in self.list_cache:
            return self.list_cache[(account, container)]
        # NOTE(review): if an exception escapes before send() below,
        # waiters on this Event block forever -- confirm callers cope.
        self.running[(account, container)] = Event()
        print('Checking container "%s"' % target)
        account_listing = self.check_account(target)
        error = False
        if container not in account_listing:
            error = True
            print('  Container %s missing from account listing' % target)

        # page through the object listing until an empty page comes back
        marker = None
        results = []
        ct_meta = dict()
        while True:
            try:
                _, resp = self.container_client.content_list(
                    account=account, reference=container, marker=marker)
            except exc.NotFound as e:
                self.container_not_found += 1
                error = True
                print('  Not found container "%s": %s' % (target, str(e)))
                break
            except Exception as e:
                self.container_exceptions += 1
                error = True
                print('  Exception container "%s": %s' % (target, str(e)))
                break

            if resp['objects']:
                marker = resp['objects'][-1]['name']
                results.extend(resp['objects'])
            else:
                # final (empty) page carries the container metadata
                ct_meta = resp
                ct_meta.pop('objects')
                break

        container_listing = dict()
        for obj in results:
            container_listing[obj['name']] = obj

        self.containers_checked += 1
        # publish the result, wake waiters, then clear the running marker
        self.list_cache[(account, container)] = container_listing, ct_meta
        self.running[(account, container)].send(True)
        del self.running[(account, container)]

        if recurse:
            # schedule a check of every object in this container
            for obj in container_listing:
                t = target.copy()
                t.obj = obj
                self.pool.spawn_n(self.check_obj, t, True)
        if error and self.error_file:
            self.write_error(target)
        return container_listing, ct_meta
Ejemplo n.º 27
0
def test_prefetch_count(rabbit_manager, rabbit_config, container_factory):
    # With max_workers=1 and unshared queue consumers, each consumer can
    # hold only one un-ACKed message: Consumer1 never ACKs (it blocks on
    # consumer_continue), so after its first message all remaining ones
    # must be delivered to Consumer2.
    class NonShared(QueueConsumer):
        # unique sharing key per instance so the consumers do not share
        # a connection/prefetch window
        @property
        def sharing_key(self):
            return uuid.uuid4()

    messages = []

    class SelfishConsumer1(Consumer):
        queue_consumer = NonShared()

        def handle_message(self, body, message):
            # blocks without ACKing; consumer_continue is bound further
            # down, before any message can arrive (late binding)
            consumer_continue.wait()
            super(SelfishConsumer1, self).handle_message(body, message)

    class SelfishConsumer2(Consumer):
        queue_consumer = NonShared()

        def handle_message(self, body, message):
            messages.append(body)
            super(SelfishConsumer2, self).handle_message(body, message)

    class Service(object):
        name = "service"

        @SelfishConsumer1.decorator(queue=ham_queue)
        @SelfishConsumer2.decorator(queue=ham_queue)
        def handle(self, payload):
            pass

    rabbit_config['max_workers'] = 1
    container = container_factory(Service, rabbit_config)
    container.start()

    consumer_continue = Event()

    # the two handlers would ordinarily take alternating messages, but are
    # limited to holding one un-ACKed message. Since Handler1 never ACKs, it
    # only ever gets one message, and Handler2 gets the others.

    def wait_for_expected(worker_ctx, res, exc_info):
        return {'m3', 'm4', 'm5'}.issubset(set(messages))

    with entrypoint_waiter(container, 'handle', callback=wait_for_expected):
        vhost = rabbit_config['vhost']
        properties = {'content_type': 'application/data'}
        for message in ('m1', 'm2', 'm3', 'm4', 'm5'):
            rabbit_manager.publish(vhost,
                                   'spam',
                                   '',
                                   message,
                                   properties=properties)

    # we don't know which handler picked up the first message,
    # but all the others should've been handled by Handler2
    assert messages[-3:] == ['m3', 'm4', 'm5']

    # release the waiting consumer
    consumer_continue.send(None)
Ejemplo n.º 28
0
    def check_obj(self, target, recurse=False):
        """
        Check one object: verify it appears in its container listing and
        locate its chunks. Returns ``(chunk_listing, meta)``. Results are
        cached; concurrent checks of the same object wait on an Event
        instead of duplicating the work.
        """
        account = target.account
        container = target.container
        obj = target.obj

        # another greenthread is already checking this object: wait for
        # it, then reuse its cached result
        if (account, container, obj) in self.running:
            self.running[(account, container, obj)].wait()
        if (account, container, obj) in self.list_cache:
            return self.list_cache[(account, container, obj)]
        # NOTE(review): if an exception escapes before send() below,
        # waiters on this Event block forever -- confirm callers cope.
        self.running[(account, container, obj)] = Event()
        print('Checking object "%s"' % target)
        container_listing, ct_meta = self.check_container(target)
        error = False
        if obj not in container_listing:
            print('  Object %s missing from container listing' % target)
            error = True
            # checksum = None
        else:
            # TODO check checksum match
            # checksum = container_listing[obj]['hash']
            pass

        results = []
        meta = dict()
        try:
            meta, results = self.container_client.content_locate(
                account=account, reference=container, path=obj,
                properties=False)
        except exc.NotFound as e:
            self.object_not_found += 1
            error = True
            print('  Not found object "%s": %s' % (target, str(e)))
        except Exception as e:
            self.object_exceptions += 1
            error = True
            print(' Exception object "%s": %s' % (target, str(e)))

        chunk_listing = dict()
        for chunk in results:
            chunk_listing[chunk['url']] = chunk

        # Skip the check if we could not locate the object
        if meta:
            self.check_obj_policy(target.copy(), meta, results)
            self.list_cache[(account, container, obj)] = (chunk_listing, meta)

        self.objects_checked += 1
        # wake waiters, then clear the running marker
        self.running[(account, container, obj)].send(True)
        del self.running[(account, container, obj)]

        if recurse:
            # schedule a check of every chunk of this object
            for chunk in chunk_listing:
                t = target.copy()
                t.chunk = chunk
                self.pool.spawn_n(self.check_chunk, t)
        if error and self.error_file:
            self.write_error(target)
        return chunk_listing, meta
Ejemplo n.º 29
0
 def handle_message(self, socket_id, data, context_data):
     # Validate the payload against the entrypoint signature, then run a
     # worker and block until it delivers its result through the event.
     self.check_signature((socket_id, ), data)
     event = Event()
     on_result = partial(self.handle_result, event)
     self.container.spawn_worker(self, (socket_id, ),
                                 data,
                                 context_data=context_data,
                                 handle_result=on_result)
     return event.wait()
Ejemplo n.º 30
0
 def __exit__(self, type, value, traceback):
     if self.expected == 0:
         with Timeout(.5, None):
             self.event.wait()
         assert(len(self.received) == 0)
         return
     with Timeout(1):
         self.event.wait()
         self.event = Event()