Example #1
    def test_waiting_for_completion_generating_new_tick(self):
        # ticks every reactor iteration
        source = tick_provider.TickInterval("test_interval", 0, "a_pipeline_name")

        collected_ticks = defer.DeferredQueue()

        @defer.inlineCallbacks
        def collector(baton):
            yield util.wait(0.001)  # spend at least 1 ms "processing"
            collected_ticks.put(baton)

        fake_dependencies = util.AttributeDict(
            wait_for_resource=lambda key: defer.succeed(util.AttributeDict(process=collector))
        )
        source.dependencies = fake_dependencies
        source.running = True

        d = source.produce_ticks()

        yield collected_ticks.get()  # wait for the first tick to be provided

        # wait two reactor iterations:
        yield util.wait(0)  # one for the interval to continue processing..
        yield util.wait(0)  # .. and one to let the interval complete its wait(0)
        source.stopService()
        yield d  # wait for the interval to stop producing ticks

        # in this situation, one more baton will be created
        self.assertEquals(len(collected_ticks.pending), 1)
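
Note: almost every example here leans on util.wait(0) to give up control for exactly one reactor iteration. A minimal sketch of what such a helper could look like on top of Twisted (the real piped.util.wait may differ, for instance in how it handles cancellation):

    from twisted.internet import defer, reactor

    def wait(seconds, result=None):
        # Return a Deferred that fires with `result` after `seconds` seconds.
        # wait(0) fires on the next reactor iteration. Cancelling the Deferred
        # cancels the pending timed call, which several examples rely on.
        delayed_call = None

        def canceller(_):
            delayed_call.cancel()

        d = defer.Deferred(canceller)
        delayed_call = reactor.callLater(seconds, d.callback, result)
        return d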
Example #2
    def test_restart_without_duplicates_during_processing(self):
        # ticks every reactor iteration
        interval = tick_provider.TickInterval("test_interval", 0, "a_pipeline_name")

        ticks = defer.DeferredQueue()

        @defer.inlineCallbacks
        def collector(baton):
            ticks.put(baton)
            yield util.wait(0)

        interval.dependencies = util.AttributeDict(
            wait_for_resource=lambda key: defer.succeed(util.AttributeDict(process=collector))
        )
        interval.running = True

        # this immediately produces a tick
        d = interval.produce_ticks()

        yield ticks.get()

        # the producer is now waiting for the processing to complete
        interval.stopService()
        # calling startService before the processing is complete should avoid the restart altogether
        interval.startService()

        # waiting 2 reactor iterations should produce a single tick
        yield util.wait(0)  # one for the first processing to complete..
        yield util.wait(0)  # .. and one to let the interval complete its wait(0)
        self.assertEquals(len(ticks.pending), 1)

        interval.stopService()
        # wait for the processing to complete
        yield d
Example #3
    def test_start_stop_ticking(self):
        # ticks every reactor iteration
        source = tick_provider.TickInterval("test_interval", 0, "a_pipeline_name", auto_start=False)

        collected_ticks = defer.DeferredQueue()

        def collector(baton):
            collected_ticks.put(baton)

        fake_dependencies = util.AttributeDict(
            wait_for_resource=lambda key: defer.succeed(util.AttributeDict(process=collector))
        )
        source.dependencies = fake_dependencies

        # simply starting the service should not produce any ticks
        source.startService()
        yield util.wait(0)
        self.assertEquals(collected_ticks.pending, list())

        # but explicitly telling it to start ticking should produce a tick every reactor iteration:
        source.start_ticking()
        yield collected_ticks.get()  # wait for the first tick to be provided

        self.assertEquals(collected_ticks.pending, list())

        # after we ask it to stop ticking, no more ticks should be produced:
        source.stop_ticking()
        yield util.wait(0)
        self.assertEquals(collected_ticks.pending, list())
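
The fake dependencies in these tests are built with util.AttributeDict. A plausible minimal version, assuming it is just a dict that exposes its keys as attributes (the real piped.util class may do more):

    class AttributeDict(dict):
        # A dict whose keys can also be read as attributes.
        def __getattr__(self, name):
            try:
                return self[name]
            except KeyError:
                raise AttributeError(name)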
Example #4
    def run(self):
        try:
            while self.running:
                try:
                    logger.info('PostgresListener attempting to connect to [{0}]'.format(self.profile_name))
                    yield self._run_exclusively(self._connect)
                    self.is_connected = True

                    yield self._run_exclusively(self._connection.runOperation, "SET application_name TO '%s-listener'" % self.profile_name)
                    for sql_string in self.checkout:
                        yield self._run_exclusively(self._connection.runOperation, sql_string)

                    self._connection.addNotifyObserver(self._notify)
                    logger.info('PostgresListener connected [{0}]'.format(self.profile_name))
                    self.on_connection_established(self)

                    while self.running:
                        try:
                            if self.is_waiting_for_txid_min:
                                yield self.cancellable(util.wait(self.txid_poll_interval))
                                yield self._check_txid_threshold()
                            else:
                                yield self.currently_pinging(self.cancellable(util.wait(self.ping_interval)))
                                yield self.currently_pinging(self._ping())
                        except defer.CancelledError:
                            pass

                    # We've been cancelled and are no longer running.
                    if self.is_connected:
                        self.is_connected = False
                        self.on_connection_lost(failure.Failure())

                except psycopg2.Error:
                    logger.exception('Database failure. Traceback follows')
                    failure_ = failure.Failure()
                    if self.is_connected:
                        self._cleanup()
                        self.is_connected = False
                        self.on_connection_lost(failure_)
                    else:
                        self.on_connection_failed(failure_)

                    yield self.cancellable(util.wait(self.retry_interval))

                except defer.CancelledError:
                    if self.is_connected:
                        self.is_connected = False
                        self.on_connection_lost(failure.Failure())

        except defer.CancelledError:
            pass
        except Exception:
            logger.exception('Unhandled exception in PostgresListener.run')
        finally:
            self._maybe_disconnect()
Example #5
    def test_connect_event_called(self):
        """ Check that the right events are called when connections
        are established and lost. """
        engine_manager = self.make_engine_manager()

        engine_manager.engine = mocked_engine = self.mocker.engine
        mocked_connection = self.mocker.connection
        mocked_engine.connect.return_value = mocked_connection

        engine_manager.startService()

        event = yield self.events.get()
        self.assertEquals(event, ("connected", mocked_engine))

        engine_manager.stopService()
        yield util.wait(0)

        self.failIf(self.events.size > 0)

        self.assertEquals(
            self.mocker.mock_calls,
            [
                mock.call.engine.connect(),
                mock.call.connection.execute("SELECT 'ping'"),
                mock.call.connection.close(),
                mock.call.engine.dispose(),
            ],
        )
Example #6
    def test_sockets_of_the_same_name_are_reused(self):
        self.runtime_environment.configuration_manager.set('zmq.queues', dict(socketname=dict(type='PULL', binds=['inproc://socketname'])))
        socket_provider = providers.ZMQSocketProvider()
        socket_provider.configure(self.runtime_environment)

        # Request the same socket twice.
        resource_dependency = dependencies.ResourceDependency(provider='zmq.socket.socketname')
        same_resource_dependency = dependencies.ResourceDependency(provider='zmq.socket.socketname')

        # The two resource dependencies collect the provided sockets via their on_resource_ready events.
        sockets = []
        resource_dependency.on_resource_ready += lambda resource: sockets.append(resource)
        same_resource_dependency.on_resource_ready += lambda resource: sockets.append(resource)

        # we add them without dependencies, as they will be resolved by the resource manager
        self.dependency_manager.add_dependency(resource_dependency)
        self.dependency_manager.add_dependency(same_resource_dependency)

        # This provides the sockets.
        self.dependency_manager.resolve_initial_states()
        yield util.wait(0)

        # We should then have two sockets
        self.assertEquals(len(sockets), 2)
        # And they must be the same object.
        self.assertTrue(sockets[0] is sockets[1])
Example #7
    def test_restart_without_duplicates_during_processing(self):
        puller = util.PullFromQueueAndProcessInPipeline(self.queue, 'a_pipeline_name')

        @defer.inlineCallbacks
        def collector(baton):
            self.processed.put(baton)
            yield util.wait(0)

        self.collector = collector

        puller.configure(self.runtime_environment)

        puller.startService()

        self.queue.put('1')
        self.queue.put('2')
        self.queue.put('3')

        baton = yield self.processed.get()
        self.assertEquals(baton, '1')
        # the puller is now waiting on the collector's sleep

        puller.stopService()
        puller.startService()

        yield util.wait(0)

        self.assertEquals(self.processed.pending, ['2'])

        puller.stopService()
Example #8
    def test_restart_without_duplicates_during_sleeping(self):
        # ticks every reactor iteration
        interval = tick_provider.TickInterval("test_interval", 0, "a_pipeline_name")

        ticks = defer.DeferredQueue()

        def collector(baton):
            ticks.put(baton)

        interval.dependencies = util.AttributeDict(
            wait_for_resource=lambda key: defer.succeed(util.AttributeDict(process=collector))
        )
        interval.running = True

        # this immediately produces a tick
        d = interval.produce_ticks()

        # now the producer is sleeping
        interval.stopService()
        # calling startService immediately produces a new tick
        interval.startService()

        # waiting 1 reactor iteration should produce a total of 3 ticks
        yield util.wait(0)
        self.assertEquals(len(ticks.pending), 3)

        interval.stopService()
        # wait for the processing to complete
        yield d
Example #9
    def test_tickintervals_created(self):
        provider = tick_provider.TickProvider()
        runtime_environment = processing.RuntimeEnvironment()

        dependency_manager = runtime_environment.dependency_manager
        dependency_manager.configure(runtime_environment)

        configuration_manager = runtime_environment.configuration_manager
        configuration_manager.set(
            "ticks.interval.my_interval",
            dict(interval=0, pipeline="pipeline_name"),  # creates a baton every reactor iteration
        )

        ticks = defer.DeferredQueue()

        resource_manager = runtime_environment.resource_manager
        resource_manager.register("pipeline.pipeline_name", StubPipelineProvider(ticks.put))

        provider.configure(runtime_environment)
        provider.startService()

        dependency_manager.resolve_initial_states()

        yield ticks.get()

        provider.stopService()

        # give the tick-interval 1 reactor iteration to shut down
        yield util.wait(0)
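
StubPipelineProvider is not shown in these excerpts. A hypothetical stand-in that matches how it is used here, providing a fake pipeline whose process() forwards each baton to the given callback (add_consumer as the resource-provider hook is an assumption about piped's resource manager):

    from twisted.internet import defer
    from piped import util

    class StubPipelineProvider(object):
        # Provides a fake pipeline that hands every baton to `callback`.
        def __init__(self, callback):
            self.callback = callback

        def add_consumer(self, resource_dependency):
            fake_pipeline = util.AttributeDict(process=self._process)
            resource_dependency.on_resource_ready(fake_pipeline)

        def _process(self, baton):
            self.callback(baton)
            return defer.succeed([baton])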
Example #10
    def _keep_waiting_for_advisory_lock(self, lock_name):
        lock_id = self._get_hash_for_lock(lock_name)

        # TODO: Get this into the run-loop above?
        try:
            while self.running:
                try:
                    has_lock = yield self._try_lock(lock_name)
                except psycopg2.Error:
                    # It'll be properly dealt with in run()
                    logger.exception('psycopg-error when waiting for lock [{0}]'.format(lock_name))

                else:
                    if has_lock:
                        self._held_advisory_locks.add(lock_name)
                        for d in self._deferreds_for_advisory_lock.pop(lock_name, []):
                            d.callback(lock_name)
                        return

                yield util.wait(self.retry_interval)

        except Exception:
            logger.exception('unhandled exception in _keep_waiting_for_advisory_lock')
            f = failure.Failure()
            self._lost_lock(lock_name)
Example #11
    def test_restarting_an_interval(self):
        self.intervals['test_interval'] = dict(interval=0, auto_start=False, pipeline='test_pipeline')

        self.provider.configure(self.runtime_environment)
        self.start_processor.configure(self.runtime_environment)
        self.stop_processor.configure(self.runtime_environment)
        self.dependency_manager.resolve_initial_states()

        # since the interval isn't started, no ticks should have been created
        self.assertEquals(self.ticks.pending, list())

        # running the start processor should start the ticking:
        self.start_processor.process(dict())
        yield self.ticks.get()
        self.assertEquals(self.ticks.pending, list())

        # running the stop processor should stop the ticking:
        yield self.stop_processor.process(dict())
        yield util.wait(0)
        self.assertEquals(self.ticks.pending, list())

        # running the start processor again should restart the ticking:
        self.start_processor.process(dict())
        yield self.ticks.get()
        self.assertEquals(self.ticks.pending, list())
Example #12
    def produce_ticks(self):
        while self.running:
            try:
                self._waiting_for_pipeline = self.dependencies.wait_for_resource('pipeline')
                pipeline = yield self._waiting_for_pipeline

                baton = self._create_baton()

                self._waiting_for_pipeline = pipeline.process(baton)
                yield self._waiting_for_pipeline
            except Exception:
                log.error()
            finally:
                self._waiting_for_pipeline = None

            # we might have stopped running while waiting for the pipeline to finish processing
            if not self.running:
                return

            # the processing might have taken some time, so subtract the time taken from the interval before waiting.
            # the sleep time is clamped at a minimum of 0 so that at least one reactor iteration passes between
            # every produced baton.
            processing_duration = time.time() - self._previous_tick
            sleep_time = max(self.interval - processing_duration, 0)

            try:
                self._sleeping = util.wait(sleep_time)
                yield self._sleeping
            except defer.CancelledError:
                return
            finally:
                self._sleeping = None
Example #13
        def statustest_status_updates_received(self):
            sub = subprocess.Popen(args=['piped', '-nc', 'twitter.yaml']+self.oauth_override, stdout=subprocess.PIPE)

            # wait four seconds
            yield util.wait(4)

            sub.terminate()

            status_updates = [line for line in sub.stdout if 'status-update -> ' in line]
            self.assertNotEquals(status_updates, list())
Example #14
    def run(self):
        while self.running:
            try:
                # Connect and ping. The engine is a pool, so we're not
                # really establishing new connections all the time.
                logger.info('Attempting to connect to [{0}]'.format(self.profile_name))
                yield self.cancellable(threads.deferToThread(self._test_connectivity, self.engine))
                logger.info('Connected to [{0}]'.format(self.profile_name))

                if not self.is_connected:
                    self.is_connected = True
                    self.on_connection_established(self.engine)

                while self.running:
                    yield self.cancellable(util.wait(self.ping_interval))
                    yield self.cancellable(threads.deferToThread(self._test_connectivity, self.engine))

            except defer.CancelledError:
                if self.is_connected:
                    self.is_connected = False
                    self.on_connection_lost(failure.Failure())

                continue # Engine is disposed in finally.

            except Exception:
                logger.exception('Error with engine [{0}]'.format(self.profile_name))
                failure_ = failure.Failure()

                if self.is_connected:
                    logger.error('Lost connection to [{0}]'.format(self.profile_name))
                    self.is_connected = False
                    self.on_connection_lost(failure_)
                else:
                    self.on_connection_failed(failure_)

            finally:
                self.is_connected = False
                self.engine.dispose()

            yield self.cancellable(util.wait(self.retry_interval))
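
Both run() loops above wrap their waits in self.cancellable(...), which is not shown. A sketch of the pattern, assuming it simply remembers the in-flight Deferred so that stopService() can cancel it, producing the defer.CancelledError the loops catch:

    from twisted.application import service

    class CancellableService(service.Service):

        _in_flight = None

        def cancellable(self, d):
            # remember the Deferred so stopService() can cancel it
            self._in_flight = d

            def forget(result):
                self._in_flight = None
                return result

            return d.addBoth(forget)

        def stopService(self):
            service.Service.stopService(self)
            if self._in_flight is not None:
                self._in_flight.cancel()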
Example #15
    def _test_connection_until_working(self):
        """ Keep trying to connect. Calls itself every
        `wait_between_reconnect_tries` seconds. """
        while self.running:
            try:
                log.debug('Trying to connect to database "%s"' % self.database_name)
                yield threads.deferToThread(self.test_connectivity)
                yield threads.deferToThread(self.reconnected)
                break

            except sa.exc.SQLAlchemyError as e:
                reactor.callFromThread(self.on_connection_failed, failure.Failure())
                log.error('Could not connect to database "%s": %s' % (self.database_name, e))
            yield util.wait(self.reconnect_wait)
Example #16
    def test_handling_connect_error(self):
        """ If an error occurs on connect, another attempt should be made. """
        engine_manager = self.make_engine_manager()
        engine_manager.engine = mocked_engine = self.mocker.engine
        mocked_connection = self.mocker.connection
        fake_error = FakeError()
        mocked_engine.connect.side_effect = util.get_callable_with_different_side_effects(
            [fake_error, mocked_connection]
        )

        engine_manager.startService()
        event = yield self.events.get()
        self.assertEquals(event[0], "failed")
        self.assertTrue(event[1].value is fake_error)

        event = yield self.events.get()
        self.assertEquals(event, ("connected", mocked_engine))

        yield util.wait(0)
        engine_manager.stopService()
        yield util.wait(0)

        self.failIf(self.events.size > 0)
        self.assertEquals(
            self.mocker.mock_calls,
            [
                mock.call.engine.connect(),
                mock.call.engine.dispose(),  # The first connect fails, so the engine is disposed.
                mock.call.engine.connect(),
                mock.call.connection(),
                mock.call.connection().execute("SELECT 'ping'"),
                mock.call.connection().close(),
                mock.call.engine.dispose(),  # and disposed twice when the service stops.
                mock.call.engine.dispose(),
            ],
        )
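
util.get_callable_with_different_side_effects scripts the mock one call at a time. A minimal sketch, assuming each entry is raised if it is an exception, invoked if it is callable (which is why the connection mock shows up as mock.call.connection() above), and returned otherwise; the real piped.util helper may differ:

    def get_callable_with_different_side_effects(side_effects):
        effects = iter(side_effects)

        def side_effect(*args, **kwargs):
            effect = next(effects)
            if isinstance(effect, Exception):
                raise effect
            if callable(effect):
                return effect(*args, **kwargs)
            return effect

        return side_effect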
Example #17
    def test_processing_in_parallel(self):
        pipeline = FakeSlowPipeline()
        pipeline_resource = dependencies.InstanceDependency(pipeline)
        pipeline_resource.is_ready = True

        processor = self.make_and_configure_processor(parallel=True)
        processor.pipeline_dependency = pipeline_resource

        baton = dict(iterable=range(3))
        processor.process(baton)

        # As opposed to the serial test, we now expect all batons to have been processed.
        self.assertEqual(pipeline.batons, [])
        yield util.wait(0)
        self.assertEqual(pipeline.batons, range(3))
Example #18
    def test_stopping_an_interval(self):
        self.intervals['test_interval'] = dict(interval=0, pipeline='test_pipeline')

        self.provider.configure(self.runtime_environment)
        self.stop_processor.configure(self.runtime_environment)
        self.dependency_manager.resolve_initial_states()

        # the interval autostarts and produces a tick
        self.assertEquals(len(self.ticks.pending), 1)
        yield self.ticks.get()

        # running the stop processor should stop the ticking:
        yield self.stop_processor.process(dict())
        yield util.wait(0)
        self.assertEquals(self.ticks.pending, list())
Example #19
    def test_debug_handler_reaping(self):
        # reap all debuggers every reactor iteration:
        site_config = dict(routing=dict())
        web_site = web_provider.WebSite('site_name', site_config)

        debug_handler = web_provider.WebDebugHandler(web_site, reap_interval=0, max_inactive_time=0)
        debug_handler.setServiceParent(self.service)

        self.service.startService()

        f = failure.Failure(Exception())
        debug_handler.register_failure(f)

        self.assertEquals(len(debug_handler.children), 1)
        yield util.wait(0) # give the reaper one reactor iteration to reap the debugger
        self.assertEquals(len(debug_handler.children), 0)
Example #20
    def test_binding_before_connecting(self):
        queues = dict(
            binder=dict(type='PUSH', binds=['inproc://socketname']),
            connecter=dict(type='PULL', connects=['inproc://socketname']),
            )

        self.runtime_environment.configuration_manager.set('zmq.queues', queues)

        socket_provider = providers.ZMQSocketProvider()
        # Set up the mocks.
        context = self.mocker.mock()
        socket_provider.context_factory = lambda: context
        binder_socket = self.mocker.mock()
        connecter_socket = self.mocker.mock()

        # First the sockets are requested.
        context.socket(zmq.PUSH)
        self.mocker.result(binder_socket)
        context.socket(zmq.PULL)
        self.mocker.result(connecter_socket)

        with self.mocker.order():
            # Then they are bound/connected
            binder_socket.bind('inproc://socketname')
            connecter_socket.connect('inproc://socketname')

        self.mocker.replay()

        socket_provider.configure(self.runtime_environment)

        # Request the sockets
        connecter_dependency = dependencies.ResourceDependency(provider='zmq.socket.connecter')
        binder_dependency = dependencies.ResourceDependency(provider='zmq.socket.binder')

        socket_by_name = dict()
        connecter_dependency.on_resource_ready += lambda resource: socket_by_name.__setitem__('connecter', resource)
        binder_dependency.on_resource_ready += lambda resource: socket_by_name.__setitem__('binder', resource)

        self.dependency_manager.add_dependency(connecter_dependency)
        self.dependency_manager.add_dependency(binder_dependency)

        self.dependency_manager.resolve_initial_states()

        yield util.wait(0)

        self.mocker.verify()
        self.assertEquals(socket_by_name, dict(binder=binder_socket, connecter=connecter_socket))
Example #21
    def test_handling_ping_error_then_reconnecting(self):
        engine_manager = self.make_engine_manager()
        engine_manager.engine = mocked_engine = self.mocker.engine
        mocked_connection = self.mocker.connection
        fake_error = FakeError()
        mocked_engine.connect.side_effect = lambda: mocked_connection
        mocked_connection.execute.side_effect = util.get_callable_with_different_side_effects(
            [
                None,
                fake_error,
                lambda *a, **kw: reactor.callFromThread(reactor.callLater, 0, engine_manager.stopService),
            ]
        )

        engine_manager.startService()

        self.assertEquals((yield self.events.get()), ("connected", mocked_engine))

        event = yield self.events.get()
        self.assertEquals(event[0], "lost")
        self.assertTrue(event[1].value is fake_error)

        self.assertEquals((yield self.events.get()), ("connected", mocked_engine))

        self.failIf(self.events.size > 0)
        yield util.wait(0)

        self.assertEquals(
            self.mocker.mock_calls,
            [
                # First ping succeeds.
                mock.call.engine.connect(),
                mock.call.connection.execute("SELECT 'ping'"),
                mock.call.connection.close(),
                # This one fails. The engine is disposed as a result.
                mock.call.engine.connect(),
                mock.call.connection.execute("SELECT 'ping'"),
                mock.call.connection.close(),
                mock.call.engine.dispose(),
                # After this ping we stop the service.
                mock.call.engine.connect(),
                mock.call.connection.execute("SELECT 'ping'"),
                mock.call.connection.close(),
                mock.call.engine.dispose(),
            ],
        )
Example #22
    def test_processing_is_serial(self):
        pipeline = FakeSlowPipeline()
        pipeline_resource = dependencies.InstanceDependency(pipeline)
        pipeline_resource.is_ready = True

        processor = self.make_and_configure_processor()
        processor.pipeline_dependency = pipeline_resource

        baton = dict(iterable=range(3))
        processor.process(baton)

        # FakeSlowPipeline waits one reactor-iteration before returning.
        self.assertEqual(pipeline.batons, [])
        for i in range(3):
            # Thus, we expect one additional baton to be processed for
            # every reactor iteration when the processing is serial.
            yield util.wait(0)
            self.assertEqual(pipeline.batons, range(i + 1))
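
FakeSlowPipeline is referenced by this test and the parallel variant above but not shown. A hypothetical version consistent with what both tests assert, finishing each baton one reactor iteration after process() is called:

    from twisted.internet import defer
    from piped import util

    class FakeSlowPipeline(object):

        def __init__(self):
            self.batons = list()

        @defer.inlineCallbacks
        def process(self, baton):
            yield util.wait(0)  # "slow": finish one reactor iteration later
            self.batons.append(baton)
            defer.returnValue([baton])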
Example #23
    def test_succeeding_when_the_first_one_is_done_serially(self):
        # Make a pipeline whose deferreds we can access.
        deferreds = list()
        batons = list()
        class FakePipeline:

            def process(self, baton):
                d = defer.Deferred()
                batons.append(baton)
                deferreds.append(d)
                return d

        pipeline_resource = dependencies.InstanceDependency(FakePipeline())
        pipeline_resource.is_ready = True

        processor = self.make_and_configure_processor(done_on_first=True)
        processor.pipeline_dependency = pipeline_resource

        baton = dict(iterable=range(3))

        d = processor.process(baton)

        # Only one item should have been attempted so far.
        self.assertEqual(batons, [0])

        # Fail it:
        deferreds[0].errback(failure.Failure(FakeError('forced error')))

        # Try the second item.
        yield util.wait(0)
        self.assertEqual(batons, [0, 1])
        # ... and make it a success
        deferreds[1].callback(['fake result'])

        yield d

        # That should have resulted in not attempting the last item.
        self.assertEqual(batons, [0, 1])
        self.assertEqual(baton, dict(iterable=range(3), results='fake result'))
Example #24
    def process(self, baton):
        message = util.dict_get_path(baton, self.message_path)

        tries = 0

        while True:
            try:
                self.dependencies.socket.send(str(message), flags=zmq.NOBLOCK)
                defer.returnValue(baton)
            except zmq.ZMQError as ze:
                if ze.errno != zmq.EAGAIN:
                    raise

            tries += 1

            if self.retries is not None and tries >= self.retries:
                break

            if self.retries is None:
                continue

            yield util.wait(self.retry_wait)
Example #25
    def test_connect_to_nonexisting_host_times_out(self):
        """ Ensure that connection eventually times out. """
        # timeout is expected to be an integer, and 0 means no
        # timeout, so we'll have to spend a second if we want to keep
        # things simple.
        timeout = 1
        wrong_config = dict(user='******', password='******', host='127.0.0.2',
                            database_name='if_this_exists_it_is_my_own_fault_some_tests_fail',
                            timeout=timeout)

        self.disable_logging()
        self.mocker.replay()

        # Make a new provider, since the default one from setUp is correctly configured.
        self.metadata_provider = db.DatabaseMetadataManager(wrong_config)

        try:
            self.metadata_provider.connect()
            self.fail("The connect should fail")
        except sa.exc.SQLAlchemyError:
            pass
        # wait one reactor iteration, so the callFromThread error from the connection-attempt gets executed
        yield util.wait(0)
Example #26
    def test_system_events_triggered(self):
        batons = list()
        stub_pipeline_provider = StubPipelineProvider(batons.append)
        self.runtime_environment.resource_manager.register('pipeline.test_pipeline', stub_pipeline_provider)
        
        provider = system_events_provider.SystemEventsProvider()

        configuration_manager = self.runtime_environment.configuration_manager

        for event_type in 'startup', 'shutdown':
            configuration_manager.set('system-events.%s.name' % event_type, 'test_pipeline')

        provider.configure(self.runtime_environment)

        self.runtime_environment.dependency_manager.resolve_initial_states()

        # wait a reactor iteration so the startup event is triggered
        yield util.wait(0)
        self.assertEquals(batons, [dict(event_type='startup')])
        batons[:] = list()

        # trigger the shutdown event, which should give our pipeline a baton
        reactor.fireSystemEvent('shutdown')
        self.assertEquals(batons, [dict(event_type='shutdown')])
Example #27
    def keep_connecting(self):
        self.retries = 0
        self.delay = self.initial_delay

        while self.running:
            try:
                yield self.client.connect(self)
            except error.ConnectError:
                pass
            else:
                self._connecting = None
                break

            self.retries += 1
            if self.max_retries is not None and (self.retries > self.max_retries):
                log.info("Abandoning %s after %d retries." %(self.client, self.retries))
                break

            self.delay = min(self.delay * self.factor, self.max_delay)
            if self.jitter:
                self.delay = random.normalvariate(self.delay, self.delay * self.jitter)

            log.info("%s will retry in %d seconds" % (self.client, self.delay,))
            yield util.wait(self.delay)
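
The retry delay above grows geometrically up to max_delay, optionally smeared with gaussian jitter. A standalone illustration of the schedule (the parameter values are made up for the example):

    import random

    initial_delay, factor, max_delay, jitter = 1.0, 2.0, 30.0, 0.1

    delay = initial_delay
    for retry in range(1, 7):
        delay = min(delay * factor, max_delay)
        if jitter:
            delay = random.normalvariate(delay, delay * jitter)
        print('retry %d in about %.1f seconds' % (retry, delay))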
Example #28
    def test_connection_events_called(self):
        """ Check that the right events are called when connections
        are established and lost. """

        queue = defer.DeferredQueue()

        self.metadata_provider.on_connection_established += lambda *a: queue.put('established')
        self.metadata_provider.on_connection_lost += lambda *a: queue.put('lost')

        # Connect and check that the event was called.
        self.metadata_provider.connect()
        event_invoked = yield queue.get()
        self.assertEquals(event_invoked, 'established')

        # It should be idempotent.
        self.metadata_provider.connect()
        yield util.wait(0) # To give any unwanted event a chance to put something in the queue.
        self.failIf(queue.size > 0, "When already connected, an additional connect() should not invoke the established-event")

        # Disconnect and check that the event was called.
        self.metadata_provider.disconnect()

        event_invoked = yield queue.get()
        self.assertEquals(event_invoked, 'lost')
Example #29
    @defer.inlineCallbacks
    def collector(baton):
        ticks.put(baton)
        yield util.wait(0)
Example #30
    @defer.inlineCallbacks
    def collector(baton):
        yield util.wait(0.001)  # spend at least 1 ms "processing"
        collected_ticks.put(baton)