def setUp(self):
    super(TestWorkerTaskExecutor, self).setUp()
    self.task = test_utils.DummyTask()
    self.task_uuid = 'task-uuid'
    self.task_args = {'a': 'a'}
    self.task_result = 'task-result'
    self.task_failures = {}
    self.timeout = 60
    self.broker_url = 'broker-url'
    self.executor_uuid = 'executor-uuid'
    self.executor_exchange = 'executor-exchange'
    self.executor_topic = 'test-topic1'
    self.proxy_started_event = threading_utils.Event()

    # patch classes
    self.proxy_mock, self.proxy_inst_mock = self.patchClass(
        executor.proxy, 'Proxy')
    self.request_mock, self.request_inst_mock = self.patchClass(
        executor.pr, 'Request', autospec=False)

    # other mocking
    self.proxy_inst_mock.start.side_effect = self._fake_proxy_start
    self.proxy_inst_mock.stop.side_effect = self._fake_proxy_stop
    self.request_inst_mock.uuid = self.task_uuid
    self.request_inst_mock.expired = False
    self.request_inst_mock.task_cls = self.task.name
    self.wait_for_any_mock = self.patch(
        'taskflow.engines.worker_based.executor.async_utils.wait_for_any')
    self.message_mock = mock.MagicMock(name='message')
    self.message_mock.properties = {'correlation_id': self.task_uuid,
                                    'type': pr.RESPONSE}

def test_wait_arrival(self):
    ev = threading_utils.Event()
    jobs = []

    def poster(wait_post=0.2):
        if not ev.wait(test_utils.WAIT_TIMEOUT):
            raise RuntimeError("Waiter did not appear ready"
                               " in %s seconds" % test_utils.WAIT_TIMEOUT)
        time.sleep(wait_post)
        self.board.post('test', p_utils.temporary_log_book())

    def waiter():
        ev.set()
        it = self.board.wait()
        jobs.extend(it)

    with connect_close(self.board):
        t1 = threading.Thread(target=poster)
        t1.daemon = True
        t1.start()
        t2 = threading.Thread(target=waiter)
        t2.daemon = True
        t2.start()
        for t in (t1, t2):
            t.join()

    self.assertEqual(1, len(jobs))

def test_double_reader_writer(self):
    lock = lock_utils.ReaderWriterLock()
    activated = collections.deque()
    active = threading_utils.Event()

    def double_reader():
        with lock.read_lock():
            active.set()
            # Wait for the writer to show up before acquiring the second
            # (reentrant) read lock.
            while not lock.has_pending_writers:
                time.sleep(0.001)
            with lock.read_lock():
                activated.append(lock.owner)

    def happy_writer():
        with lock.write_lock():
            activated.append(lock.owner)

    reader = threading_utils.daemon_thread(double_reader)
    reader.start()
    self.assertTrue(active.wait(test_utils.WAIT_TIMEOUT))

    writer = threading_utils.daemon_thread(happy_writer)
    writer.start()

    reader.join()
    writer.join()
    self.assertEqual(2, len(activated))
    self.assertEqual(['r', 'w'], list(activated))

def __init__(self, topic, exchange_name, type_handlers, on_wait=None,
             **kwargs):
    self._topic = topic
    self._exchange_name = exchange_name
    self._on_wait = on_wait
    self._running = threading_utils.Event()
    self._dispatcher = dispatcher.TypeDispatcher(type_handlers)
    self._dispatcher.add_requeue_filter(
        # NOTE(skudriashev): Process all incoming messages only if proxy is
        # running, otherwise requeue them.
        lambda data, message: not self.is_running)
    url = kwargs.get('url')
    transport = kwargs.get('transport')
    transport_opts = kwargs.get('transport_options')
    self._drain_events_timeout = DRAIN_EVENTS_PERIOD
    if transport == 'memory' and transport_opts:
        polling_interval = transport_opts.get('polling_interval')
        if polling_interval is not None:
            self._drain_events_timeout = polling_interval

    # create connection
    self._conn = kombu.Connection(url, transport=transport,
                                  transport_options=transport_opts)

    # create exchange
    self._exchange = kombu.Exchange(name=self._exchange_name,
                                    durable=False, auto_delete=True)

def test_run(self):
    components = self.make_components()
    components.conductor.connect()
    consumed_event = threading_utils.Event()

    def on_consume(state, details):
        consumed_event.set()

    components.board.notifier.register(base.REMOVAL, on_consume)
    with close_many(components.conductor, components.client):
        t = threading_utils.daemon_thread(components.conductor.run)
        t.start()
        lb, fd = pu.temporary_flow_detail(components.persistence)
        engines.save_factory_details(fd, test_factory, [False], {},
                                     backend=components.persistence)
        components.board.post('poke', lb,
                              details={'flow_uuid': fd.uuid})
        self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
        self.assertTrue(components.conductor.stop(test_utils.WAIT_TIMEOUT))
        self.assertFalse(components.conductor.dispatching)

    persistence = components.persistence
    with contextlib.closing(persistence.get_connection()) as conn:
        lb = conn.get_logbook(lb.uuid)
        fd = lb.find(fd.uuid)
    self.assertIsNotNone(fd)
    self.assertEqual(st.SUCCESS, fd.state)

def test_notify(self):
    barrier = threading_utils.Event()

    on_notify = mock.MagicMock()
    on_notify.side_effect = lambda *args, **kwargs: barrier.set()

    handlers = {pr.NOTIFY: on_notify}
    p = proxy.Proxy(TEST_TOPIC, TEST_EXCHANGE, handlers,
                    transport='memory',
                    transport_options={
                        'polling_interval': POLLING_INTERVAL,
                    })

    t = threading_utils.daemon_thread(p.start)
    t.start()
    p.wait()
    p.publish(pr.Notify(), TEST_TOPIC)

    self.assertTrue(barrier.wait(test_utils.WAIT_TIMEOUT))
    p.stop()
    t.join()

    self.assertTrue(on_notify.called)
    on_notify.assert_called_with({}, mock.ANY)

def test_response(self):
    barrier = threading_utils.Event()

    on_response = mock.MagicMock()
    on_response.side_effect = lambda *args, **kwargs: barrier.set()

    handlers = {pr.RESPONSE: on_response}
    p = proxy.Proxy(TEST_TOPIC, TEST_EXCHANGE, handlers,
                    transport='memory',
                    transport_options={
                        'polling_interval': POLLING_INTERVAL,
                    })

    t = threading_utils.daemon_thread(p.start)
    t.start()
    p.wait()
    resp = pr.Response(pr.RUNNING)
    p.publish(resp, TEST_TOPIC)

    # A successful wait() already implies the event is set, so no
    # separate is_set() assertion is needed.
    self.assertTrue(barrier.wait(test_utils.WAIT_TIMEOUT))
    p.stop()
    t.join()

    self.assertTrue(on_response.called)
    on_response.assert_called_with(resp.to_dict(), mock.ANY)

def test_alive_thread(self):
    death = tu.Event()
    t = tu.daemon_thread(_spinner, death)
    self.assertFalse(tu.is_alive(t))
    t.start()
    self.assertTrue(tu.is_alive(t))
    death.set()
    t.join()
    self.assertFalse(tu.is_alive(t))

def __init__(self, dispatch_periodicity=None):
    if dispatch_periodicity is None:
        dispatch_periodicity = self._SPIN_PERIODICITY
    if dispatch_periodicity <= 0:
        raise ValueError("Provided dispatch periodicity must be greater"
                         " than zero and not '%s'" % dispatch_periodicity)
    self._targets = {}
    self._dead = threading_utils.Event()
    self._dispatch_periodicity = dispatch_periodicity
    self._stop_when_empty = False

def __init__(self, name, wait_for, wait_states, **kwargs):
    super(WaitForOneFromTask, self).__init__(name, **kwargs)
    if isinstance(wait_for, six.string_types):
        self.wait_for = [wait_for]
    else:
        self.wait_for = wait_for
    if isinstance(wait_states, six.string_types):
        self.wait_states = [wait_states]
    else:
        self.wait_states = wait_states
    self.event = threading_utils.Event()

@contextlib.contextmanager
def flush(client, path=None):
    # This uses the linearity guarantee of zookeeper (and associated
    # libraries) to create a temporary node, wait until a watcher notifies
    # it's created, then yield back for more work, and then at the end of
    # that work delete the created node. This ensures that the operations
    # done in the yield of this context manager will be applied and all
    # watchers will have fired before this context manager exits.
    if not path:
        path = FLUSH_PATH_TPL % uuidutils.generate_uuid()
    created = threading_utils.Event()
    deleted = threading_utils.Event()

    def on_created(data, stat):
        if stat is not None:
            created.set()
            return False  # cause this watcher to cease to exist

    def on_deleted(data, stat):
        if stat is None:
            deleted.set()
            return False  # cause this watcher to cease to exist

    watchers.DataWatch(client, path, func=on_created)
    client.create(path, makepath=True)
    if not created.wait(test_utils.WAIT_TIMEOUT):
        raise RuntimeError("Could not receive creation of %s in"
                           " the allotted timeout of %s seconds"
                           % (path, test_utils.WAIT_TIMEOUT))
    try:
        yield
    finally:
        watchers.DataWatch(client, path, func=on_deleted)
        client.delete(path, recursive=True)
        if not deleted.wait(test_utils.WAIT_TIMEOUT):
            raise RuntimeError("Could not receive deletion of %s in"
                               " the allotted timeout of %s seconds"
                               % (path, test_utils.WAIT_TIMEOUT))

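# A minimal usage sketch for flush(), assuming a local zookeeper and a
# kazoo client (the 'hosts' value and example node path are hypothetical);
# everything done inside the with-block is guaranteed to have been applied,
# and its watchers fired, before the block exits:
from kazoo.client import KazooClient

client = KazooClient(hosts='127.0.0.1:2181')
client.start()
with flush(client):
    client.create("/taskflow/example-node", makepath=True)
client.stop()
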
def __init__(self, callables, tombstone=None):
    if tombstone is None:
        self._tombstone = tu.Event()
    else:
        self._tombstone = tombstone
    self._callables = []
    for i, cb in enumerate(callables, 1):
        if not six.callable(cb):
            raise ValueError("Periodic callback %s must be callable" % i)
        missing_attrs = _check_attrs(cb)
        if missing_attrs:
            raise ValueError("Periodic callback %s missing required"
                             " attributes %s" % (i, missing_attrs))
        if cb._periodic:
            self._callables.append(cb)
    self._immediates, self._schedule = _build(self._callables)

def __init__(self, topic, exchange, type_handlers=None, on_wait=None,
             url=None, transport=None, transport_options=None,
             retry_options=None):
    self._topic = topic
    self._exchange_name = exchange
    self._on_wait = on_wait
    self._running = threading_utils.Event()
    self._dispatcher = dispatcher.TypeDispatcher(
        # NOTE(skudriashev): Process all incoming messages only if proxy is
        # running, otherwise requeue them.
        requeue_filters=[lambda data, message: not self.is_running],
        type_handlers=type_handlers)
    ensure_options = self.DEFAULT_RETRY_OPTIONS.copy()
    if retry_options is not None:
        # Override the defaults with any user provided values...
        for k in set(six.iterkeys(ensure_options)):
            if k in retry_options:
                # Ensure that the right type is passed in...
                val = retry_options[k]
                if k in self._RETRY_INT_OPTS:
                    tmp_val = int(val)
                else:
                    tmp_val = float(val)
                if tmp_val < 0:
                    raise ValueError("Expected value greater or equal to"
                                     " zero for 'retry_options' %s; got"
                                     " %s instead" % (k, val))
                ensure_options[k] = tmp_val
    self._ensure_options = ensure_options
    self._drain_events_timeout = DRAIN_EVENTS_PERIOD
    if transport == 'memory' and transport_options:
        polling_interval = transport_options.get('polling_interval')
        if polling_interval is not None:
            self._drain_events_timeout = polling_interval

    # create connection
    self._conn = kombu.Connection(url, transport=transport,
                                  transport_options=transport_options)

    # create exchange
    self._exchange = kombu.Exchange(name=self._exchange_name,
                                    durable=False, auto_delete=True)

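# A construction sketch, assuming DEFAULT_RETRY_OPTIONS keys mirror kombu's
# connection retry policy (e.g. 'max_retries', 'interval_start'); integer
# options are coerced via int(), the rest via float(), and any negative
# value raises ValueError. Keys not present in the defaults are ignored.
p = proxy.Proxy('my-topic', 'my-exchange',
                transport='memory',
                transport_options={'polling_interval': 0.01},
                retry_options={'max_retries': 3,       # int option
                               'interval_start': 1.0})  # float option
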
def _post_claim_job(self, job_name, book=None, details=None):
    arrived = threading_utils.Event()

    def set_on_children(children):
        if children:
            arrived.set()

    self.client.ChildrenWatch("/taskflow", set_on_children)
    job = self.board.post(job_name, book=book, details=details)

    # Make sure it arrived and claimed before doing further work...
    self.assertTrue(arrived.wait(test_utils.WAIT_TIMEOUT))
    arrived.clear()
    self.board.claim(job, self.board.name)
    self.assertTrue(arrived.wait(test_utils.WAIT_TIMEOUT))
    self.assertEqual(states.CLAIMED, job.state)

    return job

def test_periodic_single(self):
    barrier = latch.Latch(5)
    capture = []
    tombstone = tu.Event()

    @periodic.periodic(0.01)
    def callee():
        barrier.countdown()
        if barrier.needed == 0:
            tombstone.set()
        capture.append(1)

    w = periodic.PeriodicWorker([callee], tombstone=tombstone)
    t = tu.daemon_thread(target=w.start)
    t.start()
    t.join()

    self.assertEqual(0, barrier.needed)
    self.assertEqual(5, sum(capture))
    self.assertTrue(tombstone.is_set())

def __init__(self, name, jobboard,
             persistence=None, engine=None,
             engine_options=None, wait_timeout=None):
    super(BlockingConductor, self).__init__(
        name, jobboard, persistence=persistence,
        engine=engine, engine_options=engine_options)
    if wait_timeout is None:
        wait_timeout = WAIT_TIMEOUT
    if isinstance(wait_timeout, (int, float) + six.string_types):
        self._wait_timeout = tt.Timeout(float(wait_timeout))
    elif isinstance(wait_timeout, tt.Timeout):
        self._wait_timeout = wait_timeout
    else:
        raise ValueError("Invalid timeout literal: %s" % (wait_timeout,))
    self._dead = threading_utils.Event()

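# A minimal sketch of the two accepted wait_timeout forms (the 'board'
# jobboard object is hypothetical and assumed to be constructed elsewhere):
def _make_conductors(board):
    # A numeric (or string) literal gets wrapped into tt.Timeout(float(...)).
    c1 = BlockingConductor('my-conductor', board, wait_timeout=0.5)
    # A pre-built tt.Timeout instance is used as-is.
    c2 = BlockingConductor('my-conductor', board,
                           wait_timeout=tt.Timeout(0.5))
    return c1, c2
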
def test_event_wait(self):
    e = tu.Event()
    e.set()
    self.assertTrue(e.wait())

def test_daemon_thread(self):
    death = tu.Event()
    t = tu.daemon_thread(_spinner, death)
    self.assertTrue(t.daemon)

def __init__(self, timeout):
    if timeout < 0:
        raise ValueError("Timeout must be >= 0 and not %s" % (timeout,))
    self._timeout = timeout
    self._event = threading_utils.Event()

def setUp(self):
    super(TestThreadBundle, self).setUp()
    self.bundle = tu.ThreadBundle()
    self.death = tu.Event()
    self.addCleanup(self.bundle.stop)
    self.addCleanup(self.death.set)