예제 #1
0
    def test_wait_arrival(self):
        """A waiter on the board should see a job posted after it starts."""
        ready = threading.Event()
        arrived = []

        def poster(wait_post=0.2):
            # Do not post anything until the waiter signals it is up.
            if not ready.wait(test_utils.WAIT_TIMEOUT):
                raise RuntimeError("Waiter did not appear ready"
                                   " in %s seconds" % test_utils.WAIT_TIMEOUT)
            time.sleep(wait_post)
            self.board.post('test', test_utils.test_factory)

        def waiter():
            ready.set()
            arrived.extend(self.board.wait(timeout=test_utils.WAIT_TIMEOUT))

        with connect_close(self.board):
            runners = [threading_utils.daemon_thread(poster),
                       threading_utils.daemon_thread(waiter)]
            for runner in runners:
                runner.start()
            for runner in runners:
                runner.join()

        # Exactly one job should have arrived at the waiter.
        self.assertEqual(1, len(arrived))
예제 #2
0
def main():
    """Runs producers and workers against a fake client, then verifies cleanup.

    Returns 0 when every expected unit was consumed and the job board is
    empty afterwards, otherwise 1 (suitable as a process exit code).
    """
    if six.PY3:
        # TODO(harlowja): Hack to make eventlet work right, remove when the
        # following is fixed: https://github.com/eventlet/eventlet/issues/230
        from zag.utils import eventlet_utils as _eu  # noqa
        try:
            import eventlet as _eventlet  # noqa
        except ImportError:
            pass
    with contextlib.closing(fake_client.FakeClient()) as c:
        created = []
        for i in compat_range(0, PRODUCERS):
            p = threading_utils.daemon_thread(producer, i + 1, c)
            created.append(p)
            p.start()
        consumed = collections.deque()
        for i in compat_range(0, WORKERS):
            w = threading_utils.daemon_thread(worker, i + 1, c, consumed)
            created.append(w)
            w.start()
        # Wait for every producer/worker thread to finish before verifying.
        while created:
            t = created.pop()
            t.join()
        # At the end there should be nothing leftover, let's verify that.
        board = backends.fetch('verifier', SHARED_CONF.copy(), client=c)
        board.connect()
        with contextlib.closing(board):
            if board.job_count != 0 or len(consumed) != EXPECTED_UNITS:
                return 1
            return 0
예제 #3
0
    def test_class_based_flow_factories(self):
        """A job posted with a class-based flow factory runs to SUCCESS."""
        components = self.make_components()
        components.conductor.connect()
        consumed_event = threading.Event()

        def on_consume(state, details):
            consumed_event.set()

        store = {'x': True, 'y': False, 'z': None}

        # Removal from the board implies the conductor consumed the job.
        components.board.notifier.register(base.REMOVAL, on_consume)
        with close_many(components.conductor, components.client):
            t = threading_utils.daemon_thread(components.conductor.run)
            t.start()
            job = components.board.post('poke', ClassBasedFactory, store=store)

            lb = job.book
            fd = job.load_flow_detail()
            self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
            components.conductor.stop()
            self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
            self.assertFalse(components.conductor.dispatching)

        # Re-read the flow detail from persistence to check its final state.
        persistence = components.persistence
        with contextlib.closing(persistence.get_connection()) as conn:
            lb = conn.get_logbook(lb.uuid)
            fd = lb.find(fd.uuid)
        self.assertIsNotNone(fd)
        self.assertEqual(st.SUCCESS, fd.state)
예제 #4
0
    def test_job_compilation_errors(self):
        """A job whose flow factory fails to compile is abandoned and trashed.

        Posts a job built from ``compiler_failure_factory`` and verifies the
        conductor emits both 'job_abandoned' and 'job_trashed' events.
        """
        components = self.make_components()
        components.conductor.connect()
        job_trashed_event = threading.Event()
        job_abandoned_event = threading.Event()

        def on_job_trashed(event, details):
            if event == 'job_trashed':
                job_trashed_event.set()

        def on_job_abandoned(event, details):
            if event == 'job_abandoned':
                job_abandoned_event.set()

        components.conductor.notifier.register("job_trashed", on_job_trashed)
        components.conductor.notifier.register("job_abandoned",
                                               on_job_abandoned)
        with close_many(components.conductor, components.client):
            t = threading_utils.daemon_thread(components.conductor.run)
            t.start()
            components.board.post('poke', compiler_failure_factory)
            # Assert directly on the wait() result (True only if the event
            # was set before the timeout); this matches the idiom used by
            # the sibling tests and gives a clear failure on timeout.
            self.assertTrue(job_abandoned_event.wait(test_utils.WAIT_TIMEOUT))
            self.assertTrue(job_trashed_event.wait(test_utils.WAIT_TIMEOUT))
            components.conductor.stop()
예제 #5
0
    def test_valid_listener_factories(self):
        """A listener created via a listener factory receives notifications.

        Fixes swapped arguments in the final assertion: the original
        ``assertGreaterEqual(1, mock_receiver.call_count)`` asserted that the
        receiver was called *at most* once; the intent is *at least* once
        (``assertGreaterEqual(a, b)`` checks ``a >= b``).
        """
        def logging_listener_factory(job, engine):
            return timing_listener.DurationListener(engine)

        components = self.make_components(
            listener_factories=[logging_listener_factory])
        components.conductor.connect()
        consumed_event = threading.Event()

        def on_consume(state, details):
            consumed_event.set()

        store = {'x': True, 'y': False, 'z': None}

        components.board.notifier.register(base.REMOVAL, on_consume)
        # Patch the duration listener's receiver so we can observe that it
        # was actually invoked while the job ran.
        mock_method = 'zag.listeners.timing.DurationListener._receiver'
        with mock.patch(mock_method) as mock_receiver:
            with close_many(components.conductor, components.client):
                t = threading_utils.daemon_thread(components.conductor.run)
                t.start()
                components.board.post('poke', test_store_factory, store=store)
                self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
                components.conductor.stop()
                self.assertTrue(
                    components.conductor.wait(test_utils.WAIT_TIMEOUT))
                self.assertFalse(components.conductor.dispatching)

            # The listener should have been called at least once.
            self.assertGreaterEqual(mock_receiver.call_count, 1)
예제 #6
0
    def test_stop(self):
        """A proxy started in a background thread shuts down via stop()."""
        self.conn_inst_mock.drain_events.side_effect = socket.timeout

        prx = self.proxy(reset_master_mock=True)
        # Not started yet, so it should not report as running.
        self.assertFalse(prx.is_running)

        # Run the proxy in a background thread and wait until it is up.
        runner = threading_utils.daemon_thread(prx.start)
        runner.start()
        prx.wait()
        self.assertTrue(prx.is_running)

        # Request shutdown, then wait for the runner thread to exit.
        prx.stop()
        runner.join()
        self.assertFalse(prx.is_running)
예제 #7
0
    def test_start_stop(self):
        """Each bound thread fires every lifecycle callback exactly once."""
        seen = collections.deque()

        def _recorder(tag, also_kill=False):
            # Builds a callback that records its tag (and optionally tells
            # the spinner threads to die, as before_join must do).
            def _callback(t):
                seen.append(tag)
                if also_kill:
                    self.death.set()
            return _callback

        for _ in range(self.thread_count):
            self.bundle.bind(lambda: tu.daemon_thread(_spinner, self.death),
                             before_join=_recorder('bj', also_kill=True),
                             after_join=_recorder('aj'),
                             before_start=_recorder('bs'),
                             after_start=_recorder('as'))
        self.assertEqual(self.thread_count, self.bundle.start())
        self.assertEqual(self.thread_count, len(self.bundle))
        self.assertEqual(self.thread_count, self.bundle.stop())
        # Every callback kind must have fired once per bound thread.
        for tag in ('as', 'bs', 'bj', 'aj'):
            self.assertEqual(self.thread_count,
                             len([e for e in seen if e == tag]))
        self.assertEqual(0, self.bundle.stop())
        self.assertTrue(self.death.is_set())
예제 #8
0
    def test_notify(self):
        """A published Notify message reaches the registered handler."""
        received = threading.Event()

        on_notify = mock.MagicMock()
        on_notify.side_effect = lambda *args, **kwargs: received.set()

        prx = proxy.Proxy(
            TEST_TOPIC, TEST_EXCHANGE,
            {pr.NOTIFY: dispatcher.Handler(on_notify)},
            transport='memory',
            transport_options={'polling_interval': POLLING_INTERVAL})

        runner = threading_utils.daemon_thread(prx.start)
        runner.start()
        prx.wait()
        prx.publish(pr.Notify(), TEST_TOPIC)

        self.assertTrue(received.wait(test_utils.WAIT_TIMEOUT))
        prx.stop()
        runner.join()

        self.assertTrue(on_notify.called)
        on_notify.assert_called_with({}, mock.ANY)
예제 #9
0
    def test_response(self):
        """A published Response message reaches the registered handler."""
        received = threading.Event()

        on_response = mock.MagicMock()
        on_response.side_effect = lambda *args, **kwargs: received.set()

        prx = proxy.Proxy(
            TEST_TOPIC, TEST_EXCHANGE,
            {pr.RESPONSE: dispatcher.Handler(on_response)},
            transport='memory',
            transport_options={'polling_interval': POLLING_INTERVAL})

        runner = threading_utils.daemon_thread(prx.start)
        runner.start()
        prx.wait()
        resp = pr.Response(pr.RUNNING)
        prx.publish(resp, TEST_TOPIC)

        self.assertTrue(received.wait(test_utils.WAIT_TIMEOUT))
        self.assertTrue(received.is_set())
        prx.stop()
        runner.join()

        self.assertTrue(on_response.called)
        on_response.assert_called_with(resp.to_dict(), mock.ANY)
예제 #10
0
    def test_delayed_job(self):
        """A job posted with a long delay should not be claimed (yet)."""
        components = self.make_components()
        components.conductor.connect()
        claimed_event = threading.Event()

        def on_claimed(event, details):
            if event == 'job_claimed':
                claimed_event.set()

        flow_store = {'x': True, 'y': False, 'z': None}

        components.conductor.notifier.register("job_claimed", on_claimed)
        with close_many(components.conductor, components.client):
            t = threading_utils.daemon_thread(components.conductor.run)
            t.start()
            # A 180 second delay is far longer than this test will wait.
            job = components.board.post_delayed(180,
                                                'poke',
                                                test_store_factory,
                                                store=flow_store)
            lb = job.book
            fd = job.load_flow_detail()

            # The claim event must not fire inside this short wait window.
            self.assertFalse(claimed_event.wait(2))
            components.conductor.stop()
            self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
            self.assertFalse(components.conductor.dispatching)

        persistence = components.persistence
        with contextlib.closing(persistence.get_connection()) as conn:
            lb = conn.get_logbook(lb.uuid)
            fd = lb.find(fd.uuid)
        self.assertIsNotNone(fd)
        # Never run, so the flow detail should have no recorded state.
        self.assertIsNone(fd.state)
예제 #11
0
 def test_bundle_length(self):
     """len(bundle) tracks the number of bound (and started) threads."""
     self.assertEqual(0, len(self.bundle))
     for idx in range(self.thread_count):
         self.bundle.bind(lambda: tu.daemon_thread(_spinner, self.death))
         # Each start() call only starts the newly bound thread.
         self.assertEqual(1, self.bundle.start())
         self.assertEqual(idx + 1, len(self.bundle))
     self.death.set()
     self.assertEqual(self.thread_count, self.bundle.stop())
     self.assertEqual(self.thread_count, len(self.bundle))
예제 #12
0
 def test_alive_thread(self):
     """is_alive() reflects the thread's lifecycle transitions."""
     death = threading.Event()
     spin_thread = tu.daemon_thread(_spinner, death)
     # Not alive before start()...
     self.assertFalse(tu.is_alive(spin_thread))
     spin_thread.start()
     self.assertTrue(tu.is_alive(spin_thread))
     # ...and not alive again once asked to stop and joined.
     death.set()
     spin_thread.join()
     self.assertFalse(tu.is_alive(spin_thread))
예제 #13
0
 def start(self):
     """Starts the executor and its background dispatcher thread.

     Raises:
         RuntimeError: if the previous worker thread is still alive
             (``stop()`` must be called before starting/restarting).
     """
     if threading_utils.is_alive(self._worker):
         raise RuntimeError("Worker thread must be stopped via stop()"
                            " before starting/restarting")
     super(ParallelProcessTaskExecutor, self).start()
     self._dispatcher.setup()
     # NOTE(review): asyncore.loop blocks, so it runs in a daemon thread
     # over the dispatcher's channel map (asyncore is deprecated/removed
     # in newer python versions -- confirm before upgrading).
     self._worker = threading_utils.daemon_thread(
         asyncore.loop, map=self._dispatcher.map,
         timeout=self._wait_timeout)
     self._worker.start()
예제 #14
0
 def test_bind_invalid(self):
     """bind() rejects non-callable factories and non-callable callbacks."""
     self.assertRaises(ValueError, self.bundle.bind, 1)
     factory = lambda: tu.daemon_thread(_spinner, self.death)
     for cb_name in ('after_start', 'before_start',
                     'before_join', 'after_join'):
         # A non-callable value for any callback slot must be rejected.
         self.assertRaises(ValueError, self.bundle.bind,
                           factory, **{cb_name: 1})
예제 #15
0
 def test_run_empty(self):
     """A conductor with no jobs can be started and stopped cleanly."""
     components = self.make_components()
     components.conductor.connect()
     with close_many(components.conductor, components.client):
         runner = threading_utils.daemon_thread(components.conductor.run)
         runner.start()
         components.conductor.stop()
         self.assertTrue(
             components.conductor.wait(test_utils.WAIT_TIMEOUT))
         self.assertFalse(components.conductor.dispatching)
         runner.join()
예제 #16
0
def generate_reviewer(client, saver, name=NAME):
    """Creates a review producer thread with the given name prefix."""
    real_name = "%s_reviewer" % name
    no_more = threading.Event()
    jb = boards.fetch(real_name,
                      JOBBOARD_CONF,
                      client=client,
                      persistence=saver)

    def make_save_book(saver, review_id):
        """Creates + persists a logbook/flow-detail pair for one review."""
        # Record what we want to happen (sometime in the future).
        book = models.LogBook("book_%s" % review_id)
        detail = models.FlowDetail("flow_%s" % review_id,
                                   uuidutils.generate_uuid())
        book.add(detail)
        # Associate the factory method we want to be called (in the future)
        # with the book, so that the conductor will be able to call into
        # that factory to retrieve the workflow objects that represent the
        # work.
        #
        # These args and kwargs *can* be used to save any specific parameters
        # into the factory when it is being called to create the workflow
        # objects (typically used to tell a factory how to create a unique
        # workflow that represents this review).
        factory_args = ()
        factory_kwargs = {}
        engines.save_factory_details(detail, create_review_workflow,
                                     factory_args, factory_kwargs)
        with contextlib.closing(saver.get_connection()) as conn:
            conn.save_logbook(book)
            return book

    def run():
        """Periodically publishes 'fake' reviews to analyze."""
        jb.connect()
        review_generator = review_iter()
        with contextlib.closing(jb):
            while not no_more.is_set():
                review = six.next(review_generator)
                details = {
                    'store': {
                        'review': review,
                    },
                }
                job_name = "%s_%s" % (real_name, review['id'])
                print("Posting review '%s'" % review['id'])
                jb.post(job_name,
                        book=make_save_book(saver, review['id']),
                        details=details)
                time.sleep(REVIEW_CREATION_DELAY)

    # Return the unstarted thread, and a callback that can be used to
    # shut down that thread (to avoid running forever).
    return (threading_utils.daemon_thread(target=run), no_more.set)
예제 #17
0
def create_fractal():
    """Computes a mandelbrot fractal using simulated remote workers.

    Spins up WORKERS worker threads, runs the calculation through a
    worker-based engine, then stops the workers and writes the image
    (to ``sys.argv[1]`` if given, otherwise to a default destination).
    """
    logging.basicConfig(level=logging.ERROR)

    # Setup our transport configuration and merge it into the worker and
    # engine configuration so that both of those use it correctly.
    shared_conf = dict(BASE_SHARED_CONF)
    shared_conf.update({
        'transport': 'memory',
        'transport_options': {
            'polling_interval': 0.1,
        },
    })

    if len(sys.argv) >= 2:
        output_filename = sys.argv[1]
    else:
        output_filename = None

    worker_conf = dict(WORKER_CONF)
    worker_conf.update(shared_conf)
    engine_conf = dict(ENGINE_CONF)
    engine_conf.update(shared_conf)
    workers = []
    worker_topics = []

    print('Calculating your mandelbrot fractal of size %sx%s.' % IMAGE_SIZE)
    try:
        # Create a set of workers to simulate actual remote workers.
        print('Running %s workers.' % (WORKERS))
        for i in compat_range(0, WORKERS):
            worker_conf['topic'] = 'calculator_%s' % (i + 1)
            worker_topics.append(worker_conf['topic'])
            w = worker.Worker(**worker_conf)
            runner = threading_utils.daemon_thread(w.run)
            runner.start()
            # Block until this worker reports it is up and consuming.
            w.wait()
            workers.append((runner, w.stop))

        # Now use those workers to do something.
        engine_conf['topics'] = worker_topics
        results = calculate(engine_conf)
        print('Execution finished.')
    finally:
        # And cleanup.
        print('Stopping workers.')
        while workers:
            r, stopper = workers.pop()
            stopper()
            r.join()
    print("Writing image...")
    write_image(results, output_filename=output_filename)
예제 #18
0
 def _fetch_server(self, task_classes):
     """Builds a single-threaded worker server plus its (unstarted) runner."""
     server = worker_server.Server(
         TEST_TOPIC,
         TEST_EXCHANGE,
         futurist.ThreadPoolExecutor(max_workers=1),
         [endpoint.Endpoint(cls) for cls in task_classes],
         transport='memory',
         transport_options={
             'polling_interval': POLLING_INTERVAL,
         })
     runner = threading_utils.daemon_thread(server.start)
     return (server, runner)
예제 #19
0
File: types.py  Project: pombredanne/zag
    def __init__(self, coordination_url, member_id, watch_groups):
        """Initializes the finder.

        :param coordination_url: tooz coordination backend url
        :param member_id: identity of this member in the coordinator
        :param watch_groups: groups to watch (presumably for worker
            membership changes -- confirm against the periodic callbacks)
        """
        super(ToozWorkerFinder, self).__init__()
        self._coordinator = coordination.get_coordinator(
            coordination_url, member_id)
        self._watch_groups = watch_groups
        # NOTE(review): populated elsewhere; appears to map group name to
        # group state/membership -- confirm against the rest of the class.
        self._active_groups = {}
        self._available_workers = 0

        # Background helper threads that drive the periodic worker (if any
        # periodic callbacks were found on this instance).
        self._helpers = tu.ThreadBundle()
        p_worker = periodics.PeriodicWorker.create([self])
        if p_worker:
            self._helpers.bind(lambda: tu.daemon_thread(p_worker.start),
                               before_join=lambda t: p_worker.stop(),
                               after_join=lambda t: p_worker.reset(),
                               before_start=lambda t: p_worker.reset())
        self._activator = misc.Activator([self._coordinator, self._helpers])
예제 #20
0
def generate_conductor(client, saver, name=NAME):
    """Creates a conductor thread with the given name prefix."""
    conductor_name = "%s_conductor" % name
    board = boards.fetch(name, JOBBOARD_CONF, client=client, persistence=saver)
    conductor = conductors.fetch("blocking",
                                 conductor_name,
                                 board,
                                 engine='parallel',
                                 wait_timeout=SCAN_DELAY)

    def run():
        # Connect to the board, run until stopped, then close the board.
        board.connect()
        with contextlib.closing(board):
            conductor.run()

    # Hand back the (unstarted) runner thread plus a callback that can be
    # used to stop the conductor (so it does not run forever).
    return (threading_utils.daemon_thread(target=run), conductor.stop)
예제 #21
0
File: types.py  Project: pombredanne/zag
 def __init__(self, coordinator_url, member_id, join_groups, worker_topic,
              worker_endpoints):
     """Initializes the advertiser.

     :param coordinator_url: tooz coordination backend url
     :param member_id: identity of this member in the coordinator
     :param join_groups: groups this member will join (stored frozen)
     :param worker_topic: topic advertised as part of the capabilities
     :param worker_endpoints: endpoints whose names are advertised as the
         runnable tasks
     """
     self._coordinator = coordination.get_coordinator(
         coordinator_url, member_id)
     self._join_groups = frozenset(join_groups)
     self._helpers = tu.ThreadBundle()
     self._member_id = member_id
     # Capabilities payload advertised to the joined groups.
     self._capabilities = {
         'topic': worker_topic,
         'tasks': [e.name for e in worker_endpoints],
     }
     # Background helper threads that drive the periodic worker (if any
     # periodic callbacks were found on this instance).
     p_worker = periodics.PeriodicWorker.create([self])
     if p_worker:
         self._helpers.bind(lambda: tu.daemon_thread(p_worker.start),
                            before_join=lambda t: p_worker.stop(),
                            after_join=lambda t: p_worker.reset(),
                            before_start=lambda t: p_worker.reset())
     self._activator = misc.Activator([self._coordinator, self._helpers])
예제 #22
0
    def test_start_stop_order(self):
        """Callbacks fire in bind order on start, reverse order on stop."""
        start_events = collections.deque()
        death_events = collections.deque()

        def before_start(i, t):
            start_events.append((i, 'bs'))

        def before_join(i, t):
            death_events.append((i, 'bj'))
            self.death.set()

        def after_start(i, t):
            start_events.append((i, 'as'))

        def after_join(i, t):
            death_events.append((i, 'aj'))

        # NOTE: functools.partial binds the loop index i by value, so each
        # callback records which binding it belongs to.
        for i in range(0, self.thread_count):
            self.bundle.bind(lambda: tu.daemon_thread(_spinner, self.death),
                             before_join=functools.partial(before_join, i),
                             after_join=functools.partial(after_join, i),
                             before_start=functools.partial(before_start, i),
                             after_start=functools.partial(after_start, i))
        self.assertEqual(self.thread_count, self.bundle.start())
        self.assertEqual(self.thread_count, len(self.bundle))
        self.assertEqual(self.thread_count, self.bundle.stop())
        self.assertEqual(0, self.bundle.stop())
        self.assertTrue(self.death.is_set())

        # Threads should have started in the order they were bound...
        expected_start_events = []
        for i in range(0, self.thread_count):
            expected_start_events.extend([
                (i, 'bs'), (i, 'as'),
            ])
        self.assertEqual(expected_start_events, list(start_events))

        # ...and should have been joined in the reverse of that order.
        expected_death_events = []
        j = self.thread_count - 1
        for _i in range(0, self.thread_count):
            expected_death_events.extend([
                (j, 'bj'), (j, 'aj'),
            ])
            j -= 1
        self.assertEqual(expected_death_events, list(death_events))
예제 #23
0
    def test_fail_run(self):
        """A job whose flow fails is still consumed and ends up REVERTED."""
        components = self.make_components()
        components.conductor.connect()
        consumed_event = threading.Event()
        job_consumed_event = threading.Event()
        job_abandoned_event = threading.Event()

        def on_consume(state, details):
            consumed_event.set()

        def on_job_consumed(event, details):
            if event == 'job_consumed':
                job_consumed_event.set()

        def on_job_abandoned(event, details):
            if event == 'job_abandoned':
                job_abandoned_event.set()

        components.board.notifier.register(base.REMOVAL, on_consume)
        components.conductor.notifier.register("job_consumed", on_job_consumed)
        components.conductor.notifier.register("job_abandoned",
                                               on_job_abandoned)
        with close_many(components.conductor, components.client):
            t = threading_utils.daemon_thread(components.conductor.run)
            t.start()
            job = components.board.post('poke', test_blowup_factory)
            lb = job.book
            fd = job.load_flow_detail()

            self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
            self.assertTrue(job_consumed_event.wait(test_utils.WAIT_TIMEOUT))
            # A failed (but completed) run is consumed, never abandoned.
            self.assertFalse(job_abandoned_event.wait(1))
            components.conductor.stop()
            self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
            self.assertFalse(components.conductor.dispatching)

        # Verify in persistence that the flow was reverted after failing.
        persistence = components.persistence
        with contextlib.closing(persistence.get_connection()) as conn:
            lb = conn.get_logbook(lb.uuid)
            fd = lb.find(fd.uuid)
        self.assertIsNotNone(fd)
        self.assertEqual(st.REVERTED, fd.state)
예제 #24
0
    def test_run_max_dispatches(self):
        """run(max_dispatches=N) processes exactly N jobs then stops.

        Fixes a latent weakness: the consumed event was never cleared
        between iterations, so after the first job every subsequent
        ``wait()`` returned True immediately without actually verifying
        that job's consumption.
        """
        components = self.make_components()
        components.conductor.connect()
        consumed_event = threading.Event()

        def on_consume(state, details):
            consumed_event.set()

        components.board.notifier.register(base.REMOVAL, on_consume)
        with close_many(components.client, components.conductor):
            t = threading_utils.daemon_thread(
                lambda: components.conductor.run(max_dispatches=5))
            t.start()
            for _ in range(5):
                components.board.post('poke', test_utils.test_factory)
                self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
                # Reset so the next iteration waits for its *own* job to be
                # consumed (otherwise the event stays set forever).
                consumed_event.clear()
            # The dispatch limit was reached, so this job stays unprocessed.
            components.board.post('poke', test_utils.test_factory)
            components.conductor.stop()
            self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
            self.assertFalse(components.conductor.dispatching)
예제 #25
0
    def test_stop_aborts_engine(self):
        """Stopping mid-run abandons the job instead of consuming it."""
        components = self.make_components()
        components.conductor.connect()
        consumed_event = threading.Event()
        job_consumed_event = threading.Event()
        job_abandoned_event = threading.Event()
        running_start_event = threading.Event()

        def on_running_start(event, details):
            running_start_event.set()

        def on_consume(state, details):
            consumed_event.set()

        def on_job_consumed(event, details):
            if event == 'job_consumed':
                job_consumed_event.set()

        def on_job_abandoned(event, details):
            if event == 'job_abandoned':
                job_abandoned_event.set()

        components.board.notifier.register(base.REMOVAL, on_consume)
        components.conductor.notifier.register("job_consumed", on_job_consumed)
        components.conductor.notifier.register("job_abandoned",
                                               on_job_abandoned)
        components.conductor.notifier.register("running_start",
                                               on_running_start)
        with close_many(components.conductor, components.client):
            t = threading_utils.daemon_thread(components.conductor.run)
            t.start()
            # The flow sleeps for 2 seconds so the stop() below lands while
            # the engine is still running the job.
            components.board.post('poke', sleep_factory, store={'duration': 2})
            running_start_event.wait(test_utils.WAIT_TIMEOUT)
            components.conductor.stop()
            # The interrupted job should be abandoned, never consumed.
            job_abandoned_event.wait(test_utils.WAIT_TIMEOUT)
            self.assertTrue(job_abandoned_event.is_set())
            self.assertFalse(job_consumed_event.is_set())
            self.assertFalse(consumed_event.is_set())
예제 #26
0
    def test_multi_message(self):
        """Mixed message kinds are routed to their per-kind handlers."""
        message_count = 30
        barrier = latch.Latch(message_count)
        # Every handler counts down the shared latch; it opens only after
        # all message_count messages have been dispatched.
        countdown = lambda data, message: barrier.countdown()

        on_notify = mock.MagicMock()
        on_notify.side_effect = countdown

        on_response = mock.MagicMock()
        on_response.side_effect = countdown

        on_request = mock.MagicMock()
        on_request.side_effect = countdown

        handlers = {
            pr.NOTIFY: dispatcher.Handler(on_notify),
            pr.RESPONSE: dispatcher.Handler(on_response),
            pr.REQUEST: dispatcher.Handler(on_request),
        }
        p = proxy.Proxy(TEST_TOPIC,
                        TEST_EXCHANGE,
                        handlers,
                        transport='memory',
                        transport_options={
                            'polling_interval': POLLING_INTERVAL,
                        })

        t = threading_utils.daemon_thread(p.start)
        t.start()
        p.wait()

        # Publish notify/response/request messages round-robin (10 of each).
        for i in range(0, message_count):
            j = i % 3
            if j == 0:
                p.publish(pr.Notify(), TEST_TOPIC)
            elif j == 1:
                p.publish(pr.Response(pr.RUNNING), TEST_TOPIC)
            else:
                p.publish(
                    pr.Request(test_utils.DummyTask("dummy_%s" % i),
                               uuidutils.generate_uuid(), pr.EXECUTE, [],
                               None), TEST_TOPIC)

        self.assertTrue(barrier.wait(test_utils.WAIT_TIMEOUT))
        self.assertEqual(0, barrier.needed)
        p.stop()
        t.join()

        self.assertTrue(on_notify.called)
        self.assertTrue(on_response.called)
        self.assertTrue(on_request.called)

        self.assertEqual(10, on_notify.call_count)
        self.assertEqual(10, on_response.call_count)
        self.assertEqual(10, on_request.call_count)

        call_count = sum([
            on_notify.call_count,
            on_response.call_count,
            on_request.call_count,
        ])
        self.assertEqual(message_count, call_count)
예제 #27
0
# Create & start our worker that will actually run tasks..
#
# It will join 'coordinator_groups' groups and advertise its capabilities in
# those groups (so that engines may find it).
worker_id = uuidutils.generate_uuid()
advertiser_factory = wt.ToozWorkerAdvertiser.generate_factory({
    'coordinator': {
        'url': coordinator_url,
        'groups': coordinator_groups,
    },
})
w = worker.Worker(exchange,
                  uuidutils.generate_uuid(), [OneTask, TwoTask],
                  transport=transport,
                  advertiser_factory=advertiser_factory)
w_runner = tu.daemon_thread(target=w.run, banner_writer=banner_writer)

print("Booting up the worker...")
w_runner.start()
# Block until the worker reports it is ready before publishing work to it.
w.wait()

# Now make the engine that will use the previous workers to do some work...
finder_factory = wt.ToozWorkerFinder.generate_factory({
    'coordinator': {
        'url': coordinator_url,
        'groups': coordinator_groups,
    },
})

# Create a custom executor for the engine to use.
#
예제 #28
0
 def test_daemon_thread(self):
     """Threads made by daemon_thread() are flagged as daemons."""
     stop_event = threading.Event()
     spin_thread = tu.daemon_thread(_spinner, stop_event)
     self.assertTrue(spin_thread.daemon)
예제 #29
0
            },
        })
    worker_conf = dict(WORKER_CONF)
    worker_conf.update(shared_conf)
    engine_options = dict(shared_conf)
    workers = []
    worker_topics = []

    try:
        # Create a set of workers to simulate actual remote workers.
        print('Running %s workers.' % (worker_count))
        for i in range(0, worker_count):
            worker_conf['topic'] = 'worker-%s' % (i + 1)
            worker_topics.append(worker_conf['topic'])
            w = worker.Worker(**worker_conf)
            runner = threading_utils.daemon_thread(w.run)
            runner.start()
            w.wait()
            workers.append((runner, w.stop))

        # Now use those workers to do something.
        print('Executing some work.')
        engine_options['topics'] = worker_topics
        result = run(engine_options)
        print('Execution finished.')
        # This is done so that the test examples can work correctly
        # even when the keys change order (which will happen in various
        # python versions).
        print("Result = %s" % json.dumps(result, sort_keys=True))
    finally:
        # And cleanup.