    def test_double_reader_writer(self):
        lock = lock_utils.ReaderWriterLock()
        activated = collections.deque()
        active = threading_utils.Event()

        def double_reader():
            with lock.read_lock():
                active.set()
                while not lock.has_pending_writers:
                    time.sleep(0.001)
                with lock.read_lock():
                    activated.append(lock.owner)

        def happy_writer():
            with lock.write_lock():
                activated.append(lock.owner)

        reader = threading_utils.daemon_thread(double_reader)
        reader.start()
        self.assertTrue(active.wait(test_utils.WAIT_TIMEOUT))

        writer = threading_utils.daemon_thread(happy_writer)
        writer.start()

        reader.join()
        writer.join()
        self.assertEqual(2, len(activated))
        self.assertEqual(['r', 'w'], list(activated))
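
All of these examples build on the same small factory; a minimal sketch of the shape such a daemon_thread helper usually has (an approximation for readability, not taskflow's actual implementation) is:

import threading


def daemon_thread(target, *args, **kwargs):
    # Build (but do not start) a thread marked as a daemon so it will not
    # keep the process alive; callers decide when to start() and join() it.
    thread = threading.Thread(target=target, args=args, kwargs=kwargs)
    thread.daemon = True
    return thread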
def main():
    if six.PY3:
        # TODO(harlowja): Hack to make eventlet work right, remove when the
        # following is fixed: https://github.com/eventlet/eventlet/issues/230
        from taskflow.utils import eventlet_utils as _eu  # noqa
        try:
            import eventlet as _eventlet  # noqa
        except ImportError:
            pass
    with contextlib.closing(fake_client.FakeClient()) as c:
        created = []
        for i in compat_range(0, PRODUCERS):
            p = threading_utils.daemon_thread(producer, i + 1, c)
            created.append(p)
            p.start()
        consumed = collections.deque()
        for i in compat_range(0, WORKERS):
            w = threading_utils.daemon_thread(worker, i + 1, c, consumed)
            created.append(w)
            w.start()
        while created:
            t = created.pop()
            t.join()
        # At the end there should be nothing left over; let's verify that.
        board = backends.fetch('verifier', SHARED_CONF.copy(), client=c)
        board.connect()
        with contextlib.closing(board):
            if board.job_count != 0 or len(consumed) != EXPECTED_UNITS:
                return 1
            return 0
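
The producer and worker callables started above are defined elsewhere in the example file; a hypothetical producer (PRODUCER_UNITS and the exact job payload are assumptions, only the jobboard calls mirror the ones already shown on this page) could be as simple as:

def producer(ident, client):
    # Post a fixed number of jobs to a per-producer board and then exit.
    name = "P%s" % ident
    board = backends.fetch(name, SHARED_CONF.copy(), client=client)
    board.connect()
    with contextlib.closing(board):
        for i in compat_range(0, PRODUCER_UNITS):
            board.post("%s-%s" % (name, i))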
Example #3
    def test_wait_arrival(self):
        ev = threading.Event()
        jobs = []

        def poster(wait_post=0.2):
            if not ev.wait(test_utils.WAIT_TIMEOUT):
                raise RuntimeError("Waiter did not appear ready"
                                   " in %s seconds" % test_utils.WAIT_TIMEOUT)
            time.sleep(wait_post)
            self.board.post('test', p_utils.temporary_log_book())

        def waiter():
            ev.set()
            it = self.board.wait()
            jobs.extend(it)

        with connect_close(self.board):
            t1 = threading_utils.daemon_thread(poster)
            t1.start()
            t2 = threading_utils.daemon_thread(waiter)
            t2.start()
            for t in (t1, t2):
                t.join()

        self.assertEqual(1, len(jobs))
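
The connect_close helper used above is not shown on this page; a plausible version (an assumption about the test helper, not its actual code) connects each board up front and guarantees it gets closed again:

import contextlib


@contextlib.contextmanager
def connect_close(*boards):
    # Connect every board before the block runs and close them all afterwards.
    try:
        for board in boards:
            board.connect()
        yield
    finally:
        for board in boards:
            board.close()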
Example #7
 def start(self):
     """Starts proxy thread and associated topic notification thread."""
     if not _is_alive(self._proxy_thread):
         self._proxy_thread = tu.daemon_thread(self._proxy.start)
         self._proxy_thread.start()
         self._proxy.wait()
     if not _is_alive(self._periodic_thread):
         self._periodic.reset()
         self._periodic_thread = tu.daemon_thread(self._periodic.start)
         self._periodic_thread.start()
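
The _is_alive check used here is not defined on this page; a reasonable stand-in (an assumption) treats a missing or never-started thread as not alive:

def _is_alive(thread):
    # A thread that was never created (or never started) counts as dead.
    if thread is None:
        return False
    return thread.is_alive()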
    def test_double_acquire_many(self):
        activated = collections.deque()
        n_lock = lock_utils.MultiLock((threading.RLock(), threading.RLock()))

        def critical_section():
            start = time.time()
            time.sleep(0.05)
            end = time.time()
            activated.append((start, end))

        def run():
            with n_lock:
                critical_section()
                with n_lock:
                    critical_section()
                critical_section()

        threads = []
        for i in range(0, 20):
            t = threading_utils.daemon_thread(run)
            threads.append(t)
            t.start()
        while threads:
            t = threads.pop()
            t.join()

        for (start, end) in activated:
            self.assertEqual(1, _find_overlaps(activated, start, end))
    def test_acquired_pass(self):
        activated = collections.deque()
        lock1 = threading.Lock()
        lock2 = threading.Lock()
        n_lock = lock_utils.MultiLock((lock1, lock2))

        def critical_section():
            start = time.time()
            time.sleep(0.05)
            end = time.time()
            activated.append((start, end))

        def run():
            with n_lock:
                critical_section()

        threads = []
        for _i in range(0, 20):
            t = threading_utils.daemon_thread(run)
            threads.append(t)
            t.start()
        while threads:
            t = threads.pop()
            t.join()
        for (start, end) in activated:
            self.assertEqual(1, _find_overlaps(activated, start, end))

        self.assertFalse(lock1.locked())
        self.assertFalse(lock2.locked())
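
The _find_overlaps helper these MultiLock tests rely on is not included above; a straightforward version (an assumed implementation) counts how many recorded (start, end) windows intersect a given window, so a properly serialized critical section only ever overlaps itself:

def _find_overlaps(times, start, end):
    # Two closed intervals [s, e] and [start, end] overlap when each one
    # starts before the other one ends.
    overlaps = 0
    for (s, e) in times:
        if s <= end and start <= e:
            overlaps += 1
    return overlaps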
Example #11
    def check_worker_state(self, context, startup=True):
        LOG.debug('Periodic Timer for worker health check expired')
        dead = []
        for instance in self.reaper_instances:
            if not instance.worker.is_alive() or instance.missed_acks > 5:
                LOG.error('Worker for aggregates %s, found dead.',
                         instance.aggregates)
                instance.stop_handling()
                instance.worker.join(timeout=0.1)
                dead.append(instance)
            else:
                # Each worker will reset this to 0. If the worker is stuck
                # after 3 periodic checks then it is presumed dead and it's
                # revived.
                instance.missed_acks += 1

        for instance in dead:
            self.reaper_instances.remove(instance)
            LOG.info('Reviving worker for aggregates %s.',
                     instance.aggregates)
            new_instance = reaper.Reaper(instance.aggregates)
            new_instance.worker = threading_utils.daemon_thread(
                new_instance.job_handler)
            self.reaper_instances.append(new_instance)
            new_instance.worker.start()
Example #12
    def test_response(self):
        barrier = threading_utils.Event()

        on_response = mock.MagicMock()
        on_response.side_effect = lambda *args, **kwargs: barrier.set()

        handlers = {pr.RESPONSE: on_response}
        p = proxy.Proxy(TEST_TOPIC,
                        TEST_EXCHANGE,
                        handlers,
                        transport='memory',
                        transport_options={
                            'polling_interval': POLLING_INTERVAL,
                        })

        t = threading_utils.daemon_thread(p.start)
        t.start()
        p.wait()
        resp = pr.Response(pr.RUNNING)
        p.publish(resp, TEST_TOPIC)

        self.assertTrue(barrier.wait(test_utils.WAIT_TIMEOUT))
        self.assertTrue(barrier.is_set())
        p.stop()
        t.join()

        self.assertTrue(on_response.called)
        on_response.assert_called_with(resp.to_dict(), mock.ANY)
Example #13
    def test_notify(self):
        barrier = threading_utils.Event()

        on_notify = mock.MagicMock()
        on_notify.side_effect = lambda *args, **kwargs: barrier.set()

        handlers = {pr.NOTIFY: on_notify}
        p = proxy.Proxy(TEST_TOPIC,
                        TEST_EXCHANGE,
                        handlers,
                        transport='memory',
                        transport_options={
                            'polling_interval': POLLING_INTERVAL,
                        })

        t = threading_utils.daemon_thread(p.start)
        t.start()
        p.wait()
        p.publish(pr.Notify(), TEST_TOPIC)

        self.assertTrue(barrier.wait(test_utils.WAIT_TIMEOUT))
        p.stop()
        t.join()

        self.assertTrue(on_notify.called)
        on_notify.assert_called_with({}, mock.ANY)
    def test_start_stop(self):
        events = collections.deque()

        def before_start(t):
            events.append('bs')

        def before_join(t):
            events.append('bj')
            self.death.set()

        def after_start(t):
            events.append('as')

        def after_join(t):
            events.append('aj')

        for _i in range(0, self.thread_count):
            self.bundle.bind(lambda: tu.daemon_thread(_spinner, self.death),
                             before_join=before_join,
                             after_join=after_join,
                             before_start=before_start,
                             after_start=after_start)
        self.assertEqual(self.thread_count, self.bundle.start())
        self.assertEqual(self.thread_count, len(self.bundle))
        self.assertEqual(self.thread_count, self.bundle.stop())
        for event in ['as', 'bs', 'bj', 'aj']:
            self.assertEqual(self.thread_count,
                             len([e for e in events if e == event]))
        self.assertEqual(0, self.bundle.stop())
        self.assertTrue(self.death.is_set())
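
The _spinner target that the bundle builds its threads around is likewise not shown; an assumed version just idles until the shared death event is set:

import time


def _spinner(death):
    # Spin (politely) until asked to die, so the thread stays joinable.
    while not death.is_set():
        time.sleep(0.005)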
    def test_double_acquire_many(self):
        activated = collections.deque()
        acquires = collections.deque()
        n_lock = lock_utils.MultiLock((threading.RLock(), threading.RLock()))

        def critical_section():
            start = now()
            time.sleep(NAPPY_TIME)
            end = now()
            activated.append((start, end))

        def run():
            with n_lock as gotten:
                acquires.append(gotten)
                critical_section()
                with n_lock as gotten:
                    acquires.append(gotten)
                    critical_section()
                critical_section()

        threads = []
        for i in range(0, self.THREAD_COUNT):
            t = threading_utils.daemon_thread(run)
            threads.append(t)
            t.start()
        while threads:
            t = threads.pop()
            t.join()

        self.assertTrue(all(acquires))
        self.assertEqual(self.THREAD_COUNT * 2, len(acquires))
        self.assertEqual(self.THREAD_COUNT * 3, len(activated))
        for (start, end) in activated:
            self.assertEqual(1, _find_overlaps(activated, start, end))
Example #16
    def test_stop(self):
        self.conn_inst_mock.drain_events.side_effect = socket.timeout

        # create proxy
        pr = self.proxy(reset_master_mock=True)

        # check that proxy is not running yet
        self.assertFalse(pr.is_running)

        # start proxy in separate thread
        t = threading_utils.daemon_thread(pr.start)
        t.start()

        # make sure proxy is started
        pr.wait()

        # check that proxy is running now
        self.assertTrue(pr.is_running)

        # stop proxy
        pr.stop()

        # wait for thread to finish
        t.join()

        self.assertFalse(pr.is_running)
Example #17
    def test_run_max_dispatches(self):
        components = self.make_components()
        components.conductor.connect()
        consumed_event = threading.Event()

        def on_consume(state, details):
            consumed_event.set()

        components.board.notifier.register(base.REMOVAL, on_consume)
        with close_many(components.client, components.conductor):
            t = threading_utils.daemon_thread(
                lambda: components.conductor.run(max_dispatches=5))
            t.start()
            lb, fd = pu.temporary_flow_detail(components.persistence)
            engines.save_factory_details(fd,
                                         test_factory, [False], {},
                                         backend=components.persistence)
            for _ in range(5):
                components.board.post('poke',
                                      lb,
                                      details={'flow_uuid': fd.uuid})
                self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
            components.board.post('poke', lb, details={'flow_uuid': fd.uuid})
            components.conductor.stop()
            self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
            self.assertFalse(components.conductor.dispatching)
Example #18
    def test_run(self):
        components = self.make_components()
        components.conductor.connect()
        consumed_event = threading.Event()

        def on_consume(state, details):
            consumed_event.set()

        components.board.notifier.register(base.REMOVAL, on_consume)
        with close_many(components.conductor, components.client):
            t = threading_utils.daemon_thread(components.conductor.run)
            t.start()
            lb, fd = pu.temporary_flow_detail(components.persistence)
            engines.save_factory_details(fd,
                                         test_factory, [False], {},
                                         backend=components.persistence)
            components.board.post('poke', lb, details={'flow_uuid': fd.uuid})
            self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
            components.conductor.stop()
            self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
            self.assertFalse(components.conductor.dispatching)

        persistence = components.persistence
        with contextlib.closing(persistence.get_connection()) as conn:
            lb = conn.get_logbook(lb.uuid)
            fd = lb.find(fd.uuid)
        self.assertIsNotNone(fd)
        self.assertEqual(st.SUCCESS, fd.state)
Example #19
    def test_threaded_access_property(self):
        called = collections.deque()

        class A(object):
            @misc.cachedproperty
            def b(self):
                called.append(1)
                # NOTE(harlowja): wait a little and give another thread some
                # time to potentially also get into this method and create
                # the same property...
                time.sleep(random.random() * 0.5)
                return 'b'

        a = A()
        threads = []
        try:
            for _i in range(0, 20):
                t = threading_utils.daemon_thread(lambda: a.b)
                threads.append(t)
            for t in threads:
                t.start()
        finally:
            while threads:
                t = threads.pop()
                t.join()

        self.assertEqual(1, len(called))
        self.assertEqual('b', a.b)
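
For this test to pass, misc.cachedproperty has to make sure the wrapped function runs only once even under concurrent first access; a thread-safe descriptor along these lines (a sketch of the idea, not taskflow's code) would satisfy it:

import threading


class cachedproperty(object):
    """Property-like descriptor that computes its value once per instance."""

    def __init__(self, fget):
        self._fget = fget
        self._lock = threading.Lock()
        self._name = fget.__name__

    def __get__(self, instance, owner):
        if instance is None:
            return self
        with self._lock:
            # The first caller computes and stores the value; later callers
            # (and later accesses) get the cached copy from the instance dict.
            try:
                return instance.__dict__[self._name]
            except KeyError:
                value = self._fget(instance)
                instance.__dict__[self._name] = value
                return value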
    def test_no_double_writers(self):
        lock = lock_utils.ReaderWriterLock()
        watch = timing.StopWatch(duration=5)
        watch.start()
        dups = collections.deque()
        active = collections.deque()

        def acquire_check(me):
            with lock.write_lock():
                if len(active) >= 1:
                    dups.append(me)
                    dups.extend(active)
                active.append(me)
                try:
                    time.sleep(random.random() / 100)
                finally:
                    active.remove(me)

        def run():
            me = threading.current_thread()
            while not watch.expired():
                acquire_check(me)

        threads = []
        for i in range(0, self.THREAD_COUNT):
            t = threading_utils.daemon_thread(run)
            threads.append(t)
            t.start()
        while threads:
            t = threads.pop()
            t.join()

        self.assertEqual([], list(dups))
        self.assertEqual([], list(active))
Example #21
    def test_response(self):
        barrier = threading_utils.Event()

        on_response = mock.MagicMock()
        on_response.side_effect = lambda *args, **kwargs: barrier.set()

        handlers = {pr.RESPONSE: dispatcher.Handler(on_response)}
        p = proxy.Proxy(TEST_TOPIC, TEST_EXCHANGE, handlers,
                        transport='memory',
                        transport_options={
                            'polling_interval': POLLING_INTERVAL,
                        })

        t = threading_utils.daemon_thread(p.start)
        t.start()
        p.wait()
        resp = pr.Response(pr.RUNNING)
        p.publish(resp, TEST_TOPIC)

        self.assertTrue(barrier.wait(test_utils.WAIT_TIMEOUT))
        self.assertTrue(barrier.is_set())
        p.stop()
        t.join()

        self.assertTrue(on_response.called)
        on_response.assert_called_with(resp.to_dict(), mock.ANY)
Example #24
    def test_fail_run(self):
        components = self.make_components()
        components.conductor.connect()
        consumed_event = threading_utils.Event()

        def on_consume(state, details):
            consumed_event.set()

        components.board.notifier.register(jobboard.REMOVAL, on_consume)
        with close_many(components.conductor, components.client):
            t = threading_utils.daemon_thread(components.conductor.run)
            t.start()
            lb, fd = pu.temporary_flow_detail(components.persistence)
            engines.save_factory_details(fd, test_factory,
                                         [True], {},
                                         backend=components.persistence)
            components.board.post('poke', lb,
                                  details={'flow_uuid': fd.uuid})
            self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
            self.assertTrue(components.conductor.stop(test_utils.WAIT_TIMEOUT))
            self.assertFalse(components.conductor.dispatching)

        persistence = components.persistence
        with contextlib.closing(persistence.get_connection()) as conn:
            lb = conn.get_logbook(lb.uuid)
            fd = lb.find(fd.uuid)
        self.assertIsNotNone(fd)
        self.assertEqual(st.REVERTED, fd.state)
    def test_run_max_dispatches(self):
        components = self.make_components()
        components.conductor.connect()
        consumed_event = threading.Event()

        def on_consume(state, details):
            consumed_event.set()

        components.board.notifier.register(base.REMOVAL, on_consume)
        with close_many(components.client, components.conductor):
            t = threading_utils.daemon_thread(
                lambda: components.conductor.run(max_dispatches=5))
            t.start()
            lb, fd = pu.temporary_flow_detail(components.persistence)
            engines.save_factory_details(fd, test_factory,
                                         [False], {},
                                         backend=components.persistence)
            for _ in range(5):
                components.board.post('poke', lb,
                                      details={'flow_uuid': fd.uuid})
                self.assertTrue(consumed_event.wait(
                    test_utils.WAIT_TIMEOUT))
            components.board.post('poke', lb,
                                  details={'flow_uuid': fd.uuid})
            components.conductor.stop()
            self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
            self.assertFalse(components.conductor.dispatching)
    def test_combined_store(self):
        components = self.make_components()
        components.conductor.connect()
        consumed_event = threading.Event()

        def on_consume(state, details):
            consumed_event.set()

        flow_store = {'x': True, 'y': False}
        job_store = {'z': None}

        components.board.notifier.register(base.REMOVAL, on_consume)
        with close_many(components.conductor, components.client):
            t = threading_utils.daemon_thread(components.conductor.run)
            t.start()
            lb, fd = pu.temporary_flow_detail(components.persistence,
                                              meta={'store': flow_store})
            engines.save_factory_details(fd, test_store_factory,
                                         [], {},
                                         backend=components.persistence)
            components.board.post('poke', lb,
                                  details={'flow_uuid': fd.uuid,
                                           'store': job_store})
            self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
            components.conductor.stop()
            self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
            self.assertFalse(components.conductor.dispatching)

        persistence = components.persistence
        with contextlib.closing(persistence.get_connection()) as conn:
            lb = conn.get_logbook(lb.uuid)
            fd = lb.find(fd.uuid)
        self.assertIsNotNone(fd)
        self.assertEqual(st.SUCCESS, fd.state)
Example #30
    def test_multi_message(self):
        message_count = 30
        barrier = latch.Latch(message_count)
        countdown = lambda data, message: barrier.countdown()

        on_notify = mock.MagicMock()
        on_notify.side_effect = countdown

        on_response = mock.MagicMock()
        on_response.side_effect = countdown

        on_request = mock.MagicMock()
        on_request.side_effect = countdown

        handlers = {
            pr.NOTIFY: dispatcher.Handler(on_notify),
            pr.RESPONSE: dispatcher.Handler(on_response),
            pr.REQUEST: dispatcher.Handler(on_request),
        }
        p = proxy.Proxy(TEST_TOPIC, TEST_EXCHANGE, handlers,
                        transport='memory',
                        transport_options={
                            'polling_interval': POLLING_INTERVAL,
                        })

        t = threading_utils.daemon_thread(p.start)
        t.start()
        p.wait()

        for i in range(0, message_count):
            j = i % 3
            if j == 0:
                p.publish(pr.Notify(), TEST_TOPIC)
            elif j == 1:
                p.publish(pr.Response(pr.RUNNING), TEST_TOPIC)
            else:
                p.publish(pr.Request(test_utils.DummyTask("dummy_%s" % i),
                                     uuidutils.generate_uuid(),
                                     pr.EXECUTE, [], None), TEST_TOPIC)

        self.assertTrue(barrier.wait(test_utils.WAIT_TIMEOUT))
        self.assertEqual(0, barrier.needed)
        p.stop()
        t.join()

        self.assertTrue(on_notify.called)
        self.assertTrue(on_response.called)
        self.assertTrue(on_request.called)

        self.assertEqual(10, on_notify.call_count)
        self.assertEqual(10, on_response.call_count)
        self.assertEqual(10, on_request.call_count)

        call_count = sum([
            on_notify.call_count,
            on_response.call_count,
            on_request.call_count,
        ])
        self.assertEqual(message_count, call_count)
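
The latch.Latch used above behaves like a classic countdown latch: wait() returns once countdown() has been called the initial number of times, and .needed exposes the remaining count. A condition-variable based sketch (an assumption, not taskflow's type) is:

import threading


class Latch(object):
    """Countdown latch sketch: wait() unblocks after `count` countdown() calls."""

    def __init__(self, count):
        self.needed = count
        self._cond = threading.Condition()

    def countdown(self):
        # Decrement the remaining count and wake waiters when it hits zero.
        with self._cond:
            if self.needed > 0:
                self.needed -= 1
            if self.needed == 0:
                self._cond.notify_all()

    def wait(self, timeout=None):
        with self._cond:
            # wait_for handles spurious wakeups and returns False on timeout.
            return self._cond.wait_for(lambda: self.needed == 0, timeout)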
Example #31
 def start(self):
     """Starts message processing thread."""
     if self._helper is not None:
         raise RuntimeError("Worker executor must be stopped before"
                            " it can be started")
     self._helper = tu.daemon_thread(self._proxy.start)
     self._helper.start()
     self._proxy.wait()
 def test_bind_invalid(self):
     self.assertRaises(ValueError, self.bundle.bind, 1)
     for k in ['after_start', 'before_start', 'before_join', 'after_join']:
         kwargs = {
             k: 1,
         }
         self.assertRaises(ValueError, self.bundle.bind,
                           lambda: tu.daemon_thread(_spinner, self.death),
                           **kwargs)
 def test_bundle_length(self):
     self.assertEqual(0, len(self.bundle))
     for i in range(0, self.thread_count):
         self.bundle.bind(lambda: tu.daemon_thread(_spinner, self.death))
         self.assertEqual(1, self.bundle.start())
         self.assertEqual(i + 1, len(self.bundle))
     self.death.set()
     self.assertEqual(self.thread_count, self.bundle.stop())
     self.assertEqual(self.thread_count, len(self.bundle))
 def test_alive_thread(self):
     death = tu.Event()
     t = tu.daemon_thread(_spinner, death)
     self.assertFalse(tu.is_alive(t))
     t.start()
     self.assertTrue(tu.is_alive(t))
     death.set()
     t.join()
     self.assertFalse(tu.is_alive(t))
Example #37
 def __init__(self,
              uuid,
              exchange,
              topics,
              transition_timeout=pr.REQUEST_TIMEOUT,
              url=None,
              transport=None,
              transport_options=None,
              retry_options=None):
     self._uuid = uuid
     self._requests_cache = wt.RequestsCache()
     self._transition_timeout = transition_timeout
     type_handlers = {
         pr.RESPONSE:
         dispatcher.Handler(self._process_response,
                            validator=pr.Response.validate),
     }
     self._proxy = proxy.Proxy(uuid,
                               exchange,
                               type_handlers=type_handlers,
                               on_wait=self._on_wait,
                               url=url,
                               transport=transport,
                               transport_options=transport_options,
                               retry_options=retry_options)
      # NOTE(harlowja): This is the simplest finder impl. that
      # doesn't have external dependencies (outside of what this engine
      # already requires); it does though create periodic 'polling' traffic
      # to workers to 'learn' of the tasks they can perform (and it requires
      # pre-existing knowledge of the topics those workers are on to gather
      # and update this information).
     self._finder = wt.ProxyWorkerFinder(uuid, self._proxy, topics)
     self._finder.notifier.register(wt.WorkerFinder.WORKER_ARRIVED,
                                    self._on_worker)
     self._helpers = tu.ThreadBundle()
     self._helpers.bind(lambda: tu.daemon_thread(self._proxy.start),
                        after_start=lambda t: self._proxy.wait(),
                        before_join=lambda t: self._proxy.stop())
     p_worker = periodics.PeriodicWorker.create([self._finder])
     if p_worker:
         self._helpers.bind(lambda: tu.daemon_thread(p_worker.start),
                            before_join=lambda t: p_worker.stop(),
                            after_join=lambda t: p_worker.reset(),
                            before_start=lambda t: p_worker.reset())
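
tu.ThreadBundle appears here and in the start/stop tests earlier on this page; judging only from that usage (so this is an assumed approximation, not the real class), it keeps a list of thread factories plus lifecycle callbacks, starts each not-yet-started thread on start(), and joins them on stop():

class ThreadBundle(object):
    def __init__(self):
        # Each binding: the factory, its callbacks, the built thread (if any),
        # and whether that thread has already been joined.
        self._bindings = []

    @staticmethod
    def _check_callable(cb, what):
        if not callable(cb):
            raise ValueError("%s must be callable" % what)

    def bind(self, builder, before_start=None, after_start=None,
             before_join=None, after_join=None):
        self._check_callable(builder, "builder")
        callbacks = {'before_start': before_start, 'after_start': after_start,
                     'before_join': before_join, 'after_join': after_join}
        for name, cb in callbacks.items():
            if cb is not None:
                self._check_callable(cb, name)
        self._bindings.append({'builder': builder, 'callbacks': callbacks,
                               'thread': None, 'joined': False})

    def __len__(self):
        return len(self._bindings)

    def start(self):
        # Start only the threads that have not been built yet; report how many.
        started = 0
        for binding in self._bindings:
            if binding['thread'] is None:
                thread = binding['builder']()
                cbs = binding['callbacks']
                if cbs['before_start']:
                    cbs['before_start'](thread)
                thread.start()
                if cbs['after_start']:
                    cbs['after_start'](thread)
                binding['thread'] = thread
                started += 1
        return started

    def stop(self):
        # Join started threads in reverse order; report how many were stopped.
        stopped = 0
        for binding in reversed(self._bindings):
            if binding['thread'] is not None and not binding['joined']:
                cbs = binding['callbacks']
                if cbs['before_join']:
                    cbs['before_join'](binding['thread'])
                binding['thread'].join()
                if cbs['after_join']:
                    cbs['after_join'](binding['thread'])
                binding['joined'] = True
                stopped += 1
        return stopped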
Example #38
 def start(self):
     if threading_utils.is_alive(self._worker):
         raise RuntimeError("Worker thread must be stopped via stop()"
                            " before starting/restarting")
     super(ParallelProcessTaskExecutor, self).start()
     self._dispatcher.setup()
     self._worker = threading_utils.daemon_thread(
         asyncore.loop, map=self._dispatcher.map,
         timeout=self._wait_timeout)
     self._worker.start()
Example #39
 def test_run_empty(self):
     components = self.make_components()
     components.conductor.connect()
     with close_many(components.conductor, components.client):
         t = threading_utils.daemon_thread(components.conductor.run)
         t.start()
         self.assertTrue(
             components.conductor.stop(test_utils.WAIT_TIMEOUT))
         self.assertFalse(components.conductor.dispatching)
         t.join()
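
close_many, used throughout these conductor tests, is another small helper that is not reproduced here; an assumed version simply closes everything it is given once the block exits:

import contextlib


@contextlib.contextmanager
def close_many(*closeables):
    try:
        yield
    finally:
        # Close in reverse order so later-created things go away first.
        for closeable in reversed(closeables):
            closeable.close()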
 def test_bind_invalid(self):
     self.assertRaises(ValueError, self.bundle.bind, 1)
     for k in ['after_start', 'before_start',
               'before_join', 'after_join']:
         kwargs = {
             k: 1,
         }
         self.assertRaises(ValueError, self.bundle.bind,
                           lambda: tu.daemon_thread(_spinner, self.death),
                           **kwargs)
Example #41
 def test_run_empty(self):
     components = self.make_components()
     components.conductor.connect()
     with close_many(components.conductor, components.client):
         t = threading_utils.daemon_thread(components.conductor.run)
         t.start()
         components.conductor.stop()
         self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
         self.assertFalse(components.conductor.dispatching)
         t.join()
Example #42
 def __init__(self,
              uuid,
              exchange,
              topics,
              transition_timeout=pr.REQUEST_TIMEOUT,
              url=None,
              transport=None,
              transport_options=None,
              retry_options=None):
     self._uuid = uuid
     self._topics = topics
     self._requests_cache = wt.RequestsCache()
     self._workers = wt.TopicWorkers()
     self._transition_timeout = transition_timeout
     type_handlers = {
         pr.NOTIFY: [
             self._process_notify,
             functools.partial(pr.Notify.validate, response=True),
         ],
         pr.RESPONSE: [
             self._process_response,
             pr.Response.validate,
         ],
     }
     self._proxy = proxy.Proxy(uuid,
                               exchange,
                               type_handlers,
                               on_wait=self._on_wait,
                               url=url,
                               transport=transport,
                               transport_options=transport_options,
                               retry_options=retry_options)
     self._periodic = wt.PeriodicWorker(tt.Timeout(pr.NOTIFY_PERIOD),
                                        [self._notify_topics])
     self._helpers = tu.ThreadBundle()
     self._helpers.bind(lambda: tu.daemon_thread(self._proxy.start),
                        after_start=lambda t: self._proxy.wait(),
                        before_join=lambda t: self._proxy.stop())
     self._helpers.bind(lambda: tu.daemon_thread(self._periodic.start),
                        before_join=lambda t: self._periodic.stop(),
                        after_join=lambda t: self._periodic.reset(),
                        before_start=lambda t: self._periodic.reset())
def generate_reviewer(client, saver, name=NAME):
    """Creates a review producer thread with the given name prefix."""
    real_name = "%s_reviewer" % name
    no_more = threading.Event()
    jb = boards.fetch(real_name,
                      JOBBOARD_CONF,
                      client=client,
                      persistence=saver)

    def make_save_book(saver, review_id):
        # Record what we want to happen (sometime in the future).
        book = models.LogBook("book_%s" % review_id)
        detail = models.FlowDetail("flow_%s" % review_id,
                                   uuidutils.generate_uuid())
        book.add(detail)
        # Associate the factory method we want to be called (in the future)
        # with the book, so that the conductor will be able to call into
        # that factory to retrieve the workflow objects that represent the
        # work.
        #
        # These args and kwargs *can* be used to save any specific parameters
        # into the factory when it is being called to create the workflow
        # objects (typically used to tell a factory how to create a unique
        # workflow that represents this review).
        factory_args = ()
        factory_kwargs = {}
        engines.save_factory_details(detail, create_review_workflow,
                                     factory_args, factory_kwargs)
        with contextlib.closing(saver.get_connection()) as conn:
            conn.save_logbook(book)
            return book

    def run():
        """Periodically publishes 'fake' reviews to analyze."""
        jb.connect()
        review_generator = review_iter()
        with contextlib.closing(jb):
            while not no_more.is_set():
                review = six.next(review_generator)
                details = {
                    'store': {
                        'review': review,
                    },
                }
                job_name = "%s_%s" % (real_name, review['id'])
                print("Posting review '%s'" % review['id'])
                jb.post(job_name,
                        book=make_save_book(saver, review['id']),
                        details=details)
                time.sleep(REVIEW_CREATION_DELAY)

    # Return the unstarted thread, and a callback that can be used to
    # shut down that thread (to avoid it running forever).
    return (threading_utils.daemon_thread(target=run), no_more.set)
Example #44
    def _setup_workers(self, watched_aggregates):
        if len(watched_aggregates) == 0:
            LOG.debug('One worker for all infrastructure will be started')
            watched_aggregates = [watched_aggregates]

        for aggregates in watched_aggregates:
            if not isinstance(aggregates, list):
                aggregates = [aggregates]
            instance = reaper.Reaper(aggregates)
            instance.worker = threading_utils.daemon_thread(
                instance.job_handler)
            self.reaper_instances.append(instance)
Example #45
def generate_reviewer(client, saver, name=NAME):
    """Creates a review producer thread with the given name prefix."""
    real_name = "%s_reviewer" % name
    no_more = threading.Event()
    jb = boards.fetch(real_name, JOBBOARD_CONF,
                      client=client, persistence=saver)

    def make_save_book(saver, review_id):
        # Record what we want to happen (sometime in the future).
        book = logbook.LogBook("book_%s" % review_id)
        detail = logbook.FlowDetail("flow_%s" % review_id,
                                    uuidutils.generate_uuid())
        book.add(detail)
        # Associate the factory method we want to be called (in the future)
        # with the book, so that the conductor will be able to call into
        # that factory to retrieve the workflow objects that represent the
        # work.
        #
        # These args and kwargs *can* be used to save any specific parameters
        # into the factory when it is being called to create the workflow
        # objects (typically used to tell a factory how to create a unique
        # workflow that represents this review).
        factory_args = ()
        factory_kwargs = {}
        engines.save_factory_details(detail, create_review_workflow,
                                     factory_args, factory_kwargs)
        with contextlib.closing(saver.get_connection()) as conn:
            conn.save_logbook(book)
            return book

    def run():
        """Periodically publishes 'fake' reviews to analyze."""
        jb.connect()
        review_generator = review_iter()
        with contextlib.closing(jb):
            while not no_more.is_set():
                review = six.next(review_generator)
                details = {
                    'store': {
                        'review': review,
                    },
                }
                job_name = "%s_%s" % (real_name, review['id'])
                print("Posting review '%s'" % review['id'])
                jb.post(job_name,
                        book=make_save_book(saver, review['id']),
                        details=details)
                time.sleep(REVIEW_CREATION_DELAY)

    # Return the unstarted thread, and a callback that can be used to
    # shut down that thread (to avoid it running forever).
    return (threading_utils.daemon_thread(target=run), no_more.set)
Example #46
def main():
    with contextlib.closing(fake_client.FakeClient()) as c:
        created = []
        for i in compat_range(0, PRODUCERS):
            p = threading_utils.daemon_thread(producer, i + 1, c)
            created.append(p)
            p.start()
        consumed = collections.deque()
        for i in compat_range(0, WORKERS):
            w = threading_utils.daemon_thread(worker, i + 1, c, consumed)
            created.append(w)
            w.start()
        while created:
            t = created.pop()
            t.join()
        # At the end there should be nothing left over; let's verify that.
        board = backends.fetch('verifier', SHARED_CONF.copy(), client=c)
        board.connect()
        with contextlib.closing(board):
            if board.job_count != 0 or len(consumed) != EXPECTED_UNITS:
                return 1
            return 0
Example #47
 def _fetch_server(self, task_classes):
     endpoints = []
     for cls in task_classes:
         endpoints.append(endpoint.Endpoint(cls))
     server = worker_server.Server(
         TEST_TOPIC, TEST_EXCHANGE,
         futures.ThreadPoolExecutor(1), endpoints,
         transport='memory',
         transport_options={
             'polling_interval': POLLING_INTERVAL,
         })
     server_thread = threading_utils.daemon_thread(server.start)
     return (server, server_thread)
def create_fractal():
    logging.basicConfig(level=logging.ERROR)

    # Setup our transport configuration and merge it into the worker and
    # engine configuration so that both of those use it correctly.
    shared_conf = dict(BASE_SHARED_CONF)
    shared_conf.update({
        'transport': 'memory',
        'transport_options': {
            'polling_interval': 0.1,
        },
    })

    if len(sys.argv) >= 2:
        output_filename = sys.argv[1]
    else:
        output_filename = None

    worker_conf = dict(WORKER_CONF)
    worker_conf.update(shared_conf)
    engine_conf = dict(ENGINE_CONF)
    engine_conf.update(shared_conf)
    workers = []
    worker_topics = []

    print('Calculating your mandelbrot fractal of size %sx%s.' % IMAGE_SIZE)
    try:
        # Create a set of workers to simulate actual remote workers.
        print('Running %s workers.' % (WORKERS))
        for i in compat_range(0, WORKERS):
            worker_conf['topic'] = 'calculator_%s' % (i + 1)
            worker_topics.append(worker_conf['topic'])
            w = worker.Worker(**worker_conf)
            runner = threading_utils.daemon_thread(w.run)
            runner.start()
            w.wait()
            workers.append((runner, w.stop))

        # Now use those workers to do something.
        engine_conf['topics'] = worker_topics
        results = calculate(engine_conf)
        print('Execution finished.')
    finally:
        # And cleanup.
        print('Stopping workers.')
        while workers:
            r, stopper = workers.pop()
            stopper()
            r.join()
    print("Writing image...")
    write_image(results, output_filename=output_filename)
Example #51
 def __init__(self, uuid, exchange, topics,
              transition_timeout=pr.REQUEST_TIMEOUT,
              url=None, transport=None, transport_options=None,
              retry_options=None):
     self._uuid = uuid
     self._requests_cache = wt.RequestsCache()
     self._transition_timeout = transition_timeout
     type_handlers = {
         pr.RESPONSE: [
             self._process_response,
             pr.Response.validate,
         ],
     }
     self._proxy = proxy.Proxy(uuid, exchange,
                               type_handlers=type_handlers,
                               on_wait=self._on_wait, url=url,
                               transport=transport,
                               transport_options=transport_options,
                               retry_options=retry_options)
      # NOTE(harlowja): This is the simplest finder impl. that
      # doesn't have external dependencies (outside of what this engine
      # already requires); it does though create periodic 'polling' traffic
      # to workers to 'learn' of the tasks they can perform (and it requires
      # pre-existing knowledge of the topics those workers are on to gather
      # and update this information).
     self._finder = wt.ProxyWorkerFinder(uuid, self._proxy, topics)
     self._finder.notifier.register(wt.WorkerFinder.WORKER_ARRIVED,
                                    self._on_worker)
     self._helpers = tu.ThreadBundle()
     self._helpers.bind(lambda: tu.daemon_thread(self._proxy.start),
                        after_start=lambda t: self._proxy.wait(),
                        before_join=lambda t: self._proxy.stop())
     p_worker = periodic.PeriodicWorker.create([self._finder])
     if p_worker:
         self._helpers.bind(lambda: tu.daemon_thread(p_worker.start),
                            before_join=lambda t: p_worker.stop(),
                            after_join=lambda t: p_worker.reset(),
                            before_start=lambda t: p_worker.reset())
Example #52
 def _fetch_server(self, task_classes):
     endpoints = []
     for cls in task_classes:
         endpoints.append(endpoint.Endpoint(cls))
     server = worker_server.Server(TEST_TOPIC,
                                   TEST_EXCHANGE,
                                   futures.ThreadPoolExecutor(1),
                                   endpoints,
                                   transport='memory',
                                   transport_options={
                                       'polling_interval': POLLING_INTERVAL,
                                   })
     server_thread = threading_utils.daemon_thread(server.start)
     return (server, server_thread)
Example #53
 def start(self):
     if threading_utils.is_alive(self._worker):
         raise RuntimeError("Worker thread must be stopped via stop()"
                            " before starting/restarting")
     super(ParallelProcessTaskExecutor, self).start()
     # These don't seem restartable; make a new one...
     if self._manager.is_shutdown():
         self._manager = _ViewableSyncManager()
     if not self._manager.is_running():
         self._manager.start()
     self._dispatcher.reset()
     self._queue = self._manager.Queue()
     self._worker = threading_utils.daemon_thread(self._dispatcher.run,
                                                  self._queue)
     self._worker.start()
Example #54
    def test_scanning_periodic(self):
        p = PeriodicThingy()
        w = periodic.PeriodicWorker.create([p])
        self.assertEqual(2, len(w))

        t = tu.daemon_thread(target=w.start)
        t.start()
        time.sleep(0.1)
        w.stop()
        t.join()

        b_calls = [c for c in p.capture if c == 'b']
        self.assertGreater(len(b_calls), 0)
        a_calls = [c for c in p.capture if c == 'a']
        self.assertGreater(len(a_calls), 0)
Example #56
def generate_conductor(client, saver, name=NAME):
    """Creates a conductor thread with the given name prefix."""
    real_name = "%s_conductor" % name
    jb = boards.fetch(name, JOBBOARD_CONF,
                      client=client, persistence=saver)
    conductor = conductors.fetch("blocking", real_name, jb,
                                 engine='parallel', wait_timeout=SCAN_DELAY)

    def run():
        jb.connect()
        with contextlib.closing(jb):
            conductor.run()

    # Return the unstarted thread, and a callback that can be used to
    # shut down that thread (to avoid it running forever).
    return (threading_utils.daemon_thread(target=run), conductor.stop)