Example #1
def run_poster():
    # This just posts a single job and then ends...
    print("Starting poster with pid: %s" % ME)
    my_name = "poster-%s" % ME
    persist_backend = persistence_backends.fetch(PERSISTENCE_URI)
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            conn.upgrade()
        job_backend = job_backends.fetch(my_name, JB_CONF,
                                         persistence=persist_backend)
        job_backend.connect()
        with contextlib.closing(job_backend):
            # Create information in the persistence backend about the
            # unit of work we want to complete and the factory that
            # can be called to create the tasks needed to complete that
            # unit of work.
            lb = models.LogBook("post-from-%s" % my_name)
            fd = models.FlowDetail("song-from-%s" % my_name,
                                   uuidutils.generate_uuid())
            lb.add(fd)
            with contextlib.closing(persist_backend.get_connection()) as conn:
                conn.save_logbook(lb)
            engines.save_factory_details(fd, make_bottles,
                                         [HOW_MANY_BOTTLES], {},
                                         backend=persist_backend)
            # Post, and be done with it!
            jb = job_backend.post("song-from-%s" % my_name, book=lb)
            print("Posted: %s" % jb)
            print("Goodbye...")
Example #2
def setUp(self):
    super(TestClaimListener, self).setUp()
    self.client = fake_client.FakeClient()
    self.addCleanup(self.client.stop)
    self.board = jobs.fetch('test', 'zookeeper', client=self.client)
    self.addCleanup(self.board.close)
    self.board.connect()
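The FakeClient here usually comes from the zake library, an in-memory stand-in for a kazoo ZooKeeper client, so the test needs no real ZooKeeper server. The imports this test presumably relies on (a hedged reconstruction):

from taskflow.jobs import backends as jobs
from zake import fake_client

Note that fetch() also accepts a bare string for its conf argument, as done here; it is treated as {'board': 'zookeeper'}.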
Example #4
def _get_jobboard_backend(conf, persistence=None):
    client = None
    if conf.taskflow.connection == 'memory':
        client = fake_client.FakeClient()
    return boards.fetch(conf.job_board_name,
                        {'board': conf.taskflow.job_board_url},
                        client=client, persistence=persistence)
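When conf.taskflow.connection is 'memory', an in-memory zake FakeClient is handed to the board so nothing leaves the process; otherwise a real client is built from the configuration. Since the 'board' value above is a URL, a hedged illustration of the URI form the conf might take:

# Illustrative only: the scheme selects the backend and the
# host/path portions configure it.
conf = {'board': 'zookeeper://127.0.0.1:2181/taskflow/my-jobs'}
board = boards.fetch('my-board', conf)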
Example #6
def main():
    if six.PY3:
        # TODO(harlowja): Hack to make eventlet work right, remove when the
        # following is fixed: https://github.com/eventlet/eventlet/issues/230
        from taskflow.utils import eventlet_utils as _eu  # noqa
        try:
            import eventlet as _eventlet  # noqa
        except ImportError:
            pass
    with contextlib.closing(fake_client.FakeClient()) as c:
        created = []
        for i in compat_range(0, PRODUCERS):
            p = threading_utils.daemon_thread(producer, i + 1, c)
            created.append(p)
            p.start()
        consumed = collections.deque()
        for i in compat_range(0, WORKERS):
            w = threading_utils.daemon_thread(worker, i + 1, c, consumed)
            created.append(w)
            w.start()
        while created:
            t = created.pop()
            t.join()
        # At the end there should be nothing left over; let's verify that.
        board = backends.fetch('verifier', SHARED_CONF.copy(), client=c)
        board.connect()
        with contextlib.closing(board):
            if board.job_count != 0 or len(consumed) != EXPECTED_UNITS:
                return 1
            return 0
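This check relies on shared constants the listing drops. A plausible reconstruction (counts and path are illustrative), loosely following taskflow's produce/consume jobboard example:

PRODUCERS = 3
WORKERS = 5
PRODUCER_UNITS = 10
# Each producer posts PRODUCER_UNITS jobs, so the workers should
# consume exactly this many in total.
EXPECTED_UNITS = PRODUCER_UNITS * PRODUCERS
SHARED_CONF = {
    'board': 'zookeeper',
    'path': '/taskflow/jobs',
}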
Example #8
def create_jobboard(board_name=None, conf=None, persistence=None, **kwargs):
    """Factory method for creating a jobboard backend instance

    :param board_name: Name of the jobboard
    :param conf: Configuration parameters for the jobboard backend.
    :param persistence: A persistence backend instance to be used with the
                        jobboard.
    :param kwargs: Keyword arguments to be passed forward to the
                   persistence backend constructor
    :return: A persistence backend instance.
    """
    if board_name is None:
        board_name = cfg.CONF.taskflow.jobboard_name

    if conf is None:
        conf = {'board': 'zookeeper'}

        conf.update({
            "path": "%s/jobs" % (cfg.CONF.taskflow.zk_path),
            "hosts": cfg.CONF.taskflow.zk_hosts,
            "timeout": cfg.CONF.taskflow.zk_timeout
        })

    jb = job_backends.fetch(name=board_name,
                            conf=conf,
                            persistence=persistence,
                            **kwargs)
    jb.connect()
    return jb
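A usage sketch for the factory above; my_persistence_backend is an assumed stand-in, while iterjobs(only_unclaimed=True) is part of the jobboard API:

# Hypothetical caller: build the board, scan unclaimed jobs, close it.
jb = create_jobboard(persistence=my_persistence_backend)
try:
    for job in jb.iterjobs(only_unclaimed=True):
        print("Found job: %s" % job)
finally:
    jb.close()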
Example #9
def run_poster():
    # This just posts a single job and then ends...
    print("Starting poster with pid: %s" % ME)
    my_name = "poster-%s" % ME
    persist_backend = persistence_backends.fetch(PERSISTENCE_URI)
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            conn.upgrade()
        job_backend = job_backends.fetch(my_name,
                                         JB_CONF,
                                         persistence=persist_backend)
        job_backend.connect()
        with contextlib.closing(job_backend):
            # Create information in the persistence backend about the
            # unit of work we want to complete and the factory that
            # can be called to create the tasks needed to complete that
            # unit of work.
            lb = models.LogBook("post-from-%s" % my_name)
            fd = models.FlowDetail("song-from-%s" % my_name,
                                   uuidutils.generate_uuid())
            lb.add(fd)
            with contextlib.closing(persist_backend.get_connection()) as conn:
                conn.save_logbook(lb)
            engines.save_factory_details(fd,
                                         make_bottles, [HOW_MANY_BOTTLES], {},
                                         backend=persist_backend)
            # Post, and be done with it!
            jb = job_backend.post("song-from-%s" % my_name, book=lb)
            print("Posted: %s" % jb)
            print("Goodbye...")
Example #10
def get_jobboard(name, jobboard_name):
    config = {
        'hosts': ZK_HOST,
        'board': 'zookeeper',
        'path': '/taskflow/jobboard/zookeeper/' + jobboard_name,
    }
    return job_backends.fetch(name, config,
                              persistence=default_persistence_backend())
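default_persistence_backend() is not shown in the listing; a minimal sketch of what it might do, reusing the ZK_HOST constant from above (the connection URI is a guess):

from taskflow.persistence import backends as persistence_backends

def default_persistence_backend():
    # Hypothetical helper: fetch a persistence backend from a conf dict.
    return persistence_backends.fetch({'connection': 'zookeeper://' + ZK_HOST})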
Example #11
def get_jobboard(name, jobboard_name):
    config = {
        'hosts': ZK_HOST,
        'board': 'zookeeper',
        'path': '/taskflow/jobboard/zookeeper/' + jobboard_name,
    }
    return job_backends.fetch(name,
                              config,
                              persistence=default_persistence_backend())
Example #12
def test_zk_entry_point_existing_client(self):
    existing_client = fake_client.FakeClient()
    conf = {
        'board': 'zookeeper',
    }
    kwargs = {
        'client': existing_client,
    }
    with contextlib.closing(backends.fetch('test', conf, **kwargs)) as be:
        self.assertIsInstance(be, impl_zookeeper.ZookeeperJobBoard)
        self.assertIs(existing_client, be._client)
Example #13
def generate_reviewer(client, saver, name=NAME):
    """Creates a review producer thread with the given name prefix."""
    real_name = "%s_reviewer" % name
    no_more = threading.Event()
    jb = boards.fetch(real_name,
                      JOBBOARD_CONF,
                      client=client,
                      persistence=saver)

    def make_save_book(saver, review_id):
        # Record what we want to happen (sometime in the future).
        book = models.LogBook("book_%s" % review_id)
        detail = models.FlowDetail("flow_%s" % review_id,
                                   uuidutils.generate_uuid())
        book.add(detail)
        # Associate the factory method we want to be called (in the future)
        # with the book, so that the conductor will be able to call into
        # that factory to retrieve the workflow objects that represent the
        # work.
        #
        # These args and kwargs *can* be used to save any specific parameters
        # into the factory when it is being called to create the workflow
        # objects (typically used to tell a factory how to create a unique
        # workflow that represents this review).
        factory_args = ()
        factory_kwargs = {}
        engines.save_factory_details(detail, create_review_workflow,
                                     factory_args, factory_kwargs)
        with contextlib.closing(saver.get_connection()) as conn:
            conn.save_logbook(book)
            return book

    def run():
        """Periodically publishes 'fake' reviews to analyze."""
        jb.connect()
        review_generator = review_iter()
        with contextlib.closing(jb):
            while not no_more.is_set():
                review = six.next(review_generator)
                details = {
                    'store': {
                        'review': review,
                    },
                }
                job_name = "%s_%s" % (real_name, review['id'])
                print("Posting review '%s'" % review['id'])
                jb.post(job_name,
                        book=make_save_book(saver, review['id']),
                        details=details)
                time.sleep(REVIEW_CREATION_DELAY)

    # Return the unstarted thread, and a callback that can be used to
    # shut down that thread (to avoid it running forever).
    return (threading_utils.daemon_thread(target=run), no_more.set)
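review_iter() is assumed by the thread above but not shown; a hedged sketch that yields endless fake reviews (the real generator likely attaches more fields than just an id):

import itertools

def review_iter():
    # Endlessly yield fake review dicts keyed the way run() expects.
    for review_id in itertools.count(1):
        yield {'id': review_id}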
Example #14
def generate_reviewer(client, saver, name=NAME):
    """Creates a review producer thread with the given name prefix."""
    real_name = "%s_reviewer" % name
    no_more = threading.Event()
    jb = boards.fetch(real_name, JOBBOARD_CONF,
                      client=client, persistence=saver)

    def make_save_book(saver, review_id):
        # Record what we want to happen (sometime in the future).
        book = logbook.LogBook("book_%s" % review_id)
        detail = logbook.FlowDetail("flow_%s" % review_id,
                                    uuidutils.generate_uuid())
        book.add(detail)
        # Associate the factory method we want to be called (in the future)
        # with the book, so that the conductor will be able to call into
        # that factory to retrieve the workflow objects that represent the
        # work.
        #
        # These args and kwargs *can* be used to save any specific parameters
        # into the factory when it is being called to create the workflow
        # objects (typically used to tell a factory how to create a unique
        # workflow that represents this review).
        factory_args = ()
        factory_kwargs = {}
        engines.save_factory_details(detail, create_review_workflow,
                                     factory_args, factory_kwargs)
        with contextlib.closing(saver.get_connection()) as conn:
            conn.save_logbook(book)
            return book

    def run():
        """Periodically publishes 'fake' reviews to analyze."""
        jb.connect()
        review_generator = review_iter()
        with contextlib.closing(jb):
            while not no_more.is_set():
                review = six.next(review_generator)
                details = {
                    'store': {
                        'review': review,
                    },
                }
                job_name = "%s_%s" % (real_name, review['id'])
                print("Posting review '%s'" % review['id'])
                jb.post(job_name,
                        book=make_save_book(saver, review['id']),
                        details=details)
                time.sleep(REVIEW_CREATION_DELAY)

    # Return the unstarted thread, and a callback that can be used to
    # shut down that thread (to avoid it running forever).
    return (threading_utils.daemon_thread(target=run), no_more.set)
Example #15
def run_conductor(only_run_once=False):
    # This continuously consumes jobs until it is stopped via ctrl-c or
    # another kill signal...
    event_watches = {}

    # This will be triggered by the conductor doing various activities
    # with engines, and is quite nice to be able to see the various timing
    # segments (which is useful for debugging, or watching, or figuring out
    # where to optimize).
    def on_conductor_event(cond, event, details):
        print("Event '%s' has been received..." % event)
        print("Details = %s" % details)
        if event.endswith("_start"):
            w = timing.StopWatch()
            w.start()
            base_event = event[0:-len("_start")]
            event_watches[base_event] = w
        if event.endswith("_end"):
            base_event = event[0:-len("_end")]
            try:
                w = event_watches.pop(base_event)
                w.stop()
                print("It took %0.3f seconds for event '%s' to finish" %
                      (w.elapsed(), base_event))
            except KeyError:
                pass
        if event == 'running_end' and only_run_once:
            cond.stop()

    print("Starting conductor with pid: %s" % ME)
    my_name = "conductor-%s" % ME
    persist_backend = persistence_backends.fetch(PERSISTENCE_URI)
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            conn.upgrade()
        job_backend = job_backends.fetch(my_name,
                                         JB_CONF,
                                         persistence=persist_backend)
        job_backend.connect()
        with contextlib.closing(job_backend):
            cond = conductor_backends.fetch('blocking',
                                            my_name,
                                            job_backend,
                                            persistence=persist_backend)
            on_conductor_event = functools.partial(on_conductor_event, cond)
            cond.notifier.register(cond.notifier.ANY, on_conductor_event)
            # Run forever, and kill -9 or ctrl-c me...
            try:
                cond.run()
            finally:
                cond.stop()
                cond.wait()
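In the original demo the poster and the conductor are the same script run in different modes; a hedged sketch of the dispatch (argument handling simplified):

import sys

if __name__ == '__main__':
    # Illustrative: 'poster' posts one job, anything else conducts.
    if len(sys.argv) > 1 and sys.argv[1] == 'poster':
        run_poster()
    else:
        run_conductor()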
Example #16
@contextmanager  # assumed decorator: the yield pattern implies a context manager
def jobboard_backend_connection():
    """Get a connection to the jobboard backend and yield it to the caller.

    :yield: The jobboard backend connection.
    """
    persistence_backend = persistence_backends.fetch(PERSISTENCE_CONF)
    job_board_backend = jobboard_backends.fetch(
        CONDUCTOR_NAME, JOBBOARD_CONF, persistence=persistence_backend)
    job_board_backend.connect()
    with closing(job_board_backend) as conn:
        conn.unfiltered_iterjobs = conn.iterjobs
        conn.iterjobs = jobboard_iterator(conn.unfiltered_iterjobs)
        yield conn
Example #17
def run_conductor(only_run_once=False):
    # This continuously consumes jobs until it is stopped via ctrl-c or
    # another kill signal...
    event_watches = {}

    # This will be triggered by the conductor doing various activities
    # with engines, and is quite nice to be able to see the various timing
    # segments (which is useful for debugging, or watching, or figuring out
    # where to optimize).
    def on_conductor_event(cond, event, details):
        print("Event '%s' has been received..." % event)
        print("Details = %s" % details)
        if event.endswith("_start"):
            w = timing.StopWatch()
            w.start()
            base_event = event[0:-len("_start")]
            event_watches[base_event] = w
        if event.endswith("_end"):
            base_event = event[0:-len("_end")]
            try:
                w = event_watches.pop(base_event)
                w.stop()
                print("It took %0.3f seconds for event '%s' to finish"
                      % (w.elapsed(), base_event))
            except KeyError:
                pass
        if event == 'running_end' and only_run_once:
            cond.stop()

    print("Starting conductor with pid: %s" % ME)
    my_name = "conductor-%s" % ME
    persist_backend = persistence_backends.fetch(PERSISTENCE_URI)
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            conn.upgrade()
        job_backend = job_backends.fetch(my_name, JB_CONF,
                                         persistence=persist_backend)
        job_backend.connect()
        with contextlib.closing(job_backend):
            cond = conductor_backends.fetch('blocking', my_name, job_backend,
                                            persistence=persist_backend)
            on_conductor_event = functools.partial(on_conductor_event, cond)
            cond.notifier.register(cond.notifier.ANY, on_conductor_event)
            # Run forever, and kill -9 or ctrl-c me...
            try:
                cond.run()
            finally:
                cond.stop()
                cond.wait()
Example #18
def generate_conductor(client, saver, name=NAME):
    """Creates a conductor thread with the given name prefix."""
    real_name = "%s_conductor" % name
    jb = boards.fetch(name, JOBBOARD_CONF,
                      client=client, persistence=saver)
    conductor = conductors.fetch("blocking", real_name, jb,
                                 engine='parallel', wait_timeout=SCAN_DELAY)

    def run():
        jb.connect()
        with contextlib.closing(jb):
            conductor.run()

    # Return the unstarted thread, and a callback that can be used to
    # shut down that thread (to avoid it running forever).
    return (threading_utils.daemon_thread(target=run), conductor.stop)
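Both generator helpers return an unstarted daemon thread plus a stop callback, so wiring them together might look like this (the driver below is assumed, not from the source):

import time

# Hypothetical driver: start a reviewer and a conductor, then stop both.
reviewer, stop_reviewer = generate_reviewer(client, saver)
runner, stop_conductor = generate_conductor(client, saver)
for t in (reviewer, runner):
    t.start()
time.sleep(60)  # let them work for a while (duration illustrative)
stop_reviewer()
stop_conductor()
for t in (reviewer, runner):
    t.join()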
Example #19
def generate_conductor(client, saver, name=NAME):
    """Creates a conductor thread with the given name prefix."""
    real_name = "%s_conductor" % name
    jb = boards.fetch(name, JOBBOARD_CONF, client=client, persistence=saver)
    conductor = conductors.fetch("blocking",
                                 real_name,
                                 jb,
                                 engine='parallel',
                                 wait_timeout=SCAN_DELAY)

    def run():
        jb.connect()
        with contextlib.closing(jb):
            conductor.run()

    # Return the unstarted thread, and a callback that can be used to
    # shut down that thread (to avoid it running forever).
    return (threading_utils.daemon_thread(target=run), conductor.stop)
Example #20
def main():
    with contextlib.closing(fake_client.FakeClient()) as c:
        created = []
        for i in compat_range(0, PRODUCERS):
            p = threading_utils.daemon_thread(producer, i + 1, c)
            created.append(p)
            p.start()
        consumed = collections.deque()
        for i in compat_range(0, WORKERS):
            w = threading_utils.daemon_thread(worker, i + 1, c, consumed)
            created.append(w)
            w.start()
        while created:
            t = created.pop()
            t.join()
        # At the end there should be nothing left over; let's verify that.
        board = backends.fetch('verifier', SHARED_CONF.copy(), client=c)
        board.connect()
        with contextlib.closing(board):
            if board.job_count != 0 or len(consumed) != EXPECTED_UNITS:
                return 1
            return 0
Example #22
def test_redis_entry_point(self):
    conf = {
        'board': 'redis',
    }
    with contextlib.closing(backends.fetch('test', conf)) as be:
        self.assertIsInstance(be, impl_redis.RedisJobBoard)
Example #23
@contextlib.contextmanager  # assumed: the yield/close pattern implies a context manager
def jobboard(*args, **kwargs):
    jb = job_backends.fetch(*args, **kwargs)
    jb.connect()
    try:
        # Hand the connected board to the caller; always close it afterwards.
        yield jb
    finally:
        jb.close()
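With the context-manager decorator assumed above, usage would look like this (board name and conf are illustrative):

with jobboard('my-board', {'board': 'zookeeper',
                           'hosts': 'localhost:2181'}) as jb:
    print("Jobs on board: %s" % jb.job_count)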
Example #25
def test_zk_entry_point(self):
    conf = {
        'board': 'zookeeper',
    }
    with contextlib.closing(backends.fetch('test', conf)) as be:
        self.assertIsInstance(be, impl_zookeeper.ZookeeperJobBoard)
Example #26
def default_jobboard_backend(name):
    return job_backends.fetch(name,
                              JB_CONF,
                              persistence=default_persistence_backend())