def test_fail_run(self):
    components = self.make_components()
    components.conductor.connect()
    consumed_event = threading_utils.Event()

    def on_consume(state, details):
        consumed_event.set()

    components.board.notifier.register(jobboard.REMOVAL, on_consume)
    with close_many(components.conductor, components.client):
        t = threading_utils.daemon_thread(components.conductor.run)
        t.start()
        lb, fd = pu.temporary_flow_detail(components.persistence)
        engines.save_factory_details(fd, test_factory,
                                     [True], {},
                                     backend=components.persistence)
        components.board.post('poke', lb,
                              details={'flow_uuid': fd.uuid})
        self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
        self.assertTrue(components.conductor.stop(test_utils.WAIT_TIMEOUT))
        self.assertFalse(components.conductor.dispatching)

    persistence = components.persistence
    with contextlib.closing(persistence.get_connection()) as conn:
        lb = conn.get_logbook(lb.uuid)
        fd = lb.find(fd.uuid)
    self.assertIsNotNone(fd)
    self.assertEqual(st.REVERTED, fd.state)

def run_poster():
    # This just posts a single job and then ends...
    print("Starting poster with pid: %s" % ME)
    my_name = "poster-%s" % ME
    persist_backend = persistence_backends.fetch(PERSISTENCE_URI)
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            conn.upgrade()
        job_backend = job_backends.fetch(my_name, JB_CONF,
                                         persistence=persist_backend)
        job_backend.connect()
        with contextlib.closing(job_backend):
            # Create information in the persistence backend about the
            # unit of work we want to complete and the factory that
            # can be called to create the tasks that the work unit needs
            # to be done.
            lb = models.LogBook("post-from-%s" % my_name)
            fd = models.FlowDetail("song-from-%s" % my_name,
                                   uuidutils.generate_uuid())
            lb.add(fd)
            with contextlib.closing(persist_backend.get_connection()) as conn:
                conn.save_logbook(lb)
            engines.save_factory_details(fd, make_bottles,
                                         [HOW_MANY_BOTTLES], {},
                                         backend=persist_backend)
            # Post, and be done with it!
            jb = job_backend.post("song-from-%s" % my_name, book=lb)
            print("Posted: %s" % jb)
            print("Goodbye...")

def test_combined_store(self):
    components = self.make_components()
    components.conductor.connect()
    consumed_event = threading.Event()

    def on_consume(state, details):
        consumed_event.set()

    flow_store = {'x': True, 'y': False}
    job_store = {'z': None}

    components.board.notifier.register(base.REMOVAL, on_consume)
    with close_many(components.conductor, components.client):
        t = threading_utils.daemon_thread(components.conductor.run)
        t.start()
        lb, fd = pu.temporary_flow_detail(components.persistence,
                                          meta={'store': flow_store})
        engines.save_factory_details(fd, test_store_factory,
                                     [], {},
                                     backend=components.persistence)
        components.board.post('poke', lb,
                              details={'flow_uuid': fd.uuid,
                                       'store': job_store})
        self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
        components.conductor.stop()
        self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
        self.assertFalse(components.conductor.dispatching)

    persistence = components.persistence
    with contextlib.closing(persistence.get_connection()) as conn:
        lb = conn.get_logbook(lb.uuid)
        fd = lb.find(fd.uuid)
    self.assertIsNotNone(fd)
    self.assertEqual(st.SUCCESS, fd.state)

def submit_task(self, flow_factory, **kwargs):
    """Submit a task."""
    with self.persistence as persistence:
        with self.driver.job_board(self.jobboard_backend_conf.copy(),
                                   persistence=persistence) as board:
            job_id = uuidutils.generate_uuid()
            job_name = '-'.join([flow_factory.__name__, job_id])
            job_logbook = models.LogBook(job_name)
            flow_detail = models.FlowDetail(job_name,
                                            uuidutils.generate_uuid())
            factory_args = ()
            factory_kwargs = {}
            engines.save_factory_details(flow_detail, flow_factory,
                                         factory_args, factory_kwargs)
            job_logbook.add(flow_detail)
            persistence.get_connection().save_logbook(job_logbook)
            job_details = {'store': kwargs}
            job = board.post(job_name, book=job_logbook,
                             details=job_details)
            LOG.info("{0} posted".format(job))

def test_run_max_dispatches(self):
    components = self.make_components()
    components.conductor.connect()
    consumed_event = threading.Event()

    def on_consume(state, details):
        consumed_event.set()

    components.board.notifier.register(base.REMOVAL, on_consume)
    with close_many(components.client, components.conductor):
        t = threading_utils.daemon_thread(
            lambda: components.conductor.run(max_dispatches=5))
        t.start()
        lb, fd = pu.temporary_flow_detail(components.persistence)
        engines.save_factory_details(fd, test_factory,
                                     [False], {},
                                     backend=components.persistence)
        for _ in range(5):
            components.board.post('poke', lb,
                                  details={'flow_uuid': fd.uuid})
            self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
        components.board.post('poke', lb,
                              details={'flow_uuid': fd.uuid})
        components.conductor.stop()
        self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
        self.assertFalse(components.conductor.dispatching)

def post_remote_pipeline_job(pipeline):
    ME = os.getpid()
    print("Starting poster with pid: %s" % ME)
    my_name = "poster-%s" % ME
    persist_backend = backend_helper.default_persistence_backend()
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            conn.upgrade()
        job_backend = backend_helper.default_jobboard_backend(my_name)
        job_backend.connect()
        with contextlib.closing(job_backend):
            # Create information in the persistence backend about the
            # unit of work we want to complete and the factory that
            # can be called to create the tasks that the work unit needs
            # to be done.
            lb = logbook.LogBook("post-from-%s" % my_name)
            fd = logbook.FlowDetail("sample-from-%s" % my_name,
                                    uuidutils.generate_uuid())
            lb.add(fd)
            with contextlib.closing(persist_backend.get_connection()) as conn:
                conn.save_logbook(lb)
            engines.save_factory_details(fd,
                                         pipeline_factory.make_pipeline_flow,
                                         [pipeline.name],
                                         pipeline.kwargs,
                                         backend=persist_backend)
            # Post, and be done with it!
            jb = job_backend.post("sample-job-from-%s" % my_name, book=lb)
            print("Posted: %s" % jb)
            return jb

def test_run(self):
    components = self.make_components()
    components.conductor.connect()
    consumed_event = threading.Event()

    def on_consume(state, details):
        consumed_event.set()

    components.board.notifier.register(base.REMOVAL, on_consume)
    with close_many(components.conductor, components.client):
        t = threading_utils.daemon_thread(components.conductor.run)
        t.start()
        lb, fd = pu.temporary_flow_detail(components.persistence)
        engines.save_factory_details(fd, test_factory,
                                     [False], {},
                                     backend=components.persistence)
        components.board.post('poke', lb,
                              details={'flow_uuid': fd.uuid})
        self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
        components.conductor.stop()
        self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
        self.assertFalse(components.conductor.dispatching)

    persistence = components.persistence
    with contextlib.closing(persistence.get_connection()) as conn:
        lb = conn.get_logbook(lb.uuid)
        fd = lb.find(fd.uuid)
    self.assertIsNotNone(fd)
    self.assertEqual(st.SUCCESS, fd.state)

def submit_task(self, flow_factory, **kwargs):
    """Submit a task."""
    with self.persistence as persistence:
        with self.driver.job_board(
                self.jobboard_backend_conf_worker.copy(),
                persistence=persistence) as board:
            job_id = uuidutils.generate_uuid()
            job_name = '-'.join([flow_factory.__name__, job_id])
            job_logbook = logbook.LogBook(job_name)
            flow_detail = logbook.FlowDetail(job_name,
                                             uuidutils.generate_uuid())
            factory_args = ()
            factory_kwargs = {}
            engines.save_factory_details(flow_detail, flow_factory,
                                         factory_args, factory_kwargs)
            job_logbook.add(flow_detail)
            persistence.get_connection().save_logbook(job_logbook)
            job_details = {
                'store': kwargs
            }
            job = board.post(job_name, book=job_logbook,
                             details=job_details)
            LOG.info("Posted: {0}".format(job))

def make_save_book(persistence, job_id,
                   flow_plugin, plugin_args=(), plugin_kwds={}):
    flow_id = book_id = job_id  # Do these need to be different?
    book = models.LogBook(book_id)
    detail = models.FlowDetail(flow_id, uuidutils.generate_uuid())
    book.add(detail)

    factory_args = [flow_plugin] + list(plugin_args)
    factory_kwargs = plugin_kwds
    engines.save_factory_details(detail, workflow_factory,
                                 factory_args, factory_kwargs)
    with contextlib.closing(persistence.get_connection()) as conn:
        conn.save_logbook(book)
    return book

def post(self, flow_factory, job_args=None,
         flow_args=None, flow_kwargs=None, tx_uuid=None):
    """Method for posting a new job to the jobboard.

    :param flow_factory: Flow factory function for creating a flow instance
                         that will be executed as part of the job.
    :param job_args: 'store' arguments to be supplied to the engine
                     executing the flow for the job
    :param flow_args: Positional arguments to be passed to the flow factory
                      function
    :param flow_kwargs: Keyword arguments to be passed to the flow factory
                        function
    :param tx_uuid: Transaction UUID which will be injected as 'tx_uuid' in
                    job_args. A tx_uuid will be generated if one is not
                    provided as an argument.
    :return: A taskflow.job.Job instance that represents the job that was
             posted.
    """
    if isinstance(job_args, dict) and 'tx_uuid' in job_args:
        raise AttributeError("tx_uuid needs to be provided as an argument "
                             "to Client.post, not as a member of job_args")

    if tx_uuid is None:
        tx_uuid = uuidutils.generate_uuid()

    job_name = "%s[%s]" % (flow_factory.__name__, tx_uuid)
    book = persistence_models.LogBook(job_name, uuid=tx_uuid)

    if flow_factory is not None:
        flow_detail = persistence_models.FlowDetail(
            job_name, str(uuid.uuid4()))
        book.add(flow_detail)

    job_details = {'store': job_args or {}}
    job_details['store'].update({'tx_uuid': tx_uuid})
    job_details['flow_uuid'] = flow_detail.uuid

    self.persistence.get_connection().save_logbook(book)

    engines.save_factory_details(flow_detail, flow_factory,
                                 flow_args, flow_kwargs,
                                 self.persistence)

    job = self.jobboard.post(job_name, book, details=job_details)
    return job

def test_stop_aborts_engine(self):
    components = self.make_components()
    components.conductor.connect()
    consumed_event = threading.Event()
    job_consumed_event = threading.Event()
    job_abandoned_event = threading.Event()
    running_start_event = threading.Event()

    def on_running_start(event, details):
        running_start_event.set()

    def on_consume(state, details):
        consumed_event.set()

    def on_job_consumed(event, details):
        if event == 'job_consumed':
            job_consumed_event.set()

    def on_job_abandoned(event, details):
        if event == 'job_abandoned':
            job_abandoned_event.set()

    components.board.notifier.register(base.REMOVAL, on_consume)
    components.conductor.notifier.register("job_consumed",
                                           on_job_consumed)
    components.conductor.notifier.register("job_abandoned",
                                           on_job_abandoned)
    components.conductor.notifier.register("running_start",
                                           on_running_start)
    with close_many(components.conductor, components.client):
        t = threading_utils.daemon_thread(components.conductor.run)
        t.start()
        lb, fd = pu.temporary_flow_detail(components.persistence)
        engines.save_factory_details(fd, sleep_factory,
                                     [], {},
                                     backend=components.persistence)
        components.board.post('poke', lb,
                              details={'flow_uuid': fd.uuid,
                                       'store': {'duration': 2}})
        running_start_event.wait(test_utils.WAIT_TIMEOUT)
        components.conductor.stop()
        job_abandoned_event.wait(test_utils.WAIT_TIMEOUT)
        self.assertTrue(job_abandoned_event.is_set())
        self.assertFalse(job_consumed_event.is_set())
        self.assertFalse(consumed_event.is_set())

def save_flow_factory_into_flow_detail(flow_detail, flow_factory,
                                       factory_args=None,
                                       factory_kwargs=None):
    """Save a flow factory into a flow detail

    :param obj flow_detail: A flow detail
    :param obj flow_factory: A function that returns a flow
    :param list factory_args: The args to pass to the flow factory during
        flow pickup time in the conductor
    :param dict factory_kwargs: The kwargs to pass to the flow factory during
        flow pickup time in the conductor
    :return None:
    """
    persist_backend = persistence_backends.fetch(PERSISTENCE_CONF)
    engines.save_factory_details(flow_detail=flow_detail,
                                 flow_factory=flow_factory,
                                 factory_args=factory_args or list(),
                                 factory_kwargs=factory_kwargs or dict(),
                                 backend=persist_backend)

def run_poster(self, flow_factory, *args, wait=False, **kwargs):
    with self.driver.persistence_driver.get_persistence() as persistence:
        with self.driver.job_board(persistence) as job_board:
            job_id = uuidutils.generate_uuid()
            job_name = '-'.join([flow_factory.__name__, job_id])
            job_logbook = models.LogBook(job_name)
            flow_detail = models.FlowDetail(job_name, job_id)
            job_details = {'store': kwargs.pop('store')}
            job_logbook.add(flow_detail)
            persistence.get_connection().save_logbook(job_logbook)
            engines.save_factory_details(flow_detail, flow_factory,
                                         args, kwargs,
                                         backend=persistence)
            job_board.post(job_name, book=job_logbook,
                           details=job_details)
            if wait:
                self._wait_for_job(job_board)

            return job_id

def post_remote_pipeline_job_and_wait(pipeline, jobboard_name):
    """Post a pipeline job and wait until it is finished."""
    my_name = POSTER_NAME
    logger.info("Starting poster with name: %s" % my_name)
    persist_backend = backend_helper.default_persistence_backend()
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            conn.upgrade()
        jobboard = backend_helper.get_jobboard(my_name, jobboard_name)
        jobboard.connect()
        with contextlib.closing(jobboard):
            # Create information in the persistence backend about the
            # unit of work we want to complete and the factory that
            # can be called to create the tasks that the work unit needs
            # to be done.
            lb = logbook.LogBook("post-from-%s" % my_name)
            flow_uuid = uuidutils.generate_uuid()
            fd = logbook.FlowDetail("flow-of-%s" % my_name, flow_uuid)
            lb.add(fd)
            with contextlib.closing(persist_backend.get_connection()) as conn:
                conn.save_logbook(lb)
            engines.save_factory_details(fd,
                                         pipeline_factory.make_pipeline_flow,
                                         [pipeline.name, True],
                                         pipeline.kwargs,
                                         backend=persist_backend)
            # Post, and be done with it!
            jb = jobboard.post("job-from-%s" % my_name, book=lb)
            logger.info('Posted: %s' % jb)

            # TODO(cbao): Move wait until into a separate method.
            # TODO(lukesneeringer): ...and fix the logging.
            state = states.UNCLAIMED
            print('Job status: %s' % state)
            while state != states.COMPLETE:
                if jb.state != state:
                    state = jb.state
                    print('Job status: %s' % state)
                time.sleep(1)
            return jb

def make_save_book(saver, review_id):
    # Record what we want to happen (sometime in the future).
    book = models.LogBook("book_%s" % review_id)
    detail = models.FlowDetail("flow_%s" % review_id,
                               uuidutils.generate_uuid())
    book.add(detail)

    # Associate the factory method we want to be called (in the future)
    # with the book, so that the conductor will be able to call into
    # that factory to retrieve the workflow objects that represent the
    # work.
    #
    # These args and kwargs *can* be used to save any specific parameters
    # into the factory when it is being called to create the workflow
    # objects (typically used to tell a factory how to create a unique
    # workflow that represents this review).
    factory_args = ()
    factory_kwargs = {}
    engines.save_factory_details(detail, create_review_workflow,
                                 factory_args, factory_kwargs)
    with contextlib.closing(saver.get_connection()) as conn:
        conn.save_logbook(book)
    return book

def make_save_book(saver, review_id):
    # Record what we want to happen (sometime in the future).
    book = logbook.LogBook("book_%s" % review_id)
    detail = logbook.FlowDetail("flow_%s" % review_id,
                                uuidutils.generate_uuid())
    book.add(detail)

    # Associate the factory method we want to be called (in the future)
    # with the book, so that the conductor will be able to call into
    # that factory to retrieve the workflow objects that represent the
    # work.
    #
    # These args and kwargs *can* be used to save any specific parameters
    # into the factory when it is being called to create the workflow
    # objects (typically used to tell a factory how to create a unique
    # workflow that represents this review).
    factory_args = ()
    factory_kwargs = {}
    engines.save_factory_details(detail, create_review_workflow,
                                 factory_args, factory_kwargs)
    with contextlib.closing(saver.get_connection()) as conn:
        conn.save_logbook(book)
    return book

def get_or_create_book(name):
    for lb in conn.get_logbooks():
        if lb.name == name:
            return lb
    return models.LogBook(name)


book = get_or_create_book(app_name)
flow_detail = models.FlowDetail("some flow (testflow)",
                                uuid=uuidutils.generate_uuid())
book.add(flow_detail)
conn.save_logbook(book)

save_factory_details(flow_detail, flow_factory, (), {},
                     backend=persistence)

board = HypernodeJobBoard("my-board", {"hosts": "localhost"},
                          persistence=persistence)
# board = job_backends.fetch("my-board", {
#     "board": "zookeeper",
#     "hosts": "localhost",
# }, persistence=persistence)
board.connect()
with contextlib.closing(board):
    job = board.post(
        "my-first-job",
        book,
        details={"flow_uuid": flow_detail.uuid,
                 "store": {"msg": "hoi", "app": app_name},
                 "app": app_name})