def get_backend(backend_uri=None):
    # NOTE: this is a generator; it is typically wrapped with
    # contextlib.contextmanager so it can be used in a ``with`` block.
    tmp_dir = None
    if not backend_uri:
        tmp_dir = tempfile.mkdtemp()
        backend_uri = "file:///%s" % tmp_dir
    try:
        backend = backends.fetch(_make_conf(backend_uri))
    except exceptions.NotFound as e:
        # Fallback to one that will work if the provided backend is not found.
        if not tmp_dir:
            tmp_dir = tempfile.mkdtemp()
            backend_uri = "file:///%s" % tmp_dir
            LOG.exception("Falling back to file backend using temporary"
                          " directory located at: %s", tmp_dir)
            backend = backends.fetch(_make_conf(backend_uri))
        else:
            raise e
    try:
        # Ensure schema upgraded before we continue working.
        with contextlib.closing(backend.get_connection()) as conn:
            conn.upgrade()
        yield backend
    finally:
        # Make sure to cleanup the temporary path if one was created for us.
        if tmp_dir:
            rm_path(tmp_dir)
def get_backend(backend_uri=None):
    tmp_dir = None
    if not backend_uri:
        if len(sys.argv) > 1:
            backend_uri = str(sys.argv[1])
        if not backend_uri:
            tmp_dir = tempfile.mkdtemp()
            backend_uri = "file:///%s" % tmp_dir
    try:
        backend = backends.fetch(_make_conf(backend_uri))
    except exceptions.NotFound as e:
        # Fallback to one that will work if the provided backend is not found.
        if not tmp_dir:
            tmp_dir = tempfile.mkdtemp()
            backend_uri = "file:///%s" % tmp_dir
            LOG.exception(
                "Falling back to file backend using temporary"
                " directory located at: %s", tmp_dir)
            backend = backends.fetch(_make_conf(backend_uri))
        else:
            raise e
    try:
        # Ensure schema upgraded before we continue working.
        with contextlib.closing(backend.get_connection()) as conn:
            conn.upgrade()
        yield backend
    finally:
        # Make sure to cleanup the temporary path if one was created for us.
        if tmp_dir:
            rm_path(tmp_dir)
def save_factory_details(flow_detail, flow_factory, factory_args,
                         factory_kwargs, backend=None):
    """Saves the given factory's reimportable attributes into the flow detail.

    This function saves the factory name, arguments, and keyword arguments
    into the given flow details object and if a backend is provided it will
    also ensure that the backend saves the flow details after being updated.

    :param flow_detail: FlowDetail that holds state of the flow to load
    :param flow_factory: function or string: function that creates the flow
    :param factory_args: list or tuple of factory positional arguments
    :param factory_kwargs: dict of factory keyword arguments
    :param backend: storage backend to use or configuration
    """
    if not factory_args:
        factory_args = []
    if not factory_kwargs:
        factory_kwargs = {}
    factory_name, _factory_fun = _fetch_validate_factory(flow_factory)
    factory_data = {"factory": {"name": factory_name,
                                "args": factory_args,
                                "kwargs": factory_kwargs}}
    if not flow_detail.meta:
        flow_detail.meta = factory_data
    else:
        flow_detail.meta.update(factory_data)
    if backend is not None:
        if isinstance(backend, dict):
            backend = p_backends.fetch(backend)
        with contextlib.closing(backend.get_connection()) as conn:
            conn.update_flow_details(flow_detail)
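A minimal usage sketch for the helper above, assuming a module-level flow factory; the factory name, flow name, and memory:// backend are illustrative assumptions, not taken from the snippet.

# Hypothetical usage of save_factory_details(); names below are assumptions.
from taskflow.patterns import linear_flow as lf
from taskflow.persistence import backends as p_backends
from taskflow.utils import persistence_utils as p_utils


def make_flow():
    # The factory must be importable by its fully qualified name so the
    # flow can be rebuilt later (e.g. by a conductor).
    return lf.Flow('example')


backend = p_backends.fetch({'connection': 'memory://'})
flow_detail = p_utils.create_flow_detail(make_flow(), backend=backend)
save_factory_details(flow_detail, make_flow, [], {}, backend=backend)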
def get_persistence(self):
    # Rewrite taskflow get backend, so it won't run migrations on each call.
    backend = persistence_backends.fetch(self.persistence_conf)
    with contextlib.closing(backend):
        with contextlib.closing(backend.get_connection()) as conn:
            conn.validate()
        yield backend
def execute_flow(flow):
    """Create all necessary prerequisites like task database and thread pool
    and execute TaskFlow flow.

    :param flow: TaskFlow flow instance
    """
    backend = backends.fetch({
        'connection': 'sqlite:///' + TASK_DATABASE_FILE,
        'isolation_level': 'SERIALIZABLE'
    })
    executor = futurist.ThreadPoolExecutor(max_workers=MAX_WORKERS)
    conn = backend.get_connection()
    logbook, flow_detail = _ensure_db_initialized(conn, flow)
    engine = engines.load(
        flow, flow_detail=flow_detail, backend=backend, book=logbook,
        engine='parallel', executor=executor)
    engine.compile()
    _workaround_reverted_reset(flow_detail)
    try:
        engine.run()
    except exceptions.WrappedFailure as wf:
        for failure in wf:
            if failure.exc_info is not None:
                traceback.print_exception(*failure.exc_info)
            else:
                print(failure)
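A hypothetical caller for execute_flow() above; DummyTask and the flow layout are assumptions, and the module constants and helpers it relies on (TASK_DATABASE_FILE, MAX_WORKERS, _ensure_db_initialized, ...) are presumed to exist where it is defined.

# Hypothetical caller; assumes execute_flow() and its module globals exist.
from taskflow.patterns import linear_flow as lf
from taskflow import task


class DummyTask(task.Task):
    def execute(self):
        print("Running '%s'" % self.name)


flow = lf.Flow('example-flow')
flow.add(DummyTask('step-1'), DummyTask('step-2'))
execute_flow(flow)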
def _create_engine(**kwargs):
    flow = lf.Flow('test-flow').add(utils.DummyTask())
    backend = backends.fetch({'connection': 'memory'})
    flow_detail = pu.create_flow_detail(flow, backend=backend)
    options = kwargs.copy()
    return engine.WorkerBasedActionEngine(flow, flow_detail, backend, options)
def upgrade_backend(self, persistence_backend):
    try:
        backend = backends.fetch(persistence_backend)
        with contextlib.closing(backend.get_connection()) as conn:
            conn.upgrade()
    except exceptions.NotFound as e:
        raise e
def load(flow, store=None, flow_detail=None, book=None, engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE, engine=ENGINE_DEFAULT, **kwargs): """Load a flow into an engine. This function creates and prepares an engine to run the provided flow. All that is left after this returns is to run the engine with the engines :py:meth:`~taskflow.engines.base.Engine.run` method. Which engine to load is specified via the ``engine`` parameter. It can be a string that names the engine type to use, or a string that is a URI with a scheme that names the engine type to use and further options contained in the URI's host, port, and query parameters... Which storage backend to use is defined by the backend parameter. It can be backend itself, or a dictionary that is passed to :py:func:`~taskflow.persistence.backends.fetch` to obtain a viable backend. :param flow: flow to load :param store: dict -- data to put to storage to satisfy flow requirements :param flow_detail: FlowDetail that holds the state of the flow (if one is not provided then one will be created for you in the provided backend) :param book: LogBook to create flow detail in if flow_detail is None :param engine_conf: engine type or URI and options (**deprecated**) :param backend: storage backend to use or configuration that defines it :param namespace: driver namespace for stevedore (or empty for default) :param engine: string engine type or URI string with scheme that contains the engine type and any URI specific components that will become part of the engine options. :param kwargs: arbitrary keyword arguments passed as options (merged with any extracted ``engine`` and ``engine_conf`` options), typically used for any engine specific options that do not fit as any of the existing arguments. :returns: engine """ kind, options = _extract_engine(engine_conf=engine_conf, engine=engine, **kwargs) if isinstance(backend, dict): backend = p_backends.fetch(backend) if flow_detail is None: flow_detail = p_utils.create_flow_detail(flow, book=book, backend=backend) LOG.debug('Looking for %r engine driver in %r', kind, namespace) try: mgr = stevedore.driver.DriverManager( namespace, kind, invoke_on_load=True, invoke_args=(flow, flow_detail, backend, options)) engine = mgr.driver except RuntimeError as e: raise exc.NotFound("Could not find engine '%s'" % (kind), e) else: if store: engine.storage.inject(store) return engine
def load_from_factory(flow_factory, factory_args=None, factory_kwargs=None,
                      store=None, book=None, engine_conf=None, backend=None,
                      namespace=ENGINES_NAMESPACE, engine=ENGINE_DEFAULT,
                      **kwargs):
    """Loads a flow from a factory function into an engine.

    Gets flow factory function (or name of it) and creates flow with it.
    Then, the flow is loaded into an engine with the :func:`load() <load>`
    function, and the factory function fully qualified name is saved to flow
    metadata so that it can be later resumed.

    :param flow_factory: function or string: function that creates the flow
    :param factory_args: list or tuple of factory positional arguments
    :param factory_kwargs: dict of factory keyword arguments

    Further arguments are interpreted as for :func:`load() <load>`.

    :returns: engine
    """
    _factory_name, factory_fun = _fetch_validate_factory(flow_factory)
    if not factory_args:
        factory_args = []
    if not factory_kwargs:
        factory_kwargs = {}
    flow = factory_fun(*factory_args, **factory_kwargs)
    if isinstance(backend, dict):
        backend = p_backends.fetch(backend)
    flow_detail = p_utils.create_flow_detail(flow, book=book, backend=backend)
    save_factory_details(flow_detail, flow_factory,
                         factory_args, factory_kwargs,
                         backend=backend)
    return load(flow=flow, store=store, flow_detail=flow_detail, book=book,
                engine_conf=engine_conf, backend=backend, namespace=namespace,
                engine=engine, **kwargs)
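A sketch of driving load_from_factory() above; the factory, its argument, and the store values are hypothetical.

# Hypothetical usage of load_from_factory(); factory and inputs are examples.
from taskflow.patterns import linear_flow as lf
from taskflow import task


class AddTask(task.Task):
    def execute(self, x, y):
        return x + y


def make_add_flow(flow_name):
    # Kept at module level so its qualified name can be re-imported later.
    return lf.Flow(flow_name).add(AddTask(provides='total'))


engine = load_from_factory(make_add_flow,
                           factory_args=['adder'],
                           store={'x': 1, 'y': 2},
                           backend={'connection': 'memory://'})
engine.run()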
def main():
    # Need to share the same backend, so that data can be shared...
    persistence_conf = {
        'connection': 'memory',
    }
    saver = persistence.fetch(persistence_conf)
    with contextlib.closing(saver.get_connection()) as conn:
        # This ensures that the needed backend setup/data directories/schema
        # upgrades and so on... exist before they are attempted to be used...
        conn.upgrade()
    fc1 = fake_client.FakeClient()
    # Done like this to share the same client storage location so the correct
    # zookeeper features work across clients...
    fc2 = fake_client.FakeClient(storage=fc1.storage)
    entities = [
        generate_reviewer(fc1, saver),
        generate_conductor(fc2, saver),
    ]
    for t, stopper in entities:
        t.start()
    try:
        watch = timeutils.StopWatch(duration=RUN_TIME)
        watch.start()
        while not watch.expired():
            time.sleep(0.1)
    finally:
        for t, stopper in reversed(entities):
            stopper()
            t.join()
def test_entrypoint(self):
    # Test that the entrypoint fetching also works (even with dialects)
    # using the same configuration we used in setUp() but not using
    # the impl_sqlalchemy SQLAlchemyBackend class directly...
    with contextlib.closing(backends.fetch(self.db_conf)) as backend:
        with contextlib.closing(backend.get_connection()):
            pass
def get_notification_recovery_workflow_details(self, context, recovery_method, notification): """Retrieve progress details in notification""" backend = backends.fetch(PERSISTENCE_BACKEND) with contextlib.closing(backend.get_connection()) as conn: progress_details = [] flow_details = conn.get_flows_for_book( notification.notification_uuid) for flow in flow_details: od = OrderedDict() atom_details = list(conn.get_atoms_for_flow(flow.uuid)) # TODO(ShilpaSD): In case recovery_method is auto_priority/ # rh_priority, there is no way to figure out whether the # recovery was done successfully using AUTO or RH flow. # Taskflow stores 'retry_instance_evacuate_engine_retry' task # in case of RH flow so if # 'retry_instance_evacuate_engine_retry' is stored in the # given flow details then the sorting of task details should # happen based on the RH flow. # This logic won't be required after LP #1815738 is fixed. if recovery_method in ['AUTO_PRIORITY', 'RH_PRIORITY']: persisted_task_list = [atom.name for atom in atom_details] if ('retry_instance_evacuate_engine_retry' in persisted_task_list): recovery_method = ( fields.FailoverSegmentRecoveryMethod.RESERVED_HOST) else: recovery_method = ( fields.FailoverSegmentRecoveryMethod.AUTO) # TODO(ShilpaSD): Taskflow doesn't support to return task # details in the same sequence in which all tasks are # executed. Reported this issue in LP #1815738. To resolve # this issue load the tasks based on the recovery method and # later sort it based on this task list so progress_details # can be returned in the expected order. task_list = self._get_taskflow_sequence( context, recovery_method, notification) for task in task_list: for atom in atom_details: if task == atom.name: od[atom.name] = atom for key, value in od.items(): # Add progress_details only if tasks are executed and meta # is available in which progress_details are stored. if value.meta and value.meta.get("progress_details"): progress_details_obj = ( objects.NotificationProgressDetails.create( value.name, value.meta['progress'], value.meta['progress_details']['details'] ['progress_details'], value.state)) progress_details.append(progress_details_obj) return progress_details
def _taskflow_backend_init():
    global _taskflow_backend
    connection = get_config(None, 'taskflow', 'backend_connection')
    if not connection:
        raise Exception(
            'can not find taskflow:backend_connection from configuration file')
    _taskflow_backend = backends.fetch(conf={'connection': connection})
def load(flow, store=None, flow_detail=None, book=None, engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE): """Load flow into engine This function creates and prepares engine to run the flow. All that is left is to run the engine with 'run()' method. Which engine to load is specified in 'engine_conf' parameter. It can be a string that names engine type or a dictionary which holds engine type (with 'engine' key) and additional engine-specific configuration (for example, executor for multithreaded engine). Which storage backend to use is defined by backend parameter. It can be backend itself, or a dictionary that is passed to taskflow.persistence.backends.fetch to obtain backend. :param flow: flow to load :param store: dict -- data to put to storage to satisfy flow requirements :param flow_detail: FlowDetail that holds state of the flow :param book: LogBook to create flow detail in if flow_detail is None :param engine_conf: engine type and configuration configuration :param backend: storage backend to use or configuration :param namespace: driver namespace for stevedore (default is fine if you don't know what is it) :returns: engine """ if engine_conf is None: engine_conf = {'engine': 'default'} # NOTE(imelnikov): this allows simpler syntax if isinstance(engine_conf, six.string_types): engine_conf = {'engine': engine_conf} if isinstance(backend, dict): backend = p_backends.fetch(backend) if flow_detail is None: flow_detail = p_utils.create_flow_detail(flow, book=book, backend=backend) mgr = stevedore.driver.DriverManager(namespace, engine_conf['engine'], invoke_on_load=True, invoke_kwds={ 'conf': engine_conf.copy(), 'flow': flow, 'flow_detail': flow_detail, 'backend': backend }) engine = mgr.driver if store: engine.storage.inject(store) return engine
def run_poster():
    # This just posts a single job and then ends...
    print("Starting poster with pid: %s" % ME)
    my_name = "poster-%s" % ME
    persist_backend = persistence_backends.fetch(PERSISTENCE_URI)
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            conn.upgrade()
        job_backend = job_backends.fetch(my_name, JB_CONF,
                                         persistence=persist_backend)
        job_backend.connect()
        with contextlib.closing(job_backend):
            # Create information in the persistence backend about the
            # unit of work we want to complete and the factory that
            # can be called to create the tasks that the work unit needs
            # to be done.
            lb = models.LogBook("post-from-%s" % my_name)
            fd = models.FlowDetail("song-from-%s" % my_name,
                                   uuidutils.generate_uuid())
            lb.add(fd)
            with contextlib.closing(persist_backend.get_connection()) as conn:
                conn.save_logbook(lb)
            engines.save_factory_details(fd, make_bottles,
                                         [HOW_MANY_BOTTLES], {},
                                         backend=persist_backend)
            # Post, and be done with it!
            jb = job_backend.post("song-from-%s" % my_name, book=lb)
            print("Posted: %s" % jb)
            print("Goodbye...")
def execute_flow(flow):
    """Create all necessary prerequisites like task database and thread pool
    and execute TaskFlow flow.

    :param flow: TaskFlow flow instance
    """
    backend = backends.fetch({
        'connection': 'sqlite:///' + TASK_DATABASE_FILE,
        'isolation_level': 'SERIALIZABLE'
    })
    executor = futurist.ThreadPoolExecutor(max_workers=MAX_WORKERS)
    conn = backend.get_connection()
    logbook, flow_detail = _ensure_db_initialized(conn, flow)
    engine = engines.load(flow, flow_detail=flow_detail, backend=backend,
                          book=logbook, engine='parallel', executor=executor)
    engine.compile()
    _workaround_reverted_reset(flow_detail)
    with MetadataSavingListener(engine, flow_detail):
        try:
            engine.run()
        except exceptions.WrappedFailure as wf:
            for failure in wf:
                if failure.exc_info is not None:
                    traceback.print_exception(*failure.exc_info)
                else:
                    print(failure)
def test_file_persistence_entry_point(self):
    conf = {
        'connection': 'file:',
        'path': self.path
    }
    with contextlib.closing(backends.fetch(conf)) as be:
        self.assertIsInstance(be, impl_dir.DirBackend)
def get_backend():
    try:
        backend_uri = sys.argv[1]
    except Exception:
        backend_uri = 'sqlite://'
    backend = backends.fetch({'connection': backend_uri})
    backend.get_connection().upgrade()
    return backend
def test_dir_persistence_entry_point(self):
    conf = {
        'connection': 'dir:',
        'path': self.path
    }
    backend = backends.fetch(conf)
    self.assertIsInstance(backend, impl_dir.DirBackend)
    backend.close()
def load(flow, store=None, flow_detail=None, book=None, engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE, engine=ENGINE_DEFAULT, **kwargs): """Load a flow into an engine. This function creates and prepares an engine to run the provided flow. All that is left after this returns is to run the engine with the engines ``run()`` method. Which engine to load is specified via the ``engine`` parameter. It can be a string that names the engine type to use, or a string that is a URI with a scheme that names the engine type to use and further options contained in the URI's host, port, and query parameters... Which storage backend to use is defined by the backend parameter. It can be backend itself, or a dictionary that is passed to ``taskflow.persistence.backends.fetch()`` to obtain a viable backend. :param flow: flow to load :param store: dict -- data to put to storage to satisfy flow requirements :param flow_detail: FlowDetail that holds the state of the flow (if one is not provided then one will be created for you in the provided backend) :param book: LogBook to create flow detail in if flow_detail is None :param engine_conf: engine type or URI and options (**deprecated**) :param backend: storage backend to use or configuration that defines it :param namespace: driver namespace for stevedore (or empty for default) :param engine: string engine type or URI string with scheme that contains the engine type and any URI specific components that will become part of the engine options. :param kwargs: arbitrary keyword arguments passed as options (merged with any extracted ``engine`` and ``engine_conf`` options), typically used for any engine specific options that do not fit as any of the existing arguments. :returns: engine """ kind, options = _extract_engine(engine_conf=engine_conf, engine=engine, **kwargs) if isinstance(backend, dict): backend = p_backends.fetch(backend) if flow_detail is None: flow_detail = p_utils.create_flow_detail(flow, book=book, backend=backend) try: mgr = stevedore.driver.DriverManager( namespace, kind, invoke_on_load=True, invoke_args=(flow, flow_detail, backend, options)) engine = mgr.driver except RuntimeError as e: raise exc.NotFound("Could not find engine '%s'" % (kind), e) else: if store: engine.storage.inject(store) return engine
def persistence_backend_connection():
    """Get a connection to the persistence backend and yield the connection
    to the context.

    :yield obj conn: The persistence backend connection
    """
    persist_backend = persistence_backends.fetch(PERSISTENCE_CONF)
    with closing(persist_backend.get_connection()) as conn:
        yield conn
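A short sketch of consuming the generator above, assuming it is wrapped with contextlib.contextmanager where it is defined; the logbook listing is only illustrative.

# Hypothetical consumer; assumes persistence_backend_connection() is
# decorated with @contextlib.contextmanager at its definition site.
with persistence_backend_connection() as conn:
    conn.upgrade()  # make sure the backend schema/directories exist
    for logbook in conn.get_logbooks():
        print(logbook.name)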
def load(flow, store=None, flow_detail=None, book=None, engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE): """Load flow into engine. This function creates and prepares engine to run the flow. All that is left is to run the engine with 'run()' method. Which engine to load is specified in 'engine_conf' parameter. It can be a string that names engine type or a dictionary which holds engine type (with 'engine' key) and additional engine-specific configuration (for example, executor for multithreaded engine). Which storage backend to use is defined by backend parameter. It can be backend itself, or a dictionary that is passed to taskflow.persistence.backends.fetch to obtain backend. :param flow: flow to load :param store: dict -- data to put to storage to satisfy flow requirements :param flow_detail: FlowDetail that holds the state of the flow (if one is not provided then one will be created for you in the provided backend) :param book: LogBook to create flow detail in if flow_detail is None :param engine_conf: engine type and configuration configuration :param backend: storage backend to use or configuration :param namespace: driver namespace for stevedore (default is fine if you don't know what is it) :returns: engine """ if engine_conf is None: engine_conf = {'engine': 'default'} # NOTE(imelnikov): this allows simpler syntax. if isinstance(engine_conf, six.string_types): engine_conf = {'engine': engine_conf} if isinstance(backend, dict): backend = p_backends.fetch(backend) if flow_detail is None: flow_detail = p_utils.create_flow_detail(flow, book=book, backend=backend) mgr = stevedore.driver.DriverManager( namespace, engine_conf['engine'], invoke_on_load=True, invoke_kwds={ 'conf': engine_conf.copy(), 'flow': flow, 'flow_detail': flow_detail, 'backend': backend }) engine = mgr.driver if store: engine.storage.inject(store) return engine
def load_from_factory(flow_factory, factory_args=None, factory_kwargs=None, store=None, book=None, engine_conf=None, backend=None): """Load flow from factory function into engine Gets flow factory function (or name of it) and creates flow with it. Then, flow is loaded into engine with load(), and factory function fully qualified name is saved to flow metadata so that it can be later resumed with resume. :param flow_factory: function or string: function that creates the flow :param factory_args: list or tuple of factory positional arguments :param factory_kwargs: dict of factory keyword arguments :param store: dict -- data to put to storage to satisfy flow requirements :param book: LogBook to create flow detail in :param engine_conf: engine type and configuration configuration :param backend: storage backend to use or configuration :returns: engine """ if isinstance(flow_factory, six.string_types): factory_fun = importutils.import_class(flow_factory) factory_name = flow_factory else: factory_fun = flow_factory factory_name = reflection.get_callable_name(flow_factory) try: reimported = importutils.import_class(factory_name) assert reimported == factory_fun except (ImportError, AssertionError): raise ValueError('Flow factory %r is not reimportable by name %s' % (factory_fun, factory_name)) args = factory_args or [] kwargs = factory_kwargs or {} flow = factory_fun(*args, **kwargs) factory_data = dict(name=factory_name, args=args, kwargs=kwargs) if isinstance(backend, dict): backend = p_backends.fetch(backend) flow_detail = p_utils.create_flow_detail(flow, book=book, backend=backend, meta={'factory': factory_data}) return load(flow=flow, flow_detail=flow_detail, store=store, book=book, engine_conf=engine_conf, backend=backend)
def run_conductor(only_run_once=False):
    # This continuously consumes jobs until it's stopped via ctrl-c or other
    # kill signal...
    event_watches = {}

    # This will be triggered by the conductor doing various activities
    # with engines, and is quite nice to be able to see the various timing
    # segments (which is useful for debugging, or watching, or figuring out
    # where to optimize).
    def on_conductor_event(cond, event, details):
        print("Event '%s' has been received..." % event)
        print("Details = %s" % details)
        if event.endswith("_start"):
            w = timing.StopWatch()
            w.start()
            base_event = event[0:-len("_start")]
            event_watches[base_event] = w
        if event.endswith("_end"):
            base_event = event[0:-len("_end")]
            try:
                w = event_watches.pop(base_event)
                w.stop()
                print("It took %0.3f seconds for event '%s' to finish"
                      % (w.elapsed(), base_event))
            except KeyError:
                pass
        if event == 'running_end' and only_run_once:
            cond.stop()

    print("Starting conductor with pid: %s" % ME)
    my_name = "conductor-%s" % ME
    persist_backend = persistence_backends.fetch(PERSISTENCE_URI)
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            conn.upgrade()
        job_backend = job_backends.fetch(my_name, JB_CONF,
                                         persistence=persist_backend)
        job_backend.connect()
        with contextlib.closing(job_backend):
            cond = conductor_backends.fetch('blocking', my_name, job_backend,
                                            persistence=persist_backend)
            on_conductor_event = functools.partial(on_conductor_event, cond)
            cond.notifier.register(cond.notifier.ANY, on_conductor_event)
            # Run forever, and kill -9 or ctrl-c me...
            try:
                cond.run()
            finally:
                cond.stop()
                cond.wait()
def jobboard_backend_connection():
    """Get a connection to the job board backend and yield the connection
    to the context.

    :yield obj conn: The job board backend connection
    """
    persistence_backend = persistence_backends.fetch(PERSISTENCE_CONF)
    job_board_backend = jobboard_backends.fetch(
        CONDUCTOR_NAME, JOBBOARD_CONF, persistence=persistence_backend)
    job_board_backend.connect()
    with closing(job_board_backend) as conn:
        conn.unfiltered_iterjobs = conn.iterjobs
        conn.iterjobs = jobboard_iterator(conn.unfiltered_iterjobs)
        yield conn
def load_from_factory(flow_factory, factory_args=None, factory_kwargs=None, store=None, book=None, engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE): """Loads a flow from a factory function into an engine. Gets flow factory function (or name of it) and creates flow with it. Then, flow is loaded into engine with load(), and factory function fully qualified name is saved to flow metadata so that it can be later resumed with resume. :param flow_factory: function or string: function that creates the flow :param factory_args: list or tuple of factory positional arguments :param factory_kwargs: dict of factory keyword arguments :param store: dict -- data to put to storage to satisfy flow requirements :param book: LogBook to create flow detail in :param engine_conf: engine type and configuration configuration :param backend: storage backend to use or configuration :param namespace: driver namespace for stevedore (default is fine if you don't know what is it) :returns: engine """ _factory_name, factory_fun = _fetch_validate_factory(flow_factory) if not factory_args: factory_args = [] if not factory_kwargs: factory_kwargs = {} flow = factory_fun(*factory_args, **factory_kwargs) if isinstance(backend, dict): backend = p_backends.fetch(backend) flow_detail = p_utils.create_flow_detail(flow, book=book, backend=backend) save_factory_details(flow_detail, flow_factory, factory_args, factory_kwargs, backend=backend) return load(flow=flow, store=store, flow_detail=flow_detail, book=book, engine_conf=engine_conf, backend=backend, namespace=namespace)
def load_taskflow_into_engine(action, nested_flow, process_what):
    book = None
    backend = None
    if PERSISTENCE_BACKEND:
        backend = backends.fetch(PERSISTENCE_BACKEND)
        with contextlib.closing(backend.get_connection()) as conn:
            try:
                book = conn.get_logbook(process_what['notification_uuid'])
            except exceptions.NotFound:
                pass
            if book is None:
                book = models.LogBook(action,
                                      process_what['notification_uuid'])
    return taskflow.engines.load(nested_flow, store=process_what,
                                 backend=backend, book=book)
def get_backend():
    global __backend
    if __backend is not None:
        return __backend
    backend_uri = get_backend_uri()
    try:
        __backend = backends.fetch(_make_conf(backend_uri))
    except Exception as e:
        _logger.error(r'call backends.fetch failed : {}'.format(e),
                      exc_info=True)
        raise e
    # Ensure schema upgraded before we continue working.
    with contextlib.closing(__backend.get_connection()) as conn:
        conn.upgrade()
    return __backend
def save_flow_factory_into_flow_detail(flow_detail, flow_factory,
                                       factory_args=None,
                                       factory_kwargs=None):
    """Save a flow factory into a flow detail.

    :param obj flow_detail: A flow detail
    :param obj flow_factory: A function that returns a flow
    :param list factory_args: The args to pass to the flow factory during
        flow pickup time in the conductor
    :param dict factory_kwargs: The kwargs to pass to the flow factory during
        flow pickup time in the conductor
    :return None:
    """
    persist_backend = persistence_backends.fetch(PERSISTENCE_CONF)
    engines.save_factory_details(flow_detail=flow_detail,
                                 flow_factory=flow_factory,
                                 factory_args=factory_args or list(),
                                 factory_kwargs=factory_kwargs or dict(),
                                 backend=persist_backend)
def load_from_factory(
    flow_factory, factory_args=None, factory_kwargs=None, store=None,
    book=None, engine_conf=None, backend=None
):
    """Load flow from factory function into engine.

    Gets flow factory function (or name of it) and creates flow with it.
    Then, flow is loaded into engine with load(), and factory function
    fully qualified name is saved to flow metadata so that it can be later
    resumed with resume.

    :param flow_factory: function or string: function that creates the flow
    :param factory_args: list or tuple of factory positional arguments
    :param factory_kwargs: dict of factory keyword arguments
    :param store: dict -- data to put to storage to satisfy flow requirements
    :param book: LogBook to create flow detail in
    :param engine_conf: engine type and configuration
    :param backend: storage backend to use or configuration
    :returns: engine
    """
    if isinstance(flow_factory, six.string_types):
        factory_fun = importutils.import_class(flow_factory)
        factory_name = flow_factory
    else:
        factory_fun = flow_factory
        factory_name = reflection.get_callable_name(flow_factory)
        try:
            reimported = importutils.import_class(factory_name)
            assert reimported == factory_fun
        except (ImportError, AssertionError):
            raise ValueError("Flow factory %r is not reimportable by name %s"
                             % (factory_fun, factory_name))
    args = factory_args or []
    kwargs = factory_kwargs or {}
    flow = factory_fun(*args, **kwargs)
    factory_data = dict(name=factory_name, args=args, kwargs=kwargs)
    if isinstance(backend, dict):
        backend = p_backends.fetch(backend)
    flow_detail = p_utils.create_flow_detail(flow, book=book, backend=backend,
                                             meta={"factory": factory_data})
    return load(flow=flow, flow_detail=flow_detail, store=store, book=book,
                engine_conf=engine_conf, backend=backend)
def main():
    persistence = persistence_backends.fetch({
        'connection': 'sqlite:////tmp/taskflow.db'
    })
    board = HypernodeJobBoard('my-board', {
        "hosts": "localhost",
    }, persistence=persistence)
    # board = job_backends.fetch("my-board", {
    #     "board": "zookeeper",
    #     "hosts": "localhost",
    #     "path": "/jobboard",
    # }, persistence=persistence)
    board.connect()
    # conductor = conductors.fetch("blocking", "executor 1", board,
    #                              engine="parallel", wait_timeout=.1)
    conductor = AsyncConductor("async", board, engine="parallel")
    with contextlib.closing(board):
        conductor.run()
def save_factory_details(flow_detail, flow_factory, factory_args, factory_kwargs, backend=None): """Saves the given factories reimportable name, args, kwargs into the flow detail. This function saves the factory name, arguments, and keyword arguments into the given flow details object and if a backend is provided it will also ensure that the backend saves the flow details after being updated. :param flow_detail: FlowDetail that holds state of the flow to load :param flow_factory: function or string: function that creates the flow :param factory_args: list or tuple of factory positional arguments :param factory_kwargs: dict of factory keyword arguments :param backend: storage backend to use or configuration """ if not factory_args: factory_args = [] if not factory_kwargs: factory_kwargs = {} factory_name, _factory_fun = _fetch_validate_factory(flow_factory) factory_data = { 'factory': { 'name': factory_name, 'args': factory_args, 'kwargs': factory_kwargs, }, } if not flow_detail.meta: flow_detail.meta = factory_data else: flow_detail.meta.update(factory_data) if backend is not None: if isinstance(backend, dict): backend = p_backends.fetch(backend) with contextlib.closing(backend.get_connection()) as conn: conn.update_flow_details(flow_detail)
def create_persistence(conf=None, **kwargs):
    """Factory method for creating a persistence backend instance.

    :param conf: Configuration parameters for the persistence backend. If no
        conf is provided, zookeeper configuration parameters for the job
        backend will be used to configure the persistence backend.
    :param kwargs: Keyword arguments to be passed forward to the persistence
        backend constructor
    :return: A persistence backend instance.
    """
    if conf is None:
        connection = cfg.CONF.taskflow.persistence_connection
        if connection is None:
            connection = ("zookeeper://%s/%s" % (
                cfg.CONF.taskflow.zk_hosts,
                cfg.CONF.taskflow.zk_path,
            ))
        conf = _make_conf(connection)
    be = persistence_backends.fetch(conf=conf, **kwargs)
    with contextlib.closing(be.get_connection()) as conn:
        conn.upgrade()
    return be
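A hedged example of calling create_persistence() above with an explicit conf so nothing is read from oslo.config; the memory:// connection is an assumption, mirroring the dict form accepted by persistence_backends.fetch() elsewhere in these snippets.

# Hypothetical usage of create_persistence(); the explicit conf is an example.
import contextlib

persistence = create_persistence(conf={'connection': 'memory://'})
with contextlib.closing(persistence.get_connection()) as conn:
    print(list(conn.get_logbooks()))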
def load_from_factory(flow_factory, factory_args=None, factory_kwargs=None,
                      store=None, book=None, engine_conf=None, backend=None,
                      namespace=ENGINES_NAMESPACE, **kwargs):
    """Loads a flow from a factory function into an engine.

    Gets flow factory function (or name of it) and creates flow with it.
    Then, flow is loaded into engine with load(), and factory function
    fully qualified name is saved to flow metadata so that it can be later
    resumed with resume.

    :param flow_factory: function or string: function that creates the flow
    :param factory_args: list or tuple of factory positional arguments
    :param factory_kwargs: dict of factory keyword arguments
    :param store: dict -- data to put to storage to satisfy flow requirements
    :param book: LogBook to create flow detail in
    :param engine_conf: engine type and configuration
    :param backend: storage backend to use or configuration
    :param namespace: driver namespace for stevedore (default is fine if you
        don't know what it is)
    :returns: engine
    """
    _factory_name, factory_fun = _fetch_validate_factory(flow_factory)
    if not factory_args:
        factory_args = []
    if not factory_kwargs:
        factory_kwargs = {}
    flow = factory_fun(*factory_args, **factory_kwargs)
    if isinstance(backend, dict):
        backend = p_backends.fetch(backend)
    flow_detail = p_utils.create_flow_detail(flow, book=book, backend=backend)
    save_factory_details(flow_detail, flow_factory,
                         factory_args, factory_kwargs,
                         backend=backend)
    return load(flow=flow, store=store, flow_detail=flow_detail, book=book,
                engine_conf=engine_conf, backend=backend, namespace=namespace,
                **kwargs)
from taskflow import engines
from taskflow.patterns import linear_flow as lf
from taskflow.persistence import backends
from taskflow import task
from taskflow.utils import persistence_utils as pu

# INTRO: in this example we create a dummy flow with a dummy task, and run
# it using an in-memory backend and pre/post run we dump out the contents
# of the in-memory backends tree structure (which can be quite useful to
# look at for debugging or other analysis).


class PrintTask(task.Task):
    def execute(self):
        print("Running '%s'" % self.name)


backend = backends.fetch({
    'connection': 'memory://',
})
book, flow_detail = pu.temporary_flow_detail(backend=backend)

# Make a little flow and run it...
f = lf.Flow('root')
for alpha in ['a', 'b', 'c']:
    f.add(PrintTask(alpha))

e = engines.load(f, flow_detail=flow_detail, book=book, backend=backend)
e.compile()
e.prepare()

print("----------")
print("Before run")
def test_memory_backend_fetch_by_name(self):
    conf = {'connection': 'memory'}  # note no colon
    with contextlib.closing(backends.fetch(conf)) as be:
        self.assertIsInstance(be, impl_memory.MemoryBackend)
def test_memory_backend_entry_point(self):
    conf = {'connection': 'memory:'}
    with contextlib.closing(backends.fetch(conf)) as be:
        self.assertIsInstance(be, impl_memory.MemoryBackend)
def test_zk_persistence_entry_point(self):
    conf = {'connection': 'zookeeper:'}
    with contextlib.closing(backends.fetch(conf)) as be:
        self.assertIsInstance(be, impl_zookeeper.ZkBackend)
def default_persistence_backend():
    return persistence_backends.fetch(PERSISTENCE_CONF)
def setUp(self):
    super(StorageSQLTest, self).setUp()
    self.backend = backends.fetch({"connection": "sqlite://"})
    with contextlib.closing(self.backend.get_connection()) as conn:
        conn.upgrade()
def test_sqlite_persistence_entry_point(self):
    conf = {'connection': 'sqlite:///'}
    with contextlib.closing(backends.fetch(conf)) as be:
        self.assertIsInstance(be, impl_sqlalchemy.SQLAlchemyBackend)
def test_postgres_persistence_entry_point(self): uri = "postgresql://%s:%s@localhost/%s" % (USER, PASSWD, DATABASE) conf = {'connection': uri} with contextlib.closing(backends.fetch(conf)) as be: self.assertIsInstance(be, impl_sqlalchemy.SQLAlchemyBackend)
def test_postgres_persistence_entry_point(self):
    uri = _get_connect_string('postgres', USER, PASSWD, database=DATABASE)
    conf = {'connection': uri}
    with contextlib.closing(backends.fetch(conf)) as be:
        self.assertIsInstance(be, impl_sqlalchemy.SQLAlchemyBackend)
import contextlib
import logging

from taskflow.jobs import backends as job_backends
from taskflow.persistence import backends as persistence_backends

from board import HypernodeJobBoard

logging.basicConfig(level=logging.WARNING)

persistence = persistence_backends.fetch({
    "connection": "zookeeper",
    "hosts": "localhost",
    "path": "/taskflow",
})

# board = job_backends.fetch("my-board", {
#     "board": "zookeeper",
#     "hosts": "localhost",
# }, persistence=persistence)

board = HypernodeJobBoard('my-board', {
    "hosts": "localhost",
}, persistence=persistence)

board.connect()
with contextlib.closing(board):
    print("All jobs:")
    for job in board.iterjobs(ensure_fresh=True, only_unclaimed=False):
        print(job)
def get_taskflow_backend():
    backend = backends.fetch(conf)
    with contextlib.closing(backend.get_connection()) as conn:
        conn.upgrade()
    return backend
def _get_persistence_backend(conf):
    return persistence_backends.fetch({
        'connection': conf.taskflow.connection,
    })
def get_notification_recovery_workflow_details(self, context, recovery_method, notification): """Retrieve progress details in notification""" backend = backends.fetch(PERSISTENCE_BACKEND) with contextlib.closing(backend.get_connection()) as conn: progress_details = [] flow_details = conn.get_flows_for_book( notification.notification_uuid) for flow in flow_details: od = OrderedDict() atom_details = list(conn.get_atoms_for_flow(flow.uuid)) # TODO(ShilpaSD): In case recovery_method is auto_priority/ # rh_priority, there is no way to figure out whether the # recovery was done successfully using AUTO or RH flow. # Taskflow stores 'retry_instance_evacuate_engine_retry' task # in case of RH flow so if # 'retry_instance_evacuate_engine_retry' is stored in the # given flow details then the sorting of task details should # happen based on the RH flow. # This logic won't be required after LP #1815738 is fixed. if recovery_method in ['AUTO_PRIORITY', 'RH_PRIORITY']: persisted_task_list = [atom.name for atom in atom_details] if ('retry_instance_evacuate_engine_retry' in persisted_task_list): recovery_method = ( fields.FailoverSegmentRecoveryMethod. RESERVED_HOST) else: recovery_method = ( fields.FailoverSegmentRecoveryMethod.AUTO) # TODO(ShilpaSD): Taskflow doesn't support to return task # details in the same sequence in which all tasks are # executed. Reported this issue in LP #1815738. To resolve # this issue load the tasks based on the recovery method and # later sort it based on this task list so progress_details # can be returned in the expected order. task_list = self._get_taskflow_sequence(context, recovery_method, notification) for task in task_list: for atom in atom_details: if task == atom.name: od[atom.name] = atom for key, value in od.items(): # Add progress_details only if tasks are executed and meta # is available in which progress_details are stored. if value.meta: progress_details_obj = ( objects.NotificationProgressDetails.create( value.name, value.meta['progress'], value.meta['progress_details']['details'] ['progress_details'], value.state)) progress_details.append(progress_details_obj) return progress_details
def setUp(self):
    super(StorageMemoryTest, self).setUp()
    self.backend = backends.fetch({"connection": "memory://"})