def print_task_states(flowdetail, msg):
    """Dump the flow's state followed by every task's state and result."""
    eu.print_wrapped(msg)
    print("Flow '%s' state: %s" % (flowdetail.name, flowdetail.state))
    # Sort on (name, version, state, results) so the printed order is
    # deterministic regardless of how the flow detail stores its items
    # (keeps test validation from being confused by iteration order).
    task_rows = sorted((td.name, td.version, td.state, td.results)
                       for td in flowdetail)
    for row in task_rows:
        print(" %s==%s: %s, result=%s" % row)
def print_task_states(flowdetail, msg):
    """Print a wrapped header, the flow's state, then each task's state."""
    eu.print_wrapped(msg)
    print("Flow '%s' state: %s" % (flowdetail.name, flowdetail.state))
    # Collect first, then sort, so that test validation doesn't get
    # confused by the (unspecified) order of items in the flow detail.
    rows = [(td.name, td.version, td.state, td.results)
            for td in flowdetail]
    for row in sorted(rows):
        print(" %s==%s: %s, result=%s" % row)
def execute(self):
    """Print this task's text, optionally pacing output via slow_down()."""
    # Fast path: emit immediately when slowing down is disabled.
    if self._no_slow:
        eu.print_wrapped(self._text)
        return
    # Otherwise wrap the printing in the slow_down() context manager so
    # the example output is easier to follow.
    with slow_down():
        eu.print_wrapped(self._text)
# NOTE(review): flattened chunk -- the enclosing definition opens before this
# span (it begins mid gf.Flow(...).add call) and the original line breaks /
# indentation are lost, so the code is left byte-identical. It appears to be
# the tail of a vm-create flow factory plus the start of the example's main
# script (backend setup, optional "book_id+flow_id" argv parsing) -- confirm
# against the original example before reformatting.
WriteNetworkSettings("write_net_settings"), ), # This does all the volume stuff. gf.Flow("volume-maker").add( AllocateVolumes("allocate_my_volumes", provides='volumes'), FormatVolumes("volume_formatter"), ), # Finally boot it all. BootVM("boot-it"), ), # Ya it worked! PrintText("Finished vm create.", no_slow=True), PrintText("Instance is running!", no_slow=True)) return flow eu.print_wrapped("Initializing") # Setup the persistence & resumption layer. with eu.get_backend() as backend: try: book_id, flow_id = sys.argv[2].split("+", 1) if not uuidutils.is_uuid_like(book_id): book_id = None if not uuidutils.is_uuid_like(flow_id): flow_id = None except (IndexError, ValueError): book_id = None flow_id = None # Set up how we want our engine to run, serial, parallel... executor = None
# NOTE(review): flattened chunk -- begins inside a try-block whose `try:` is
# out of view (an orphan `except exceptions.WrappedFailure` is visible), so
# the code is left byte-identical. It appears to demonstrate catching
# WrappedFailure from a parallel zag engine run, classifying the contained
# failures, and re-raising any unknown ones -- confirm before reformatting.
with utils.wrap_all_failures(): zag.engines.run(flow, store=store, engine='parallel') except exceptions.WrappedFailure as ex: unknown_failures = [] for a_failure in ex: if a_failure.check(FirstException): print("Got FirstException: %s" % a_failure.exception_str) elif a_failure.check(SecondException): print("Got SecondException: %s" % a_failure.exception_str) else: print("Unknown failure: %s" % a_failure) unknown_failures.append(a_failure) failure.Failure.reraise_if_any(unknown_failures) eu.print_wrapped("Raise and catch first exception only") run(sleep1=0.0, raise1=True, sleep2=0.0, raise2=False) # NOTE(imelnikov): in general, sleeping does not guarantee that we'll have both # task running before one of them fails, but with current implementation this # works most of times, which is enough for our purposes here (as an example). eu.print_wrapped("Raise and catch both exceptions") run(sleep1=1.0, raise1=True, sleep2=1.0, raise2=True) eu.print_wrapped("Handle one exception, and re-raise another") try: run(sleep1=1.0, raise1=True, sleep2=1.0, raise2='boom') except TypeError as ex: print("As expected, TypeError is here: %s" % ex) else: assert False, "TypeError expected"
# NOTE(review): flattened chunk -- begins mid gf.Flow(...).add call (the
# enclosing factory's opening lines are out of view) and original line
# structure is lost, so the code is left byte-identical. Appears to be the
# tail of a vm-create flow factory plus the example's backend setup and
# "book_id+flow_id" argv parsing -- confirm before reformatting.
), # This does all the volume stuff. gf.Flow("volume-maker").add( AllocateVolumes("allocate_my_volumes", provides='volumes'), FormatVolumes("volume_formatter"), ), # Finally boot it all. BootVM("boot-it"), ), # Ya it worked! PrintText("Finished vm create.", no_slow=True), PrintText("Instance is running!", no_slow=True)) return flow eu.print_wrapped("Initializing") # Setup the persistence & resumption layer. with eu.get_backend() as backend: # Try to find a previously passed in tracking id... try: book_id, flow_id = sys.argv[2].split("+", 1) if not uuidutils.is_uuid_like(book_id): book_id = None if not uuidutils.is_uuid_like(flow_id): flow_id = None except (IndexError, ValueError): book_id = None flow_id = None
# NOTE(review): flattened chunk -- begins mid-statement inside an out-of-view
# try-block and ends on a dangling `except TypeError as ex:` whose body is
# also out of view, so the code is left byte-identical. Appears to classify
# failures from a WrappedFailure raised by a parallel taskflow engine run and
# re-raise any unknown ones -- confirm before reformatting.
taskflow.engines.run(flow, store=store, engine='parallel') except exceptions.WrappedFailure as ex: unknown_failures = [] for a_failure in ex: if a_failure.check(FirstException): print("Got FirstException: %s" % a_failure.exception_str) elif a_failure.check(SecondException): print("Got SecondException: %s" % a_failure.exception_str) else: print("Unknown failure: %s" % a_failure) unknown_failures.append(a_failure) failure.Failure.reraise_if_any(unknown_failures) eu.print_wrapped("Raise and catch first exception only") run(sleep1=0.0, raise1=True, sleep2=0.0, raise2=False) # NOTE(imelnikov): in general, sleeping does not guarantee that we'll have both # task running before one of them fails, but with current implementation this # works most of times, which is enough for our purposes here (as an example). eu.print_wrapped("Raise and catch both exceptions") run(sleep1=1.0, raise1=True, sleep2=1.0, raise2=True) eu.print_wrapped("Handle one exception, and re-raise another") try: run(sleep1=1.0, raise1=True, sleep2=1.0, raise2='boom') except TypeError as ex:
# NOTE(review): flattened chunk -- this is a function body whose `def` line
# (presumably make_flow_and_store(source_files, executable_only=...)) is out
# of view, followed by the script's __main__ section; original indentation is
# lost, so the code is left byte-identical. It builds compile/link/docs tasks
# keyed by '<name>-source'/'<name>-object' store entries -- confirm before
# reformatting.
object_targets = [] store = {} for source in source_files: source_stored = '%s-source' % source object_stored = '%s-object' % source store[source_stored] = source object_targets.append(object_stored) flow.add(CompileTask(name='compile-%s' % source, rebind={'source_filename': source_stored}, provides=object_stored)) flow.add(BuildDocsTask(requires=list(store.keys()))) # Try this to see executable_only switch broken: object_targets.append('docs') link_task = LinkTask('build/executable', requires=object_targets) flow.add(link_task) if executable_only: flow.set_target(link_task) return flow, store if __name__ == "__main__": SOURCE_FILES = ['first.c', 'second.cpp', 'main.cpp'] eu.print_wrapped('Running all tasks:') flow, store = make_flow_and_store(SOURCE_FILES) taskflow.engines.run(flow, store=store) eu.print_wrapped('Building executable, no docs:') flow, store = make_flow_and_store(SOURCE_FILES, executable_only=True) taskflow.engines.run(flow, store=store)
# NOTE(review): flattened chunk -- begins inside a dict literal (the `spec =
# {` opener is out of view) and ends inside a try-block, so the code is left
# byte-identical. It runs the car-building flow with ANY-state notifiers
# registered, then mutates spec['doors'] to force the revert path -- confirm
# before reformatting.
"engine_installed": True, "doors_installed": True, "windows_installed": True, "wheels_installed": True, } engine = taskflow.engines.load(flow, store={'spec': spec.copy()}) # This registers all (ANY) state transitions to trigger a call to the # flow_watch function for flow state transitions, and registers the # same all (ANY) state transitions for task state transitions. engine.notifier.register(ANY, flow_watch) engine.task_notifier.register(ANY, task_watch) eu.print_wrapped("Building a car") engine.run() # Alter the specification and ensure that the reverting logic gets triggered # since the resultant car that will be built by the build_wheels function will # build a car with 4 doors only (not 5), this will cause the verification # task to mark the car that is produced as not matching the desired spec. spec['doors'] = 5 engine = taskflow.engines.load(flow, store={'spec': spec.copy()}) engine.notifier.register(ANY, flow_watch) engine.task_notifier.register(ANY, task_watch) eu.print_wrapped("Building a wrong car that doesn't match specification") try: engine.run()
# NOTE(review): flattened chunk -- begins inside an if/else whose condition is
# out of view (an orphan `else:` is visible), so the code is left
# byte-identical. It chooses a persistence URI, decides whether the flow
# should "blow up" based on whether persist_path already exists, runs the
# flow with a LogBook, and prints the book contents -- confirm before
# reformatting.
backend_uri = "sqlite:///%s" % (persist_path) else: persist_path = os.path.join(tempfile.gettempdir(), "persisting") backend_uri = "file:///%s" % (persist_path) if os.path.exists(persist_path): blowup = False else: blowup = True with eu.get_backend(backend_uri) as backend: # Make a flow that will blow up if the file didn't exist previously, if it # did exist, assume we won't blow up (and therefore this shows the undo # and redo that a flow will go through). book = models.LogBook("my-test") flow = make_flow(blowup=blowup) eu.print_wrapped("Running") try: eng = engines.load(flow, engine='serial', backend=backend, book=book) eng.run() if not blowup: eu.rm_path(persist_path) except Exception: # NOTE(harlowja): don't exit with non-zero status code, so that we can # print the book contents, as well as avoiding exiting also makes the # unit tests (which also runs these examples) pass. traceback.print_exc(file=sys.stdout) eu.print_wrapped("Book contents") print(book.pformat())
def trash(**kwargs):
    """Announce that the partially-built car's pieces are being discarded.

    Arbitrary keyword arguments are accepted (and ignored) so this can be
    hooked up as a handler regardless of what the caller passes in.
    """
    message = "Throwing away pieces of car!"
    eu.print_wrapped(message)
# NOTE(review): flattened chunk -- begins with a stray comment and the middle
# of a dict literal (the `spec = {` opener is out of view) and ends inside a
# try-block, so the code is left byte-identical. Same demo as the sibling
# chunk: run the car flow with ANY-state notifiers, then change
# spec['doors'] to trigger reverting -- confirm before reformatting.
# installed is not a car after all. "engine_installed": True, "doors_installed": True, "windows_installed": True, "wheels_installed": True, } engine = taskflow.engines.load(flow, store={'spec': spec.copy()}) # This registers all (ANY) state transitions to trigger a call to the # flow_watch function for flow state transitions, and registers the # same all (ANY) state transitions for task state transitions. engine.notifier.register(ANY, flow_watch) engine.task_notifier.register(ANY, task_watch) eu.print_wrapped("Building a car") engine.run() # Alter the specification and ensure that the reverting logic gets triggered # since the resultant car that will be built by the build_wheels function will # build a car with 4 doors only (not 5), this will cause the verification # task to mark the car that is produced as not matching the desired spec. spec['doors'] = 5 engine = taskflow.engines.load(flow, store={'spec': spec.copy()}) engine.notifier.register(ANY, flow_watch) engine.task_notifier.register(ANY, task_watch) eu.print_wrapped("Building a wrong car that doesn't match specification") try: engine.run()
# NOTE(review): flattened chunk -- begins with an orphan `else:` (the matching
# `if` is out of view), so the code is left byte-identical. Variant of the
# sibling persistence demo using logbook.LogBook and p_utils.pformat instead
# of models.LogBook / book.pformat() -- confirm before reformatting.
else: persist_path = os.path.join(tempfile.gettempdir(), "persisting") backend_uri = "file:///%s" % (persist_path) if os.path.exists(persist_path): blowup = False else: blowup = True with eu.get_backend(backend_uri) as backend: # Make a flow that will blow up if the file didn't exist previously, if it # did exist, assume we won't blow up (and therefore this shows the undo # and redo that a flow will go through). book = logbook.LogBook("my-test") flow = make_flow(blowup=blowup) eu.print_wrapped("Running") try: eng = engines.load(flow, engine='serial', backend=backend, book=book) eng.run() if not blowup: eu.rm_path(persist_path) except Exception: # NOTE(harlowja): don't exit with non-zero status code, so that we can # print the book contents, as well as avoiding exiting also makes the # unit tests (which also runs these examples) pass. traceback.print_exc(file=sys.stdout) eu.print_wrapped("Book contents") print(p_utils.pformat(book))
# NOTE(review): flattened chunk -- relies on out-of-view names (`backend`,
# `flow_factory`) and ends mid-comment, so the code is left byte-identical.
# It persists a LogBook/FlowDetail, runs the flow once, then begins the
# resume-and-run-again phase of the example -- confirm before reformatting.
# Create a place where the persistence information will be stored. book = models.LogBook("example") flow_detail = models.FlowDetail("resume from backend example", uuid=uuidutils.generate_uuid()) book.add(flow_detail) with contextlib.closing(backend.get_connection()) as conn: conn.save_logbook(book) # CREATE AND RUN THE FLOW: FIRST ATTEMPT #################### flow = flow_factory() engine = taskflow.engines.load(flow, flow_detail=flow_detail, book=book, backend=backend) print_task_states(flow_detail, "At the beginning, there is no state") eu.print_wrapped("Running") engine.run() print_task_states(flow_detail, "After running") # RE-CREATE, RESUME, RUN #################################### eu.print_wrapped("Resuming and running again") # NOTE(harlowja): reload the flow detail from backend, this will allow us # to resume the flow from its suspended state, but first we need to search # for the right flow details in the correct logbook where things are # stored. # # We could avoid re-loading the engine and just do engine.run() again, but # this example shows how another process may unsuspend a given flow and # start it again for situations where this is useful to-do (say the process
# NOTE(review): flattened chunk -- relies on out-of-view names (`book`,
# `backend`, `flow_factory`) and ends mid-comment, so the code is left
# byte-identical. Variant of the sibling resume example using
# zag.engines.load instead of taskflow.engines.load -- confirm before
# reformatting.
flow_detail = models.FlowDetail("resume from backend example", uuid=uuidutils.generate_uuid()) book.add(flow_detail) with contextlib.closing(backend.get_connection()) as conn: conn.save_logbook(book) # CREATE AND RUN THE FLOW: FIRST ATTEMPT #################### flow = flow_factory() engine = zag.engines.load(flow, flow_detail=flow_detail, book=book, backend=backend) print_task_states(flow_detail, "At the beginning, there is no state") eu.print_wrapped("Running") engine.run() print_task_states(flow_detail, "After running") # RE-CREATE, RESUME, RUN #################################### eu.print_wrapped("Resuming and running again") # NOTE(harlowja): reload the flow detail from backend, this will allow us # to resume the flow from its suspended state, but first we need to search # for the right flow details in the correct logbook where things are # stored. # # We could avoid re-loading the engine and just do engine.run() again, but # this example shows how another process may unsuspend a given flow and # start it again for situations where this is useful to-do (say the process
def trash(**kwargs):
    """Announce that the partially-built car's pieces are being discarded.

    Arbitrary keyword arguments are accepted (and ignored) so this can be
    hooked up as a handler regardless of what the caller passes in.
    """
    # FIX: the original used Python 2 statement syntax ("print '...'"),
    # which is a SyntaxError under Python 3; converted to the print()
    # function while keeping the message byte-identical.
    # NOTE(review): this looks like a leftover debug trace -- the sibling
    # trash() definition in this file does not emit it; consider removing.
    print('this is trash func')
    eu.print_wrapped("Throwing away pieces of car!")