def testRunWorkflow(self):
    filename = os.path.join(os.path.dirname(__file__),
                            'xml/openwfe/workflow1.xml')
    wf_specs = self.reader.parse_file(filename)
    wf_spec = wf_specs[0]
    for name in wf_spec.task_specs:
        wf_spec.task_specs[name].reached_event.connect(self.on_reached_cb)
        wf_spec.task_specs[name].completed_event.connect(on_complete_cb,
                                                         self.taken_path)
    workflow = Workflow(wf_spec)
    try:
        workflow.complete_all()
    except:
        workflow.dump()
        raise

    path = [( 1, 'Start'),
            ( 2, 'concurrence_1'),
            ( 3, 'task_a1'),
            ( 4, 'task_a2'),
            ( 5, 'if_condition_1'),
            ( 6, 'task_a3'),
            ( 7, 'if_condition_1_end'),
            ( 8, 'if_condition_2'),
            ( 9, 'task_a5'),
            (10, 'if_condition_2_end'),
            ( 3, 'task_b1'),
            ( 4, 'task_b2'),
            ( 5, 'concurrence_1_end'),
            ( 6, 'task_c1'),
            ( 7, 'task_c2'),
            ( 8, 'End')]
    assert_same_path(self, path, self.taken_path)

def runWorkflow(self, wf_spec, xml_filename):
    taken_path = []
    for name in wf_spec.task_specs:
        wf_spec.task_specs[name].reached_event.connect(on_reached_cb,
                                                       taken_path)
        wf_spec.task_specs[name].completed_event.connect(on_complete_cb,
                                                         taken_path)

    # Execute all tasks within the Workflow.
    workflow = Workflow(wf_spec)
    self.assert_(not workflow.is_completed(),
                 'Workflow is complete before start')
    try:
        workflow.complete_all(False)
    except:
        workflow.task_tree.dump()
        raise

    #workflow.task_tree.dump()
    self.assert_(workflow.is_completed(),
                 'complete_all() returned, but workflow is not complete\n'
                 + workflow.task_tree.get_dump())

    # Make sure that there are no waiting tasks left in the tree.
    for thetask in Task.Iterator(workflow.task_tree, Task.READY):
        workflow.task_tree.dump()
        raise Exception('Task with state READY: %s' % thetask.name)

    # Check whether the correct route was taken.
    filename = xml_filename + '.path'
    if os.path.exists(filename):
        with open(filename) as fp:
            expected = fp.read()
        taken_path = '\n'.join(taken_path) + '\n'
        error = '%s:\n' % xml_filename
        error += 'Expected:\n'
        error += '%s\n' % expected
        error += 'but got:\n'
        error += '%s\n' % taken_path
        self.assert_(taken_path == expected, error)

    # Check attribute availability.
    filename = xml_filename + '.data'
    if os.path.exists(filename):
        with open(filename) as fp:
            expected = fp.read()
        result = workflow.get_attribute('data', '')
        error = '%s:\n' % xml_filename
        error += 'Expected:\n'
        error += '%s\n' % expected
        error += 'but got:\n'
        error += '%s\n' % result
        self.assert_(result == expected, error)

def run_workflow(test, wf_spec, expected_path, expected_data, max_tries=1):
    # Execute all tasks within the Workflow.
    taken_path = track_workflow(wf_spec)
    workflow = Workflow(wf_spec)
    test.assert_(not workflow.is_completed(),
                 'Workflow is complete before start')
    try:
        # We allow the workflow to require a maximum of 5 seconds to
        # complete, to allow for testing long running tasks.
        for i in range(10):
            workflow.complete_all(False)
            if workflow.is_completed():
                break
            time.sleep(0.5)
    except:
        workflow.task_tree.dump()
        raise

    #workflow.task_tree.dump()
    complete = False
    while max_tries > 0 and complete is False:
        max_tries -= 1
        complete = workflow.is_completed()
    test.assert_(complete,
                 'complete_all() returned, but workflow is not complete\n'
                 + workflow.task_tree.get_dump())

    # Make sure that there are no waiting tasks left in the tree.
    for thetask in Task.Iterator(workflow.task_tree, Task.READY):
        workflow.task_tree.dump()
        raise Exception('Task with state READY: %s' % thetask.name)

    # Check whether the correct route was taken.
    if expected_path is not None:
        taken_path = '\n'.join(taken_path) + '\n'
        error = 'Expected:\n'
        error += '%s\n' % expected_path
        error += 'but got:\n'
        error += '%s\n' % taken_path
        test.assert_(taken_path == expected_path, error)

    # Check attribute availability.
    if expected_data is not None:
        result = workflow.get_attribute('data', '')
        error = 'Expected:\n'
        error += '%s\n' % expected_data
        error += 'but got:\n'
        error += '%s\n' % result
        test.assert_(result == expected_data, error)

    return workflow

def run_workflow(test, wf_spec, expected_path, expected_data, workflow=None):
    # Execute all tasks within the Workflow.
    if workflow is None:
        taken_path = track_workflow(wf_spec)
        workflow = Workflow(wf_spec)
    else:
        taken_path = track_workflow(workflow.spec)
    test.assert_(not workflow.is_completed(),
                 'Workflow is complete before start')
    try:
        # We allow the workflow to require a maximum of 5 seconds to
        # complete, to allow for testing long running tasks.
        for i in range(10):
            workflow.complete_all(False)
            if workflow.is_completed():
                break
            time.sleep(0.5)
    except:
        workflow.task_tree.dump()
        raise

    #workflow.task_tree.dump()
    test.assert_(workflow.is_completed(),
                 'complete_all() returned, but workflow is not complete\n'
                 + workflow.task_tree.get_dump())

    # Make sure that there are no waiting tasks left in the tree.
    for thetask in Task.Iterator(workflow.task_tree, Task.READY):
        workflow.task_tree.dump()
        raise Exception('Task with state READY: %s' % thetask.name)

    # Check whether the correct route was taken.
    if expected_path is not None:
        taken_path = '\n'.join(taken_path) + '\n'
        error = 'Expected:\n'
        error += '%s\n' % expected_path
        error += 'but got:\n'
        error += '%s\n' % taken_path
        test.assert_(taken_path == expected_path, error)

    # Check data availability.
    if expected_data is not None:
        result = workflow.get_data('data', '')
        error = 'Expected:\n'
        error += '%s\n' % expected_data
        error += 'but got:\n'
        error += '%s\n' % result
        test.assert_(result == expected_data, error)

    return workflow

def run_workflow(test, wf_spec, expected_path, expected_data, workflow=None):
    # Execute all tasks within the Workflow.
    if workflow is None:
        taken_path = track_workflow(wf_spec)
        workflow = Workflow(wf_spec)
    else:
        taken_path = track_workflow(workflow.spec)
    test.assertFalse(workflow.is_completed())
    try:
        # We allow the workflow to require a maximum of 5 seconds to
        # complete, to allow for testing long running tasks.
        for i in range(10):
            workflow.complete_all(False)
            if workflow.is_completed():
                break
            time.sleep(0.5)
    except:
        workflow.task_tree.dump()
        raise

    # workflow.task_tree.dump()
    test.assertTrue(workflow.is_completed(), workflow.task_tree.get_dump())

    # Make sure that there are no waiting tasks left in the tree.
    for thetask in Task.Iterator(workflow.task_tree, Task.READY):
        workflow.task_tree.dump()
        raise Exception('Task with state READY: %s' % thetask.name)

    # Check whether the correct route was taken.
    if expected_path is not None:
        taken_path = '\n'.join(taken_path) + '\n'
        test.assertEqual(taken_path, expected_path)

    # Check data availability.
    if expected_data is not None:
        result = workflow.get_data('data', '')
        test.assertIn(result, expected_data)

    return workflow

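A hypothetical call site for this helper, with illustrative spec and task names; the expected path must match whatever track_workflow records, newline-joined with a trailing newline, per the comparison above:

import unittest

from SpiffWorkflow.specs import WorkflowSpec, Simple

class SequencePatternTest(unittest.TestCase):
    def test_sequence(self):
        # Illustrative spec: Start -> task_a -> task_b.
        wf_spec = WorkflowSpec()
        task_a = Simple(wf_spec, 'task_a')
        task_b = Simple(wf_spec, 'task_b')
        wf_spec.start.connect(task_a)
        task_a.connect(task_b)
        # Assumes track_workflow records each completed task-spec name;
        # the exact entries depend on that tracker's implementation.
        run_workflow(self, wf_spec, 'Start\ntask_a\ntask_b\n', None)
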
def _runWorkflow(self, wf_spec):
    taken_path = {'reached': [], 'completed': []}
    for name, task in wf_spec.task_specs.items():
        task.reached_event.connect(on_reached_cb, taken_path['reached'])
        task.completed_event.connect(on_complete_cb, taken_path['completed'])

    # Execute all tasks within the Workflow.
    workflow = Workflow(wf_spec)
    self.assert_(not workflow.is_completed(),
                 'Workflow complete before start')
    try:
        workflow.complete_all()
    except:
        workflow.dump()
        raise
    self.assert_(workflow.is_completed(),
                 'complete_all() returned, but workflow is not complete\n'
                 + workflow.task_tree.get_dump())
    #workflow.task_tree.dump()
    assert_same_path(self, self.expected_path, taken_path['completed'])

import time

with open('ansible-workflow-spec.json') as fp:
    workflow_json = fp.read()
serializer = AnsibleSerializer()
spec = WorkflowSpec.deserialize(serializer, workflow_json)

# Create the workflow.
workflow = Workflow(spec)

# Execute until all tasks are done or require manual intervention.
# For the sake of this tutorial, we ignore the "manual" flag on the
# tasks. In practice, you probably don't want to do that.
for i in range(20):
    print(i)
    workflow.complete_all(False)
    if workflow.is_completed():
        break
    time.sleep(0.5)

#workflow.complete_all(halt_on_manual=False)
#workflow.complete_next()
#tasks = workflow.get_tasks(Task.WAITING)
#for t in tasks:
#    print(t.get_name())
#    t.complete()
#time.sleep(10)

# Like get_dump() (which returns a full dump of the current internal task
# tree for debugging), but prints the output to the terminal instead of
# returning it.
workflow.dump()

# get_tasks_from_spec_name: returns all tasks whose spec has the given name.

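A short sketch of that lookup; the spec name below is illustrative, not one known to exist in ansible-workflow-spec.json:

# Sketch only: 'some_task' is an illustrative spec name.
for task in workflow.get_tasks_from_spec_name('some_task'):
    print(task.get_name(), task.get_state())
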
import json

from SpiffWorkflow import Workflow
from SpiffWorkflow.specs import WorkflowSpec
from serializer import NuclearSerializer

# Load from JSON
with open('nuclear') as fp:
    workflow_json = fp.read()
serializer = NuclearSerializer()
spec = WorkflowSpec.deserialize(serializer, workflow_json)

# Create the workflow.
workflow = Workflow(spec)

workflow.complete_all()

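NuclearSerializer comes from a project-local serializer module; a minimal sketch of what it might be, assuming it simply specializes SpiffWorkflow's stock JSON serializer:

# Assumption: a thin subclass of the stock JSON serializer; the real
# project-local module may override more than this.
from SpiffWorkflow.serializer.json import JSONSerializer

class NuclearSerializer(JSONSerializer):
    pass
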
from SpiffWorkflow import Workflow
from architecture.scheduler.spiff_wf_demo import NuclearStrikeWorkflowSpec

spec = NuclearStrikeWorkflowSpec()
wf = Workflow(spec)
wf.complete_all()

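For orientation, a Python-based spec like this is just a WorkflowSpec subclass that wires its task specs together in the constructor. A minimal sketch with illustrative task names; the real NuclearStrikeWorkflowSpec in the demo module will differ:

from SpiffWorkflow.specs import WorkflowSpec, Simple

class NuclearStrikeWorkflowSpec(WorkflowSpec):
    """Sketch only: a linear two-step workflow with illustrative names."""
    def __init__(self):
        WorkflowSpec.__init__(self)
        # Every WorkflowSpec has an implicit Start task to chain from.
        general = Simple(self, 'general')
        strike = Simple(self, 'strike')
        self.start.connect(general)
        general.connect(strike)
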
def test_Merge_data_merging(self):
    """Test that Merge task actually merges data"""
    wf_spec = WorkflowSpec()
    first = Simple(wf_spec, 'first')
    second = Simple(wf_spec, 'second')
    third = Simple(wf_spec, 'third')
    bump = Simple(wf_spec, 'bump')
    fourth = Simple(wf_spec, 'fourth')
    merge1 = Merge(wf_spec, 'merge 1')
    simple1 = Simple(wf_spec, 'simple 1')
    merge2 = Merge(wf_spec, 'merge 2')
    simple2 = Simple(wf_spec, 'simple 2')
    unmerged = Simple(wf_spec, 'unmerged')

    wf_spec.start.connect(first)
    wf_spec.start.connect(second)
    wf_spec.start.connect(third)
    wf_spec.start.connect(bump)
    bump.connect(fourth)  # Test join at different depths in tree

    first.connect(merge1)
    second.connect(merge1)
    second.connect(unmerged)

    first.connect(merge2)
    second.connect(merge2)
    third.connect(merge2)
    fourth.connect(merge2)

    merge1.connect(simple1)
    merge2.connect(simple2)

    workflow = Workflow(wf_spec)
    workflow.task_tree.set_attribute(everywhere=1)
    for task in workflow.get_tasks():
        task.set_attribute(**{'name': task.get_name(),
                              task.get_name(): 1})
    workflow.complete_all()
    self.assertTrue(workflow.is_completed())

    found = {}
    for task in workflow.get_tasks():
        if task.task_spec is simple1:
            self.assertIn('first', task.attributes)
            self.assertIn('second', task.attributes)
            self.assertDictEqual(task.attributes,
                                 {'Start': 1,
                                  'merge 1': 1,
                                  'name': 'Start',
                                  'simple 1': 1,
                                  'second': 1,
                                  'first': 1})
            found['simple1'] = task
        if task.task_spec is simple2:
            self.assertIn('first', task.attributes)
            self.assertIn('second', task.attributes)
            self.assertIn('third', task.attributes)
            self.assertIn('fourth', task.attributes)
            self.assertDictEqual(task.attributes,
                                 {'merge 2': 1,
                                  'simple 2': 1,
                                  'name': 'Start',
                                  'third': 1,
                                  'bump': 1,
                                  'Start': 1,
                                  'second': 1,
                                  'first': 1,
                                  'fourth': 1})
            found['simple2'] = task
        if task.task_spec is unmerged:
            self.assertDictEqual(task.attributes,
                                 {'Start': 1,
                                  'second': 1,
                                  'name': 'Start',
                                  'unmerged': 1})
            found['unmerged'] = task
    self.assertIn('simple1', found)
    self.assertIn('simple2', found)
    self.assertIn('unmerged', found)

strike.completed_event.connect(my_nuclear_strike)

# As soon as all tasks are either "completed" or "aborted", the
# workflow implicitly ends.

from SpiffWorkflow import Workflow
from SpiffWorkflow.specs import WorkflowSpec
from SpiffWorkflow.serializer.json import JSONSerializer

# Load from JSON
with open('nuclear.json') as fp:
    workflow_json = fp.read()
serializer = JSONSerializer()
spec = WorkflowSpec.deserialize(serializer, workflow_json)

# Alternatively, create an instance of the Python based specification.
#from nuclear import NuclearStrikeWorkflowSpec
#spec = NuclearStrikeWorkflowSpec()

# Create the workflow.
workflow = Workflow(spec)

# Execute until all tasks are done or require manual intervention.
# For the sake of this tutorial, we ignore the "manual" flag on the
# tasks. In practice, you probably don't want to do that.
workflow.complete_all(halt_on_manual=False)

# Alternatively, this is what a UI would do for a manual task.
#workflow.complete_task_from_id(...)

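A hedged sketch of that UI path, assuming the classic Task.READY state, the task_spec.manual flag, and complete_task_from_id seen elsewhere in these snippets; a real UI would complete the specific task the user acted on:

from SpiffWorkflow import Task

# Sketch only: complete the first ready task that is flagged manual.
for task in workflow.get_tasks(Task.READY):
    if task.task_spec.manual:
        workflow.complete_task_from_id(task.id)
        break
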