def init_workflow_from_spec_json(spec_json):
    """Build a fresh Workflow from a serialized spec JSON string.

    Deserializes the spec, skips the synthetic Start/Root tasks, resets the
    per-spec instance bookkeeping, and stamps an unlocked ``_meta`` dict onto
    every task spec.  A workflow-level ``_meta`` entry (new uuid, empty field
    list) is stored on the workflow's own data.
    """
    spec = WorkflowSpec.deserialize(TradeFlowSerializer(), spec_json)
    workflow = Workflow(spec)
    workflow_skip_start_and_root(workflow)

    # Reset instance tracking on every task spec before stamping metadata.
    for task in workflow.get_tasks():
        task.task_spec.x_instances = []

    for task in workflow.get_tasks():
        spec_data = task.task_spec.data
        # Normalise "_meta" to a dict; missing or wrong-typed values reset.
        # NOTE(review): task_spec.data may be shared between tasks of the
        # same spec -- later tasks overwrite earlier ids; confirm intended.
        if "_meta" not in spec_data or not isinstance(spec_data["_meta"], dict):
            spec_data["_meta"] = {}
        meta = spec_data["_meta"]
        meta["locked"] = False
        meta["id"] = str(task.id)
        meta["type"] = task.task_spec.__class__.__name__
        # workflow_meta["fields"] += t.task_spec.data["_meta"].get("fields", [])

    workflow.data["_meta"] = {"id": str(uuid.uuid4()), "fields": []}
    return workflow
def deserialize_workflow(self, s_state, **kwargs):
    """Rebuild a Workflow from its serialized dict state ``s_state``.

    Restores the spec first, then the flat attributes, then the task tree.
    Parent links are stored as task ids in the serialized form, so they are
    re-resolved to Task objects after the tree is built.
    """
    wf_spec = self.deserialize_workflow_spec(s_state['wf_spec'], **kwargs)
    workflow = Workflow(wf_spec)
    # data
    workflow.data = self._deserialize_dict(s_state['data'])
    # outer_workflow restoration is intentionally disabled here:
    #workflow.outer_workflow = find_workflow_by_id(remap_workflow_id(s_state['outer_workflow']))
    # success
    workflow.success = s_state['success']
    # workflow
    workflow.spec = wf_spec
    # task_tree -- must be built before parents can be resolved below.
    workflow.task_tree = self._deserialize_task(workflow, s_state['task_tree'])
    # Re-connect parents: serialized parents are ids, resolve them to Tasks.
    for task in workflow.get_tasks():
        task.parent = workflow.get_task(task.parent)
    # last_task is likewise stored as an id and resolved via get_task.
    workflow.last_task = workflow.get_task(s_state['last_task'])
    return workflow
def testRunWorkflow(self):
    """Parse the OpenWFE sample workflow, run it to completion and verify
    the completed tasks followed the expected path."""
    xml_path = os.path.join(os.path.dirname(__file__),
                            'xml/openwfe/workflow1.xml')
    wf_spec = self.reader.parse_file(xml_path)[0]

    # Wire path tracking into every task spec.
    for name in wf_spec.task_specs:
        spec = wf_spec.task_specs[name]
        spec.reached_event.connect(self.on_reached_cb)
        spec.completed_event.connect(on_complete_cb, self.taken_path)

    workflow = Workflow(wf_spec)
    try:
        workflow.complete_all()
    except:
        workflow.dump()  # show the task tree before re-raising
        raise

    expected = [(1, 'Start'),
                (2, 'concurrence_1'),
                (3, 'task_a1'),
                (4, 'task_a2'),
                (5, 'if_condition_1'),
                (6, 'task_a3'),
                (7, 'if_condition_1_end'),
                (8, 'if_condition_2'),
                (9, 'task_a5'),
                (10, 'if_condition_2_end'),
                (3, 'task_b1'),
                (4, 'task_b2'),
                (5, 'concurrence_1_end'),
                (6, 'task_c1'),
                (7, 'task_c2'),
                (8, 'End')]
    assert_same_path(self, expected, self.taken_path)
def testSerializeWorkflow(self, path_file=None, data=None):
    """Serialize/deserialize round-trip checks at three stages: a workflow
    run straight from the spec, a freshly started workflow, and both the
    restored and unrestored workflows after running to completion.

    Fixes: file handles are now closed via ``with`` (two leaks), and the
    unused ``as e`` exception binding was dropped.
    """
    if self.serializer is None:
        return
    if path_file is None:
        path_file = os.path.join(data_dir, 'spiff', 'workflow1.path')
        with open(path_file) as fp:
            path = fp.read()
    elif os.path.exists(path_file):
        with open(path_file) as fp:
            path = fp.read()
    else:
        path = None

    # run a workflow fresh from the spec to completion, see if it
    # serialises and deserialises correctly.
    workflow_without_save = run_workflow(self, self.wf_spec, path, data)
    try:
        serialized1 = workflow_without_save.serialize(self.serializer)
        restored_wf = Workflow.deserialize(self.serializer, serialized1)
        serialized2 = restored_wf.serialize(self.serializer)
    except TaskNotSupportedError:
        # This serializer cannot handle one of the task types; nothing to test.
        return
    else:
        self.assert_(isinstance(serialized1, self.serial_type))
        self.assert_(isinstance(serialized2, self.serial_type))
        self.compareSerialization(serialized1, serialized2)

    # try an freshly started workflow, see if it serialises and
    # deserialiases correctly. (no longer catch for exceptions: if they
    # were going to happen they should have happened already.)
    workflow = Workflow(self.wf_spec)
    serialized1 = workflow.serialize(self.serializer)
    restored_wf = Workflow.deserialize(self.serializer, serialized1)
    serialized2 = restored_wf.serialize(self.serializer)
    self.assert_(isinstance(serialized1, self.serial_type))
    self.assert_(isinstance(serialized2, self.serial_type))
    self.compareSerialization(serialized1, serialized2)
    self.assertFalse(restored_wf.is_completed())

    # Run it to completion, see if it serialises and deserialises correctly
    # also check if the restored and unrestored ones are the same after
    # being run through.
    workflow_unrestored = run_workflow(self, self.wf_spec, path, data,
                                       workflow=workflow)
    workflow_restored = run_workflow(self, self.wf_spec, path, data,
                                     workflow=restored_wf)
    serialized1 = workflow_restored.serialize(self.serializer)
    restored_wf = Workflow.deserialize(self.serializer, serialized1)
    serialized2 = restored_wf.serialize(self.serializer)
    self.assert_(isinstance(serialized1, self.serial_type))
    self.assert_(isinstance(serialized2, self.serial_type))
    self.compareSerialization(serialized1, serialized2)

    serialized_crosscheck = workflow_unrestored.serialize(self.serializer)
    self.assert_(isinstance(serialized_crosscheck, self.serial_type))
    # compare the restored and unrestored completed ones. Because they ran
    # separately, exclude the last_state_change time. Because you can have
    # dynamically created tasks, don't compare (uu)ids.
    self.compareSerialization(serialized_crosscheck, serialized2,
                              exclude_dynamic=True)
def _advance_to_a1(self, wf_spec):
    """Start a workflow over *wf_spec* and complete the first three READY
    tasks (Start, the multichoice, then task_a1); return the workflow."""
    workflow = Workflow(wf_spec)
    # Start -> multichoice -> task_a1: complete the first READY task thrice.
    for _ in range(3):
        ready = workflow.get_tasks(Task.READY)
        workflow.complete_task_from_id(ready[0].id)
    return workflow
def setup_workflow(self, structured=True, threshold=None, cancel=False):
    """Build the split/join test workflow and return a Workflow over it.

    When *structured* is True the Join is bound to its matching split task
    by name; otherwise an unstructured Join is created.
    """
    wf_spec = WorkflowSpec()
    split = Simple(wf_spec, 'split')
    wf_spec.start.connect(split)

    # Build the Join, optionally tying it to the split task.
    join_kwargs = {'threshold': threshold, 'cancel': cancel}
    if structured:
        join_kwargs['split_task'] = split.name
    join = Join(wf_spec, 'join', **join_kwargs)

    single = Simple(wf_spec, 'first', manual=True)
    default = Simple(wf_spec, 'default')
    choice = ExclusiveChoice(wf_spec, 'choice', manual=True)
    end = Simple(wf_spec, 'end')

    single.connect(join)
    # The choice only feeds the join when 'should_join' is True.
    choice.connect_if(Equal(Attrib('should_join'), True), join)
    choice.connect(default)
    split.connect(single)
    split.connect(choice)
    join.connect(end)
    return Workflow(wf_spec)
def testDeserialization(self):
    """A deserialized workflow must mirror the original's task count,
    spec properties, and unique Start/Root tasks."""
    original = self.workflow
    original.spec.start.set_property(marker=True)

    serialized = original.serialize(DictionarySerializer())
    restored = Workflow.deserialize(DictionarySerializer(), serialized)

    self.assertEqual(len(restored.get_tasks()), len(original.get_tasks()))
    self.assertEqual(restored.spec.start.get_property('marker'),
                     original.spec.start.get_property('marker'))

    start_tasks = [t for t in restored.get_tasks()
                   if t.task_spec.name == 'Start']
    root_tasks = [t for t in restored.get_tasks()
                  if t.task_spec.name == 'Root']
    self.assertEqual(1, len(start_tasks))
    self.assertEqual(1, len(root_tasks))
def set_up(self, filename):
    """Load an XML-defined workflow pattern and prepare it for a test run.

    Reads *filename*, deserializes it into a WorkflowSpec, hooks up path
    tracking and instantiates the Workflow under test.
    """
    # Test patterns that are defined in XML format.
    # Fix: use 'with' so the file handle is closed deterministically.
    with open(filename) as fp:
        xml = fp.read()
    self.wf_spec = WorkflowSpec.deserialize(XmlSerializer(), xml,
                                            filename=filename)
    self.taken_path = self.track_workflow(self.wf_spec)
    self.workflow = Workflow(self.wf_spec)
def start_workflow(workflow_name):
    """Start the graduation-project workflow named *workflow_name* for the
    current user, persist it, and redirect to the user's page."""
    # Instantiate a workflow from the named, pre-loaded spec.
    workflow = Workflow(workflow_specs[workflow_name])

    # Complete the initial Start task so the real tasks become READY.
    ready_tasks = workflow.get_tasks(state=Task.READY)
    workflow.complete_task_from_id(ready_tasks[0].id)

    # Record who started the workflow.
    workflow.data["student"] = g.user.username

    # Persist the instance, then send the user back to their page.
    save_workflow_instance(workflow, g.user.id)
    return redirect(url_for('.user_page', username=g.user.username))
def deserialize_workflow(self, s_state, **kwargs):
    """Rebuild a Workflow from serialized state, with sanity asserts.

    The asserts verify that constructing the Workflow (with
    ``deserializing=True``) neither replaces the spec's 'Root' task spec
    nor replaces the pre-existing task tree root object.
    """
    wf_spec = self.deserialize_workflow_spec(s_state['wf_spec'], **kwargs)
    original_root = wf_spec.task_specs['Root']
    workflow = Workflow(wf_spec, deserializing=True)
    new_root = wf_spec.task_specs['Root']
    # Constructing the workflow must not have swapped out the Root spec.
    assert original_root is new_root
    # attributes
    workflow.attributes = s_state['attributes']
    # last_task -- stored value assigned as-is (not resolved to a Task here).
    workflow.last_task = s_state['last_task']
    # outer_workflow restoration is intentionally disabled:
    #workflow.outer_workflow = find_workflow_by_id(remap_workflow_id(s_state['outer_workflow']))
    # success
    workflow.success = s_state['success']
    # workflow
    workflow.spec = wf_spec
    # task_tree -- deserialization must reuse the existing root task object.
    old_root_task = workflow.task_tree
    workflow.task_tree = self._deserialize_task(workflow, s_state['task_tree'])
    assert old_root_task is workflow.task_tree
    return workflow
def testPickle(self):
    """Round-trip a fully parsed workflow through pickling, several times.

    Fix: the local previously named ``file`` shadowed the builtin; renamed
    to ``xml_filename``.  (``xrange`` is kept -- this module is Python 2.)
    """
    # Read a complete workflow.
    xml_filename = os.path.join(os.path.dirname(__file__),
                                'xml/spiff/workflow1.xml')
    for i in xrange(5):  # repeat to catch nondeterministic pickle failures
        wf_spec_list = self.reader.parse_file(xml_filename)
        wf_spec = wf_spec_list[0]
        workflow = Workflow(wf_spec)
        self.doPickleSingle(workflow)
def testSerialize(self):
    """Deserialize the sample spec and pickle-round-trip it repeatedly.

    Fix: both file reads now use ``with`` so handles are not leaked.
    """
    # Read a complete workflow spec.
    xml_file = os.path.join(data_dir, 'spiff', 'workflow1.xml')
    with open(xml_file) as fp:
        xml = fp.read()
    path_file = os.path.splitext(xml_file)[0] + '.path'
    with open(path_file) as fp:
        expected_path = fp.read().strip().split('\n')
    wf_spec = WorkflowSpec.deserialize(serializer, xml)
    for i in range(5):  # repeat to catch nondeterministic failures
        workflow = Workflow(wf_spec)
        self.doPickleSingle(workflow, expected_path)
def run_workflow(test, wf_spec, expected_path, expected_data, max_tries=1):
    """Drive *wf_spec* to completion and verify path and 'data' attribute.

    *test* is the TestCase used for assertions; *expected_path* and
    *expected_data* are compared when not None.  Returns the workflow.
    """
    # Execute all tasks within the Workflow.
    taken_path = track_workflow(wf_spec)
    workflow = Workflow(wf_spec)
    test.assert_(not workflow.is_completed(), 'Workflow is complete before start')
    try:
        # We allow the workflow to require a maximum of 5 seconds to
        # complete, to allow for testing long running tasks.
        for i in range(10):
            workflow.complete_all(False)
            if workflow.is_completed():
                break
            time.sleep(0.5)
    except:
        workflow.task_tree.dump()
        raise
    #workflow.task_tree.dump()
    # NOTE(review): this loop only re-polls is_completed() without driving
    # the workflow -- with max_tries > 1 it re-checks the same state;
    # confirm whether advancing the workflow was intended here.
    complete = False
    while max_tries > 0 and complete is False:
        max_tries -= 1
        complete = workflow.is_completed()
    test.assert_(complete,
                 'complete_all() returned, but workflow is not complete\n' +
                 workflow.task_tree.get_dump())
    # Make sure that there are no waiting tasks left in the tree.
    for thetask in Task.Iterator(workflow.task_tree, Task.READY):
        workflow.task_tree.dump()
        raise Exception('Task with state READY: %s' % thetask.name)
    # Check whether the correct route was taken.
    if expected_path is not None:
        taken_path = '\n'.join(taken_path) + '\n'
        error = 'Expected:\n'
        error += '%s\n' % expected_path
        error += 'but got:\n'
        error += '%s\n' % taken_path
        test.assert_(taken_path == expected_path, error)
    # Check attribute availibility.
    if expected_data is not None:
        result = workflow.get_attribute('data', '')
        error = 'Expected:\n'
        error += '%s\n' % expected_data
        error += 'but got:\n'
        error += '%s\n' % result
        test.assert_(result == expected_data, error)
    return workflow
def testRunWorkflow(self):
    """Run the parsed OpenWFE workflow to completion and compare the
    completed-task path against the known-good sequence."""
    xml_path = os.path.join(os.path.dirname(__file__),
                            'xml/openwfe/workflow1.xml')
    wf_spec = self.reader.parse_file(xml_path)[0]

    # Hook both tracking callbacks into every task spec.
    for name, spec in wf_spec.task_specs.items():
        spec.reached_event.connect(self.on_reached_cb)
        spec.completed_event.connect(on_complete_cb, self.taken_path)

    workflow = Workflow(wf_spec)
    try:
        workflow.complete_all()
    except:
        workflow.dump()  # dump the tree for diagnosis, then re-raise
        raise

    expected_path = [(1, 'Start'), (2, 'concurrence_1'), (3, 'task_a1'),
                     (4, 'task_a2'), (5, 'if_condition_1'), (6, 'task_a3'),
                     (7, 'if_condition_1_end'), (8, 'if_condition_2'),
                     (9, 'task_a5'), (10, 'if_condition_2_end'),
                     (3, 'task_b1'), (4, 'task_b2'),
                     (5, 'concurrence_1_end'), (6, 'task_c1'),
                     (7, 'task_c2'), (8, 'End')]
    assert_same_path(self, expected_path, self.taken_path)
def deserialize_workflow(self, s_state):
    """Reconstruct a Workflow from its serialized dict representation."""
    wf_spec_class = get_class(s_state['workflow'])
    wf_spec = wf_spec_class()
    workflow = Workflow(wf_spec)

    workflow.attributes = s_state['attributes']
    workflow.last_task = s_state['last_task']
    workflow.success = s_state['success']

    # Deserialize every task, then root the tree at the 'Root' task
    # (indexing [0] preserves the original IndexError if none exists).
    tasks = []
    for serialized_task in s_state['task_tree']:
        tasks.append(self.deserialize_task(workflow, serialized_task))
    root_candidates = [task for task in tasks
                       if task.task_spec.name == 'Root']
    workflow.task_tree = root_candidates[0]

    workflow.spec = wf_spec
    return workflow
def testDictionarySerializer(self):
    """Round-trip the workflow through DictionarySerializer and compare
    the full task-tree dumps of original and restored workflows."""
    original = self.workflow
    serialized = original.serialize(DictionarySerializer())
    restored = Workflow.deserialize(DictionarySerializer(), serialized)
    before = original.get_dump()
    after = restored.get_dump()
    self.assertEqual(before, after)
def runWorkflow(self, wf_spec, xml_filename):
    """Execute *wf_spec* to completion, then compare the taken path and the
    'data' attribute against the .path / .data companion files, if present.

    Fix: the two open/read/close sequences now use ``with`` so the handles
    are closed even if read() raises.
    """
    taken_path = []
    for name in wf_spec.task_specs:
        wf_spec.task_specs[name].reached_event.connect(on_reached_cb, taken_path)
        wf_spec.task_specs[name].completed_event.connect(on_complete_cb, taken_path)

    # Execute all tasks within the Workflow
    workflow = Workflow(wf_spec)
    self.assert_(not workflow.is_completed(),
                 'Workflow is complete before start')
    try:
        workflow.complete_all(False)
    except:
        workflow.task_tree.dump()
        raise
    #workflow.task_tree.dump()
    self.assert_(workflow.is_completed(),
                 'complete_all() returned, but workflow is not complete\n' +
                 workflow.task_tree.get_dump())

    # Make sure that there are no waiting tasks left in the tree.
    for thetask in Task.Iterator(workflow.task_tree, Task.READY):
        workflow.task_tree.dump()
        raise Exception('Task with state READY: %s' % thetask.name)

    # Check whether the correct route was taken.
    # NOTE: 'name' below is the last key left over from the loop above;
    # kept as-is to preserve the original error-message behaviour.
    filename = xml_filename + '.path'
    if os.path.exists(filename):
        with open(filename, 'r') as fp:
            expected = fp.read()
        taken_path = '\n'.join(taken_path) + '\n'
        error = '%s:\n' % name
        error += 'Expected:\n'
        error += '%s\n' % expected
        error += 'but got:\n'
        error += '%s\n' % taken_path
        self.assert_(taken_path == expected, error)

    # Check attribute availibility.
    filename = xml_filename + '.data'
    if os.path.exists(filename):
        with open(filename, 'r') as fp:
            expected = fp.read()
        result = workflow.get_attribute('data', '')
        error = '%s:\n' % name
        error += 'Expected:\n'
        error += '%s\n' % expected
        error += 'but got:\n'
        error += '%s\n' % result
        self.assert_(result == expected, error)
def testDictionarySerializer(self):
    """
    Tests the SelectivePickler serializer for persisting Workflows and Tasks.
    """
    old_workflow = self.workflow
    serializer = DictionarySerializer()
    serialized_workflow = old_workflow.serialize(serializer)

    serializer = DictionarySerializer()
    new_workflow = Workflow.deserialize(serializer, serialized_workflow)
    before = old_workflow.get_dump()
    after = new_workflow.get_dump()
    # assertEqual replaces the deprecated assert_ alias (consistent with
    # the sibling test that already uses assertEqual); the explicit message
    # is kept for readable failures.
    self.assertEqual(before, after,
                     'Before:\n' + before + '\n'
                     + 'After:\n' + after + '\n')
def run_workflow(test, wf_spec, expected_path, expected_data, workflow=None):
    """Drive a workflow to completion and verify path and 'data' value.

    If *workflow* is given it is resumed (e.g. a deserialized instance);
    otherwise a fresh Workflow is created from *wf_spec*.  Returns the
    completed workflow.
    """
    # Execute all tasks within the Workflow.
    if workflow is None:
        taken_path = track_workflow(wf_spec)
        workflow = Workflow(wf_spec)
    else:
        # Resumed workflow: track via its own spec.
        taken_path = track_workflow(workflow.spec)
    test.assert_(not workflow.is_completed(), 'Workflow is complete before start')
    try:
        # We allow the workflow to require a maximum of 5 seconds to
        # complete, to allow for testing long running tasks.
        for i in range(10):
            workflow.complete_all(False)
            if workflow.is_completed():
                break
            time.sleep(0.5)
    except:
        workflow.task_tree.dump()
        raise
    #workflow.task_tree.dump()
    test.assert_(workflow.is_completed(),
                 'complete_all() returned, but workflow is not complete\n' +
                 workflow.task_tree.get_dump())
    # Make sure that there are no waiting tasks left in the tree.
    for thetask in Task.Iterator(workflow.task_tree, Task.READY):
        workflow.task_tree.dump()
        raise Exception('Task with state READY: %s' % thetask.name)
    # Check whether the correct route was taken.
    if expected_path is not None:
        taken_path = '\n'.join(taken_path) + '\n'
        error = 'Expected:\n'
        error += '%s\n' % expected_path
        error += 'but got:\n'
        error += '%s\n' % taken_path
        test.assert_(taken_path == expected_path, error)
    # Check data availibility.
    if expected_data is not None:
        result = workflow.get_data('data', '')
        error = 'Expected:\n'
        error += '%s\n' % expected_data
        error += 'but got:\n'
        error += '%s\n' % result
        test.assert_(result == expected_data, error)
    return workflow
def testDeserialization(self):
    """Deserialization must reproduce the task count, spec data and the
    unique Start/Root tasks of the original workflow."""
    original = self.workflow
    original.spec.start.set_data(marker=True)

    serialized = original.serialize(DictionarySerializer())
    restored = Workflow.deserialize(DictionarySerializer(), serialized)

    self.assertEqual(len(restored.get_tasks()), len(original.get_tasks()))
    self.assertEqual(restored.spec.start.get_data('marker'),
                     original.spec.start.get_data('marker'))

    names = [t.task_spec.name for t in restored.get_tasks()]
    self.assertEqual(1, names.count('Start'))
    self.assertEqual(1, names.count('Root'))
def testDeserialization(self):
    """A workflow serialized mid-run must deserialize and run to
    completion; the original instance must remain untouched."""
    original = self.workflow
    original.complete_next()
    self.assertEqual('task_a2', original.last_task.get_name())

    snapshot = original.serialize(DictionarySerializer())
    restored = Workflow.deserialize(DictionarySerializer(), snapshot)
    # Serializing must not have advanced the original.
    self.assertEqual('task_a2', original.last_task.get_name())

    restored.complete_all()
    # Completing the restored copy must not advance the original either.
    self.assertEqual('task_a2', original.last_task.get_name())
def testDeserialization(self):
    """
    Tests the that deserialized workflow can be completed.
    """
    old_workflow = self.workflow
    old_workflow.complete_next()
    # assertEqual replaces the deprecated assertEquals alias throughout.
    self.assertEqual('task_a2', old_workflow.last_task.get_name())
    serializer = DictionarySerializer()
    serialized_workflow = old_workflow.serialize(serializer)

    serializer = DictionarySerializer()
    new_workflow = Workflow.deserialize(serializer, serialized_workflow)
    self.assertEqual('task_a2', old_workflow.last_task.get_name())
    new_workflow.complete_all()
    # The original workflow must be unaffected by completing the copy.
    self.assertEqual('task_a2', old_workflow.last_task.get_name())
def runWorkflow(self, wf_spec, xml_filename):
    """Execute *wf_spec* to completion, then compare the taken path and the
    'data' attribute against the .path / .data companion files, if present.

    Fix: file handles are now managed with ``with`` instead of manual
    open/close, so they are released even if read() raises.
    """
    taken_path = []
    for name in wf_spec.task_specs:
        wf_spec.task_specs[name].reached_event.connect(on_reached_cb, taken_path)
        wf_spec.task_specs[name].completed_event.connect(on_complete_cb, taken_path)

    # Execute all tasks within the Workflow
    workflow = Workflow(wf_spec)
    self.assert_(not workflow.is_completed(),
                 "Workflow is complete before start")
    try:
        workflow.complete_all(False)
    except:
        workflow.task_tree.dump()
        raise
    # workflow.task_tree.dump()
    self.assert_(
        workflow.is_completed(),
        "complete_all() returned, but workflow is not complete\n"
        + workflow.task_tree.get_dump(),
    )

    # Make sure that there are no waiting tasks left in the tree.
    for thetask in Task.Iterator(workflow.task_tree, Task.READY):
        workflow.task_tree.dump()
        raise Exception("Task with state READY: %s" % thetask.name)

    # Check whether the correct route was taken.
    # NOTE: 'name' is the last key left over from the loop above; kept
    # as-is to preserve the original error-message behaviour.
    filename = xml_filename + ".path"
    if os.path.exists(filename):
        with open(filename, "r") as fp:
            expected = fp.read()
        taken_path = "\n".join(taken_path) + "\n"
        error = "%s:\n" % name
        error += "Expected:\n"
        error += "%s\n" % expected
        error += "but got:\n"
        error += "%s\n" % taken_path
        self.assert_(taken_path == expected, error)

    # Check attribute availibility.
    filename = xml_filename + ".data"
    if os.path.exists(filename):
        with open(filename, "r") as fp:
            expected = fp.read()
        result = workflow.get_attribute("data", "")
        error = "%s:\n" % name
        error += "Expected:\n"
        error += "%s\n" % expected
        error += "but got:\n"
        error += "%s\n" % result
        self.assert_(result == expected, error)
def run_workflow(test, wf_spec, expected_path, expected_data, workflow=None):
    """Drive a workflow to completion and verify path and 'data' value.

    If *workflow* is given it is resumed (e.g. a deserialized instance);
    otherwise a fresh Workflow is created from *wf_spec*.  Returns the
    completed workflow.
    """
    # Execute all tasks within the Workflow.
    if workflow is None:
        taken_path = track_workflow(wf_spec)
        workflow = Workflow(wf_spec)
    else:
        # Resumed workflow: track via its own spec.
        taken_path = track_workflow(workflow.spec)
    test.assertFalse(workflow.is_completed())
    try:
        # We allow the workflow to require a maximum of 5 seconds to
        # complete, to allow for testing long running tasks.
        for i in range(10):
            workflow.complete_all(False)
            if workflow.is_completed():
                break
            time.sleep(0.5)
    except:
        workflow.task_tree.dump()
        raise
    # workflow.task_tree.dump()
    test.assertTrue(workflow.is_completed(), workflow.task_tree.get_dump())
    # Make sure that there are no waiting tasks left in the tree.
    for thetask in Task.Iterator(workflow.task_tree, Task.READY):
        workflow.task_tree.dump()
        raise Exception('Task with state READY: %s' % thetask.name)
    # Check whether the correct route was taken.
    if expected_path is not None:
        taken_path = '\n'.join(taken_path) + '\n'
        test.assertEqual(taken_path, expected_path)
    # Check data availibility.
    if expected_data is not None:
        result = workflow.get_data('data', '')
        # NOTE(review): assertIn checks membership (result contained in
        # expected_data), not equality -- the sibling helper uses equality;
        # confirm which was intended.
        test.assertIn(result, expected_data)
    return workflow
def run(self, UserSelection, restart=False):
    """Step the stored workflow using *UserSelection* as task data.

    Collects the condition keys referenced by conditional task specs and
    aborts (returns early) if the user's selection does not supply all of
    them.  Python 2 only (uses ``dict.has_key``).

    :param UserSelection: dict of user-chosen values (None means empty).
    :param restart: when True, start a fresh workflow from self.wf_spec.
    """
    if restart:
        self.workflow = Workflow(self.wf_spec)
    workflow = self.workflow
    condition_keys = []
    if UserSelection is None:
        UserSelection = {}
    task_data_dict = UserSelection.copy()
    while not workflow.is_completed():
        tasks = workflow.get_tasks(Task.READY)
        for t in tasks:
            print("Ready:", t.task_spec.name)
            # Gather the attribute names each conditional branch tests.
            if hasattr(t.task_spec, "cond_task_specs"):
                for cond, name in t.task_spec.cond_task_specs:
                    for cond_unit in cond.args:
                        if hasattr(cond_unit, "name"):
                            condition_keys.append(cond_unit.name)
        # Every condition key must be present in the user's selection.
        flag_keys_in_user_select = True
        for cond_key in condition_keys:
            if not task_data_dict.has_key(cond_key):
                print(cond_key)
                flag_keys_in_user_select = False
                break
        if not flag_keys_in_user_select:
            # some tast's condition's key not in input userselect dict
            return
        # Push the selection into every READY task, then advance one step.
        for t in tasks:
            t.set_data(**task_data_dict)
        workflow.complete_next()
    # NOTE(review): this check appears unreachable -- the loop above only
    # exits when is_completed() is True; confirm intended placement.
    if not workflow.is_completed():
        raise QuestionError('invalid feature')
def _runWorkflow(self, wf_spec):
    """Run *wf_spec* to completion while recording reached/completed
    events, then compare the completed path to the expected one.
    (Python 2: uses dict.iteritems.)"""
    reached_log = []
    completed_log = []
    for name, task in wf_spec.task_specs.iteritems():
        task.reached_event.connect(on_reached_cb, reached_log)
        task.completed_event.connect(on_complete_cb, completed_log)

    # Execute all tasks within the Workflow.
    workflow = Workflow(wf_spec)
    self.assert_(not workflow.is_completed(),
                 'Workflow complete before start')
    try:
        workflow.complete_all()
    except:
        workflow.dump()  # diagnostic dump, then re-raise
        raise
    self.assert_(workflow.is_completed(),
                 'complete_all() returned, but workflow is not complete\n' +
                 workflow.task_tree.get_dump())
    #workflow.task_tree.dump()
    assert_same_path(self, self.expected_path, completed_log)
def _runWorkflow(self, wf_spec):
    """Run *wf_spec* to completion while recording reached/completed
    events, then compare the completed path to self.expected_path.
    (Python 2: uses dict.iteritems.)"""
    taken_path = {'reached': [], 'completed': []}
    for name, task in wf_spec.task_specs.iteritems():
        task.reached_event.connect(on_reached_cb, taken_path['reached'])
        task.completed_event.connect(on_complete_cb, taken_path['completed'])
    # Execute all tasks within the Workflow.
    workflow = Workflow(wf_spec)
    self.assert_(not workflow.is_completed(),
                 'Workflow complete before start')
    try:
        workflow.complete_all()
    except:
        # Dump the tree for diagnosis, then re-raise the original error.
        workflow.dump()
        raise
    self.assert_(workflow.is_completed(),
                 'complete_all() returned, but workflow is not complete\n' +
                 workflow.task_tree.get_dump())
    #workflow.task_tree.dump()
    assert_same_path(self, self.expected_path, taken_path['completed'])
def deserialize_workflow(self, s_state, **kwargs):
    """Rebuild a Workflow from its serialized dict state ``s_state``.

    Restores the spec, then flat attributes, then the task tree.  Unlike
    richer variants, parent/last_task links are assigned as stored
    (not re-resolved to Task objects here).
    """
    wf_spec = self.deserialize_workflow_spec(s_state['wf_spec'], **kwargs)
    workflow = Workflow(wf_spec)
    # attributes
    workflow.attributes = s_state['attributes']
    # last_task -- stored value assigned as-is.
    workflow.last_task = s_state['last_task']
    # outer_workflow restoration is intentionally disabled here:
    #workflow.outer_workflow = find_workflow_by_id(remap_workflow_id(s_state['outer_workflow']))
    # success
    workflow.success = s_state['success']
    # workflow
    workflow.spec = wf_spec
    # task_tree
    workflow.task_tree = self._deserialize_task(workflow, s_state['task_tree'])
    return workflow
# Script: load an Ansible workflow spec from JSON and run it (best-effort)
# for up to ~10 seconds, polling for completion.
from SpiffWorkflow.specs import WorkflowSpec
from SpiffWorkflow import Task,Workflow
#from SpiffWorkflow.serializer.json import JSONSerializer
from ansibleserializer import AnsibleSerializer
import time

# Load the serialized workflow spec from disk.
with open('ansible-workflow-spec.json') as fp:
    workflow_json = fp.read()
serializer = AnsibleSerializer()
spec = WorkflowSpec.deserialize(serializer, workflow_json)

# Create the workflow.
workflow = Workflow(spec)

# Execute until all tasks are done or require manual intervention.
# For the sake of this tutorial, we ignore the "manual" flag on the
# tasks. In practice, you probably don't want to do that.
for i in range(20):
    print(i)
    workflow.complete_all(False)
    if workflow.is_completed():
        break
    time.sleep(0.5)

# Alternative drive strategies, kept for reference:
#workflow.complete_all(halt_on_manual=False)
#workflow.complete_next()
#tasks = workflow.get_tasks(Task.WAITING)
#for t in tasks:
#    print(t.get_name())
#    t.complete()
def testBeginWorkflowStepByStep(self):
    """
    Simulates interactive calls, as would be issued by a user.

    Fix: the XML read now uses ``with`` so the file handle is closed.
    """
    xml_file = os.path.join(data_dir, 'spiff', 'workflow1.xml')
    with open(xml_file) as fp:
        xml = fp.read()
    wf_spec = WorkflowSpec.deserialize(XmlSerializer(), xml)
    workflow = Workflow(wf_spec)

    # Only the Start task is READY initially; complete it.
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 1)
    self.assertEqual(tasks[0].task_spec.name, 'Start')
    workflow.complete_task_from_id(tasks[0].id)
    self.assertEqual(tasks[0].state, Task.COMPLETED)

    # Two parallel branches become READY: task_a1 and task_b1.
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 2)
    task_a1 = tasks[0]
    task_b1 = tasks[1]
    self.assertEqual(task_a1.task_spec.__class__, Simple)
    self.assertEqual(task_a1.task_spec.name, 'task_a1')
    self.assertEqual(task_b1.task_spec.__class__, Simple)
    self.assertEqual(task_b1.task_spec.name, 'task_b1')

    # Completing task_a1 exposes task_a2; task_b1 stays READY.
    workflow.complete_task_from_id(task_a1.id)
    self.assertEqual(task_a1.state, Task.COMPLETED)
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 2)
    self.assertTrue(task_b1 in tasks)
    task_a2 = tasks[0]
    self.assertEqual(task_a2.task_spec.__class__, Simple)
    self.assertEqual(task_a2.task_spec.name, 'task_a2')

    workflow.complete_task_from_id(task_a2.id)
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 1)
    self.assertTrue(task_b1 in tasks)

    workflow.complete_task_from_id(task_b1.id)
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 1)
    workflow.complete_task_from_id(tasks[0].id)

    # Both branches complete; the synchronisation point becomes READY.
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 1)
    self.assertEqual(tasks[0].task_spec.name, 'synch_1')
def testBeginWorkflowStepByStep(self):
    """
    Simulates interactive calls, as would be issued by a user.

    Walks the spec one completion at a time, asserting the exact set of
    READY tasks after each step, up to the synch_1 synchronisation point.
    """
    wf_spec = self._createWorkflowSpec()
    workflow = Workflow(wf_spec)
    # Only the Start task is READY initially.
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 1)
    self.assertEqual(tasks[0].task_spec.name, 'Start')
    workflow.complete_task_from_id(tasks[0].id)
    self.assertEqual(tasks[0].state, Task.COMPLETED)
    # Two parallel branches become READY: task_a1 and task_b1.
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 2)
    task_a1 = tasks[0]
    task_b1 = tasks[1]
    self.assertEqual(task_a1.task_spec.__class__, Simple)
    self.assertEqual(task_a1.task_spec.name, 'task_a1')
    self.assertEqual(task_b1.task_spec.__class__, Simple)
    self.assertEqual(task_b1.task_spec.name, 'task_b1')
    # Completing task_a1 exposes task_a2; task_b1 stays READY.
    workflow.complete_task_from_id(task_a1.id)
    self.assertEqual(task_a1.state, Task.COMPLETED)
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 2)
    self.assertTrue(task_b1 in tasks)
    task_a2 = tasks[0]
    self.assertEqual(task_a2.task_spec.__class__, Simple)
    self.assertEqual(task_a2.task_spec.name, 'task_a2')
    workflow.complete_task_from_id(task_a2.id)
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 1)
    self.assertTrue(task_b1 in tasks)
    workflow.complete_task_from_id(task_b1.id)
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 1)
    workflow.complete_task_from_id(tasks[0].id)
    # Both branches complete; the synchronisation point becomes READY.
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 1)
    self.assertEqual(tasks[0].task_spec.name, 'synch_1')
# Script: load a workflow spec serialized with the custom NuclearSerializer
# and run it to completion.
import json

from SpiffWorkflow import Workflow
from SpiffWorkflow.specs import WorkflowSpec
from serializer import NuclearSerializer

# Load from JSON
# NOTE(review): the filename has no .json extension -- confirm 'nuclear'
# is the file actually present on disk.
with open('nuclear') as fp:
    workflow_json = fp.read()
serializer = NuclearSerializer()
spec = WorkflowSpec.deserialize(serializer, workflow_json)

# Create the workflow.
workflow = Workflow(spec)

workflow.complete_all()
# Script: load the nuclear-strike workflow spec from JSON and run it,
# ignoring the "manual" flag on tasks.
import json

from SpiffWorkflow import Workflow
from SpiffWorkflow.specs import WorkflowSpec
from SpiffWorkflow.serializer.json import JSONSerializer

# Load from JSON
with open('nuclear.json') as fp:
    workflow_json = fp.read()
serializer = JSONSerializer()
spec = WorkflowSpec.deserialize(serializer, workflow_json)

# Alternatively, create an instance of the Python based specification.
#from nuclear import NuclearStrikeWorkflowSpec
#spec = NuclearStrikeWorkflowSpec()

# Create the workflow.
workflow = Workflow(spec)

# Execute until all tasks are done or require manual intervention.
# For the sake of this tutorial, we ignore the "manual" flag on the
# tasks. In practice, you probably don't want to do that.
workflow.complete_all(halt_on_manual=False)

# Alternatively, this is what a UI would do for a manual task.
#workflow.complete_task_from_id(...)
def testConstructor(self):
    """A Workflow can be built from a spec whose start task feeds a
    Cancel task (constructor smoke test)."""
    wf_spec = WorkflowSpec()
    cancel_spec = Cancel(wf_spec, 'name')
    wf_spec.start.connect(cancel_spec)
    workflow = Workflow(wf_spec)
# Script: serialize the nuclear-strike workflow to pretty-printed JSON.
import json

from SpiffWorkflow import Workflow
from SpiffWorkflow.serializer.json import JSONSerializer
from nuclear import NuclearStrikeWorkflowSpec

serializer = JSONSerializer()
spec = NuclearStrikeWorkflowSpec()
workflow = Workflow(spec)
data = workflow.serialize(serializer)

# This next line is unnecessary in practice; it just makes the JSON pretty.
pretty = json.dumps(json.loads(data), indent=4, separators=(',', ': '))

# Fix: use a context manager so the file is closed (and flushed)
# deterministically instead of relying on garbage collection.
with open('workflow.json', 'w') as fh:
    fh.write(pretty)
def run_workflow(spec):
    """Step a workflow four times, dumping the task tree before each step.

    NOTE: Python 2 only (uses the ``print`` statement).
    """
    wflow = Workflow(spec)
    for _ in range(0, 4):
        wflow.task_tree.dump()
        print '----'
        # False: do not pick up manual tasks automatically.
        wflow.complete_next(False)
# Script: serialize the nuclear-strike workflow and write it as JSON.
import json

from SpiffWorkflow import Workflow
from SpiffWorkflow.serializer.json import JSONSerializer
from nuclear import NuclearStrikeWorkflowSpec

serializer = JSONSerializer()
spec = NuclearStrikeWorkflowSpec()
workflow = Workflow(spec)
data = workflow.serialize(serializer)
pretty = json.dumps(data, indent=4, separators=(',', ': '))

# Fix: use a context manager so the file is closed (and flushed)
# deterministically instead of relying on garbage collection.
with open('workflow.json', 'w') as fh:
    fh.write(pretty)
# Script: restore a Workflow from a previously saved JSON state file.
from SpiffWorkflow import Workflow
from SpiffWorkflow.serializer.json import JSONSerializer

# Read the saved state, then rebuild the workflow from it.
with open('workflow.json') as fp:
    workflow_json = fp.read()

serializer = JSONSerializer()
workflow = Workflow.deserialize(serializer, workflow_json)
# Script: build the scheduler demo spec and drive it to completion.
from SpiffWorkflow import Workflow
from architecture.scheduler.spiff_wf_demo import NuclearStrikeWorkflowSpec

# Instantiate the demo spec and run every task in the resulting workflow.
spec = NuclearStrikeWorkflowSpec()
wf = Workflow(spec)
wf.complete_all()
def task():
    """Create an (empty) spec, wrap it in a workflow and complete one task."""
    spec = WorkflowSpec()
    # (Add tasks to the spec here, or create one directly from an existing file)
    wf = Workflow(spec)
    # NOTE(review): 123 looks like a placeholder task id -- real ids come
    # from workflow.get_tasks(); confirm this is intentional example code.
    wf.complete_task_from_id(123)
def testBeginWorkflowStepByStep(self):
    """
    Simulates interactive calls, as would be issued by a user.

    Loads the sample XML spec and completes tasks one at a time, asserting
    the exact READY set after each step, up to the synch_1 point.
    """
    xml_file = os.path.join(data_dir, 'spiff', 'workflow1.xml')
    with open(xml_file) as fp:
        xml = fp.read()
    wf_spec = WorkflowSpec.deserialize(XmlSerializer(), xml)
    workflow = Workflow(wf_spec)
    # Only the Start task is READY initially.
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 1)
    self.assertEqual(tasks[0].task_spec.name, 'Start')
    workflow.complete_task_from_id(tasks[0].id)
    self.assertEqual(tasks[0].state, Task.COMPLETED)
    # Two parallel branches become READY: task_a1 and task_b1.
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 2)
    task_a1 = tasks[0]
    task_b1 = tasks[1]
    self.assertEqual(task_a1.task_spec.__class__, Simple)
    self.assertEqual(task_a1.task_spec.name, 'task_a1')
    self.assertEqual(task_b1.task_spec.__class__, Simple)
    self.assertEqual(task_b1.task_spec.name, 'task_b1')
    # Completing task_a1 exposes task_a2; task_b1 stays READY.
    workflow.complete_task_from_id(task_a1.id)
    self.assertEqual(task_a1.state, Task.COMPLETED)
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 2)
    self.assertTrue(task_b1 in tasks)
    task_a2 = tasks[0]
    self.assertEqual(task_a2.task_spec.__class__, Simple)
    self.assertEqual(task_a2.task_spec.name, 'task_a2')
    workflow.complete_task_from_id(task_a2.id)
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 1)
    self.assertTrue(task_b1 in tasks)
    workflow.complete_task_from_id(task_b1.id)
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 1)
    workflow.complete_task_from_id(tasks[0].id)
    # Both branches complete; the synchronisation point becomes READY.
    tasks = workflow.get_tasks(Task.READY)
    self.assertEqual(len(tasks), 1)
    self.assertEqual(tasks[0].task_spec.name, 'synch_1')
def workflow_deserialize(workflow_json):
    """Return a Workflow instance rebuilt from its TradeFlow JSON form."""
    serializer = TradeFlowSerializer()
    return Workflow.deserialize(serializer, workflow_json)
def test_Merge_data_merging(self):
    """Test that Merge task actually merges data

    Builds four parallel branches feeding two Merge tasks at different
    depths (plus one unmerged branch), seeds every task with a unique
    attribute, runs to completion and checks the exact merged attribute
    dicts downstream of each Merge.
    """
    wf_spec = WorkflowSpec()
    first = Simple(wf_spec, 'first')
    second = Simple(wf_spec, 'second')
    third = Simple(wf_spec, 'third')
    bump = Simple(wf_spec, 'bump')
    fourth = Simple(wf_spec, 'fourth')
    merge1 = Merge(wf_spec, 'merge 1')
    simple1 = Simple(wf_spec, 'simple 1')
    merge2 = Merge(wf_spec, 'merge 2')
    simple2 = Simple(wf_spec, 'simple 2')
    unmerged = Simple(wf_spec, 'unmerged')
    wf_spec.start.connect(first)
    wf_spec.start.connect(second)
    wf_spec.start.connect(third)
    wf_spec.start.connect(bump)
    bump.connect(fourth)
    # Test join at different depths in tree
    first.connect(merge1)
    second.connect(merge1)
    second.connect(unmerged)
    first.connect(merge2)
    second.connect(merge2)
    third.connect(merge2)
    fourth.connect(merge2)
    merge1.connect(simple1)
    merge2.connect(simple2)
    workflow = Workflow(wf_spec)
    workflow.task_tree.set_attribute(everywhere=1)
    # Give every task both a 'name' attribute and one keyed by its own name.
    for task in workflow.get_tasks():
        task.set_attribute(**{'name': task.get_name(), task.get_name(): 1})
    workflow.complete_all()
    self.assertTrue(workflow.is_completed())
    found = {}
    for task in workflow.get_tasks():
        if task.task_spec is simple1:
            # merge1 joins 'first' and 'second' only.
            self.assertIn('first', task.attributes)
            self.assertIn('second', task.attributes)
            self.assertDictEqual(task.attributes, {'Start': 1,
                    'merge 1': 1, 'name': 'Start', 'simple 1': 1,
                    'second': 1, 'first': 1})
            found['simple1'] = task
        if task.task_spec is simple2:
            # merge2 joins all four branches, including the deeper 'fourth'.
            self.assertIn('first', task.attributes)
            self.assertIn('second', task.attributes)
            self.assertIn('third', task.attributes)
            self.assertIn('fourth', task.attributes)
            self.assertDictEqual(task.attributes, {'merge 2': 1,
                    'simple 2': 1, 'name': 'Start', 'third': 1, 'bump': 1,
                    'Start': 1, 'second': 1, 'first': 1, 'fourth': 1})
            found['simple2'] = task
        if task.task_spec is unmerged:
            # The unmerged branch sees only its own lineage.
            self.assertDictEqual(task.attributes,
                    {'Start': 1, 'second': 1, 'name': 'Start', 'unmerged': 1})
            found['unmerged'] = task
    self.assertIn('simple1', found)
    self.assertIn('simple2', found)
    self.assertIn('unmerged', found)