Example #1
    def __init__(self):
        WorkflowSpec.__init__(self)

        # The first step of our workflow is to let the general confirm
        # the nuclear strike.
        general_choice = ExclusiveChoice(self, 'general')
        self.start.connect(general_choice)

        # The default choice of the general is to abort.
        cancel = Cancel(self, 'workflow_aborted')
        general_choice.connect(cancel)

        # Otherwise, we will ask the president to confirm.
        president_choice = ExclusiveChoice(self, 'president')
        cond = Equal(Attrib('confirmation'), 'yes')
        general_choice.connect_if(cond, president_choice)

        # The default choice of the president is to abort.
        president_choice.connect(cancel)

        # Otherwise, we will perform the nuclear strike.
        strike = Simple(self, 'nuclear_strike')
        president_choice.connect_if(cond, strike)

        # Now we connect our Python function to the Task named 'nuclear_strike'
        strike.completed_event.connect(my_nuclear_strike)
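The my_nuclear_strike callback wired up above is not part of this excerpt. A minimal sketch of what it could look like; the signature is deliberately loose, since the exact arguments emitted by completed_event depend on the SpiffWorkflow version:

def my_nuclear_strike(*args, **kwargs):
    # Hypothetical subscriber for the completed_event signal above.
    print('Nuclear strike initiated.')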
Example #2
    def run_pattern(self, filename):
        # Locate the companion .path file; it is read later by the
        # serializer test methods.
        path_file = os.path.splitext(filename)[0] + '.path'

        # Load the .data file.
        data_file = os.path.splitext(filename)[0] + '.data'
        if os.path.exists(data_file):
            expected_data = open(data_file, 'r').read()
        else:
            expected_data = None

        # Test patterns that are defined in XML format.
        if filename.endswith('.xml'):
            xml     = open(filename).read()
            wf_spec = WorkflowSpec.deserialize(self.serializer, xml, filename = filename)
            self.serializerTestClass.wf_spec = wf_spec
            self.serializerTestClass.testSerializeWorkflowSpec(path_file=path_file,
                                                               data=expected_data)
            self.serializerTestClass.testSerializeWorkflow(path_file=path_file,
                                                           data=expected_data)

        # Test patterns that are defined in Python.
        if filename.endswith('.py') and not filename.endswith('__.py'):
            code    = compile(open(filename).read(), filename, 'exec')
            thedict = {}
            exec(code, thedict)
            wf_spec = thedict['TestWorkflowSpec']()
            self.serializerTestClass.wf_spec = wf_spec
            self.serializerTestClass.testSerializeWorkflowSpec(path_file=path_file,
                                                               data=expected_data)
            self.serializerTestClass.testSerializeWorkflow(path_file=path_file,
                                                           data=expected_data)
Example #3
    def testSerializeWorkflowSpec(self, path_file=None, data=None):
        if self.serializer is None:
            return

        # Back to back testing.
        try:
            serialized1 = self.wf_spec.serialize(self.serializer)
            wf_spec     = WorkflowSpec.deserialize(self.serializer, serialized1)
            serialized2 = wf_spec.serialize(self.serializer)
        except TaskSpecNotSupportedError:
            pass
        else:
            self.assert_(isinstance(serialized1, self.serial_type))
            self.assert_(isinstance(serialized2, self.serial_type))
            self.compareSerialization(serialized1, serialized2)

            # Test whether the restored workflow still works.
            if path_file is None:
                path_file = os.path.join(data_dir, 'spiff', 'workflow1.path')
                path      = open(path_file).read()
            elif os.path.exists(path_file):
                path = open(path_file).read()
            else:
                path = None

            run_workflow(self, wf_spec, path, data)

    def testDeserializeWorkflowSpec(self):
        xml_file  = os.path.join(data_dir, 'spiff', 'workflow1.xml')
        xml       = open(xml_file).read()
        path_file = os.path.splitext(xml_file)[0] + '.path'
        path      = open(path_file).read()
        wf_spec   = WorkflowSpec.deserialize(self.serializer, xml)

        run_workflow(self, wf_spec, path, None)
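Both tests rely on a run_workflow helper that is not shown in this listing. A rough stand-in under the assumption that it simply runs the spec to completion (the real helper in SpiffWorkflow's test suite also compares the taken path and the task data):

def run_workflow(test, wf_spec, expected_path, expected_data):
    # Hedged stand-in: build a workflow from the spec and run it to
    # completion; the path/data comparisons of the real helper are
    # omitted here.
    workflow = Workflow(wf_spec)
    workflow.complete_all()
    test.assertTrue(workflow.is_completed())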
Example #5
    def _create_subworkflow(self, my_task):
        from SpiffWorkflow.specs import WorkflowSpec
        file           = valueof(my_task, self.file)
        serializer     = self.serializer_cls()
        s_state        = open(file).read()
        wf_spec        = WorkflowSpec.deserialize(serializer, s_state, filename = file)
        outer_workflow = my_task.workflow.outer_workflow
        return SpiffWorkflow.Workflow(wf_spec, parent = outer_workflow)
Example #6
    def _create_subworkflow(self, my_task):
        from SpiffWorkflow.storage import XmlSerializer
        from SpiffWorkflow.specs import WorkflowSpec
        file           = valueof(my_task, self.file)
        serializer     = XmlSerializer()
        xml            = open(file).read()
        wf_spec        = WorkflowSpec.deserialize(serializer, xml, filename = file)
        outer_workflow = my_task.workflow.outer_workflow
        return SpiffWorkflow.Workflow(wf_spec, parent = outer_workflow)
Example #7
    def load_workflow_spec(self, folder, f):
        file = os.path.join(
            os.path.dirname(__file__), '..', 'data', 'spiff', folder, f)
        serializer = XmlSerializer()
        with open(file) as fp:
            xml = fp.read()
        self.wf_spec = WorkflowSpec.deserialize(
            serializer, xml, filename=file)
        self.workflow = Workflow(self.wf_spec)

    def testPickle(self):
        # Read a complete workflow.
        xml_file      = os.path.join(data_dir, 'spiff', 'workflow1.xml')
        xml           = open(xml_file).read()
        path_file     = os.path.splitext(xml_file)[0] + '.path'
        expected_path = open(path_file).read().strip().split('\n')
        wf_spec       = WorkflowSpec.deserialize(self.serializer, xml)

        for i in range(5):
            workflow = Workflow(wf_spec)
            self.doPickleSingle(workflow, expected_path)
Example #9
    def testSerializeWorkflowSpec(self):
        if self.serializer is None:
            return

        # Back to back testing.
        serialized1 = self.wf_spec.serialize(self.serializer)
        wf_spec     = WorkflowSpec.deserialize(self.serializer, serialized1)
        serialized2 = wf_spec.serialize(self.serializer)
        self.assert_(isinstance(serialized1, self.serial_type))
        self.assert_(isinstance(serialized2, self.serial_type))
        self.compareSerialization(serialized1, serialized2)

        # Test whether the restored workflow still works.
        path_file = os.path.join(data_dir, 'spiff', 'workflow1.path')
        path      = open(path_file).read()
        run_workflow(self, wf_spec, path, None)
Example #10
class WorkflowSpecTest(unittest.TestCase):
    def setUp(self):
        self.wf_spec = WorkflowSpec()

    def test_cyclic_wait(self):
        """
        Tests that we can detect when two wait tasks are waiting on each other.
        """
        task1 = Join(self.wf_spec, 'First')
        self.wf_spec.start.connect(task1)
        task2 = Join(self.wf_spec, 'Second')
        task1.connect(task2)

        task2.follow(task1)
        task1.follow(task2)

        results = self.wf_spec.validate()
        self.assertIn("Found loop with 'Second': Second->First then 'Second' "
                "again", results)
        self.assertIn("Found loop with 'First': First->Second then 'First' "
                "again", results)
Example #11
    def _on_ready_before_hook(self, my_task):
        from SpiffWorkflow.storage import XmlSerializer
        from SpiffWorkflow.specs import WorkflowSpec
        file           = valueof(my_task, self.file)
        serializer     = XmlSerializer()
        xml            = open(file).read()
        wf_spec        = WorkflowSpec.deserialize(serializer, xml, filename = file)
        outer_workflow = my_task.workflow.outer_workflow
        subworkflow    = SpiffWorkflow.Workflow(wf_spec, parent = outer_workflow)
        subworkflow.completed_event.connect(self._on_subworkflow_completed, my_task)

        # Integrate the tree of the subworkflow into the tree of this workflow.
        my_task._sync_children(self.outputs, Task.FUTURE)
        for child in my_task.children:
            child.task_spec._update_state(child)
            child._inherit_attributes()
        for child in subworkflow.task_tree.children:
            my_task.children.insert(0, child)
            child.parent = my_task

        my_task._set_internal_attribute(subworkflow = subworkflow)
Example #12
from SpiffWorkflow.specs import WorkflowSpec
from SpiffWorkflow.serializer.json import JSONSerializer

serializer = JSONSerializer()
with open('workflow-spec.json') as fp:
    workflow_json = fp.read()
spec = WorkflowSpec.deserialize(serializer, workflow_json)
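The same serializer also works in the other direction; a short round-trip sketch (the output filename is illustrative):

# Serialize the spec back to JSON with the same serializer.
workflow_json_out = spec.serialize(serializer)
with open('workflow-spec-out.json', 'w') as fp:
    fp.write(workflow_json_out)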
Example #13
    def setUp(self):
        self.wf_spec = WorkflowSpec()
Example #14
    def setUp(self):
        self.wf_spec = WorkflowSpec()
Example #15
    def testConstructor(self):
        spec = WorkflowSpec('my spec')
        self.assertEqual('my spec', spec.name)
Example #16
    def __init__(self):
        WorkflowSpec.__init__(self)

        # The first step of our workflow is to let the general confirm
        # the nuclear strike.

        #workflow_run = taskSpec(self, 'Ping','hostname')
        #self.start.connect(workflow_run)
        #workflow_execute = taskSpec(self, 'Shell', ["ping", "-t", "1", "127.0.0.1"])
        #workflow_run.connect(workflow_execute)

        # data = {'post_assign':{'name':'Test','value':'TestValues'}}
        # MultiInstance splits the current task; the third argument (1) is
        # the number of task instances to create.
        multi_inst = MultiInstance(self, 'workflow_task', 1)

        self.start.connect(multi_inst)


        # taskSpec is a task specification: it references the workflow spec
        # and is given a task spec name.
        workflow_1 = taskSpec(self, 'SQL')
        workflow_2 = taskSpec(self, '脚本')
        workflow_3 = taskSpec(self, 'SQL3')


        # connect() adds the given task spec as an output task.
        multi_inst.connect(workflow_1)
        multi_inst.connect(workflow_2)
        multi_inst.connect(workflow_3)

        # Join synchronizes the tasks that were split earlier. With the
        # MultiInstance pattern the join works across all instances; with
        # ThreadSplit it ignores instances from other threads.
        synch_1 = Join(self, 'synch_1')
        #self.start.connect(synch_1)

        workflow_1.connect(synch_1)
        workflow_2.connect(synch_1)
        workflow_3.connect(synch_1)

        #gate_test = Gate(self,'gate1','synch_1')
        #synch_1.connect(gate_test)

        # Simple implements a task with one or more inputs and any number
        # of outputs. If multiple inputs are connected, the task performs
        # an implicit multi-merge; if multiple outputs are connected, it
        # performs an implicit parallel split.
        end = Simple(self, 'End')

        # MultiChoice represents an if-condition in which several
        # conditions may match at the same time, creating multiple
        # outgoing branches. The task has one or more inputs and one or
        # more incoming branches.
        # multichoice = MultiChoice(self, 'multi_choice_1')
        #
        synch_1.connect(end)

        #gate_test.connect(end)
        #synch_1.connect(end)
        #synch_1.connect(multi_inst)

        #end = Simple(self, 'End')
        #workflow_execute.connect(end)
        # As soon as all tasks are either "completed" or "aborted", the
        # workflow implicitly ends.
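
A minimal sketch of driving a spec like this one. The class name MultiInstanceDemoSpec is hypothetical (the excerpt only shows its __init__), and taskSpec is a project-local class that is not shown here:

from SpiffWorkflow import Workflow

spec = MultiInstanceDemoSpec()   # hypothetical name for the class above
workflow = Workflow(spec)
workflow.complete_all()
print(workflow.is_completed())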


# ids = []
# for i in ids2:
#     afterNodeIds = [x for x in lineList if x['from'] == i['to']]
#     for each in afterNodeIds:
#         beforeId = each['from']
#         afterId = each['to']
#         task, afterNodeName, beforenode_index = self.buildFLows(beforeId, afterId, nodeIdList, nodeList)
#         task_list[task.name] = task
#         beforeNodeName = nodeList[beforenode_index]['name']
#         beforeNode = task_list[beforeNodeName]
#
#         if task_list[afterNodeName] not in beforeNode.outputs:
#             beforeNode.connect(task_list[afterNodeName])
#         ids.append(each)
#
# ids2 = []
# task = None
# for i in ids:
#     afterNodeIds = [x for x in lineList if x['from'] == i['to']]
#     toNode = [x['to'] for x in afterNodeIds]
#     # num=len(set(toNode))
#     for each in afterNodeIds:
#         beforeId = each['from']
#         afterId = each['to']
#         task, afterNodeName, beforenode_index = self.buildFLows(beforeId, afterId, nodeIdList, nodeList)
#         # num-=1
#         task_list[task.name] = task
#         beforeNodeName = nodeList[beforenode_index]['name']
#         beforeNode = task_list[beforeNodeName]
#
#         if task_list[afterNodeName] not in beforeNode.outputs:
#             beforeNode.connect(task_list[afterNodeName])
#         ids2.append(each)
#
Example #17
    def testSerializationWithoutKwargs(self):
        new_wf_spec = WorkflowSpec()
        serializer = DictionarySerializer()
        nokw = Celery(self.wf_spec,
                      'testnokw',
                      'call.name',
                      call_args=[Attrib('the_attribute'), 1])
        data = nokw.serialize(serializer)
        nokw2 = Celery.deserialize(serializer, new_wf_spec, data)
        self.assertDictEqual(nokw.kwargs, nokw2.kwargs)

        kw = Celery(self.wf_spec,
                    'testkw',
                    'call.name',
                    call_args=[Attrib('the_attribute'), 1],
                    some_arg={"key": "value"})
        data = kw.serialize(serializer)
        kw2 = Celery.deserialize(serializer, new_wf_spec, data)
        self.assertDictEqual(kw.kwargs, kw2.kwargs)

        # Has kwargs, but they belong to TaskSpec
        kw_defined = Celery(self.wf_spec,
                            'testkwdef',
                            'call.name',
                            call_args=[Attrib('the_attribute'), 1],
                            some_ref=Attrib('value'),
                            defines={"key": "value"})
        data = kw_defined.serialize(serializer)
        kw_defined2 = Celery.deserialize(serializer, new_wf_spec, data)
        self.assertIsInstance(kw_defined2.kwargs['some_ref'], Attrib)

        # Comes from live data. Bug not identified, but there we are...
        data = {
            u'inputs': [u'Wait:1'],
            u'lookahead': 2,
            u'description': u'',
            u'outputs': [],
            u'args': [[u'Attrib', u'ip'],
                      [u'spiff:value', u'dc455016e2e04a469c01a866f11c0854']],
            u'manual': False,
            u'data': {u'R': u'1'},
            u'locks': [],
            u'pre_assign': [],
            u'call': u'call.x',
            u'internal': False,
            u'post_assign': [],
            u'id': 8,
            u'result_key': None,
            u'defines': {u'R': u'1'},
            u'class': u'SpiffWorkflow.specs.Celery.Celery',
            u'name': u'RS1:1'
        }
        Celery.deserialize(serializer, new_wf_spec, data)
Example #18
    def test_Merge_data_merging(self):
        """Test that Merge task actually merges data"""
        wf_spec = WorkflowSpec()
        first = Simple(wf_spec, 'first')
        second = Simple(wf_spec, 'second')
        third = Simple(wf_spec, 'third')
        bump = Simple(wf_spec, 'bump')
        fourth = Simple(wf_spec, 'fourth')
        merge1 = Merge(wf_spec, 'merge 1')
        simple1 = Simple(wf_spec, 'simple 1')
        merge2 = Merge(wf_spec, 'merge 2')
        simple2 = Simple(wf_spec, 'simple 2')
        unmerged = Simple(wf_spec, 'unmerged')

        wf_spec.start.connect(first)
        wf_spec.start.connect(second)
        wf_spec.start.connect(third)
        wf_spec.start.connect(bump)
        bump.connect(fourth)  # Test join at different depths in tree

        first.connect(merge1)
        second.connect(merge1)
        second.connect(unmerged)

        first.connect(merge2)
        second.connect(merge2)
        third.connect(merge2)
        fourth.connect(merge2)

        merge1.connect(simple1)
        merge2.connect(simple2)

        workflow = Workflow(wf_spec)
        workflow.task_tree.set_data(everywhere=1)
        for task in workflow.get_tasks():
            task.set_data(**{'name': task.get_name(), task.get_name(): 1})
        workflow.complete_all()
        self.assertTrue(workflow.is_completed())
        found = {}
        for task in workflow.get_tasks():
            if task.task_spec is simple1:
                self.assert_('first' in task.data)
                self.assert_('second' in task.data)
                self.assertEqual(
                    task.data, {
                        'Start': 1,
                        'merge 1': 1,
                        'name': 'Start',
                        'simple 1': 1,
                        'second': 1,
                        'first': 1
                    })
                found['simple1'] = task
            if task.task_spec is simple2:
                self.assert_('first' in task.data)
                self.assert_('second' in task.data)
                self.assert_('third' in task.data)
                self.assert_('fourth' in task.data)
                self.assertEqual(
                    task.data, {
                        'merge 2': 1,
                        'simple 2': 1,
                        'name': 'Start',
                        'third': 1,
                        'bump': 1,
                        'Start': 1,
                        'second': 1,
                        'first': 1,
                        'fourth': 1
                    })
                found['simple2'] = task
            if task.task_spec is unmerged:
                self.assertEqual(task.data, {
                    'Start': 1,
                    'second': 1,
                    'name': 'Start',
                    'unmerged': 1
                })
                found['unmerged'] = task
        self.assert_('simple1' in found)
        self.assert_('simple2' in found)
        self.assert_('unmerged' in found)
Example #19
    def setUp(self):
        self.wf_spec = WorkflowSpec()
        self.spec = self.create_instance()
Example #20
    def load_workflow(self, test_file_path):
        json_string = load_json_string(test_file_path)

        workflow_spec = WorkflowSpec.deserialize(self.serializer, 
                                                 json_string)
        self.workflow = Workflow(workflow_spec)
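load_json_string is a helper that is not shown in this example; presumably it just reads the file as text, roughly:

def load_json_string(file_path):
    # Hypothetical helper assumed by the example above: return the raw
    # JSON text of the given file.
    with open(file_path) as fp:
        return fp.read()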
Example #21
import json
from SpiffWorkflow import Workflow
from SpiffWorkflow.specs import WorkflowSpec
from SpiffWorkflow.serializer.json import JSONSerializer

# Load from JSON
with open('nuclear.json') as fp:
    workflow_json = fp.read()
serializer = JSONSerializer()
spec = WorkflowSpec.deserialize(serializer, workflow_json)

# Alternatively, create an instance of the Python based specification.
#from nuclear import NuclearStrikeWorkflowSpec
#spec = NuclearStrikeWorkflowSpec()

# Create the workflow.
workflow = Workflow(spec)

# Execute until all tasks are done or require manual intervention.
# For the sake of this tutorial, we ignore the "manual" flag on the
# tasks. In practice, you probably don't want to do that.
workflow.complete_all(halt_on_manual=False)

# Alternatively, this is what a UI would do for a manual task.
#workflow.complete_task_from_id(...)
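A sketch of what that manual step might look like; hedged, since the task-query API differs between SpiffWorkflow versions:

# Pick one READY task and complete it by id, as a UI might.
from SpiffWorkflow import Task

ready_tasks = workflow.get_tasks(Task.READY)
if ready_tasks:
    workflow.complete_task_from_id(ready_tasks[0].id)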
Example #22
class WorkflowSpecTest(unittest.TestCase):
    CORRELATE = WorkflowSpec

    def setUp(self):
        self.wf_spec = WorkflowSpec()

    def testConstructor(self):
        spec = WorkflowSpec('my spec')
        self.assertEqual('my spec', spec.name)

    def testGetTaskSpecFromName(self):
        pass  #FIXME

    def testGetDump(self):
        pass  #FIXME

    def testDump(self):
        pass  #FIXME

    def doPickleSingle(self, workflow, expected_path):
        taken_path = track_workflow(workflow.spec)

        # Execute a random number of steps.
        for i in xrange(randint(0, len(workflow.spec.task_specs))):
            workflow.complete_next()

        # Store the workflow instance in a file.
        output = open(data_file, 'wb')
        pickle.dump(workflow, output, -1)
        output.close()
        before = workflow.get_dump()

        # Load the workflow instance from a file and delete the file.
        input = open(data_file, 'rb')
        workflow = pickle.load(input)
        input.close()
        os.remove(data_file)
        after = workflow.get_dump()

        # Make sure that the state of the workflow did not change.
        self.assert_(before == after, 'Before:\n' + before + '\n' \
                                    + 'After:\n'  + after  + '\n')

        # Re-connect signals, because the pickle dump now only contains a
        # copy of taken_path.
        taken_path = track_workflow(workflow.spec, taken_path)

        # Run the rest of the workflow.
        workflow.complete_all()
        after = workflow.get_dump()
        self.assert_(workflow.is_completed(), 'Workflow not complete:' + after)
        #taken_path = '\n'.join(taken_path) + '\n'
        if taken_path != expected_path:
            for taken, expected in zip(taken_path, expected_path):
                print "TAKEN:   ", taken
                print "EXPECTED:", expected
        self.assertEqual(expected_path, taken_path)

    def testSerialize(self):
        # Read a complete workflow spec.
        xml_file = os.path.join(data_dir, 'spiff', 'workflow1.xml')
        xml = open(xml_file).read()
        path_file = os.path.splitext(xml_file)[0] + '.path'
        expected_path = open(path_file).read().strip().split('\n')
        wf_spec = WorkflowSpec.deserialize(serializer, xml)

        for i in xrange(5):
            workflow = Workflow(wf_spec)
            self.doPickleSingle(workflow, expected_path)

    def testValidate(self):
        """
        Tests that we can detect when two wait tasks are waiting on each
        other.
        """
        task1 = Join(self.wf_spec, 'First')
        self.wf_spec.start.connect(task1)
        task2 = Join(self.wf_spec, 'Second')
        task1.connect(task2)

        task2.follow(task1)
        task1.follow(task2)

        results = self.wf_spec.validate()
        self.assert_("Found loop with 'Second': Second->First then 'Second' "
                     "again" in results)
        self.assert_("Found loop with 'First': First->Second then 'First' "
                     "again" in results)
Example #23
class WorkflowSpecTest(unittest.TestCase):
    CORRELATE = WorkflowSpec

    def setUp(self):
        self.wf_spec = WorkflowSpec()

    def testConstructor(self):
        spec = WorkflowSpec('my spec')
        self.assertEqual('my spec', spec.name)

    def testGetTaskSpecFromName(self):
        pass #FIXME

    def testGetDump(self):
        pass #FIXME

    def testDump(self):
        pass #FIXME

    def doPickleSingle(self, workflow, expected_path):
        taken_path = track_workflow(workflow.spec)

        # Execute a random number of steps.
        for i in range(randint(0, len(workflow.spec.task_specs))):
            workflow.complete_next()

        # Store the workflow instance in a file.
        output = open(data_file, 'wb')
        pickle.dump(workflow, output, -1)
        output.close()
        before = workflow.get_dump()

        # Load the workflow instance from a file and delete the file.
        input = open(data_file, 'rb')
        workflow = pickle.load(input)
        input.close()
        os.remove(data_file)
        after = workflow.get_dump()

        # Make sure that the state of the workflow did not change.
        self.assert_(before == after, 'Before:\n' + before + '\n' \
                                    + 'After:\n'  + after  + '\n')

        # Re-connect signals, because the pickle dump now only contains a
        # copy of taken_path.
        taken_path = track_workflow(workflow.spec, taken_path)

        # Run the rest of the workflow.
        workflow.complete_all()
        after = workflow.get_dump()
        self.assert_(workflow.is_completed(), 'Workflow not complete:' + after)
        #taken_path = '\n'.join(taken_path) + '\n'
        if taken_path != expected_path:
            for taken, expected in zip(taken_path, expected_path):
                print("TAKEN:   ", taken)
                print("EXPECTED:", expected)
        self.assertEqual(expected_path, taken_path)

    def testSerialize(self):
        # Read a complete workflow spec.
        xml_file      = os.path.join(data_dir, 'spiff', 'workflow1.xml')
        xml           = open(xml_file).read()
        path_file     = os.path.splitext(xml_file)[0] + '.path'
        expected_path = open(path_file).read().strip().split('\n')
        wf_spec       = WorkflowSpec.deserialize(serializer, xml)

        for i in range(5):
            workflow = Workflow(wf_spec)
            self.doPickleSingle(workflow, expected_path)

    def testValidate(self):
        """
        Tests that we can detect when two wait tasks are waiting on each
        other.
        """
        task1 = Join(self.wf_spec, 'First')
        self.wf_spec.start.connect(task1)
        task2 = Join(self.wf_spec, 'Second')
        task1.connect(task2)

        task2.follow(task1)
        task1.follow(task2)

        results = self.wf_spec.validate()
        self.assert_("Found loop with 'Second': Second->First then 'Second' "
                "again" in results)
        self.assert_("Found loop with 'First': First->Second then 'First' "
                "again" in results)