    def test_pop_pipeline(self):
        subproc_pipeline = PipelineObject()
        process = PipelineProcess.objects.create()
        pipeline_stack = Stack(['pipeline1', 'pipeline2'])
        subprocess_stack = Stack(['subprocess1', 'subprocess2'])
        children = ['child1', 'child2']
        root_pipeline = 'root_pipeline'
        mock_snapshot = ProcessSnapshot.objects.create_snapshot(
            pipeline_stack=pipeline_stack,
            children=children,
            root_pipeline=root_pipeline,
            subprocess_stack=subprocess_stack)
        process.snapshot = mock_snapshot
        process.id = uniqid()

        process.push_pipeline(subproc_pipeline, is_subprocess=True)
        self.assertEqual(process.top_pipeline, subproc_pipeline)
        self.assertTrue(
            SubProcessRelationship.objects.filter(
                subprocess_id=subproc_pipeline.id,
                process_id=process.id).exists())

        pop_pipeline = process.pop_pipeline()
        self.assertEqual(pop_pipeline.id, subproc_pipeline.id)
        self.assertFalse(
            SubProcessRelationship.objects.filter(
                subprocess_id=subproc_pipeline.id,
                process_id=process.id).exists())

        pop_pipeline = process.pop_pipeline()
        self.assertEqual(pop_pipeline, 'pipeline2')

        pop_pipeline = process.pop_pipeline()
        self.assertEqual(pop_pipeline, 'pipeline1')
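
    # Hedged sketch of the two methods this test drives, reconstructed from
    # the assertions above; this is an assumption, not the shipped
    # PipelineProcess implementation.
    def push_pipeline(self, pipeline, is_subprocess=False):
        # push onto the stack and record the subprocess relationship
        self.pipeline_stack.push(pipeline)
        if is_subprocess:
            SubProcessRelationship.objects.create(
                subprocess_id=pipeline.id, process_id=self.id)

    def pop_pipeline(self):
        # pop the top pipeline and drop any subprocess relationship it
        # holds with this process
        pipeline = self.pipeline_stack.pop()
        SubProcessRelationship.objects.filter(
            subprocess_id=getattr(pipeline, 'id', None),
            process_id=self.id).delete()
        return pipeline
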
    def test_clean_children(self):
        from pipeline.engine.core.data import del_object

        mock_snapshot = ProcessSnapshot(
            data={
                '_pipeline_stack': Stack(),
                '_children': ['1', '2', '3'],
                '_root_pipeline': IdentifyObject(),
                '_subprocess_stack': Stack([])
            })
        mock_snapshot.clean_children = MagicMock()
        mock_snapshot.save = MagicMock()

        process = PipelineProcess.objects.create()
        process.snapshot = mock_snapshot

        process.clean_children()
        del_object.assert_has_calls([
            mock.call(process._context_key('1')),
            mock.call(process._data_key('1')),
            mock.call(process._context_key('2')),
            mock.call(process._data_key('2')),
            mock.call(process._context_key('3')),
            mock.call(process._data_key('3')),
        ])
        mock_snapshot.clean_children.assert_called()
        mock_snapshot.save.assert_called()
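
    # Hedged sketch of clean_children as implied by the asserted del_object
    # calls: drop each child's saved context/data objects, then clean and
    # persist the snapshot (assumption, not the shipped implementation).
    def clean_children(self):
        from pipeline.engine.core.data import del_object
        for child_id in self.children:
            del_object(self._context_key(child_id))
            del_object(self._data_key(child_id))
        self.snapshot.clean_children()
        self.snapshot.save()
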
    def setUp(self):
        self.pipeline_stack = Stack(['pipeline1', 'pipeline2'])
        self.subprocess_stack = Stack(['subprocess1', 'subprocess2'])
        self.children = ['child1', 'child2']
        self.root_pipeline = 'root_pipeline'
        self.snapshot = ProcessSnapshot.objects.create_snapshot(
            pipeline_stack=self.pipeline_stack,
            children=self.children,
            root_pipeline=self.root_pipeline,
            subprocess_stack=self.subprocess_stack
        )

    def test_subproc_sleep_check(self):
        mock_snapshot = ProcessSnapshot.objects.create_snapshot(
            pipeline_stack=Stack(),
            children=[],
            root_pipeline=IdentifyObject(),
            subprocess_stack=Stack([1, 2, 3, 4]))
        process = PipelineProcess.objects.create()
        process.snapshot = mock_snapshot

        def return_all_running(*args, **kwargs):
            return [
                StatusObject(id=1, state=states.RUNNING),
                StatusObject(id=2, state=states.RUNNING),
                StatusObject(id=3, state=states.RUNNING),
                StatusObject(id=4, state=states.RUNNING)
            ]

        def return_one_suspended(*args, **kwargs):
            return [
                StatusObject(id=1, state=states.RUNNING),
                StatusObject(id=2, state=states.SUSPENDED),
                StatusObject(id=3, state=states.RUNNING),
                StatusObject(id=4, state=states.RUNNING)
            ]

        def return_first_suspended(*args, **kwargs):
            return [
                StatusObject(id=1, state=states.SUSPENDED),
                StatusObject(id=2, state=states.RUNNING),
                StatusObject(id=3, state=states.RUNNING),
                StatusObject(id=4, state=states.RUNNING)
            ]

        def return_last_suspended(*args, **kwargs):
            return [
                StatusObject(id=1, state=states.RUNNING),
                StatusObject(id=2, state=states.RUNNING),
                StatusObject(id=3, state=states.RUNNING),
                StatusObject(id=4, state=states.SUSPENDED)
            ]

        with mock.patch(PIPELINE_STATUS_FILTER, return_all_running):
            self.assertEqual(process.subproc_sleep_check(),
                             (False, [1, 2, 3, 4]))

        with mock.patch(PIPELINE_STATUS_FILTER, return_one_suspended):
            self.assertEqual(process.subproc_sleep_check(), (True, [1]))

        with mock.patch(PIPELINE_STATUS_FILTER, return_first_suspended):
            self.assertEqual(process.subproc_sleep_check(), (True, []))

        with mock.patch(PIPELINE_STATUS_FILTER, return_last_suspended):
            self.assertEqual(process.subproc_sleep_check(), (True, [1, 2, 3]))
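
    # Hedged sketch of subproc_sleep_check reconstructed from the four cases
    # above: walk the subprocess statuses in stack order and return
    # (need_sleep, ids whose status may still be adjusted); everything before
    # the first SUSPENDED subprocess is adjustable. An assumption, not the
    # shipped implementation.
    def subproc_sleep_check(self):
        statuses = Status.objects.filter(id__in=self.subprocess_stack)
        adjustable = []
        for status in statuses:
            if status.state == states.SUSPENDED:
                return True, adjustable
            adjustable.append(status.id)
        return False, adjustable
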
    def test_join(self):
        children = [IdentifyObject(), IdentifyObject(), IdentifyObject()]
        mock_snapshot = ProcessSnapshot.objects.create_snapshot(
            pipeline_stack=Stack(),
            children=[],
            root_pipeline='root_pipeline',
            subprocess_stack=Stack())
        process = PipelineProcess.objects.create()
        process.snapshot = mock_snapshot

        process.join(children)
        self.assertEqual(process.need_ack, len(children))
        for i in range(len(children)):
            self.assertEqual(process.children[i], children[i].id)
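
    # Hedged sketch of join matching the assertions above: remember how many
    # ACKs to wait for and record the children's ids (assumption, not the
    # shipped implementation).
    def join(self, children):
        self.need_ack = len(children)
        for child in children:
            self.children.append(child.id)
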
    def test_destroy_all(self):
        mock_snapshot = ProcessSnapshot(
            data={
                '_pipeline_stack': Stack(),
                '_children': [],
                '_root_pipeline': PipelineObject(),
                '_subprocess_stack': Stack([])
            }
        )
        process = PipelineProcess.objects.create()
        process.snapshot = mock_snapshot
        process.is_alive = False
        process.destroy_all()
        process.destroy.assert_not_called()

        process.is_alive = True
        process.destroy_all()
        process.destroy.assert_called()
        process.destroy.reset_mock()

        mock_snapshot.data['_children'] = [1, 2, 3]

        child_1 = Object()
        child_1.children = []
        child_1.destroy = MagicMock()
        child_1.is_alive = True
        child_2 = Object()
        child_2.children = []
        child_2.destroy = MagicMock()
        child_2.is_alive = False
        child_3 = Object()
        child_3.children = [1]
        child_3.destroy = MagicMock()
        child_3.is_alive = True

        def get_child(id):
            return {
                1: child_1,
                2: child_2,
                3: child_3
            }[id]

        with mock.patch(PIPELINE_PROCESS_GET, get_child):
            process.destroy_all()
            child_1.destroy.assert_called()
            child_2.destroy.assert_not_called()
            child_3.destroy.assert_called()
            self.assertEqual(child_1.destroy.call_count, 2)
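
    # Hedged sketch of destroy_all matching the call counts asserted above:
    # depth-first destroy of the process tree, skipping processes that are no
    # longer alive (assumption; note child_1 is destroyed twice because it
    # also appears among child_3's children).
    def destroy_all(self):
        for child_id in self.children:
            child = PipelineProcess.objects.get(id=child_id)
            child.destroy_all()
        if self.is_alive:
            self.destroy()
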
def validate_converge_gateway(data):
    """
    检测对应汇聚网关及合法性
    """
    converge_list = {}
    gateway_list = {}

    for i, item in data['gateways'].items():
        node = {
            "incoming": item['incoming'] if isinstance(item['incoming'], list) else [item['incoming']],
            "outgoing": item['outgoing'] if isinstance(item['outgoing'], list) else [item['outgoing']],
            "type": item["type"],
            "target": [],
            "match": None,
            "id": item['id']
        }

        for index in node['outgoing']:
            index = data['flows'][index]['target']
            while index in data['activities']:
                index = data['flows'][data['activities'][index]['outgoing']]['target']
            node['target'].append(index)

        if item['type'] == "ConvergeGateway":
            converge_list[i] = node
        else:
            gateway_list[i] = node

    for i in gateway_list:
        if not gateway_list[i]['match']:
            new_find_closest_converge(converge_list, gateway_list, i, Stack(), data['end_event']['id'])

    for i in gateway_list:
        if gateway_list[i]['match']:
            data['gateways'][i]['converge_gateway_id'] = gateway_list[i]['match']
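
# Hypothetical usage sketch: a parallel gateway 'pg' whose two branches both
# flow into converge gateway 'cg'. The ids and layout are made up for
# illustration; after validation 'pg' should presumably be matched to 'cg'.
def _example_validate_converge_gateway():
    data = {
        'activities': {},
        'flows': {
            'f1': {'target': 'cg'},
            'f2': {'target': 'cg'},
            'f3': {'target': 'end'},
        },
        'gateways': {
            'pg': {'id': 'pg', 'type': 'ParallelGateway',
                   'incoming': 'f0', 'outgoing': ['f1', 'f2']},
            'cg': {'id': 'cg', 'type': 'ConvergeGateway',
                   'incoming': ['f1', 'f2'], 'outgoing': 'f3'},
        },
        'end_event': {'id': 'end'},
    }
    validate_converge_gateway(data)
    return data['gateways']['pg'].get('converge_gateway_id')  # expected: 'cg'
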
    def test_top(self):
        stack = Stack()
        self.assertRaises(IndexError, stack.top)
        stack.push(1)
        self.assertEqual(stack.top(), 1)
        self.assertEqual(stack, [1])
        stack.push(2)
        self.assertEqual(stack.top(), 2)
        self.assertEqual(stack, [1, 2])
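
    # Minimal sketch of the list-backed Stack these tests exercise; an
    # assumption consistent with the assertions (the real class lives in
    # pipeline.utils and may differ). The name StackSketch is hypothetical.
    class StackSketch(list):
        def push(self, item):
            self.append(item)

        def pop(self):
            # list.pop already raises IndexError on an empty stack
            return super(StackSketch, self).pop()

        def top(self):
            if not self:
                raise IndexError('stack is empty')
            return self[-1]
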
    def test_properties(self):
        process = PipelineProcess.objects.create()
        pipeline_stack = Stack(['pipeline1', 'pipeline2'])
        subprocess_stack = Stack(['subprocess1', 'subprocess2'])
        children = ['child1', 'child2']
        root_pipeline = 'root_pipeline'
        mock_snapshot = ProcessSnapshot.objects.create_snapshot(
            pipeline_stack=pipeline_stack,
            children=children,
            root_pipeline=root_pipeline,
            subprocess_stack=subprocess_stack)
        process.snapshot = mock_snapshot
        self.assertEqual(process.pipeline_stack, pipeline_stack)
        self.assertEqual(process.children, children)
        self.assertEqual(process.root_pipeline, root_pipeline)
        self.assertEqual(process.top_pipeline, pipeline_stack.top())
        self.assertEqual(process.subprocess_stack, subprocess_stack)

    def test_in_subprocess__false(self):
        snapshot = ProcessSnapshot(data={
            '_pipeline_stack': Stack([1]),
        })
        process = PipelineProcess()
        process.snapshot = snapshot

        self.assertFalse(process.in_subprocess)
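
    # Hedged sketch of the property under test (assumption): only more than
    # one pipeline on the stack means the process is inside a subprocess.
    @property
    def in_subprocess(self):
        return len(self.pipeline_stack) > 1
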
    def test_exit_gracefully(self):
        mock_snapshot = ProcessSnapshot(
            data={
                '_pipeline_stack': Stack(),
                '_children': ['1', '2', '3'],
                '_root_pipeline': PipelineObject(),
                '_subprocess_stack': Stack([])
            })

        process = PipelineProcess.objects.create()
        process.snapshot = mock_snapshot
        process.sleep = MagicMock()
        e = Exception('test')

        process.current_node_id = uniqid()
        process.exit_gracefully(e)
        Status.objects.fail.assert_called_with(process.current_node_id,
                                               ex_data=traceback.format_exc(e))
        Status.objects.raw_fail.assert_not_called()
        process.sleep.assert_called_with(adjust_status=True)

        Status.objects.fail.reset_mock()
        process.sleep.reset_mock()

        # when stack is not empty
        mock_snapshot.data['_pipeline_stack'] = Stack([PipelineObject()])
        process.current_node_id = uniqid()
        process.exit_gracefully(e)
        Status.objects.fail.assert_called_with(process.current_node_id,
                                               ex_data=traceback.format_exc(e))
        Status.objects.raw_fail.assert_not_called()
        process.sleep.assert_called_with(adjust_status=True)

        Status.objects.fail.reset_mock()
        process.sleep.reset_mock()

        # when current_node is none
        top_pipeline = PipelineObject()
        top_pipeline.node = MagicMock(return_value=None)
        mock_snapshot.data['_pipeline_stack'] = Stack([top_pipeline])
        process.current_node_id = uniqid()
        process.exit_gracefully(e)
        Status.objects.fail.assert_not_called()
        Status.objects.raw_fail.assert_called_with(
            process.current_node_id, ex_data=traceback.format_exc(e))
        process.sleep.assert_called_with(adjust_status=True)

    def test_save(self):
        process = PipelineProcess.objects.create()
        mock_snapshot = ProcessSnapshot.objects.create_snapshot(
            pipeline_stack=Stack(),
            children=[1, 2, 3, 4],
            root_pipeline=IdentifyObject(),
            subprocess_stack=Stack([]))
        mock_snapshot.save = MagicMock()
        process.snapshot = mock_snapshot

        process.save(save_snapshot=False)
        mock_snapshot.save.assert_not_called()
        process.save(save_snapshot=True)
        mock_snapshot.save.assert_called()
        mock_snapshot.save.reset_mock()
        process.save()
        mock_snapshot.save.assert_called()
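
    # Hedged sketch of save matching the assertions above: persisting the
    # process optionally persists its snapshot first (assumption, not the
    # shipped implementation).
    def save(self, save_snapshot=True):
        if save_snapshot and self.snapshot:
            self.snapshot.save()
        return super(PipelineProcess, self).save()
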
    def test_revoke_subprocess(self):
        mock_snapshot = ProcessSnapshot(
            data={
                '_pipeline_stack': Stack(),
                '_children': [],
                '_root_pipeline': PipelineObject(),
                '_subprocess_stack': Stack([1, 2, 3, 4])
            }
        )

        process = PipelineProcess.objects.create(id=uniqid())
        process.snapshot = mock_snapshot
        process.sleep = MagicMock()

        process.revoke_subprocess()
        Status.objects.batch_transit.assert_called_with(id_list=[1, 2, 3, 4], state=states.REVOKED)

        child_1 = Object()
        child_2 = Object()
        child_3 = Object()
        child_1.revoke_subprocess = MagicMock()
        child_2.revoke_subprocess = MagicMock()
        child_3.revoke_subprocess = MagicMock()

        def get_child(id):
            return {
                1: child_1,
                2: child_2,
                3: child_3
            }[id]

        mock_snapshot.data['_children'] = [1, 2, 3]

        with mock.patch(PIPELINE_PROCESS_GET, get_child):
            process.revoke_subprocess()
            Status.objects.batch_transit.assert_called_with(id_list=[1, 2, 3, 4], state=states.REVOKED)
            child_1.revoke_subprocess.assert_called()
            child_2.revoke_subprocess.assert_called()
            child_3.revoke_subprocess.assert_called()

        # test when subprocess_stack and children return None
        process = PipelineProcess.objects.create(id=uniqid())
        self.assertIsNone(process.subprocess_stack)
        self.assertIsNone(process.children)
        process.revoke_subprocess()
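
    # Hedged sketch of revoke_subprocess matching the assertions above:
    # revoke every subprocess status on the stack, then recurse into child
    # processes, tolerating a missing snapshot (assumption, not the shipped
    # implementation).
    def revoke_subprocess(self):
        if self.subprocess_stack:
            Status.objects.batch_transit(
                id_list=list(self.subprocess_stack), state=states.REVOKED)
        if self.children:
            for child_id in self.children:
                PipelineProcess.objects.get(id=child_id).revoke_subprocess()
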
    def test_root_sleep_check(self):
        def return_suspended(*args, **kwargs):
            return states.SUSPENDED

        def return_revoked(*args, **kwargs):
            return states.REVOKED

        def return_blocked(*args, **kwargs):
            return states.BLOCKED

        another_status = MagicMock()
        status = [
            states.CREATED, states.READY, states.RUNNING, states.FINISHED,
            states.FAILED
        ]
        another_status.side_effect = status

        mock_snapshot = ProcessSnapshot.objects.create_snapshot(
            pipeline_stack=Stack(),
            children=[],
            root_pipeline=IdentifyObject(),
            subprocess_stack=Stack())
        process = PipelineProcess.objects.create()
        process.snapshot = mock_snapshot

        with mock.patch(PIPELINE_STATUS_STATE_FOR, return_suspended):
            self.assertEqual(process.root_sleep_check(),
                             (True, states.SUSPENDED))

        with mock.patch(PIPELINE_STATUS_STATE_FOR, return_revoked):
            self.assertEqual(process.root_sleep_check(),
                             (True, states.REVOKED))

        with mock.patch(PIPELINE_STATUS_STATE_FOR, return_blocked):
            self.assertEqual(process.root_sleep_check(),
                             (True, states.BLOCKED))
            process.parent_id = 'parent_id'
            self.assertEqual(process.root_sleep_check(),
                             (False, states.BLOCKED))

        with mock.patch(PIPELINE_STATUS_STATE_FOR, another_status):
            for s in status:
                self.assertEqual(process.root_sleep_check(), (False, s))
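
    # Hedged sketch of root_sleep_check reconstructed from the cases above:
    # SUSPENDED and REVOKED always sleep; BLOCKED sleeps only for root
    # processes (no parent); every other state keeps running. An assumption,
    # not the shipped implementation.
    def root_sleep_check(self):
        state = Status.objects.state_for(self.root_pipeline.id,
                                         may_not_exist=True)
        if state in {states.SUSPENDED, states.REVOKED}:
            return True, state
        if state == states.BLOCKED:
            return not self.parent_id, state
        return False, state
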
    def test_sync_with_children(self):
        outputs = {'output_key': 'output_value'}
        variables = {'variable_key': 'variable_value'}

        process = PipelineProcess.objects.create()
        context = Object()
        context.update_global_var = MagicMock()
        context.sync_change = MagicMock()

        data = Object()
        data.update_outputs = MagicMock()

        mock_snapshot = ProcessSnapshot(
            data={
                '_pipeline_stack': Stack([PipelineObject(context=context,
                                                         data=data)]),
                '_children': [1, 2, 3, 4],
                '_root_pipeline': IdentifyObject(),
                '_subprocess_stack': Stack([])
            })
        process.snapshot = mock_snapshot
        process.clean_children = MagicMock()

        def return_none(*args, **kwargs):
            return None

        def return_mock(id):
            if id.endswith('data'):
                return DataObject(outputs=outputs)
            if id.endswith('context'):
                return ContextObject(variables=variables)

        with mock.patch(PIPELINE_ENGINE_CORE_DATA_GET_OBJECT, return_none):
            self.assertRaises(exceptions.ChildDataSyncError,
                              process.sync_with_children)

        with mock.patch(PIPELINE_ENGINE_CORE_DATA_GET_OBJECT, return_mock):
            process.sync_with_children()
            context.sync_change.assert_called()
            data.update_outputs.assert_called_with(outputs)
            process.clean_children.assert_called()
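
    # Hedged sketch of sync_with_children: pull each child's saved context
    # and data back from the data backend, merge them into the top pipeline,
    # then clean up. The get_object and get_outputs names are assumptions,
    # not confirmed API.
    def sync_with_children(self):
        from pipeline.engine.core.data import get_object
        for child_id in self.children:
            context = get_object(self._context_key(child_id))
            data = get_object(self._data_key(child_id))
            if context is None or data is None:
                raise exceptions.ChildDataSyncError(
                    'sync data with children failed')
            self.top_pipeline.context.sync_change(context)
            self.top_pipeline.data.update_outputs(data.get_outputs())
        self.clean_children()
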
    def test_destroy(self):
        from pipeline.engine.models import ProcessCeleryTask

        process = PipelineProcess.objects.create()
        process.id = uniqid()
        process.current_node_id = 'current_node_id'

        mock_snapshot = ProcessSnapshot.objects.create_snapshot(
            pipeline_stack=Stack(),
            children=[1, 2, 3, 4],
            root_pipeline=IdentifyObject(),
            subprocess_stack=Stack([]))
        mock_snapshot.delete = MagicMock()
        process.snapshot = mock_snapshot

        process.destroy()
        self.assertFalse(process.is_alive)
        self.assertEqual(process.current_node_id, '')
        self.assertIsNone(process.snapshot)
        mock_snapshot.delete.assert_called()
        ProcessCeleryTask.objects.destroy.assert_called_with(process.id)
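
    # Hedged sketch of destroy matching the assertions above: mark the
    # process dead, drop its snapshot, and destroy its celery task binding
    # (assumption, not the shipped implementation).
    def destroy(self):
        self.is_alive = False
        self.current_node_id = ''
        snapshot, self.snapshot = self.snapshot, None
        self.save()
        snapshot.delete()
        ProcessCeleryTask.objects.destroy(self.id)
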
    def test_sleep(self):
        from pipeline.engine.models import ProcessCeleryTask
        pipeline = PipelineObject()
        process = PipelineProcess.objects.prepare_for_pipeline(pipeline)

        process.sleep(do_not_save=True, adjust_status=True)
        process.adjust_status.assert_called_with(None)
        ProcessCeleryTask.objects.unbind.assert_not_called()
        process.adjust_status.reset_mock()

        process.sleep(do_not_save=True,
                      adjust_status=True,
                      adjust_scope=[1, 2, 3, 4])
        process.adjust_status.assert_called_with([1, 2, 3, 4])
        ProcessCeleryTask.objects.unbind.assert_not_called()
        process.adjust_status.reset_mock()

        process.sleep(do_not_save=False, adjust_status=False)
        process.adjust_status.assert_not_called()
        self.assertTrue(process.is_sleep)
        ProcessCeleryTask.objects.unbind.assert_called_with(process.id)

        with mock.patch(PIPELINE_PROCESS_CHILD_PROCESS_READY, MagicMock()):
            process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
            mock_snapshot = ProcessSnapshot.objects.create_snapshot(
                pipeline_stack=Stack(),
                children=[1, 2, 3, 4],
                root_pipeline=IdentifyObject(),
                subprocess_stack=Stack([]))
            process.snapshot = mock_snapshot
            process.sleep(do_not_save=False, adjust_status=False)
            PipelineProcess.objects.child_process_ready.assert_has_calls(
                [mock.call(1),
                 mock.call(2),
                 mock.call(3),
                 mock.call(4)])

    def test_pop(self):
        stack = Stack()
        self.assertRaises(IndexError, stack.pop)
        stack.push(1)
        stack.push(2)
        r = stack.pop()
        self.assertEqual(r, 2)
        self.assertEqual(stack, [1])
        r = stack.pop()
        self.assertEqual(r, 1)
        self.assertEqual(stack, [])

def validate_converge_gateway(data):
    """
    检测对应汇聚网关及合法性
    """
    converge_list = {}
    gateway_list = {}

    # collect every gateway, along with the first gateway/event each of its outgoing flows leads to
    for i, item in data[PE.gateways].items():
        node = {
            'incoming': item[PE.incoming] if isinstance(item[PE.incoming], list) else [item[PE.incoming]],
            'outgoing': item[PE.outgoing] if isinstance(item[PE.outgoing], list) else [item[PE.outgoing]],
            'type': item[PE.type],
            'target': [],
            'match': None,
            'id': item[PE.id]
        }

        for index in node[PE.outgoing]:
            index = data[PE.flows][index][PE.target]
            while index in data[PE.activities]:
                index = data[PE.flows][data[PE.activities][index][PE.outgoing]][PE.target]
            node[PE.target].append(index)

        if item[PE.type] == PE.ConvergeGateway:
            converge_list[i] = node
        else:
            gateway_list[i] = node

    for i in gateway_list:
        if not gateway_list[i]['match']:
            new_find_closest_converge(converge_list, gateway_list, i, Stack(), data[PE.end_event][PE.id])

    for i in gateway_list:
        if gateway_list[i]['match']:
            data[PE.gateways][i][PE.converge_gateway_id] = gateway_list[i]['match']
def match_converge(converges,
                   gateways,
                   cur_index,
                   end_event_id,
                   block_start,
                   block_nodes,
                   converged,
                   dist_from_start,
                   converge_in_len,
                   stack=None):
    """
    find converge for parallel and exclusive in blocks, and check sanity of gateway
    :param converges:
    :param gateways:
    :param cur_index:
    :param end_event_id:
    :param block_start:
    :param block_nodes:
    :param converged:
    :param dist_from_start:
    :param stack:
    :param converge_in_len:
    :return:
    """

    if stack is None:
        stack = Stack()

    if cur_index not in gateways:
        return None, False

    # return if this node is already matched
    if gateways[cur_index]['match']:
        return (gateways[cur_index]['match'],
                gateways[cur_index]['share_converge'])

    current_gateway = gateways[cur_index]
    target = gateways[cur_index][PE.target]
    stack.push(gateways[cur_index])
    stack_id_set = set([g[PE.id] for g in stack])

    # find closest converge recursively
    for i in range(len(target)):

        # do not process nodes from previous blocks
        if matched_in_prev_blocks(target[i], block_start, block_nodes):
            target[i] = None
            continue

        block_nodes[block_start].add(target[i])

        # do not find self's converge node again
        while target[i] in gateways and target[i] != current_gateway[PE.id]:

            if target[i] in stack_id_set:
                # return to previous gateway

                if not_in_parallel_gateway(stack, start_from=target[i]):
                    # do not trace back
                    target[i] = None
                    break
                else:
                    # i18n: "branch gateways inside a parallel gateway must
                    # converge all of their branches at one converge gateway"
                    raise exceptions.ConvergeMatchError(
                        cur_index, _(u"并行网关中的分支网关必须将所有分支汇聚到一个汇聚网关"))

            converge_id, shared = match_converge(
                converges=converges,
                gateways=gateways,
                cur_index=target[i],
                end_event_id=end_event_id,
                block_start=block_start,
                block_nodes=block_nodes,
                stack=stack,
                converged=converged,
                dist_from_start=dist_from_start,
                converge_in_len=converge_in_len)
            if converge_id:
                target[i] = converge_id

                if not shared:
                    # try to get the next node of a converge which is not shared
                    target[i] = converges[converge_id][PE.target][0]

            else:
                # can't find corresponding converge gateway, which means this gateway will reach end event directly
                target[i] = end_event_id

        if (target[i] in converges
                and dist_from_start[target[i]] < dist_from_start[cur_index]):
            # do not match previous converge
            target[i] = None

    stack.pop()

    is_exg = current_gateway[PE.type] == PE.ExclusiveGateway
    converge_id = None
    shared = False
    cur_to_converge = len(target)
    converge_end = False

    # gateway match validation
    for i in range(len(target)):

        # mark first converge
        if target[i] in converges and not converge_id:
            converge_id = target[i]

        # same converge node
        elif target[i] in converges and converge_id == target[i]:
            pass

        # exclusive gateway point to end
        elif is_exg and target[i] == end_event_id:
            if not_in_parallel_gateway(stack):
                converge_end = True
            else:
                # i18n: "branch gateways inside a parallel gateway must
                # converge all of their branches at one converge gateway"
                raise exceptions.ConvergeMatchError(
                    cur_index, _(u"并行网关中的分支网关必须将所有分支汇聚到一个汇聚网关"))

        # exclusive gateway point back to self
        elif is_exg and target[i] == current_gateway[PE.id]:
            # not converge behavior
            cur_to_converge -= 1

        # exclusive gateway converge at different converge gateway
        elif is_exg and target[i] in converges and converge_id != target[i]:
            # i18n: "every branch of an exclusive gateway must first reach the
            # same converge gateway"
            raise exceptions.ConvergeMatchError(
                cur_index, _(u"分支网关的所有分支第一个遇到的汇聚网关必须是同一个"))

        # meet previous node
        elif is_exg and target[i] is None:
            # not converge behavior
            cur_to_converge -= 1

        # invalid cases
        else:
            # i18n: "invalid gateway, check whether its branches follow the rules"
            raise exceptions.ConvergeMatchError(cur_index,
                                                _(u"非法网关,请检查其分支是否符合规则"))

    if is_exg:
        if converge_id in converges:
            # this converge is shared by multiple gateway
            # only compare to the number of positive incoming
            shared = converge_in_len[converge_id] > cur_to_converge
    else:
        # for parallel gateway

        converge_incoming = len(converges[converge_id][PE.incoming])
        gateway_outgoing = len(target)

        if converge_incoming > gateway_outgoing:
            for gateway_id in converged.get(converge_id, []):
                # find another parallel gateway
                if gateways[gateway_id][PE.type] in PARALLEL_GATEWAYS:
                    # i18n: "a converge gateway may only converge branches from
                    # a single parallel gateway"
                    raise exceptions.ConvergeMatchError(
                        converge_id, _(u"汇聚网关只能汇聚来自同一个并行网关的分支"))

            shared = True

        elif converge_incoming < gateway_outgoing:
            # i18n: "the converge gateway does not converge all branches of its
            # corresponding parallel gateway"
            raise exceptions.ConvergeMatchError(converge_id,
                                                _(u"汇聚网关没有汇聚其对应的并行网关的所有分支"))

    current_gateway['match'] = converge_id
    current_gateway['share_converge'] = shared
    current_gateway['converge_end'] = converge_end

    converged.setdefault(converge_id, []).append(current_gateway[PE.id])
    block_nodes[block_start].add(current_gateway[PE.id])

    return converge_id, shared

    def test_push(self):
        stack = Stack()
        self.assertEqual(stack, [])
        stack.push(1)
        stack.push(2)
        self.assertEqual(stack, [1, 2])

    def test_blocked_by_failure_or_suspended(self):
        process = PipelineProcess.objects.create()
        mock_snapshot = ProcessSnapshot.objects.create_snapshot(
            pipeline_stack=Stack(),
            children=[],
            root_pipeline=IdentifyObject(),
            subprocess_stack=Stack([]))
        process.snapshot = mock_snapshot

        def return_suspended(*args, **kwargs):
            return states.SUSPENDED

        def return_failed(*args, **kwargs):
            return states.FAILED

        def return_none(*args, **kwargs):
            return None

        class MockChild(object):
            def __init__(self, failed=False, suspended=False):
                self.failed = failed
                self.suspended = suspended

            def blocked_by_failure_or_suspended(self):
                return self.failed or self.suspended

        def return_child_no_anomaly(*args, **kwargs):
            return [MockChild(), MockChild(), MockChild()]

        def return_child_has_failed(*args, **kwargs):
            return [MockChild(), MockChild(), MockChild(failed=True)]

        def return_child_has_suspended(*args, **kwargs):
            return [MockChild(), MockChild(), MockChild(suspended=True)]

        process.is_sleep = False
        self.assertFalse(process.blocked_by_failure_or_suspended())

        # the current node has already failed
        with mock.patch(PIPELINE_STATUS_STATE_FOR, return_failed):
            process.is_sleep = True
            self.assertTrue(process.blocked_by_failure_or_suspended())

        # the current node has been suspended
        with mock.patch(PIPELINE_STATUS_STATE_FOR, return_suspended):
            process.is_sleep = True
            self.assertTrue(process.blocked_by_failure_or_suspended())

        # the whole pipeline entered SUSPENDED before the next node started
        with mock.patch(PIPELINE_STATUS_STATE_FOR, return_none):
            process.is_sleep = True
            self.assertFalse(process.blocked_by_failure_or_suspended())

            mock_snapshot = ProcessSnapshot.objects.create_snapshot(
                pipeline_stack=Stack(),
                children=[1, 2, 3],
                root_pipeline=IdentifyObject(),
                subprocess_stack=Stack([]))
            process.snapshot = mock_snapshot

            # none of the child processes has an anomaly
            with mock.patch(PIPELINE_PROCESS_FILTER, return_child_no_anomaly):
                process.is_sleep = True
                self.assertFalse(process.blocked_by_failure_or_suspended())

            # one of the child processes has failed
            with mock.patch(PIPELINE_PROCESS_FILTER, return_child_has_failed):
                process.is_sleep = True
                self.assertTrue(process.blocked_by_failure_or_suspended())

            # one of the child processes has been suspended
            with mock.patch(PIPELINE_PROCESS_FILTER,
                            return_child_has_suspended):
                process.is_sleep = True
                self.assertTrue(process.blocked_by_failure_or_suspended())
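
    # Hedged sketch of blocked_by_failure_or_suspended reconstructed from the
    # cases above: an awake process is never considered blocked; otherwise
    # block on a FAILED or SUSPENDED current node, or on any blocked child
    # process. An assumption, not the shipped implementation.
    def blocked_by_failure_or_suspended(self):
        if not self.is_sleep:
            return False
        state = Status.objects.state_for(self.current_node_id,
                                         may_not_exist=True)
        if state in {states.FAILED, states.SUSPENDED}:
            return True
        if self.children:
            children = PipelineProcess.objects.filter(id__in=self.children)
            return any(c.blocked_by_failure_or_suspended() for c in children)
        return False
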
    def test_adjust_status(self):
        process = PipelineProcess.objects.create()
        mock_snapshot = ProcessSnapshot.objects.create_snapshot(
            pipeline_stack=Stack(),
            children=[],
            root_pipeline=IdentifyObject(id='root_pipeline_id'),
            subprocess_stack=Stack([1, 2, 3, 4]))
        process.snapshot = mock_snapshot
        process.current_node_id = 'current_node_id'

        def return_suspended_for_node(id, may_not_exist=False):
            if id == 'current_node_id':
                return states.SUSPENDED

        def return_failed_for_node(id, may_not_exist=False):
            if id == 'current_node_id':
                return states.FAILED

        def return_suspended_for_root_pipeline(id, may_not_exist=False):
            if id == 'root_pipeline_id':
                return states.SUSPENDED

        def return_none_for_node(*args, **kwargs):
            return None

        def return_empty_list_for_subproc(subprocess_stack):
            return []

        def return_all_running_for_subproc(subprocess_stack):
            return [
                states.RUNNING, states.RUNNING, states.RUNNING, states.RUNNING
            ]

        def return_last_suspended_for_subproc(subprocess_stack):
            return [
                states.RUNNING, states.RUNNING, states.RUNNING,
                states.SUSPENDED
            ]

        def return_one_suspended_for_subproc(subprocess_stack):
            return [
                states.RUNNING, states.SUSPENDED, states.RUNNING,
                states.RUNNING
            ]

        node_state_possibility = [
            return_suspended_for_node, return_failed_for_node
        ]

        with mock.patch(PIPELINE_STATUS_STATES_FOR,
                        return_empty_list_for_subproc):
            for case in node_state_possibility:
                with mock.patch(PIPELINE_STATUS_STATE_FOR, case):
                    process.adjust_status()
                    Status.objects.batch_transit.assert_called_with(
                        id_list=[1, 2, 3, 4],
                        state=states.BLOCKED,
                        from_state=states.RUNNING)
                    Status.objects.transit.assert_called_with(
                        'root_pipeline_id',
                        to_state=states.BLOCKED,
                        is_pipeline=True)
                    Status.objects.batch_transit.reset_mock()
                    Status.objects.transit.reset_mock()

            with mock.patch(PIPELINE_STATUS_STATE_FOR,
                            return_suspended_for_root_pipeline):
                process.adjust_status()
                Status.objects.batch_transit.assert_called_with(
                    id_list=[1, 2, 3, 4],
                    state=states.SUSPENDED,
                    from_state=states.RUNNING)
                Status.objects.batch_transit.reset_mock()