Example #1
0
 def test_pop(self):
     """Pop on an empty stack raises IndexError; otherwise pop is LIFO.

     Also checks that the stack compares equal to the list of its
     remaining elements after each pop.
     """
     s = Stack()
     # an empty stack must refuse to pop
     self.assertRaises(IndexError, s.pop)
     for value in (1, 2):
         s.push(value)
     # last pushed comes out first
     self.assertEqual(s.pop(), 2)
     self.assertEqual(s, [1])
     self.assertEqual(s.pop(), 1)
     self.assertEqual(s, [])
Example #2
0
def match_converge(converges,
                   gateways,
                   cur_index,
                   end_event_id,
                   block_start,
                   block_nodes,
                   converged,
                   dist_from_start,
                   converge_in_len,
                   stack=None):
    """
    Recursively find the converge gateway for parallel and exclusive
    gateways inside a block, and validate the sanity of the gateway
    structure along the way.

    Matching walks each outgoing target of the current gateway until it
    hits a converge node, the end event, or a previously-visited node,
    rewriting ``target[i]`` in place as it goes. Results are memoized on
    the gateway dict (``'match'`` / ``'share_converge'`` keys).

    :param converges: dict mapping converge gateway id -> node dict
        (read for ``PE.target`` and ``PE.incoming``)
    :param gateways: dict mapping gateway id -> node dict (read/written:
        ``'match'``, ``'share_converge'``, ``'converge_end'``, ``PE.target``,
        ``PE.id``, ``PE.type``)
    :param cur_index: id of the gateway currently being matched
    :param end_event_id: id of the flow's end event
    :param block_start: id of the node that starts the current block
    :param block_nodes: dict mapping block start id -> set of node ids
        already assigned to that block (mutated here)
    :param converged: dict mapping converge id -> list of gateway ids
        that converge there (mutated here)
    :param dist_from_start: dict mapping node id -> distance from the
        flow's start; used to reject converges that lie before this gateway
    :param converge_in_len: dict mapping converge id -> number of
        positive (non-loopback) incoming flows
    :param stack: Stack of gateway dicts on the current recursion path;
        created fresh when None (top-level call)
    :return: tuple ``(converge_id, shared)`` — the matched converge id
        (or None) and whether that converge is shared with other gateways
    :raises exceptions.ConvergeMatchError: when the gateway layout
        violates the converge rules
    """

    # fresh recursion stack for a top-level call (avoids the mutable
    # default argument pitfall)
    if stack is None:
        stack = Stack()

    # non-gateway nodes have nothing to match
    if cur_index not in gateways:
        return None, False

    # return if this node is already matched (memoized result)
    if gateways[cur_index]['match']:
        return gateways[cur_index]['match'], gateways[cur_index][
            'share_converge']

    current_gateway = gateways[cur_index]
    target = gateways[cur_index][PE.target]
    # push self so deeper recursion can detect cycles back to this path
    stack.push(gateways[cur_index])
    stack_id_set = set([g[PE.id] for g in stack])

    # find closest converge recursively
    for i in range(len(target)):

        # do not process prev blocks nodes
        if matched_in_prev_blocks(target[i], block_start, block_nodes):
            target[i] = None
            continue

        block_nodes[block_start].add(target[i])

        # do not find self's converge node again
        while target[i] in gateways and target[i] != current_gateway[PE.id]:

            if target[i] in stack_id_set:
                # return to previous gateway

                if not_in_parallel_gateway(stack, start_from=target[i]):
                    # do not trace back
                    target[i] = None
                    break
                else:
                    raise exceptions.ConvergeMatchError(
                        cur_index, _(u"并行网关中的分支网关必须将所有分支汇聚到一个汇聚网关"))

            # recurse into the nested gateway; shares this call's stack
            converge_id, shared = match_converge(
                converges=converges,
                gateways=gateways,
                cur_index=target[i],
                end_event_id=end_event_id,
                block_start=block_start,
                block_nodes=block_nodes,
                stack=stack,
                converged=converged,
                dist_from_start=dist_from_start,
                converge_in_len=converge_in_len)
            if converge_id:
                target[i] = converge_id

                if not shared:
                    # try to get next node fo converge which is not shared
                    target[i] = converges[converge_id][PE.target][0]

            else:
                # can't find corresponding converge gateway, which means this gateway will reach end event directly
                target[i] = end_event_id

        # NOTE(review): a converge closer to the start than this gateway
        # means we looped back to an earlier converge — reject it
        if target[i] in converges and dist_from_start[
                target[i]] < dist_from_start[cur_index]:
            # do not match previous converge
            target[i] = None

    # done exploring this gateway's branches; pop self off the path
    stack.pop()

    is_exg = current_gateway[PE.type] == PE.ExclusiveGateway
    converge_id = None
    shared = False
    # count of branches that actually reach the converge (loopbacks and
    # dead branches are subtracted below)
    cur_to_converge = len(target)
    converge_end = False

    # gateway match validation
    for i in range(len(target)):

        # mark first converge
        if target[i] in converges and not converge_id:
            converge_id = target[i]

        # same converge node
        elif target[i] in converges and converge_id == target[i]:
            pass

        # exclusive gateway point to end
        elif is_exg and target[i] == end_event_id:
            if not_in_parallel_gateway(stack):
                converge_end = True
            else:
                raise exceptions.ConvergeMatchError(
                    cur_index, _(u"并行网关中的分支网关必须将所有分支汇聚到一个汇聚网关"))

        # exclusive gateway point back to self
        elif is_exg and target[i] == current_gateway[PE.id]:
            # not converge behavior
            cur_to_converge -= 1
            pass

        # exclusive gateway converge at different converge gateway
        elif is_exg and target[i] in converges and converge_id != target[i]:
            raise exceptions.ConvergeMatchError(
                cur_index, _(u"分支网关的所有分支第一个遇到的汇聚网关必须是同一个"))

        # meet previous node
        elif is_exg and target[i] is None:
            # not converge behavior
            cur_to_converge -= 1
            pass

        # invalid cases
        else:
            raise exceptions.ConvergeMatchError(cur_index,
                                                _(u"非法网关,请检查其分支是否符合规则"))

    if is_exg:
        if converge_id in converges:
            # this converge is shared by multiple gateway
            # only compare to the number of positive incoming
            shared = converge_in_len[converge_id] > cur_to_converge
    else:
        # for parallel gateway
        # NOTE(review): converge_id may be None here if no branch reached
        # a converge; this would raise KeyError — presumably earlier
        # validation guarantees parallel gateways always converge. Verify.

        converge_incoming = len(converges[converge_id][PE.incoming])
        gateway_outgoing = len(target)

        if converge_incoming > gateway_outgoing:
            # converge takes in more flows than this gateway emits, so it
            # must also serve other gateways — but never two parallels
            for gateway_id in converged.get(converge_id, []):
                # find another parallel gateway
                if gateways[gateway_id][PE.type] in PARALLEL_GATEWAYS:
                    raise exceptions.ConvergeMatchError(
                        converge_id, _(u"汇聚网关只能汇聚来自同一个并行网关的分支"))

            shared = True

        elif converge_incoming < gateway_outgoing:
            raise exceptions.ConvergeMatchError(converge_id,
                                                _(u"汇聚网关没有汇聚其对应的并行网关的所有分支"))

    # memoize the result on the gateway node for later calls
    current_gateway['match'] = converge_id
    current_gateway['share_converge'] = shared
    current_gateway['converge_end'] = converge_end

    # record this gateway under its converge, and claim it for the block
    converged.setdefault(converge_id, []).append(current_gateway[PE.id])
    block_nodes[block_start].add(current_gateway[PE.id])

    return converge_id, shared