Example #1
def test_update_rscp_correctness(graph, new_edge, initial_partition):
    vertexes, qblocks = decorate_nx_graph(graph, initial_partition)
    qblocks = paige_tarjan_qblocks(qblocks)

    # compute incrementally
    update_result = saha(qblocks, vertexes, new_edge)
    update_result = vertexes_to_set(update_result)

    # compute from scratch
    graph2 = nx.DiGraph()
    graph2.add_nodes_from(graph.nodes)
    graph2.add_edges_from(graph.edges)
    graph2.add_edge(*new_edge)
    new_vertexes, new_qblocks = decorate_nx_graph(graph2, initial_partition)
    new_rscp = paige_tarjan_qblocks(new_qblocks)
    new_rscp = vertexes_to_set(new_rscp)

    assert update_result == new_rscp
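A minimal way to drive this test on a concrete input might look like the sketch below (assuming the helpers imported by the test module, such as decorate_nx_graph and saha, are in scope). The graph, edge, and partition values are made up; the graph is assumed to already be an "integer" graph (nodes labelled 0..n-1), since decorate_nx_graph is applied to it directly, and new_edge is a pair of node labels, exactly as the from-scratch branch (graph2.add_edge(*new_edge)) requires.

import networkx as nx

# hypothetical inputs: a small integer graph, the edge to add incrementally,
# and a labeling set covering all the nodes
graph = nx.DiGraph()
graph.add_edges_from([(0, 1), (1, 2), (2, 0)])
new_edge = (0, 2)
initial_partition = [(0, 1), (2,)]

test_update_rscp_correctness(graph, new_edge, initial_partition)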
Example #2
def test_pt_same_initial_partition(graph, initial_partition):
    _, q_partition = decorate_nx_graph(graph, initial_partition)
    s = [tuple(block.vertexes) for block in paige_tarjan_qblocks(q_partition)]

    vertex_to_initial_partition_id = [None for _ in graph.nodes]
    for idx, block in enumerate(initial_partition):
        for vertex in block:
            vertex_to_initial_partition_id[vertex] = idx

    for block in s:
        for vertex in block:
            assert (vertex.initial_partition_block_id ==
                    vertex_to_initial_partition_id[vertex.label])
Example #3
def test_update_rank_procedures(graph, new_edge, initial_partition):
    vertexes, qblocks = decorate_nx_graph(graph, initial_partition)
    qblocks = paige_tarjan_qblocks(qblocks)

    # compute incrementally
    saha(qblocks, vertexes, new_edge)

    # compute from scratch
    graph2 = nx.DiGraph()
    graph2.add_nodes_from(graph.nodes)
    graph2.add_edges_from(graph.edges)
    graph2.add_edge(*new_edge)
    new_vertexes, new_qblocks = decorate_nx_graph(graph2, initial_partition)

    for i in range(len(vertexes)):
        assert vertexes[i].rank == new_vertexes[i].rank
Example #4
def saha(graph,
         initial_partition=None,
         is_integer_graph=False) -> SahaPartition:
    """
    Returns an instance of the class :class:`SahaPartition` which can be used
    to recompute the maximum bisimulation incrementally.

    :param graph: The initial graph.
    :param initial_partition: The initial partition, or labeling set. This is
        **not** the partition from which the computation starts, but an
        indication of which nodes cannot be bisimilar. Defaults to `None`, in
        which case the trivial labeling set (a single block which contains all
        the nodes) is used.
    :param is_integer_graph: If `True`, the function assumes that the graph is
        an integer graph and skips the integer check (this may slightly
        improve performance). Defaults to `False`.
    """

    if not isinstance(graph, nx.DiGraph):
        raise Exception("graph should be a directed graph (nx.DiGraph)")

    # if True, the input graph is already an integer graph
    original_graph_is_integer = is_integer_graph or check_normal_integer_graph(
        graph)
    if not original_graph_is_integer:
        # convert the graph to an "integer" graph
        integer_graph, node_to_idx = convert_to_integer_graph(graph)

        if initial_partition is not None:
            # convert the initial partition to an integer partition
            integer_initial_partition = [[
                node_to_idx[old_node] for old_node in block
            ] for block in initial_partition]
        else:
            integer_initial_partition = None
    else:
        integer_graph = graph
        integer_initial_partition = initial_partition
        node_to_idx = None

    vertexes, q_partition = decorate_nx_graph(
        integer_graph,
        integer_initial_partition,
    )

    # compute the current maximum bisimulation
    q_partition = paige_tarjan_qblocks(q_partition)
    return SahaPartition(q_partition, vertexes, node_to_idx)
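A possible way to use this entry point is sketched below. The call to saha follows the signature documented above; the add_edge method on the returned SahaPartition is an assumption based on the class's stated purpose (recomputing the maximum bisimulation incrementally) and is not shown in this snippet.

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([(0, 1), (1, 2), (2, 0)])

# initial_partition defaults to None, i.e. the trivial labeling set
saha_partition = saha(g)

# ASSUMPTION: SahaPartition exposes an add_edge(edge) method that updates the
# maximum bisimulation incrementally; the method name is inferred from the
# class's purpose, not shown in this snippet.
for edge in [(0, 2), (1, 0)]:
    updated_partition = saha_partition.add_edge(edge)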
Example #5
def test_merge_condition():
    graph = nx.DiGraph()
    graph.add_nodes_from(range(6))
    # node 6 is added implicitly by add_edges_from below
    graph.add_edges_from([(0, 1), (1, 2), (3, 1), (4, 6), (0, 6), (5, 6)])
    ip = [(0, 1, 2, 3), (4, 5), (6,)]

    vertexes, qblocks = decorate_nx_graph(graph, ip)
    rscp_qblocks = paige_tarjan_qblocks(qblocks)

    node_to_qblock = [None for _ in graph.nodes]
    for qb in rscp_qblocks:
        for vertex in qb.vertexes:
            node_to_qblock[vertex.label] = qb

    # can't merge: the two blocks come from different initial partition blocks
    assert not merge_condition(
        node_to_qblock[2],
        node_to_qblock[6],
    )

    # different rank
    assert not merge_condition(
        node_to_qblock[2],
        node_to_qblock[1],
    )

    # same block
    assert not merge_condition(
        node_to_qblock[4],
        node_to_qblock[4],
    )

    # can't merge: there exists a causal splitter
    assert not merge_condition(
        node_to_qblock[0],
        node_to_qblock[3],
    )
Example #6
def test_pt_result_is_stable_partition(graph, initial_partition):
    vertexes, q_partition = decorate_nx_graph(graph, initial_partition)
    s = paige_tarjan_qblocks(q_partition)
    assert is_stable_partition(s)
Example #7
File: saha.py  Project: fAndreuzzi/BisPy
def merge_split_phase(qpartition: List[_Block],
                      finishing_time_list: List[_Vertex]) -> List[_Block]:
    """
    The function `MergeAndSplitPhase` from the paper.

    :param qpartition: The current partition.
    :param finishing_time_list: List of vertexes in the graph ordered by
        finishing time.
    :returns: The updated partition.
    """

    max_rank = float("-inf")
    for block in qpartition:
        max_rank = max(max_rank, block.rank)

    # a dict of lists of blocks, keyed by initial partition ID; no two blocks
    # in the same list can be merged with each other
    cant_merge_dict = {}

    # keep track of the visited vertexes so that the 'visited' flag can be
    # cleared afterwards
    visited_vertexes = []

    # a partition containing all the touched blocks
    X = []

    # visit G in order of decreasing finishing times of the first DFS
    for vertex in finishing_time_list:
        # a vertex may be reached more than once
        if not vertex.visited:
            merge_step(vertex, X, visited_vertexes, cant_merge_dict)

    X = list(filter(lambda block: not block.deteached, X))

    # clear visited flag
    for vx in visited_vertexes:
        vx.visited = False

    # reset block.visited flag (was set by first DFS) and tried_merge
    for block in qpartition:
        block.visited = False
        block.tried_merge = False

    # ------------
    # Split phase
    # ------------

    # we need to scale in order to use PTA (and then scale back)
    scaled_to_nonscaled = []

    xblock = _XBlock()
    for block in X:
        # this is needed for PTA
        xblock.append_qblock(block)
        # set visited flag in order to compute the set (qpartition - X) easily
        block.visited = True

        for vx in block.vertexes:
            # mark as reachable by PTA
            vx.allow_visit = True

            # scale label in order to use PTA
            vx.scale_label(len(scaled_to_nonscaled))
            scaled_to_nonscaled.append(vx.label)

    # build the new qpartition, without the blocks in X (which may be split).
    # this is just the set qpartition - X
    new_qpartition = []
    for block in qpartition:
        if not (block.visited or block.deteached):
            new_qpartition.append(block)
        else:
            # now we can clear the flag: this block has already been discarded
            block.visited = False

    for block in X:
        for vx in block.vertexes:
            vx.restrict_to_subgraph(validation=lambda v: v.allow_visit)

    # apply PTA and append the blocks to the new partition
    preprocess_initial_partition(X)
    X2 = paige_tarjan_qblocks(X)
    new_qpartition.extend(X2)

    for block in X2:
        for vx in block.vertexes:
            # restore the original image/counterimage
            vx.back_to_original_graph()
            # clean allow_visit
            vx.allow_visit = False
            # restore original label
            vx.back_to_original_label()

    # select the blocks which are the result of a split, update the partition
    # with ranked_split, and clear block.is_new_qblock
    for block in filter(attrgetter("is_new_qblock"), X2):
        # split
        new_qpartition = ranked_split(new_qpartition, block, max_rank)
        # clean
        block.is_new_qblock = False

    return new_qpartition
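The scaling steps above (scale_label before running PTA on the touched blocks, back_to_original_label afterwards) exist because PTA expects a compact integer graph. A stripped-down, hypothetical illustration of the same idiom, detached from the _Vertex class, might look like this:

def with_scaled_labels(items, process):
    # hypothetical helper mirroring the scaling idiom used above: assign
    # compact temporary labels 0..k-1, run `process`, then restore the
    # original labels
    original_labels = [item.label for item in items]
    for temporary_label, item in enumerate(items):
        item.label = temporary_label
    result = process(items)
    for item, original_label in zip(items, original_labels):
        item.label = original_label
    return result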
Example #8
def dovier_piazza_policriti_partition(
    partition: RankedPartition,
) -> Tuple[RankedPartition, List[List[_Vertex]]]:
    """Apply *Dovier-Piazza-Policriti*'s algorithm to the given ranked
    partition.

    :param partition: A ranked partition (:math:`P` in the paper).
    :returns: A tuple such that the first item is the partition at the end of
        the algorithm (which at this point is made of blocks of size 1
        containing only the vertexes which survived the collapse), and the
        second is a list which maps each survivor node to the list of nodes
        collapsed into it.
    """

    # maps each survivor node to a list of nodes collapsed into it
    collapse_map = [None for _ in range(partition.nvertexes)]

    # loop over the ranks
    for partition_idx in range(len(partition)):
        if len(partition[partition_idx]) == 1:
            if len(partition[partition_idx][0].vertexes):
                block = partition[partition_idx][0]
                survivor_vertex, collapsed_vertexes = collapse(block)
                if survivor_vertex is not None:
                    # update the collapsed nodes map
                    collapse_map[survivor_vertex.label] = collapsed_vertexes
                    # update the partition
                    split_upper_ranks(partition, block)
        # OPTIMIZATION: if at the current rank every block contains a single
        # vertex, skip this step (the else branch below handles it).
        elif any(map(lambda block: block.size > 1, partition[partition_idx])):
            current_label = 0
            for block in partition[partition_idx]:
                for vertex in block.vertexes:
                    # scale vertex
                    vertex.scale_label(current_label)
                    current_label += 1

                    # exclude nodes having the wrong rank from the image and
                    # counterimage of the vertex. from now they're gone
                    # forever.
                    vertex.restrict_to_subgraph(
                        validation=lambda vx: vx.rank == vertex.rank)

            # apply PTA to the subgraph at the current examined rank
            # CAREFUL: if you debug here, you'll see that there are some
            # "duplicate" nodes (nodes with the same label in different blocks
            # of the partition). this happens because of the SCALING (which is
            # used to pass a normal graph to PTA)
            rscp = paige_tarjan_qblocks(partition[partition_idx])

            # clear the partition at the current rank
            partition.clear_index(partition_idx)

            # insert the new blocks in the partition at the current rank, and
            # collapse each block.
            for block in rscp:
                block_vertexes = []
                for scaled_vertex in block.vertexes:
                    scaled_vertex.back_to_original_label()
                    scaled_vertex.back_to_original_graph()
                    block_vertexes.append(scaled_vertex)

                # we can set XBlock to None because PTA won't be called again
                # on these blocks
                internal_block = _Block(block_vertexes, None)

                survivor_vertex, collapsed_vertexes = collapse(internal_block)

                if survivor_vertex is not None:
                    # update the collapsed nodes map
                    collapse_map[survivor_vertex.label] = collapsed_vertexes
                    # add the new block to the partition
                    partition.append_at_index(internal_block, partition_idx)
                    # update the upper ranks with respect to this block
                    split_upper_ranks(partition, internal_block)
        else:
            for block in partition[partition_idx]:
                # update the upper ranks with respect to this block
                split_upper_ranks(partition, block)

    return (partition, collapse_map)
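A possible driver for this function is sketched below. The RankedPartition(vertexes) constructor call is an assumption (this snippet only shows the function consuming an already-built RankedPartition); the decorated vertexes already carry a rank, as the rank-related tests above rely on vertex.rank right after decorate_nx_graph.

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([(0, 1), (1, 2), (2, 1), (3, 2)])

# decorate_nx_graph assigns a rank to each vertex (see the tests above)
vertexes, _ = decorate_nx_graph(g, [(0, 1, 2, 3)])

# ASSUMPTION: RankedPartition can be built directly from the decorated
# vertexes; the constructor is not shown in this snippet.
ranked_partition = RankedPartition(vertexes)
final_partition, collapse_map = dovier_piazza_policriti_partition(ranked_partition)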