Example no. 1
import networkx as nx
from networkx.algorithms.approximation.treewidth import (
    treewidth_min_degree, treewidth_min_fill_in)


def compute_treewidth_from_BIF(reader):
    edges = ["{} {}".format(e1, e2)
             for e1, e2 in reader.get_edges()]  # edge list in parse_edgelist format

    network = nx.parse_edgelist(edges, create_using=nx.DiGraph)

    undirected_network = network.to_undirected()
    # Estimate the treewidth with both heuristics; only the width bounds are
    # returned, the tree decompositions themselves are discarded.
    treewidth_deg, _ = treewidth_min_degree(undirected_network)
    treewidth_fill, _ = treewidth_min_fill_in(undirected_network)

    return network, treewidth_deg, treewidth_fill
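A possible way to call the helper above, assuming the reader is pgmpy's BIFReader (anything exposing a get_edges() method returning (parent, child) pairs would do); the file name is only a placeholder:

from pgmpy.readwrite import BIFReader  # assumed input source

reader = BIFReader('alarm.bif')  # placeholder path
network, tw_deg, tw_fill = compute_treewidth_from_BIF(reader)
print('min-degree bound:', tw_deg, '| min-fill-in bound:', tw_fill)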
Example no. 2
import itertools

import networkx as nx
from networkx.algorithms.approximation.treewidth import treewidth_min_fill_in
# Note: is_diagonal() is a helper from the surrounding project and is not
# defined in this excerpt.


def get_tree_decomposition(cir, num_qubit, index_set, index_2_node,
                           node_2_index, connect_in_and_out=False):
    lin_graph = nx.Graph()
    lin_graph.add_nodes_from(index_set)
    for k in cir.nodes():
        operation = cir.nodes[k]['operation']
        nam = operation.name
        gate_qubits = len(operation.involve_qubits_list)
        if nam == 'CX':
            lin_graph.add_edge(node_2_index[k][0].key, node_2_index[k][3].key)
            lin_graph.add_edge(node_2_index[k][0].key, node_2_index[k][4].key)
            lin_graph.add_edge(node_2_index[k][3].key, node_2_index[k][4].key)
            continue
        if gate_qubits == 2:
            if is_diagonal(operation.u_matrix):
                # Diagonal two-qubit gate: connect only indices 0 and 2.
                lin_graph.add_edge(node_2_index[k][0].key,
                                   node_2_index[k][2].key)
            else:
                # General two-qubit gate: connect all four tensor indices
                # pairwise (a 4-clique in the line graph).
                keys = [node_2_index[k][i].key for i in range(4)]
                for a, b in itertools.combinations(keys, 2):
                    lin_graph.add_edge(a, b)
            continue
        # All other gates: connect indices 0 and 1 if they differ.
        if node_2_index[k][0].key != node_2_index[k][1].key:
            lin_graph.add_edge(node_2_index[k][0].key, node_2_index[k][1].key)

    if connect_in_and_out:
        for k in range(num_qubit):
            qubit_indices = node_2_index['q' + str(k)]
            if len(qubit_indices) == 0:
                continue
            if qubit_indices[0].key != qubit_indices[1].key:
                lin_graph.add_edge(qubit_indices[0].key,
                                   qubit_indices[1].key)

    tree_width, de_graph = treewidth_min_fill_in(lin_graph)
    #     print('The treewidth is',tree_width)
    return de_graph, tree_width
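For reference, treewidth_min_fill_in returns a pair (width, decomposition graph); the decomposition's nodes are frozensets of the original vertices (the bags). A minimal self-contained check of that return format, independent of the circuit-specific code above:

import networkx as nx
from networkx.algorithms.approximation.treewidth import treewidth_min_fill_in

g = nx.cycle_graph(5)               # any small test graph
width, decomposition = treewidth_min_fill_in(g)
print(width)                        # expected: 2 for a 5-cycle
print(list(decomposition.nodes()))  # bags such as frozenset({0, 1, 4}), ...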
Example no. 3
    def create(self, size, dependencies):

        # iterate tree decomposition heuristics over randomized graphs

        def translate(xs, perm):
            return [[perm[y] for y in ys] for ys in xs]

        bindependencies = self.expand_to_cliques(dependencies)

        # produce networkx graph from size, dependencies
        from networkx import Graph

        from networkx.algorithms.approximation.treewidth import (
            treewidth_min_degree, treewidth_min_fill_in)

        perm = list(range(size))

        best_width = None

        for k in range(self.iterations):
            MyRandom.shuffle(perm)
            inv_perm = {perm[i]: i for i in range(size)}  # inverse permutation

            G = Graph()
            G.add_nodes_from(range(size))
            G.add_edges_from(translate(bindependencies, perm))

            width, tree = treewidth_min_fill_in(G)

            if best_width is None or width < best_width:
                best_width, best_tree = width, tree
                best_inv_perm = inv_perm

        bags = list(map(list, best_tree.nodes))
        edges = [(bags.index(list(i)), bags.index(list(j)))
                 for i, j in best_tree.edges]
        bags = translate(bags, best_inv_perm)
        td = TreeDecomposition(bags, edges)
        td.expand_treedecomposition(self.maxdiffsize)

        return td
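The random shuffling above helps because the heuristic's result can depend on the vertex ordering, so trying several relabelings and keeping the smallest width tightens the upper bound. A stripped-down, self-contained version of the same idea (the function name and defaults here are illustrative, not from the project):

import random

import networkx as nx
from networkx.algorithms.approximation.treewidth import treewidth_min_fill_in


def best_of_random_relabelings(graph, iterations=10, seed=0):
    # Run the min-fill-in heuristic on several random relabelings of the
    # graph and keep the smallest width found (an upper bound on treewidth).
    rng = random.Random(seed)
    nodes = list(graph.nodes())
    best_width, best_tree = None, None
    for _ in range(iterations):
        shuffled = nodes[:]
        rng.shuffle(shuffled)
        relabeled = nx.relabel_nodes(graph, dict(zip(nodes, shuffled)))
        width, tree = treewidth_min_fill_in(relabeled)
        if best_width is None or width < best_width:
            best_width, best_tree = width, tree
    return best_width, best_tree

Unlike the create() method above, this sketch does not translate the bags back to the original labels.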
Example no. 4
"""
Created on Sat Apr 13 13:49:51 2019

@author: acb16ua
"""

import networkx as nx
#from networkx.algorithms import approximation
#from networkx.algorithms.approximation import clique
from networkx.algorithms.approximation import treewidth
import matplotlib.pyplot as plt

g = nx.Graph()

# Read edges from a DIMACS-style .dgf file: lines of the form "e u v".
with open('alarm.dgf') as infile:
    for line in infile:
        edge = line.split()
        if edge and edge[0] == 'e':
            g.add_edge(int(edge[1]), int(edge[2]))

plt.subplot(121)
nx.draw(g, with_labels=True, font_weight='bold')

#bing = clique.max_clique(g)
tw, decomp_graph = treewidth.treewidth_min_fill_in(g)

print(tw)
#print(bing)
plt.subplot(122)
nx.draw(decomp_graph, with_labels=False, font_weight='bold')
plt.show()
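The width reported by the heuristic is the size of the largest bag minus one, which can be checked directly on the decomposition (a small sanity check, not part of the original script):

largest_bag = max(len(bag) for bag in decomp_graph.nodes())
assert tw == largest_bag - 1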
Example no. 5
def to_junction_tree_model(model, algorithm) -> JunctionizedModel:
    """Builds equivalent model on a junction tree.

    First, builds a junction tree using NetworkX's approximate treewidth
    algorithms (the Minimum Fill-in and/or Minimum Degree heuristic,
    depending on ``algorithm``).

    Then, builds a new model in which variables correspond to nodes of the
    junction tree - we will call them "supervariables". Values of the new
    supervariables are encoded values of the original variables. The new
    alphabet size is the original alphabet size to the power of the maximal
    junction size. If some supervariables contain fewer variables than
    others, we simply don't use all of the available encoding "address
    space". We mark those impossible values as having probability 0 (i.e.
    log probability -inf).

    Fields in the new model are calculated by multiplying all field and
    interaction factors on variables within the same supervariable. While
    doing this, we make sure that every factor is counted only once: if a
    factor was already accounted for in one supervariable's field, it won't
    be accounted for again in other supervariables.

    Interaction factors in the new model encode a consistency requirement: if
    a variable of the original model appears in multiple supervariables, we
    allow only those states where it takes the same value in all of them. We
    achieve that by using interaction factors which are equal to 1 if the
    values of the same original variable in different supervariables agree,
    and 0 if they don't. We actually use values 0 and -inf, because we work
    with logarithms.

    See https://en.wikipedia.org/wiki/Tree_decomposition.

    :param model: original model.
    :param algorithm: treewidth decomposition algorithm: 'min_fill_in',
      'min_degree' or 'auto' (run both and keep the smaller width).
    :return: a JunctionizedModel object, which contains the junction tree and
      a new model that is equivalent to the original model but whose graph is
      a tree.
    """
    # Build junction tree.
    graph = model.get_graph()
    if algorithm == 'min_fill_in':
        tree_width, junc_tree = treewidth_min_fill_in(graph)
    elif algorithm == 'min_degree':
        tree_width, junc_tree = treewidth_min_degree(graph)
    elif algorithm == 'auto':
        tree_width_1, junc_tree_1 = treewidth_min_fill_in(graph)
        tree_width_2, junc_tree_2 = treewidth_min_degree(graph)
        if tree_width_1 < tree_width_2:
            tree_width, junc_tree = tree_width_1, junc_tree_1
        else:
            tree_width, junc_tree = tree_width_2, junc_tree_2
    else:
        raise ValueError('Unknown treewidth decomposition algorithm %s' %
                         algorithm)

    jt_nodes = list(junc_tree.nodes())
    sv_size = tree_width + 1  # Supervariable size.

    new_gr_size = len(jt_nodes)  # New graph size.
    new_al_size = model.al_size**sv_size  # New alphabet size.
    if new_al_size > 1e6:
        raise TooMuchStatesError("New domain size is too large: %d." %
                                 new_al_size)

    # Build the edge list in terms of indices in the new graph.
    nodes_lookup = {node: i for i, node in enumerate(jt_nodes)}
    new_edges = np.array([[nodes_lookup[u], nodes_lookup[v]]
                          for u, v in junc_tree.edges()])

    # Convert node lists to numpy arrays.
    jt_nodes = [np.fromiter(node, dtype=np.int32) for node in jt_nodes]

    # Calculate the fields of the new supervariables (each absorbs the field
    # and interaction factors of the original variables inside its bag).
    # If a supervariable has fewer than ``sv_size`` variables, pad with -inf.
    # Then, when decoding, we will just throw away values from the left.
    # We should account for each factor of the old graph in exactly one factor
    # in the new graph. So, for field and interaction factors of the old graph
    # we keep track of whether we already took them, and don't take them for
    # the second time.
    new_field = np.full((new_gr_size, new_al_size), -np.inf, dtype=np.float64)
    used_node_fields = set()
    for new_node_id in range(new_gr_size):
        old_nodes = jt_nodes[new_node_id]
        node_field = model.get_subgraph_factor_values(
            old_nodes, vars_skip=used_node_fields)
        new_field[new_node_id, 0:len(node_field)] = node_field
        used_node_fields.update(old_nodes)

    # Now, for every edge in the new graph, add an interaction factor
    # requiring that a variable appearing in two supervariables always takes
    # the same value in both.
    # We achieve this by using the Kronecker delta function.
    # As we are working with logarithms, we populate -inf for impossible
    # states and 0 for possible states.
    new_interactions = np.zeros((len(new_edges), new_al_size, new_al_size))
    for edge_id in range(len(new_edges)):
        u, v = new_edges[edge_id]
        allowed = build_multi_delta(sv_size, model.al_size, jt_nodes[u],
                                    jt_nodes[v])
        new_interactions[edge_id, np.logical_not(allowed)] = -np.inf

    from inferlo.pairwise.pwf_model import PairWiseFiniteModel
    new_model = PairWiseFiniteModel.create(new_field, new_edges,
                                           new_interactions)
    return JunctionizedModel(new_model, jt_nodes, model.gr_size, model.al_size)
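build_multi_delta is defined elsewhere in inferlo and is not shown in this example. As an illustration of the consistency constraint it is used for, here is a hedged sketch of such a delta for two bags (the digit-based encoding assumed below is only an illustration, not necessarily inferlo's actual layout):

import numpy as np


def consistency_delta(sv_size, al_size, bag_u, bag_v):
    # allowed[x, y] is True iff every variable shared by bag_u and bag_v
    # decodes to the same value in supervariable states x and y.
    # Assumed encoding: the i-th variable of a bag is the i-th base-al_size
    # digit (from the right) of the supervariable's state.
    shared = set(bag_u) & set(bag_v)
    n_states = al_size ** sv_size
    allowed = np.ones((n_states, n_states), dtype=bool)
    states = np.arange(n_states)
    for var in shared:
        i = list(bag_u).index(var)
        j = list(bag_v).index(var)
        digits_u = (states // al_size ** i) % al_size
        digits_v = (states // al_size ** j) % al_size
        allowed &= digits_u[:, None] == digits_v[None, :]
    return allowed

In the function above, np.logical_not of such a matrix is what gets filled with -inf, so inconsistent joint assignments become impossible states.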