Example no. 1
import random

import networkx as nx
import numpy as np
import plotly.offline
import plotly.graph_objs as go
from networkx.algorithms.approximation import treewidth as tw


def checkloop(nov, noe, nol):
    # nov: number of vertices; noe: max number of edges; nol: max number of self-loops.
    heatmap = np.zeros([noe, nol], dtype=float)

    for i in range(noe):
        for j in range(nol):
            G = nx.Graph()
            G.add_nodes_from(range(nov))

            # Add i random edges between distinct vertices.
            for _ in range(i):
                a = random.randint(0, nov - 1)
                b = random.randint(0, nov - 1)

                while a == b:
                    b = random.randint(0, nov - 1)

                G.add_edge(a, b)

            # Treewidth (min-degree heuristic) before adding self-loops.
            tw1 = tw.treewidth_min_degree(G)[0]

            # Add j random self-loops.
            for _ in range(j):
                a = random.randint(0, nov - 1)
                G.add_edge(a, a)

            # Treewidth after adding self-loops.
            tw2 = tw.treewidth_min_degree(G)[0]

            if tw1 == 0:
                heatmap[i, j] = 0
            else:
                heatmap[i, j] = tw2 / tw1

    data = [go.Heatmap(z=heatmap)]
    plotly.offline.plot(data)
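
A minimal invocation sketch; the parameter values here are arbitrary placeholders rather than values from the original source:

checkloop(nov=20, noe=30, nol=10)  # 20 vertices; edge counts 0..29, self-loop counts 0..9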
Example no. 2
import networkx as nx
from networkx.algorithms.approximation.treewidth import (treewidth_min_degree,
                                                         treewidth_min_fill_in)


def compute_treewidth_from_BIF(reader):
    # Format the edge list so that networkx can parse it.
    edges = ["{} {}".format(e1, e2) for e1, e2 in reader.get_edges()]

    network = nx.parse_edgelist(edges, create_using=nx.DiGraph)

    # The treewidth heuristics require an undirected graph.
    undirected_network = network.to_undirected()
    treewidth_deg, tree_decomposition = treewidth_min_degree(undirected_network)
    treewidth_fill, tree_decomposition = treewidth_min_fill_in(undirected_network)

    return network, treewidth_deg, treewidth_fill
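
A sketch of how this might be called. The reader argument is assumed to be any object exposing a get_edges() method that returns (parent, child) pairs, e.g. a pgmpy-style BIF reader; the DummyReader below is a hypothetical stand-in:

class DummyReader:
    # Hypothetical stand-in for a BIF reader; only get_edges() is needed.
    def get_edges(self):
        return [("Rain", "WetGrass"), ("Sprinkler", "WetGrass")]

network, tw_deg, tw_fill = compute_treewidth_from_BIF(DummyReader())
print(tw_deg, tw_fill)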
Example no. 3
def plot_graph(n, d):
    # Plots how the treewidth scales with increasing number of qubits in an IQP
    # circuit represented by a random graph.
    data = []
    for pvalue in range(d + 1):  # Loops over each value for the depth of the circuit
        tws = []  # Empty list for the treewidth values to go
        print(str(1 + 5 * pvalue / d) + '*n')
        for nvalue in range(3, n + 1):  # Loops for each number of qubits in the circuit
            nvalue = nvalue**2
            temp = []  # Temporary values to be averaged over
            # Depth of the circuit scales from nvalue up to 6 * nvalue.
            depth = int(nvalue * (1 + 5 * pvalue / d))
            nov = 3 * nvalue
            noe = depth
            p = 2 * noe / (nov * (nov - 1))

            print(nov, noe, p)
            for trials in range(100):
                G = nx.Graph()  # Creates an empty graph
                # Makes a random pseudo-quantum circuit with nvalue qubits and a
                # depth of depth.
                edges_nodes = edge_node_list(make_graph(nov, p))
                G.add_nodes_from(edges_nodes[0])
                G.add_edges_from(edges_nodes[1])
                # Finds the treewidth of the quantum circuit.
                treewidth = tw.treewidth_min_degree(G)[0]
                temp.append(treewidth)

            tws.append(np.average(temp))  # Averages the treewidth

        data.append(
            go.Scatter(x=[i**2 for i in range(3, n + 1)],
                       y=tws,
                       name='Depth: ' + str(1 + 5 * pvalue / d) + '*n'))

    layout = dict(
        title='Treewidth with different scaling depths',
        xaxis=dict(title='Number of qubits'),
        yaxis=dict(title='Treewidth'),
    )

    fig = dict(data=data, layout=layout)
    plotly.offline.plot(fig)  # Plots the treewidth as a function of number of qubits
Example no. 4
def plot_quantum(n, res):
    # Plots how the treewidth scales with increasing number of qubits as the depth
    # scales, given a max number of qubits and the resolution of the graph.
    for pvalue in range(res + 1):  # Loops over each value for the depth of the circuit
        tws = []  # Empty list for the treewidth values to go
        for nvalue in range(3, n + 1):  # Loops for each number of qubits in the circuit
            conn = []  # Creates the connectivity of the quantum circuit
            for i in range(nvalue):
                for j in range(nvalue):
                    if i != j:
                        conn.append([i, j])  # Complete array of qubits
            temp = []  # Temporary values to be averaged over
            for trials in range(1000):
                depth = nvalue**(2 * pvalue / res)  # Depth of the circuit ranges from 1 to n^2
                G = nx.Graph()  # Creates an empty graph
                # Makes a random quantum circuit with nvalue qubits and a depth of
                # depth with connectivity conn.
                edges_nodes = edge_node_list(makerandq(nvalue, depth, conn))
                G.add_nodes_from(edges_nodes[0])
                G.add_edges_from(edges_nodes[1])
                # Finds the treewidth of the quantum circuit.
                treewidth = tw.treewidth_min_degree(G)[0]
                temp.append(treewidth)

            tws.append(np.average(temp))  # Averages the treewidth
        # Plots the treewidth as a function of number of qubits.
        plt.plot(range(3, n + 1), tws)


#        blue_patch = mpatches.Patch(color='blue', label='n^' + str(2*pvalue/res))
#        plt.legend(handles=[blue_patch])
    plt.xlabel("Number of qubits")
    plt.ylabel("Treewidth")
    plt.savefig('all.png')
    plt.show()
Example no. 5
def plot_sudoquantum(n, res):
    # Plots how the treewidth scales with increasing number of qubits in a
    # pseudo-quantum circuit represented by a random graph.
    for pvalue in range(res + 1):  # Loops over each value for the pseudo-depth of the circuit
        tws = []  # Empty list for the treewidth values to go
        for nvalue in range(3, n + 1):  # Loops for each pseudo number of qubits in the circuit
            temp = []  # Temporary values to be averaged over
            # Pseudo-depth of the circuit ranges from 1 to n^2.
            depth = int(nvalue**(2 * pvalue / res))
            nov = depth + 2 * nvalue
            noe = nvalue + 2 * depth
            p = 2 * noe / (nov * (nov - 1))
            for trials in range(1000):
                G = nx.Graph()  # Creates an empty graph
                # Makes a random pseudo-quantum circuit with nvalue qubits and a
                # depth of depth.
                edges_nodes = edge_node_list(make_graph(nov, p))
                G.add_nodes_from(edges_nodes[0])
                G.add_edges_from(edges_nodes[1])
                # Finds the treewidth of the quantum circuit.
                treewidth = tw.treewidth_min_degree(G)[0]
                temp.append(treewidth)

            tws.append(np.average(temp))  # Averages the treewidth
        # Plots the treewidth as a function of number of qubits.
        plt.plot(range(3, n + 1), tws)


#        blue_patch = mpatches.Patch(color='blue', label='n^' + str(2*pvalue/res))
#        plt.legend(handles=[blue_patch])
    plt.xlabel("Sudo number of qubits")
    plt.ylabel("Treewidth")
    plt.savefig('sudoall.png')
    plt.show()
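
Examples 3-5 depend on helper functions (make_graph, makerandq, edge_node_list) that are not shown on this page. A minimal self-contained sketch of the same measurement, using nx.gnp_random_graph as a stand-in for the missing random-graph helper:

import networkx as nx
import numpy as np
from networkx.algorithms.approximation import treewidth as tw


def average_treewidth(nov, p, trials=100):
    # Average min-degree-heuristic treewidth over random G(nov, p) graphs.
    widths = []
    for _ in range(trials):
        G = nx.gnp_random_graph(nov, p)
        widths.append(tw.treewidth_min_degree(G)[0])
    return np.average(widths)


print(average_treewidth(nov=30, p=0.1))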
Example no. 6
def treeDecompPlayground(G):
    tree_decomp_graph = treewidth_min_degree(G)
    tree_decomp = tree_decomp_graph[1]
    print("tree decomposition edges:\n", nx.edges(tree_decomp))
    return tree_decomp
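
A quick usage sketch, assuming treewidth_min_degree has been imported from networkx.algorithms.approximation.treewidth; each node of the returned decomposition is a frozenset ("bag") of the original graph's nodes:

import networkx as nx
from networkx.algorithms.approximation.treewidth import treewidth_min_degree

G = nx.cycle_graph(6)
decomp = treeDecompPlayground(G)
print(list(decomp.nodes()))  # each bag is a frozenset of original nodes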
Example no. 7
fig = plt.figure(1, figsize=(9, 6))
ax = fig.add_subplot(111)
bp = ax.boxplot(to_plot, showfliers=False)
plt.xlabel('Grafo')
plt.ylabel('Tiempo (segundos)')
plt.title('Problema liga de amigos')
plt.savefig('BP3.eps', format='eps', dpi=1000)
plt.show()

# *********************treewidth_min_degree*********************

for j in range(1, 31):
    algorithm2_start_time = time.time()
    g1_start_time = time.time()
    for i in range(20):
        tree.treewidth_min_degree(G11)
    a4_t1_times.append(time.time() - g1_start_time)

    g2_start_time = time.time()
    for i in range(20):
        tree.treewidth_min_degree(G12)
    a4_t2_times.append(time.time() - g2_start_time)

    g3_start_time = time.time()
    for i in range(20):
        tree.treewidth_min_degree(G13)
    a4_t3_times.append(time.time() - g3_start_time)

    g4_start_time = time.time()
    for i in range(20):
        tree.treewidth_min_degree(G14)
    a4_t4_times.append(time.time() - g4_start_time)
Example no. 8
def to_junction_tree_model(model, algorithm) -> JunctionizedModel:
    """Builds equivalent model on a junction tree.

    First, builds a junction tree using the selected NetworkX treewidth
    heuristic (minimum fill-in, minimum degree, or the better of the two).

    Then, builds a new model in which variables correspond to nodes of the
    junction tree - we will call them "supervariables". Values of the new
    supervariables are encoded values of the original variables. The new
    alphabet size is the original alphabet size to the power of the maximal
    junction size. If some supervariables have fewer variables than others,
    we just don't use all of the available encoding "address space". We mark
    those impossible values as having probability 0 (i.e. log probability
    -inf).

    Fields in the new model are calculated by multiplying all field and
    interaction factors on variables in the same supervariable. While doing
    this, we make sure that every factor is counted only once. If some factor
    was accounted for in one supervariable's field, it won't be accounted for
    again in other supervariables.

    Interaction factors in the new model encode a consistency requirement. If
    a variable of the original model appears in multiple supervariables, we
    allow only those states where it takes the same value in all
    supervariables. We achieve that by using interaction factors which are
    equal to 1 if values of the same original variable in different
    supervariables are equal, and 0 if they are not. We actually use values 0
    and -inf, because we work with logarithms.

    See https://en.wikipedia.org/wiki/Tree_decomposition.

    :param model: original model.
    :param algorithm: treewidth decomposition algorithm ('min_fill_in',
      'min_degree' or 'auto').
    :return: JunctionizedModel object, which contains the junction tree and
      the new model, which is equivalent to the original model but whose
      graph is a tree.
    """
    # Build junction tree.
    graph = model.get_graph()
    if algorithm == 'min_fill_in':
        tree_width, junc_tree = treewidth_min_fill_in(graph)
    elif algorithm == 'min_degree':
        tree_width, junc_tree = treewidth_min_degree(graph)
    elif algorithm == 'auto':
        tree_width_1, junc_tree_1 = treewidth_min_fill_in(graph)
        tree_width_2, junc_tree_2 = treewidth_min_degree(graph)
        if tree_width_1 < tree_width_2:
            tree_width, junc_tree = tree_width_1, junc_tree_1
        else:
            tree_width, junc_tree = tree_width_2, junc_tree_2
    else:
        raise ValueError('Unknown treewidth decomposition algorithm %s' %
                         algorithm)

    jt_nodes = list(junc_tree.nodes())
    sv_size = tree_width + 1  # Supervariable size.

    new_gr_size = len(jt_nodes)  # New graph size.
    new_al_size = model.al_size**sv_size  # New alphabet size.
    if new_al_size > 1e6:
        raise TooMuchStatesError("New domain size is too large: %d." %
                                 new_al_size)

    # Build edge list in terms of indices in new graph.
    nodes_lookup = {jt_nodes[i]: i for i in range(len(jt_nodes))}
    new_edges = np.array([[nodes_lookup[u], nodes_lookup[v]]
                          for u, v in junc_tree.edges()])

    # Convert node lists to numpy arrays.
    jt_nodes = [np.fromiter(node, dtype=np.int32) for node in jt_nodes]

    # Calculate fields, which combine the original field and interaction
    # factors within each supervariable.
    # If a supervariable has fewer than ``sv_size`` variables, pad with -inf.
    # Then, when decoding, we will just throw away values from the left.
    # We should account for each factor of the old graph in exactly one factor
    # in the new graph. So, for field and interaction factors of the old graph
    # we keep track of whether we already took them, and don't take them a
    # second time.
    new_field = np.ones((new_gr_size, new_al_size), dtype=np.float64) * -np.inf
    used_node_fields = set()
    for new_node_id in range(new_gr_size):
        old_nodes = jt_nodes[new_node_id]
        node_field = model.get_subgraph_factor_values(
            old_nodes, vars_skip=used_node_fields)
        new_field[new_node_id, 0:len(node_field)] = node_field
        used_node_fields.update(old_nodes)

    # Now, for every edge in the new graph, add an interaction factor
    # requiring that the same variable appearing in two supervariables always
    # has the same value.
    # We achieve this by using a Kronecker delta function.
    # As we are working with logarithms, we populate -inf for impossible
    # states, and 0 for possible states.
    new_interactions = np.zeros((len(new_edges), new_al_size, new_al_size))
    for edge_id in range(len(new_edges)):
        u, v = new_edges[edge_id]
        allowed = build_multi_delta(sv_size, model.al_size, jt_nodes[u],
                                    jt_nodes[v])
        new_interactions[edge_id, np.logical_not(allowed)] = -np.inf

    from inferlo.pairwise.pwf_model import PairWiseFiniteModel
    new_model = PairWiseFiniteModel.create(new_field, new_edges,
                                           new_interactions)
    return JunctionizedModel(new_model, jt_nodes, model.gr_size, model.al_size)
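
For reference, a minimal illustration of the (treewidth, junction tree) pair returned by the NetworkX heuristics used above; the tree's nodes are frozensets of original variables, which is why jt_nodes is converted with np.fromiter:

import networkx as nx
from networkx.algorithms.approximation.treewidth import treewidth_min_fill_in

graph = nx.cycle_graph(5)  # toy stand-in for model.get_graph()
tree_width, junc_tree = treewidth_min_fill_in(graph)
print(tree_width)               # heuristic upper bound on the treewidth
print(list(junc_tree.nodes()))  # bags: frozensets of the original nodes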