Example #1
import numpy as np

def compute_line_graph_details(A):
    # Label each undirected edge (r, c), r < c, of the binary adjacency matrix A.
    edge_labels = dict()
    count = 0
    for r in range(len(A)-1):
        for c in range(r+1, len(A)):
            if A[r][c] == 1.0:
                edge_labels[(r,c)] = count
                count += 1
    inv_labels = {v: k for k, v in edge_labels.items()}
    keys = list(edge_labels.keys())
    edges = int(np.sum(A) / 2)
    # Make the label lookup symmetric: (c, r) maps to the same label as (r, c).
    for k in keys:
        r,c = k
        edge_labels[(c,r)] = edge_labels[(r,c)]
    # Line-graph adjacency: two distinct edges are adjacent iff they share an endpoint.
    output = np.zeros((edges, edges))
    for i in range(edges - 1):
        a, b = inv_labels[i]
        for j in range(i + 1, edges):
            c, d = inv_labels[j]
            if a == c or a == d or b == c or b == d:
                if not ((a == d and b == c) or (a == c and b == d)):
                    output[i][j], output[j][i] = 1, 1
    return edge_labels, inv_labels, output
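
A quick sanity check is to compare against networkx's built-in line graph on a small cycle; this is a minimal sketch assuming numpy and networkx are available alongside the function above.

import numpy as np
import networkx as nx

# 4-cycle: edges (0,1), (0,3), (1,2), (2,3)
A = np.array([[0, 1, 0, 1],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [1, 0, 1, 0]], dtype=float)

edge_labels, inv_labels, L = compute_line_graph_details(A)
print(inv_labels)         # {0: (0, 1), 1: (0, 3), 2: (1, 2), 3: (2, 3)}
print(int(L.sum()) // 2)  # 4 line-graph edges
print(nx.line_graph(nx.from_numpy_array(A)).number_of_edges())  # also 4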
Example #2
import bct
import networkx as nx
import numpy as np

def core_periphery_analysis(network0):
    network0 /= np.sum(network0)
    # C[i] is 1 for core nodes and 0 for periphery nodes.
    C, Q_core = bct.core_periphery_dir(network0)
    per_nodes = [i for i in range(len(C)) if C[i] == 0]
    G = nx.from_numpy_array(network0)  # from_numpy_matrix was removed in networkx 3.x
    G_per = G.subgraph(per_nodes)
    per_network = np.array(nx.to_numpy_array(G_per))
    # Community structure within the periphery subgraph.
    M_per, Q_comm_per = bct.community_louvain(per_network)
    print("periphery modularity Q:", Q_comm_per)
    per_comm_assignments = {}
    for i in range(len(per_nodes)):
        per_comm_assignments[per_nodes[i]] = M_per[i]
    classifications = [
        [], [], []
    ]  # index 0: periphery-periphery edges, 1: periphery-core, 2: core-core
    for i in range(len(network0) - 1):
        for j in range(i + 1, len(network0)):
            if network0[i][j] > 0:
                classifications[C[i] + C[j]].append((i, j))
    return classifications, per_comm_assignments, G_per, M_per
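
A minimal usage sketch, assuming bctpy (`bct`) is installed and the detected periphery is non-empty; the network here is a random symmetric weight matrix used purely for illustration.

import numpy as np

rng = np.random.default_rng(0)
W = rng.random((20, 20))
W = np.triu(W, 1)
W = W + W.T  # symmetric, zero diagonal

classifications, per_comm, G_per, M_per = core_periphery_analysis(W)
print(len(classifications[0]), "P-P edges,",
      len(classifications[1]), "P-C edges,",
      len(classifications[2]), "C-C edges")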
Example #3
import numpy as np
from copy import deepcopy

# `seeded_rng` is assumed to be a module-level numpy RandomState,
# e.g. seeded_rng = np.random.RandomState(seed).
def get_hierarchical_modular(n, modules, edges, p, alpha, getCommInfo=False):
    pairings = {}
    assignments = np.zeros(n, dtype=int)
    cross_module_edges = []
    # Power-law node weights: node i is sampled with probability ~ (1 + i)**-alpha.
    weights = np.array([(1 + i)**-alpha for i in range(n)])
    dists = []
    module_dist = np.zeros(modules)
    for i in range(modules):
        pairings[i] = []
    A = np.zeros((n, n))
    # Assign each node to a uniformly random module.
    for i in range(n):
        randomModule = seeded_rng.randint(0, modules)
        pairings[randomModule].append(i)
        assignments[i] = randomModule
    # Per-module sampling distribution over member nodes, plus a distribution
    # over modules proportional to total member weight.
    for j in range(modules):
        dist = np.array([weights[i] for i in pairings[j]])
        module_dist[j] = np.sum(dist)
        dist /= np.sum(dist)
        dists.append(dist)
    module_dist /= np.sum(module_dist)

    # (Disabled alternative: assign nodes to equal-sized modules and force one
    # connecting edge between each pair of consecutive modules.)
    # Sample an in-module edge: choose a module by weight, then two distinct
    # member nodes; resample until an unused node pair is found.
    def add_modular_edge():
        randomComm = seeded_rng.choice(modules, p=module_dist)
        while len(pairings[randomComm]) < 2:
            randomComm = seeded_rng.choice(modules, p=module_dist)
        selection = seeded_rng.choice(pairings[randomComm],
                                      2,
                                      replace=False,
                                      p=dists[randomComm])
        while A[selection[0], selection[1]] != 0:
            randomComm = seeded_rng.choice(modules, p=module_dist)
            while len(pairings[randomComm]) < 2:
                randomComm = seeded_rng.choice(modules, p=module_dist)
            selection = seeded_rng.choice(pairings[randomComm],
                                          2,
                                          replace=False,
                                          p=dists[randomComm])
        A[selection[0], selection[1]] += 1
        A[selection[1], selection[0]] += 1

    # Sample a cross-module edge between two distinct modules; resample until
    # an unused node pair is found.
    def add_between_edge():
        randomComm, randomComm2, e0, e1 = 0, 0, 0, 0
        while randomComm == randomComm2 or A[e0, e1] != 0:
            randomComm, randomComm2 = seeded_rng.choice(
                modules, p=module_dist), seeded_rng.choice(modules,
                                                           p=module_dist)
            e0 = seeded_rng.choice(pairings[randomComm],
                                   1,
                                   replace=False,
                                   p=dists[randomComm])
            e1 = seeded_rng.choice(pairings[randomComm2],
                                   1,
                                   replace=False,
                                   p=dists[randomComm2])
        A[e0, e1] += 1
        A[e1, e0] += 1
        cross_module_edges.append((e0, e1))

    # Split the edge budget: a fraction p of edges within modules, the rest between.
    inModuleEdges = int(round(edges * p))
    betweenEdges = edges - inModuleEdges
    for i in range(inModuleEdges):
        add_modular_edge()
    for i in range(betweenEdges):
        add_between_edge()

    # Return a copy of A with every cross-module edge re-weighted to cc_weight.
    def parameterized(cc_weight):
        B = deepcopy(A)
        for e in cross_module_edges:
            B[e[0], e[1]], B[e[1], e[0]] = cc_weight, cc_weight
        return B

    if getCommInfo:
        return A, parameterized, pairings, assignments
    else:
        return A, parameterized
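
A minimal usage sketch, assuming `seeded_rng` has been defined in the same module as the generator:

import numpy as np

seeded_rng = np.random.RandomState(42)

A, parameterized, pairings, assignments = get_hierarchical_modular(
    n=50, modules=5, edges=200, p=0.8, alpha=0.5, getCommInfo=True)
print(int(np.sum(A)) // 2)  # 200 edges placed

# Re-weight cross-module edges, e.g. to weaken between-module transitions.
B = parameterized(0.5)
print(int(np.sum(A != B)))  # twice the number of cross-module edges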
Example #4
import numpy as np
from copy import deepcopy

def get_laplacian(A):
    # Combinatorial graph Laplacian L = D - A, where D is the diagonal
    # matrix of (weighted) node degrees.
    result = -deepcopy(A)
    for i in range(len(result)):
        result[i][i] += np.sum(A[i])
    return result
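
A quick check on a small weighted graph: the result should match the np.diag formulation, and every row of a Laplacian sums to zero.

import numpy as np

A = np.array([[0., 2., 1.],
              [2., 0., 0.],
              [1., 0., 0.]])

L = get_laplacian(A)
print(np.allclose(L, np.diag(A.sum(axis=1)) - A))  # True
print(np.allclose(L.sum(axis=1), 0))               # True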
Example #5
import numpy as np

def get_stationary3(A):
    # Stationary distribution of a random walk on an undirected weighted graph:
    # pi_i is proportional to the total weight incident on node i. Only applies
    # to unnormalized weighted (symmetric) inputs.
    output = np.sum(A, axis=0) / np.sum(A)
    return output
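
To verify, build the row-stochastic transition matrix P and check that the returned vector is a fixed point; a minimal sketch on a random symmetric matrix:

import numpy as np

rng = np.random.default_rng(1)
A = rng.random((5, 5))
A = np.triu(A, 1)
A = A + A.T  # symmetric, zero diagonal

pi = get_stationary3(A)
P = A / A.sum(axis=1, keepdims=True)  # random-walk transition matrix
print(np.allclose(pi @ P, pi))        # True: pi is stationary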
Example #6
import networkx as nx
import numpy as np

# Relies on two project-local helpers: compute_triangle_participation, which
# returns the edge list, a per-edge triangle count, and a total triangle count;
# and get_edge_values, which maps each edge to its optimal weight scaling
# between the original and optimized networks.
def get_diff_stats(network0, network_opt):
    edge_vals = []
    tri_participation = []
    betweenness = []
    edge_degrees = []
    network0 /= np.sum(network0)
    network_opt /= np.sum(network_opt)
    edges, tri_count, total_triangles = compute_triangle_participation(
        network0)
    edge_factors = get_edge_values(network0, network_opt)
    betweenness_dict = nx.centrality.edge_betweenness_centrality(
        nx.from_numpy_array(network0), weight='weight')
    for i in range(len(edges)):
        # Mean strength of the edge's two endpoints.
        edge_degrees.append(
            .5 *
            (np.sum(network0[edges[i][0]]) + np.sum(network0[edges[i][1]])))
        edge_vals.append(edge_factors[(edges[i][0], edges[i][1])])
        tri_participation.append(tri_count[(edges[i][0], edges[i][1])])
        betweenness.append(betweenness_dict[(edges[i][0], edges[i][1])])

    # (Disabled exploratory analysis: core-periphery classification of edges,
    # rendering of the periphery subnetwork, histograms of P-P within/cross-
    # cluster, P-C, and C-C weight scalings, and scatter plots of optimal edge
    # scaling against edge clustering coefficient and edge betweenness
    # centrality, with optional linear regression fits.)

    data = list(zip(tri_participation, edge_vals))
    return data
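
The helpers themselves are not shown in this example. For reference, here is a hypothetical reconstruction of compute_triangle_participation consistent with how its return values are used above; the name and return convention come from the call site, but the body is an assumption (counting each edge's common neighbors as its triangle participation):

import numpy as np

def compute_triangle_participation(A):
    # Assumes a symmetric weight matrix with a zero diagonal.
    n = len(A)
    edges = []
    tri_count = {}
    total = 0
    for i in range(n - 1):
        for j in range(i + 1, n):
            if A[i][j] > 0:
                common = int(np.sum((A[i] > 0) & (A[j] > 0)))  # shared neighbors
                edges.append((i, j))
                tri_count[(i, j)] = common
                total += common
    return edges, tri_count, total // 3  # each triangle is counted on 3 edges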
Example #7
import numpy as np

def learn_to_undirected(A, beta, normalizer):
    # Push A through the learning model (project-local `learn`), undo its row
    # normalization (project-local `unnormalize`), then rescale the total
    # weight to `normalizer`.
    out = learn(A, beta)
    out = unnormalize(out)
    return normalizer * out / np.sum(out)
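
`learn` is defined elsewhere in the source. A minimal sketch consistent with the (1 - beta) * A @ inv(I - beta * A) form used in models of human network learning, applied to the row-normalized transition matrix; this reconstruction is an assumption, not the project's actual implementation:

import numpy as np

def learn(A, beta):
    # Learned transition structure: a memory-discounted sum over walks,
    # (1 - beta) * sum_t beta^t P^(t+1) = (1 - beta) * P @ inv(I - beta * P).
    P = A / A.sum(axis=1, keepdims=True)
    return (1 - beta) * P @ np.linalg.inv(np.eye(len(P)) - beta * P)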
Example #8
import numpy as np

def uniformity_cost(P_0, A, beta):
    # Penalizes spread among the learned values at positions where P_0 has
    # support: the sum of squared pairwise differences between those entries.
    learned = learn(A, beta)
    terms = learned[P_0 > 0].flatten()
    diffs = np.subtract.outer(terms, terms)
    return np.sum(diffs * diffs)
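
With the hypothetical `learn` sketched above, the cost is zero exactly when all supported entries learn the same value, e.g. on a complete graph, and grows as the learned weights spread out:

import numpy as np

A = np.ones((4, 4)) - np.eye(4)  # complete graph K4: fully symmetric
P_0 = A.copy()
print(np.isclose(uniformity_cost(P_0, A, beta=0.3), 0.0))  # True

A2 = A.copy()
A2[0, 1] = A2[1, 0] = 2.0        # break the symmetry
print(uniformity_cost(P_0, A2, beta=0.3) > 0)              # True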
Example #9
    # (Disabled debug code: rescale A and the learned network to match A_0's
    # total weight and render both with gr.render_network.)

    # Script fragment: `textbooks` (a data directory path) and `betas` are
    # defined earlier in the source file.
    indices = np.load(textbooks + "all_index.npy", allow_pickle=True)
    networks_orig = np.load(textbooks + "cooc_mats.npy", allow_pickle=True)
    # Zero each co-occurrence matrix's diagonal, then normalize its total weight to 1.
    for i in range(len(networks_orig)):
        for j in range(len(networks_orig[i])):
            networks_orig[i][j][j] = 0
    for i in range(len(networks_orig)):
        networks_orig[i] /= np.sum(networks_orig[i])

    # Load the optimized networks and KL-divergence scores for each of the ten
    # textbooks, then normalize every optimized network.
    networks = []
    scores = []
    for i in range(10):
        networks.append(
            np.load(textbooks + str(i) + "_opt_networks.npy",
                    allow_pickle=True))
        scores.append(
            np.load(textbooks + str(i) + "_KL.npy", allow_pickle=True))
    for i in range(len(networks)):
        for j in range(len(betas)):
            networks[i][j] /= np.sum(networks[i][j])

    markers = ["o", "+", "*", "D", "x", "d", "^", "s", "v", ">"]
Example #10
File: betaModel.py  Project: aletty/skynet
        # Method fragment (enclosing `def` not shown): accumulate the likelihood
        # of each poll under a Beta distribution; `Beta` is presumably
        # scipy.stats.beta. `xrange` replaced with `range` for Python 3.
        for i in range(self.num_polls):
            alpha = self.b[i]*z
            res *= Beta.pdf(x[i,0], alpha[0], alpha[1])
        return res

if __name__ == "__main__":
    import matplotlib.pyplot as plt

    # Generate a test case
    # Define problem dimensions
    T = 100 # number of time steps
    M = 3   # number of polls

    # Randomly sample model parameters from the priors
    I = 2*np.ones(2)
    B = np.random.randn(M,2)
    B = np.exp(B)
    W = np.concatenate((.5*np.random.randn(1) + 7, np.random.randn(M)))
    W = np.exp(W)

    model = BetaModel(i=I,b=B,w=W)

    ## Generate Test Data
    Z, X = model.generate(T)

    ## Plot Test Data
    plt.plot(range(T),Z)
    plt.show()

    print(np.sum(model.pState(X,1)))