Example #1
def test_connected_caveman_graph():
    G = nx.connected_caveman_graph(4, 3)
    assert_equal(len(G), 12)

    G = nx.connected_caveman_graph(1, 5)
    K5 = nx.complete_graph(5)
    K5.remove_edge(3, 4)
    assert_true(nx.is_isomorphic(G, K5))
Example #4
def test_connected_caveman_graph():
    G = nx.connected_caveman_graph(4, 3)
    assert len(G) == 12

    G = nx.connected_caveman_graph(1, 5)
    K5 = nx.complete_graph(5)
    K5.remove_edge(3, 4)
    assert nx.is_isomorphic(G, K5)

    # need at least 2 nodes in each clique
    pytest.raises(nx.NetworkXError, nx.connected_caveman_graph, 4, 1)
Example #5
def make_random_topology(graph_size):
    # Assumes: import networkx as nx; from random import randint

    number_of_nodes = graph_size

    # A random Erdős-Rényi topology could be used instead:
    # Graph = nx.gnp_random_graph(number_of_nodes, 0.04, 4)
    # connected_caveman_graph(l, k) builds l cliques of k nodes each,
    # so this topology has 6 * graph_size fog nodes in total.
    Graph = nx.connected_caveman_graph(number_of_nodes, 6)

    # Default attributes for every fog node
    nx.set_node_attributes(Graph, 3, 'storage_capacity')
    nx.set_node_attributes(Graph, 0, 'storage_usage')
    nx.set_node_attributes(Graph, None, 'cluster_ID')
    nx.set_node_attributes(Graph, False, 'placed')
    nx.set_node_attributes(Graph, False, 'r_block')

    # Randomize each node's storage capacity between 1 and 3
    for node in Graph.nodes(data=True):
        graph_node = node[0]
        random_int = randint(1, 3)
        Graph.add_node(graph_node, storage_capacity=random_int)

    return Graph
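
A quick usage sketch (illustrative values, not from the source): since connected_caveman_graph(l, k) makes l cliques of k nodes, make_random_topology(4) yields 24 fog nodes.

topology = make_random_topology(4)
print(topology.number_of_nodes())             # 24
print(topology.nodes[0]['storage_capacity'])  # 1, 2, or 3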
Example #6
def caveman_small_world(p, community_num, community_size, vocab_size, num_words, attr_noise):
    graph = nx.connected_caveman_graph(community_num, community_size)
    attributes = []
    count = 0
    count2 = 0
    p_remove = 0.0  # edge-removal probability (disabled by default)

    # Materialize the edge list before mutating the graph: removing edges
    # while iterating over an edge view raises a RuntimeError in NetworkX 2+.
    for (u, v) in list(graph.edges()):
        if rd.random() < p_remove:
            graph.remove_edge(u, v)
            count2 += 1

    for (u, v) in list(graph.edges()):
        if rd.random() < p:  # rewire the edge
            x = rd.choice(list(graph.nodes))
            if graph.has_edge(u, x):
                continue
            graph.remove_edge(u, v)
            graph.add_edge(u, x)
            count += 1
    print('rewire:', count)
    print('removed:', count2)

    for u in list(graph.nodes):
        label = u // community_size
        attributes.append(get_attributes(label, community_num, vocab_size, num_words, attr_noise))
    attributes = np.array(attributes)
    attributes = sp.coo_matrix(attributes)
    return graph, attributes
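
For orientation, a hedged usage sketch; rd, np, and sp are the module's aliases for random, numpy, and scipy.sparse, and get_attributes is project-specific code not shown here.

import random as rd
import numpy as np
import scipy.sparse as sp

# 10 communities of 20 nodes with 1% rewiring; attribute arguments are placeholders.
graph, attributes = caveman_small_world(0.01, 10, 20, vocab_size=100, num_words=5, attr_noise=0.1)
print(graph.number_of_nodes())  # 200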
Example #7
def test_encode_decode_adj():
    ######## code test ###########
    # each assignment below overwrites the previous graph; only the last one,
    # a connected caveman graph of 2 cliques of 3 nodes, is actually tested
    G = nx.ladder_graph(5)
    G = nx.grid_2d_graph(20, 20)
    G = nx.ladder_graph(200)
    G = nx.karate_club_graph()
    G = nx.connected_caveman_graph(2, 3)
    print(G.number_of_nodes())

    adj = np.asarray(nx.to_numpy_matrix(G))
    G = nx.from_numpy_matrix(adj)
    #
    start_idx = np.random.randint(adj.shape[0])
    x_idx = np.array(bfs_seq(G, start_idx))
    adj = adj[np.ix_(x_idx, x_idx)]

    print("adj\n", adj)
    adj_output = encode_adj(adj, max_prev_node=5)
    print("adj_output\n", adj_output)
    adj_recover = decode_adj(adj_output, max_prev_node=5)
    print("adj_recover\n", adj_recover)
    print("error\n", np.amin(adj_recover - adj), np.amax(adj_recover - adj))

    adj_output = encode_adj_flexible(adj)
    for i in range(len(adj_output)):
        print(len(adj_output[i]))
    adj_recover = decode_adj_flexible(adj_output)
    print(adj_recover)
    print(np.amin(adj_recover - adj), np.amax(adj_recover - adj))
Example #8
def setup(
        network='random',  # ['complete', 'random', 'Watts-Strogatz', 'connected caveman', 'Barabasi-Albert'] 
        n_agents=40,  # number of agents 
        deg=4,  # number of connections for each agent
        n_beliefs=25,  # number of knowledge graph elements each agent has
        n_concepts=30):
    """
    Generates the initial conditions of the simulation

    Returns
    -------
    g: networkx graph
        primary graph represents the semantic network,
        each individual (node) has an attribute 'M' representing their semantic network

    all_beliefs: an array of tuples representing every edge that appears in the
        semantic network of at least one individual. Edges that could have been
        part of a semantic network (they exist in the complete graph on
        `n_concepts` concepts) but were never selected are not included.
    """
    np.random.seed()

    connected = False
    while not connected:
        if network == 'complete':
            g = nx.complete_graph(n=n_agents)
        elif network == 'random':
            g = nx.gnm_random_graph(n=n_agents, m=int(n_agents * deg / 2))
        elif network == 'random regular':
            g = nx.random_regular_graph(d=deg, n=n_agents)
        elif network == 'Watts-Strogatz':
            g = nx.connected_watts_strogatz_graph(n=n_agents, k=deg,
                                                  p=.02)  # valid for even deg
        elif network == 'connected caveman':
            g = nx.connected_caveman_graph(l=int(n_agents / deg), k=deg + 1)
        elif network == 'Barabasi-Albert':
            g = nx.barabasi_albert_graph(n=n_agents, m=int(
                deg /
                2))  # approximates deg for large n_agents, when deg is even.
        else:
            raise ValueError('%s is not a valid network name' % network)

        connected = nx.is_connected(g)

    # give starting information
    nx.set_node_attributes(
        g,
        name='M',  # M for mind
        values={i: nx.gnm_random_graph(n_concepts, n_beliefs)
                for i in g})

    beliefs = np.unique([
        tuple(sorted(belief)) for agent in g
        for belief in g.nodes[agent]['M'].edges()
    ],
                        axis=0)
    return g, beliefs
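
A hedged usage sketch; the arguments are just the function's own defaults. Note that the 'connected caveman' branch builds int(n_agents / deg) cliques of deg + 1 nodes, so the population is 50 here rather than exactly 40.

g, all_beliefs = setup(network='connected caveman', n_agents=40, deg=4,
                       n_beliefs=25, n_concepts=30)
print(g.number_of_nodes(), len(all_beliefs))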
Example #9
def nxCommunity():
    import networkx as nx
    import matplotlib.pyplot as plt
    # k_clique_communities moved to networkx.algorithms.community in NetworkX 2+
    from networkx.algorithms.community import k_clique_communities
    G = nx.connected_caveman_graph(6, 4)
    # first compute the best partition
    c = list(k_clique_communities(G, 5))
    print(c)
    nx.draw(G)
    plt.show()
Example #10
def caveman_network(num_cliques, size):

    #Generate Network:
    caveman_network = nx.connected_caveman_graph(num_cliques, size)

    #Give the nodes an initial position:
    node_position = position_nodes(caveman_network)

    #If the network is weighted, add edge weights:
    #if weighted:
    #    weight_edges(erdo_network)

    return caveman_network, node_position
Example #11
def get_communities(remove_feature):
    community_size = 20

    # Create 20 cliques (communities) of size 20,
    # then rewire a single edge in each clique to a node in an adjacent clique
    graph = nx.connected_caveman_graph(20, community_size)

    # Randomly rewire 1% edges
    node_list = list(graph.nodes)
    # iterate over a copy of the edge list, since the graph is mutated inside the loop
    for (u, v) in list(graph.edges()):
        if random.random() < 0.01:
            x = random.choice(node_list)
            if graph.has_edge(u, x):
                continue
            graph.remove_edge(u, v)
            graph.add_edge(u, x)

    # remove self-loops
    graph.remove_edges_from(nx.selfloop_edges(graph))
    edge_index = np.array(list(graph.edges))
    # Add (i, j) for an edge (j, i)
    edge_index = np.concatenate((edge_index, edge_index[:, ::-1]), axis=0)
    edge_index = torch.from_numpy(edge_index).long().permute(1, 0)

    n = graph.number_of_nodes()
    label = np.zeros((n, n), dtype=int)
    for u in node_list:
        # the node IDs are simply consecutive integers from 0
        for v in range(u):
            if u // community_size == v // community_size:
                label[u, v] = 1

    if remove_feature:
        feature = torch.ones((n, 1))
    else:
        rand_order = np.random.permutation(n)
        feature = np.identity(n)[:, rand_order]

    data = {
        'edge_index': edge_index,
        'feature': feature,
        'positive_edges': np.stack(np.nonzero(label)),
        'num_nodes': feature.shape[0]
    }

    return data
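
A hedged usage sketch (random, numpy, and torch are assumed imported by the surrounding module):

data = get_communities(remove_feature=True)
print(data['num_nodes'])         # 400 (20 communities of 20 nodes)
print(data['edge_index'].shape)  # torch.Size([2, num_directed_edges])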
Example #12
def caveman_special(c, k, p_path=0.1, p_edge=0.3):
    p = p_path
    path_count = max(int(np.ceil(p * k)), 1)
    G = nx.connected_caveman_graph(c, k)  #nx.caveman_graph(c, k)
    # remove 50% edges
    #p = 1-p_edge
    #for (u, v) in list(G.edges()):
    #    if np.random.rand() < p and ((u < k and v < k) or (u >= k and v >= k)):
    #        G.remove_edge(u, v)

    # add path_count links
    for i in range(path_count):
        u = np.random.randint(0, k)
        v = np.random.randint(k, k * 2)
        G.add_edge(u, v)
    # connected_component_subgraphs was removed in NetworkX 2.4; on newer versions use:
    # G = G.subgraph(max(nx.connected_components(G), key=len)).copy()
    G = max(nx.connected_component_subgraphs(G), key=len)

    return G
Example #13
    def __init__(
            self,
            k=99,  # "We set k=99, approximating the cognitive limit to the number of people
            #         with whom one can maintain a stable social relationship (Dunbar 1992)"
            n=1000,
            phi=0.1,  # "We set phi=10%, which Watts (1999) found was sufficient to produce
            #            the characteristic small-world condition" (p1491)
    ):
        """
        Parameters
        ----------
        k: "each agent has k immediate neighbors in the same cave" (p1490)
        n: "n is the population size" (p1490)
        phi: "A chosen percentage phi of the edges (or network ties) are randomly re-wired using
              the degree-preserving procedure introduced by Maslov and Sneppen (2002)" (p1490)
        """

        n_caves = n // (k + 1)  # "the network consists of n/(k+1) caves" (p1490); integer division so the clique count is an int

        social_network = nx.connected_caveman_graph(n_caves, k + 1)
        social_network = utils.maslov_sneppen(social_network, phi)
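
utils.maslov_sneppen is project code that is not shown here. As rough orientation, a minimal sketch of Maslov-Sneppen degree-preserving rewiring, assuming phi is the fraction of edges to rewire (a hypothetical helper, not the source's implementation):

import random
import networkx as nx

def maslov_sneppen_sketch(G, phi):
    # Rewire roughly phi * |E| edge pairs while preserving every node's degree.
    G = G.copy()
    target_swaps = int(phi * G.number_of_edges())
    swaps, attempts = 0, 0
    while swaps < target_swaps and attempts < 100 * max(target_swaps, 1):
        attempts += 1
        (a, b), (c, d) = random.sample(list(G.edges()), 2)
        # Swapping (a, b), (c, d) -> (a, d), (c, b) keeps all degrees fixed,
        # provided it creates no self-loop or parallel edge.
        if len({a, b, c, d}) == 4 and not G.has_edge(a, d) and not G.has_edge(c, b):
            G.remove_edges_from([(a, b), (c, d)])
            G.add_edges_from([(a, d), (c, b)])
            swaps += 1
    return G

NetworkX also provides the same degree-preserving swap natively as nx.double_edge_swap(G, nswap=..., max_tries=...).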
Example #14
def get_connected_caveman_graph(left, right, **kwargs):
    right = int(right)
    # kwargs are ignored
    if left != right:
        # Assuming left is number of cliques, right is size of each clique
        ncliques = left
        clique_size = right
    else:
        ncliques = 2
        clique_size = left
    print('Generating connected graph with {} cliques of size {} each'.format(
        ncliques, clique_size))
    G = nx.connected_caveman_graph(ncliques, clique_size)
    if ncliques % 2 == 0:
        size_of_part = int(G.number_of_nodes() / 2)
        solution_bitstring = [-1] * size_of_part + [1] * size_of_part
    else:
        # odd number of cliques: split the nodes as evenly as possible
        solution_bitstring = [-1] * int(
            (clique_size * (ncliques + 1) / 2)) + [1] * int(
                (clique_size * (ncliques - 1) / 2))
    return G, solution_bitstring
Example #15
def community_graphs():
    print("Community graphs for social networks")
    print("Caveman graph")
    G = nx.caveman_graph(2, 13)
    draw_graph(G)
    print(" Connected Caveman graph")
    G = nx.connected_caveman_graph(2, 3)
    draw_graph(G)
    print("Relaxed caveman")
    G = nx.relaxed_caveman_graph(2, 5, 0.2)
    draw_graph(G)
    print("Random partition graph")
    G = nx.random_partition_graph([10, 10, 10], .25, .01)
    draw_graph(G)
    print(len(G))
    partition = G.graph['partition']
    print(len(partition))
    print("Planted partition graph")
    G = nx.planted_partition_graph(4, 3, 0.5, 0.1, seed=42)
    draw_graph(G)
    print("Gaussian random partition graph")
    G = nx.gaussian_random_partition_graph(40, 10, 10, .25, .1)
    print(len(G))
    draw_graph(G)
Example #16
def load_graphs(dataset_str):
    node_labels = [None]
    edge_labels = [None]
    idx_train = [None]
    idx_val = [None]
    idx_test = [None]

    if dataset_str == 'grid':
        graphs = []
        features = []
        for _ in range(1):
            graph = nx.grid_2d_graph(20, 20)
            graph = nx.convert_node_labels_to_integers(graph)

            feature = np.identity(graph.number_of_nodes())
            graphs.append(graph)
            features.append(feature)

    elif dataset_str == 'communities':
        graphs = []
        features = []
        node_labels = []
        edge_labels = []
        for i in range(1):
            community_size = 20
            community_num = 20
            p=0.01

            graph = nx.connected_caveman_graph(community_num, community_size)

            count = 0

            # iterate over a copy of the edge list, since edges are removed and added inside the loop
            for (u, v) in list(graph.edges()):
                if random.random() < p:  # rewire the edge
                    x = random.choice(list(graph.nodes))
                    if graph.has_edge(u, x):
                        continue
                    graph.remove_edge(u, v)
                    graph.add_edge(u, x)
                    count += 1
            print('rewire:', count)

            n = graph.number_of_nodes()
            label = np.zeros((n, n), dtype=int)
            for u in list(graph.nodes):
                for v in list(graph.nodes):
                    if u // community_size == v // community_size and u > v:
                        label[u, v] = 1
            rand_order = np.random.permutation(graph.number_of_nodes())
            feature = np.identity(graph.number_of_nodes())[:, rand_order]
            graphs.append(graph)
            features.append(feature)
            edge_labels.append(label)

    elif dataset_str == 'protein':

        graphs_all, features_all, labels_all = Graph_load_batch(name='PROTEINS_full')
        features_all = (features_all - np.mean(features_all, axis=-1, keepdims=True)) / \
                       np.std(features_all, axis=-1, keepdims=True)
        graphs = []
        features = []
        edge_labels = []
        for graph in graphs_all:
            n = graph.number_of_nodes()
            label = np.zeros((n, n), dtype=int)
            for i, u in enumerate(graph.nodes()):
                for j, v in enumerate(graph.nodes()):
                    if labels_all[u - 1] == labels_all[v - 1] and u > v:
                        label[i, j] = 1
            if label.sum() > n*n/4:
                continue

            graphs.append(graph)
            edge_labels.append(label)

            idx = [node - 1 for node in graph.nodes()]
            feature = features_all[idx, :]
            features.append(feature)

        print('final num', len(graphs))


    elif dataset_str == 'email':

        with open('data/email.txt', 'rb') as f:
            graph = nx.read_edgelist(f)

        label_all = np.loadtxt('data/email_labels.txt')
        graph_label_all = label_all.copy()
        graph_label_all[:, 1] = graph_label_all[:, 1] // 6


        for edge in list(graph.edges()):
            if graph_label_all[int(edge[0])][1] != graph_label_all[int(edge[1])][1]:
                graph.remove_edge(edge[0], edge[1])

        comps = [comp for comp in nx.connected_components(graph) if len(comp)>10]
        graphs = [graph.subgraph(comp) for comp in comps]

        edge_labels = []
        features = []

        for g in graphs:
            n = g.number_of_nodes()
            feature = np.ones((n, 1))
            features.append(feature)

            label = np.zeros((n, n), dtype=int)
            for i, u in enumerate(g.nodes()):
                for j, v in enumerate(g.nodes()):
                    if label_all[int(u)][1] == label_all[int(v)][1] and i > j:
                        label[i, j] = 1
            edge_labels.append(label)


    elif dataset_str == 'ppi':
        dataset_dir = 'data/ppi'
        print("Loading data...")
        G = json_graph.node_link_graph(json.load(open(dataset_dir + "/ppi-G.json")))
        edge_labels_internal = json.load(open(dataset_dir + "/ppi-class_map.json"))
        edge_labels_internal = {int(i): l for i, l in edge_labels_internal.items()}

        train_ids = [n for n in G.nodes()]
        train_labels = np.array([edge_labels_internal[i] for i in train_ids])
        if train_labels.ndim == 1:
            train_labels = np.expand_dims(train_labels, 1)

        print("Using only features..")
        feats = np.load(dataset_dir + "/ppi-feats.npy")
        ## Logistic gets thrown off by big counts, so log transform num comments and score
        feats[:, 0] = np.log(feats[:, 0] + 1.0)
        feats[:, 1] = np.log(feats[:, 1] - min(np.min(feats[:, 1]), -1))
        feat_id_map = json.load(open(dataset_dir + "/ppi-id_map.json"))
        feat_id_map = {int(id): val for id, val in feat_id_map.items()}
        train_feats = feats[[feat_id_map[id] for id in train_ids]]

        node_dict = {}
        for id,node in enumerate(G.nodes()):
            node_dict[node] = id

        comps = [comp for comp in nx.connected_components(G) if len(comp)>10]
        graphs = [G.subgraph(comp) for comp in comps]

        id_all = []
        for comp in comps:
            id_temp = []
            for node in comp:
                id = node_dict[node]
                id_temp.append(id)
            id_all.append(np.array(id_temp))

        features = [train_feats[id_temp,:]+0.1 for id_temp in id_all]

    elif dataset_str == 'brightkite':
        dataset_dir = 'data/brightkite'
        print("Loading data...")
        G = nx.read_edgelist(gzip.open(dataset_dir + "/loc-brightkite_edges.txt.gz","rb"))
        node_dict = {}
        for id,node in enumerate(G.nodes()):
            node_dict[node] = id
        feature=[]
        feature_id=[]
        with gzip.open(dataset_dir + "/loc-brightkite_totalCheckins.txt.gz","rb") as f:
            for line in f.readlines():
                inst = line.decode("utf-8").strip().split('\t')
                if len(inst) < 5:
                    continue
                feature_id.append(int(inst[0]))
                feature.append([time.mktime(time.strptime(inst[1], '%Y-%m-%dT%H:%M:%SZ')),
                                float(inst[2]), float(inst[3])])
        feature = np.array(feature)
        feature[:, 0] = np.log(feature[:, 0] + 1.0)
        feature[:, 1] = np.log(feature[:, 1] - min(np.min(feature[:, 1]), -1) + 1)
        feature[:, 2] = np.log(feature[:, 2] - min(np.min(feature[:, 2]), -1) + 1)

        feature_map = {}
        for i in range(len(feature_id)):
            if feature_id[i] not in feature_map:
                feature_map[feature_id[i]] = []
            feature_map[feature_id[i]].append(feature[i])

        # average all check-ins per node into a single feature vector
        feature_actual_map = {}
        for k in feature_map:
            feature_actual_map[k] = np.mean(feature_map[k], axis=0)

        comps = [comp for comp in nx.connected_components(G) if len(comp) > 10]
        graphs = [G.subgraph(comp) for comp in comps]
        id_all = []
        features = []
        count = 0
        for comp in comps:
            id_temp = []
            feat_temp = []
            for node in comp:
                id = node_dict[node]
                id_temp.append(id)
                if id not in feature_actual_map:
                    feat_temp.append([0.0, 0.0, 0.0])
                    count = count + 1
                else:
                    feat_temp.append(feature_actual_map[id])
            id_all.append(np.array(id_temp))
            features.append(np.array(feat_temp))
        print("nodes without check-in features:", count)

    else:
        raise NotImplementedError

    return graphs, features, edge_labels, node_labels, idx_train, idx_val, idx_test
Example #17
def connected_caveman(no_cliques, size_cliques):
    """Creates a network using the Connected Caveman model; g is a module-level simulation object."""
    g.network = nx.connected_caveman_graph(no_cliques, size_cliques)
    g.empty_nodes = g.network.nodes()
    g.nodes_with_agents = []
    g.neighbors_with_agents = {}
Example #18
def write_json_graph(G, filename):  # def line reconstructed; the calls below assume this signature
    with open(filename, 'w') as outfile:
        graph = nx.node_link_data(G)
        for n in graph['nodes']:
            n['id'] = str(n['id'])
        for e in graph['links']:
            e['source'] = str(e['source'])
            e['target'] = str(e['target'])
        json.dump(graph, outfile)


# write_json_graph(nx.connected_caveman_graph(3, 5), 'connected_caveman_graph(3,5).json')
# write_json_graph(nx.connected_caveman_graph(4, 2), 'connected_caveman_graph(4,2).json')
# write_json_graph(nx.connected_caveman_graph(4, 3), 'connected_caveman_graph(4,3).json')
# write_json_graph(nx.connected_caveman_graph(4, 4), 'connected_caveman_graph(4,4).json')
# write_json_graph(nx.connected_caveman_graph(4, 5), 'connected_caveman_graph(4,5).json')
write_json_graph(nx.connected_caveman_graph(5, 4),
                 'connected_caveman_graph(5,4).json')
write_json_graph(nx.connected_caveman_graph(7, 4),
                 'connected_caveman_graph(7,4).json')
write_json_graph(nx.connected_caveman_graph(10, 10),
                 'connected_caveman_graph(10,10).json')
write_json_graph(nx.connected_caveman_graph(10, 20),
                 'connected_caveman_graph(10,20).json')
write_json_graph(nx.connected_caveman_graph(15, 30),
                 'connected_caveman_graph(15,30).json')

# write_json_graph(nx.balanced_tree(3, 3), 'balanced_tree(3,3).json')
write_json_graph(nx.balanced_tree(3, 4), 'balanced_tree(3,4).json')
# write_json_graph(nx.balanced_tree(3, 5), 'balanced_tree(3,5).json')
write_json_graph(nx.balanced_tree(3, 6), 'balanced_tree(3,6).json')
# write_json_graph(nx.balanced_tree(3, 7), 'balanced_tree(3,7).json')
Example #19
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sbn
from numpy.linalg import eigvals
from scipy import stats

nsamples = 50
l = 24
k = 10
n = l * k

vs = np.zeros(n)
for i in range(nsamples):
    G = nx.connected_caveman_graph(l, k)
    A = nx.to_numpy_matrix(G)  # use nx.to_numpy_array in NetworkX >= 3
    v = np.real(eigvals(A))
    vs = vs + v

vs = vs / (nsamples * np.sqrt(n))

kde = stats.gaussian_kde(vs, bw_method='silverman')
x = np.linspace(vs.min(), vs.max(), 100)
rho = kde(x)

plt.plot(x, rho)
plt.ylabel('Spectral density')
plt.xlabel('Eigenvalues')
plt.title('Caveman Model spectral density')
plt.show()
Example #20

if __name__ == "__main__":
	ResultN =[]
	ResultS =[]
	C=[None]*400
	for i in range(0,100):
		C[i]=1
	for i in range(100,200):
		C[i]=20
	for i in range(200,300):
		C[i]=50
	for i in range(300,400):
		C[i]=80

	G = nx.connected_caveman_graph(4,100)
	G.remove_edge(0,399)
	G=addEdges(G,1,99,101,199,3)
	G=addEdges(G,101,199,201,299,3)
	G=addEdges(G,201,299,301,399,3)
	print nx.diameter(G)
	#True Mu is 37.75
	for k in range(1,31):
		print 'SRW run to'
		print k
		S=[]
		tMu=37.75
		for m in range(0,10):
			S.append(runSRW(G,500*k,C))
		RE= float(np.sum(abs(np.subtract(S,tMu))))/10
		ResultS.append(RE)
Example #21
 def new_network():
     return Network(nx.connected_caveman_graph(10, 10), community_size=10)
Example #22
    val_map_color = {G1S: 1.0, G1T: 0}
    val_map_size = {G1S: 200.0, G1T: 200}
    map_color = [val_map_color.get(node, 0.5) for node in res_G.nodes()]
    map_size = [val_map_size.get(node, 70) for node in res_G.nodes()]
    edges, weights = zip(*nx.get_edge_attributes(res_G, 'capacity').items())
    labels = nx.get_edge_attributes(res_G, 'capacity')
    plt.figure(8)
    res_pos = nx.spring_layout(res_G)
    nx.draw(res_G, res_pos, node_size=map_size, weight='capacity', edge_color=weights, width=2.0, cmap=plt.get_cmap('viridis'),
            edge_cmap=plt.cm.Blues, edge_labels=labels, node_color=map_color, font_color='white')
    plt.savefig('res-' + str(num_fig) + '.eps', format='eps', dpi=1000)
    plt.clf()
    num_fig += 1
    #plt.show()


G1 = nx.connected_caveman_graph(4, 5)
G2 = nx.connected_caveman_graph(5, 6)
G3 = nx.connected_caveman_graph(6, 7)
G4 = nx.connected_caveman_graph(7, 8)
G5 = nx.connected_caveman_graph(8, 9)
do_graph(G5)
do_graph(G2)
do_graph(G3)
do_graph(G4)
do_graph(G1)
#dataframe = pd.DataFrame(total_data)
#dataframe.to_csv('datos.xls',sep='\t')
#model = ols('Tiempo~G*A*C*Ce*E*P',data=dataframe).fit()
#anova = anova_lm(model,typ=2)
#anova.to_csv('anova1.xls',sep='\t')
Example #23
    ResultS = []
    for n in range(1, 11):
        Nvalue.append(5 * 50 * n)
        C = [None] * (5 * 50 * n)
        for i in range(0, 50 * n):
            C[i] = 1
        for i in range(50 * n, 100 * n):
            C[i] = 2
        for i in range(100 * n, 150 * n):
            C[i] = 3
        for i in range(150 * n, 200 * n):
            C[i] = 4
        for i in range(200 * n, 250 * n):
            C[i] = 5

        G = nx.connected_caveman_graph(5, 50 * n)
        G.remove_edge(0, 5 * 50 * n - 1)
        G = addEdges(G, 1, 50 * n - 1, 50 * n + 1, 100 * n - 1, 3)
        G = addEdges(G, 50 * n + 1, 100 * n - 1, 100 * n + 1, 150 * n - 1, 3)
        G = addEdges(G, 100 * n + 1, 150 * n - 1, 150 * n + 1, 200 * n - 1, 3)
        G = addEdges(G, 150 * n + 1, 200 * n - 1, 200 * n + 1, 250 * n - 1, 3)

        TS = runSRW(G, C)

    plt.plot(S, 'b')

    print('RW run to')
    S2 = []
    X2 = runRW(G, 40000, C)
    for key in X2:
        S2.append(C[key])
Example #24
def load_graphs(dataset_str):
    if dataset_str == 'grid':
        graphs = []
        features = []
        for _ in range(1):
            graph = nx.grid_2d_graph(20, 20)
            # graph  = nx.grid_2d_graph(100, 100)
            graph = nx.convert_node_labels_to_integers(graph)

            # node_order = list(range(graph.number_of_nodes()))
            # shuffle(node_order)
            # order_mapping = dict(zip(graph.nodes(), node_order))
            # graph = nx.relabel_nodes(graph, order_mapping, copy=True)

            # feature = np.ones((graph.number_of_nodes(),1))
            feature = np.identity(graph.number_of_nodes())
            # label = nx.adjacency_matrix(graph).toarray()
            graphs.append(graph)
            features.append(feature)
        labels = None

    elif dataset_str == 'caveman_single':
        graph = nx.connected_caveman_graph(20, 20)
        feature = np.ones((graph.number_of_nodes(), 1))
        # feature = np.identity(graph.number_of_nodes())

        # graphs = [graph for _ in range(10)]
        # features = [feature for _ in range(10)]
        graphs = [graph]
        features = [feature]
        labels = None
        #
        # graphs = []
        # features = []
        # labels = None
        # for k in range(10):
        #     graphs.append(caveman_special(c=20, k=20, p_edge=0.2, p_path=500))
        #     features.append(np.ones((400, 1)))

    elif dataset_str == 'caveman':
        graphs = []
        features = []
        labels = []
        # labels = None
        for i in range(50):
            community_size = 20
            graph = nx.connected_caveman_graph(20, community_size)

            # graph,labels_dict = caveman_special(20,20,0)
            # node_dict = {}
            # for id, node in enumerate(graph.nodes()):
            #     node_dict[node] = id
            p = 0.001
            count = 0
            # iterate over a copy of the edge list (the graph is mutated in the loop);
            # random.choice also needs a real sequence rather than a NodeView
            for (u, v) in list(graph.edges()):
                if random.random() < p:  # rewire the edge
                    x = random.choice(list(graph.nodes()))
                    if graph.has_edge(u, x):
                        continue
                    graph.remove_edge(u, v)
                    graph.add_edge(u, x)
                    count += 1
            print('rewire:', count)

            n = graph.number_of_nodes()
            feature = np.ones((n, 1))
            label = np.zeros((n, n))
            for u in graph.nodes():
                for v in graph.nodes():
                    # if labels_dict[u] == labels_dict[v] and u!=v:
                    if u // community_size == v // community_size and u != v:
                        label[u, v] = 1
                        # label[node_dict[u],node_dict[v]] = 1
            # feature = np.identity(graph.number_of_nodes())

            graphs.append(graph)
            features.append(feature)
            labels.append(label)

    elif dataset_str == 'protein':

        graphs_all, features_all, labels_all = Graph_load_batch(
            name='PROTEINS_full')
        features_all = (features_all - np.mean(
            features_all, axis=-1, keepdims=True)) / np.std(
                features_all, axis=-1, keepdims=True)
        graphs = []
        features = []
        labels = []
        for graph in graphs_all:
            n = graph.number_of_nodes()
            label = np.zeros((n, n))
            for i, u in enumerate(graph.nodes()):
                for j, v in enumerate(graph.nodes()):
                    if labels_all[u - 1] == labels_all[v - 1] and u != v:
                        label[i, j] = 1
            if label.sum() > n * n / 2:
                continue

            graphs.append(graph)
            labels.append(label)

            idx = [node - 1 for node in graph.nodes()]
            feature = features_all[idx, :]
            # label_dict = labels_all[graph.nodes()]
            features.append(feature)
            # pdb.set_trace()

        print('final num', len(graphs))

    elif dataset_str == 'email':

        with open('data/email.txt', 'rb') as f:
            graph = nx.read_edgelist(f)

        label_all = np.loadtxt('data/email_labels.txt')
        graph_label_all = label_all.copy()
        graph_label_all[:, 1] = graph_label_all[:, 1] // 6

        for edge in list(graph.edges()):  # copy: edges are removed inside the loop
            if graph_label_all[int(edge[0])][1] != graph_label_all[int(
                    edge[1])][1]:
                graph.remove_edge(edge[0], edge[1])

        comps = [
            comp for comp in nx.connected_components(graph) if len(comp) > 10
        ]
        graphs = [graph.subgraph(comp) for comp in comps]

        labels = []
        features = []

        for g in graphs:
            n = g.number_of_nodes()
            feature = np.ones((n, 1))
            features.append(feature)

            label = np.zeros((n, n))
            for i, u in enumerate(g.nodes()):
                for j, v in enumerate(g.nodes()):
                    if label_all[int(u)][1] == label_all[int(v)][1]:
                        label[i, j] = 1
            label = label - np.identity(n)
            labels.append(label)

    elif dataset_str == 'ppi':
        dataset_dir = 'data/ppi'
        print("Loading data...")
        G = json_graph.node_link_graph(
            json.load(open(dataset_dir + "/ppi-G.json")))
        labels = json.load(open(dataset_dir + "/ppi-class_map.json"))
        labels = {int(i): l for i, l in labels.items()}

        train_ids = [n for n in G.nodes()]
        train_labels = np.array([labels[i] for i in train_ids])
        if train_labels.ndim == 1:
            train_labels = np.expand_dims(train_labels, 1)

        print("Using only features..")
        feats = np.load(dataset_dir + "/ppi-feats.npy")
        ## Logistic gets thrown off by big counts, so log transform num comments and score
        feats[:, 0] = np.log(feats[:, 0] + 1.0)
        feats[:, 1] = np.log(feats[:, 1] - min(np.min(feats[:, 1]), -1))
        feat_id_map = json.load(open(dataset_dir + "/ppi-id_map.json"))
        feat_id_map = {int(id): val for id, val in feat_id_map.items()}
        train_feats = feats[[feat_id_map[id] for id in train_ids]]

        # pdb.set_trace()

        node_dict = {}
        for id, node in enumerate(G.nodes()):
            node_dict[node] = id

        comps = [comp for comp in nx.connected_components(G) if len(comp) > 10]
        graphs = [G.subgraph(comp) for comp in comps]

        id_all = []
        for comp in comps:
            id_temp = []
            for node in comp:
                id = node_dict[node]
                id_temp.append(id)
            id_all.append(np.array(id_temp))

        features = [train_feats[id_temp, :] + 0.1 for id_temp in id_all]

        # graphs = [G.subgraph(comp) for comp in ]
        # pdb.set_trace()

    # real
    else:
        names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
        objects = []
        for i in range(len(names)):
            with open("data/ind.{}.{}".format(dataset_str, names[i]),
                      'rb') as f:
                objects.append(pkl.load(f, encoding='latin1'))

        x, y, tx, ty, allx, ally, graph = tuple(objects)
        test_idx_reorder = parse_index_file(
            "data/ind.{}.test.index".format(dataset_str))
        test_idx_range = np.sort(test_idx_reorder)

        if dataset_str == 'citeseer':
            # Fix citeseer dataset (there are some isolated nodes in the graph)
            # Find isolated nodes, add them as zero-vecs into the right position
            test_idx_range_full = range(min(test_idx_reorder),
                                        max(test_idx_reorder) + 1)
            tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
            tx_extended[test_idx_range - min(test_idx_range), :] = tx
            tx = tx_extended
            ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
            ty_extended[test_idx_range - min(test_idx_range), :] = ty
            ty = ty_extended

        features = sp.vstack((allx, tx)).tolil()
        features[test_idx_reorder, :] = features[test_idx_range, :]
        graph = nx.from_dict_of_lists(graph)
        # keep the max connected component
        nodes_id = sorted(max(nx.connected_components(graph), key=len))
        # connected_component_subgraphs was removed in NetworkX 2.4; on newer versions use:
        # graph = graph.subgraph(max(nx.connected_components(graph), key=len))
        graph = max(nx.connected_component_subgraphs(graph), key=len)
        # adj = nx.adjacency_matrix(G)

        feature = features[nodes_id, :].toarray()
        # feature = np.concatenate((np.identity(graph.number_of_nodes()), feature), axis=-1)

        graphs = [graph]
        features = [feature]
        labels = None

    return graphs, features, labels
Example #25
def buildCavemanGraph(clique, size):

    # Wrapper method to build a caveman graph

    G = nx.connected_caveman_graph(clique, size)
    return G
Example #26
def load_graphs(dataset_str):
    node_labels = [None]
    edge_labels = [None]
    idx_train = [None]
    idx_val = [None]
    idx_test = [None]

    if dataset_str == 'grid':
        graphs = []
        features = []
        for _ in range(1):
            graph = nx.grid_2d_graph(20, 20)
            graph = nx.convert_node_labels_to_integers(graph)

            feature = np.identity(graph.number_of_nodes())
            graphs.append(graph)
            features.append(feature)

    elif dataset_str == 'communities':
        graphs = []
        features = []
        node_labels = []
        edge_labels = []
        for i in range(1):
            community_size = 20
            community_num = 20
            p = 0.01

            graph = nx.connected_caveman_graph(community_num, community_size)

            count = 0

            # iterate over a copy of the edge list, since edges are removed and added inside the loop
            for (u, v) in list(graph.edges()):
                if random.random() < p:  # rewire the edge
                    x = random.choice(list(graph.nodes))
                    if graph.has_edge(u, x):
                        continue
                    graph.remove_edge(u, v)
                    graph.add_edge(u, x)
                    count += 1
            print('rewire:', count)

            n = graph.number_of_nodes()
            label = np.zeros((n, n), dtype=int)
            for u in list(graph.nodes):
                for v in list(graph.nodes):
                    if u // community_size == v // community_size and u > v:
                        label[u, v] = 1
            rand_order = np.random.permutation(graph.number_of_nodes())
            feature = np.identity(graph.number_of_nodes())[:, rand_order]
            graphs.append(graph)
            features.append(feature)
            edge_labels.append(label)

    elif dataset_str == 'protein':

        graphs_all, features_all, labels_all = Graph_load_batch(
            name='PROTEINS_full')
        features_all = (features_all - np.mean(
            features_all, axis=-1, keepdims=True)) / np.std(
                features_all, axis=-1, keepdims=True)
        graphs = []
        features = []
        edge_labels = []
        for graph in graphs_all:
            n = graph.number_of_nodes()
            label = np.zeros((n, n), dtype=int)
            for i, u in enumerate(graph.nodes()):
                for j, v in enumerate(graph.nodes()):
                    if labels_all[u - 1] == labels_all[v - 1] and u > v:
                        label[i, j] = 1
            if label.sum() > n * n / 4:
                continue

            graphs.append(graph)
            edge_labels.append(label)

            idx = [node - 1 for node in graph.nodes()]
            feature = features_all[idx, :]
            features.append(feature)

        print('final num', len(graphs))

    elif dataset_str == 'email':

        with open('data/email.txt', 'rb') as f:
            graph = nx.read_edgelist(f)

        label_all = np.loadtxt('data/email_labels.txt')
        graph_label_all = label_all.copy()
        graph_label_all[:, 1] = graph_label_all[:, 1] // 6

        for edge in list(graph.edges()):
            if graph_label_all[int(edge[0])][1] != graph_label_all[int(
                    edge[1])][1]:
                graph.remove_edge(edge[0], edge[1])

        comps = [
            comp for comp in nx.connected_components(graph) if len(comp) > 10
        ]
        graphs = [graph.subgraph(comp) for comp in comps]

        edge_labels = []
        features = []

        for g in graphs:
            n = g.number_of_nodes()
            feature = np.ones((n, 1))
            features.append(feature)

            label = np.zeros((n, n), dtype=int)
            for i, u in enumerate(g.nodes()):
                for j, v in enumerate(g.nodes()):
                    if label_all[int(u)][1] == label_all[int(v)][1] and i > j:
                        label[i, j] = 1
            edge_labels.append(label)

    elif dataset_str == 'ppi':
        dataset_dir = 'data/ppi'
        print("Loading data...")
        G = json_graph.node_link_graph(
            json.load(open(dataset_dir + "/ppi-G.json")))
        edge_labels_internal = json.load(
            open(dataset_dir + "/ppi-class_map.json"))
        edge_labels_internal = {
            int(i): l
            for i, l in edge_labels_internal.items()
        }

        train_ids = [n for n in G.nodes()]
        train_labels = np.array([edge_labels_internal[i] for i in train_ids])
        if train_labels.ndim == 1:
            train_labels = np.expand_dims(train_labels, 1)

        print("Using only features..")
        feats = np.load(dataset_dir + "/ppi-feats.npy")
        ## Logistic gets thrown off by big counts, so log transform num comments and score
        feats[:, 0] = np.log(feats[:, 0] + 1.0)
        feats[:, 1] = np.log(feats[:, 1] - min(np.min(feats[:, 1]), -1))
        feat_id_map = json.load(open(dataset_dir + "/ppi-id_map.json"))
        feat_id_map = {int(id): val for id, val in feat_id_map.items()}
        train_feats = feats[[feat_id_map[id] for id in train_ids]]

        node_dict = {}
        for id, node in enumerate(G.nodes()):
            node_dict[node] = id

        comps = [comp for comp in nx.connected_components(G) if len(comp) > 10]
        graphs = [G.subgraph(comp) for comp in comps]

        id_all = []
        for comp in comps:
            id_temp = []
            for node in comp:
                id = node_dict[node]
                id_temp.append(id)
            id_all.append(np.array(id_temp))

        features = [train_feats[id_temp, :] + 0.1 for id_temp in id_all]

    else:
        raise NotImplementedError

    return graphs, features, edge_labels, node_labels, idx_train, idx_val, idx_test
Example #27
#%%
# 1. graph
n_nodes = 50  # number of nodes
d = 3  # dimension of variable at each node
np.random.seed(1000)
# 2. function
# objective value
v = np.random.rand(n_nodes, d)
# optimal value
x_opt = v.mean()

# 3. simulation setting
graphs = [
    nx.lollipop_graph(n_nodes // 2, n_nodes - n_nodes // 2),
    nx.connected_caveman_graph(n_nodes // 5, 5),
    Simulator.erdos_renyi(n_nodes, 0.05, seed=501),
    Simulator.erdos_renyi(n_nodes, 0.1, seed=1000)
]
graph_name = ['Lollipop', 'Caveman', 'ER(p=0.05)', 'ER(p=0.1)']
line_style = ['--rd', '-rd', '--c^', '-c^', '--bs', '-bs', '--go', '-go']
best_penalty = [{
    'D-CADMM': 5,
    'H-CADMM': 5.5
}, {
    'D-CADMM': 1.57,
    'H-CADMM': 2.45
}, {
    'D-CADMM': 1.5,
    'H-CADMM': 1.85
}, {
Example #28
#!/usr/bin/env python
import sys, json
import networkx as nx
from networkx.readwrite import json_graph

#g = nx.barabasi_albert_graph(1000, 3)
#g= nx.watts_strogatz_graph(500,20,0.2)
g = nx.connected_caveman_graph(15, 5)

data = json_graph.node_link_data(g)

data['nodes'] = [{"name": i} for i in range(len(data['nodes']))]

with open(sys.argv[1], 'w') as f:
    f.write(json.dumps(data))
Example #29
def load_data(dataset_str):
    """
    Loads input data from gcn/data directory
    ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
        (a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
    ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
    ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
    ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
        object;
    ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.
    All objects above must be saved using python pickle module.
    :param dataset_str: Dataset name
    :return: All data input files loaded (as well the training/test data).
    """

    # synthetic
    # todo: design node label
    labels, idx_train, idx_val, idx_test = None, None, None, None
    if dataset_str == 'grid':
        G = nx.grid_2d_graph(20, 20)
        # G = nx.grid_2d_graph(100, 100)
        # features = np.ones((G.number_of_nodes(),1))
        features = np.identity(G.number_of_nodes())
        labels = np.zeros((G.number_of_nodes(), 2))
        labels[0:G.number_of_nodes() // 2, 0] = 1
        labels[G.number_of_nodes() // 2:, 1] = 1
        idx = np.random.permutation(G.number_of_nodes())
        idx_train = idx[0:G.number_of_nodes() // 2]
        idx_val = idx[G.number_of_nodes() // 2:]
    elif dataset_str == 'caveman':
        G = nx.connected_caveman_graph(20, 20)
        features = np.identity(G.number_of_nodes())

        # features = np.ones((G.number_of_nodes(),1))
    elif dataset_str == 'barabasi':
        G = nx.barabasi_albert_graph(1000, 2)
        features = np.identity(G.number_of_nodes())

        # features = np.ones((G.number_of_nodes(), 1))

    # real
    else:
        names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
        objects = []
        for i in range(len(names)):
            with open("data/ind.{}.{}".format(dataset_str, names[i]),
                      'rb') as f:
                objects.append(pkl.load(f, encoding='latin1'))

        x, y, tx, ty, allx, ally, graph = tuple(objects)
        test_idx_reorder = parse_index_file(
            "data/ind.{}.test.index".format(dataset_str))
        test_idx_range = np.sort(test_idx_reorder)

        if dataset_str == 'citeseer':
            # Fix citeseer dataset (there are some isolated nodes in the graph)
            # Find isolated nodes, add them as zero-vecs into the right position
            test_idx_range_full = range(min(test_idx_reorder),
                                        max(test_idx_reorder) + 1)
            tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
            tx_extended[test_idx_range - min(test_idx_range), :] = tx
            tx = tx_extended
            ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
            ty_extended[test_idx_range - min(test_idx_range), :] = ty
            ty = ty_extended

        features = sp.vstack((allx, tx)).tolil()
        features[test_idx_reorder, :] = features[test_idx_range, :]
        G = nx.from_dict_of_lists(graph)
        # print(all(G.nodes()[i] <= G.nodes()[i + 1] for i in range(len(G.nodes()) - 1))) # check if sorted
        # keep the max connected component
        nodes_id = sorted(max(nx.connected_components(G), key=len))
        # connected_component_subgraphs was removed in NetworkX 2.4; on newer versions use:
        # G = G.subgraph(max(nx.connected_components(G), key=len))
        G = max(nx.connected_component_subgraphs(G), key=len)
        # adj = nx.adjacency_matrix(G)

        features = features[nodes_id, :]

        labels = np.vstack((ally, ty))
        labels[test_idx_reorder, :] = labels[test_idx_range, :]
        labels = labels[nodes_id, :]

        # idx_test = test_idx_range.tolist()
        # idx_train = range(len(y))
        # idx_val = range(len(y), len(y) + 500)
        idx_train = range(500)
        idx_val = range(500, 1000)
        idx_test = range(G.number_of_nodes() - 1000, G.number_of_nodes())
    return G, features, labels, idx_train, idx_val, idx_test
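
A hedged usage sketch; 'cora' is one of the conventional dataset names for the ind.* files described in the docstring, and the data/ directory layout is assumed.

G, features, labels, idx_train, idx_val, idx_test = load_data('cora')
print(G.number_of_nodes(), features.shape, labels.shape)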
Example #31
def generate_timings(myDijkstra, ID, small_dataset=False):
    """
    Generate timing data
    """
    numpy.random.seed(ID)
    if small_dataset:
        clique_sizes = [i for i in range(4, 9, 4)]
        number_of_cliques = [i for i in range(2, 10, 2)]
    else:
        #        clique_sizes = [i for i in range(4, 30, 4)]
        #        number_of_cliques = [i for i in range(2, 20, 2)]
        clique_sizes = [i for i in range(4, 13, 4)]
        number_of_cliques = [i for i in range(2, 10, 2)]
    graph_n = []
    graph_m = []
    times = []

    number_of_graphs = len(number_of_cliques) * len(clique_sizes)
    print("Your implementation named", myDijkstra.__name__, \
          "will be run on a total of", number_of_graphs, "graphs")
    graph_cntr = 0

    if networkx.__version__[0] == '2':
        for n_cliques in number_of_cliques:
            for clique_size in clique_sizes:
                graph_cntr += 1
                # Construct the networkx graph
                graph = networkx.connected_caveman_graph(
                    n_cliques, clique_size)
                assert (networkx.is_connected(graph))
                this_graph_n = len(graph.nodes())
                this_graph_m = len(graph.edges())
                graph_n.append(this_graph_n)
                graph_m.append(this_graph_m)
                print("Working on graph number", graph_cntr, \
                      "of size n = |V| =", this_graph_n, "and m = |A| =", \
                      this_graph_m)
                # Construct the successors list for the dijkstra function
                successors = []
                for _ in graph.adjacency():
                    successors.append(dict())
                for start_node, adj_dict in graph.adjacency():
                    for end_node in list(adj_dict.keys()):
                        # Add in a random distance
                        distance = numpy.random.randint(1, 10)
                        successors[start_node][end_node] = distance
                        successors[end_node][start_node] = distance
                # Call the function
                this_graph_times = []
                for n in range(1, this_graph_n):
                    t = timeit.Timer(lambda: myDijkstra(successors, n))
                    this_graph_times.append(t.timeit(number=3))
                times.append(this_graph_times)
    else:  # NetworkX version is less than version 2
        for n_cliques in number_of_cliques:
            for clique_size in clique_sizes:
                graph_cntr += 1  # was missing in this branch, so the progress message never counted up
                # Construct the networkx graph
                graph = networkx.connected_caveman_graph(
                    n_cliques, clique_size)
                assert (networkx.is_connected(graph))
                this_graph_n = len(graph.nodes())
                this_graph_m = len(graph.edges())
                graph_n.append(this_graph_n)
                graph_m.append(this_graph_m)
                print("Working on graph number", graph_cntr, \
                      " of size n = |V| = ", this_graph_n, " and m = |A| =", \
                      this_graph_m)
                # Construct the successors list for the dijkstra function
                successors = []
                for _ in graph.adjacency_list():
                    successors.append(dict())
                for start_node, adj in enumerate(graph.adjacency_list()):
                    for end_node in adj:
                        # Add in a random distance
                        distance = numpy.random.randint(1, 10)
                        successors[start_node][end_node] = distance
                        successors[end_node][start_node] = distance
                # Call the function
                this_graph_times = []
                for n in range(1, this_graph_n):
                    t = timeit.Timer(lambda: myDijkstra(successors, n))
                    this_graph_times.append(t.timeit(number=3))
                times.append(this_graph_times)
    # Now write it out to an Excel workbook
    book = xlwt.Workbook()
    sheet = book.add_sheet("Data")
    sheet.row(0).write(0, "n = |V|")
    sheet.row(1).write(0, "m = |A|")
    sheet.row(2).write(0, "Dijkstra times [microsec]")
    #for col, g_size in enumerate(graph_n):
    for col, g_n, g_m in zip(range(0, len(graph_n)), graph_n, graph_m):
        print(col, g_n, g_m)
        sheet.row(0).write(col + 1, g_n)
        sheet.row(1).write(col + 1, g_m)
        for row, t in enumerate(times[col]):
            sheet.row(row + 2).write(col + 1, t)
    #book.save('dijkstra_data.xls')
    #book.save(xls_filename)
    book.save(myDijkstra.__name__ + "_data.xls")
    # Return the data we wrote to the file, just in case
    return graph_n, graph_m, times
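
A hedged usage sketch; dijkstra_stub is a hypothetical implementation with the (successors, source) signature the harness expects, where successors[u] maps each neighbor of u to its edge distance:

import heapq

def dijkstra_stub(successors, source):
    # Standard Dijkstra over the successors list-of-dicts structure.
    dist = {source: 0}
    heap = [(0, source)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist.get(u, float('inf')):
            continue  # stale heap entry
        for v, w in successors[u].items():
            if d + w < dist.get(v, float('inf')):
                dist[v] = d + w
                heapq.heappush(heap, (d + w, v))
    return dist

graph_n, graph_m, times = generate_timings(dijkstra_stub, ID=0, small_dataset=True)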
Example #32
import numpy.random as random

# %%
color_map = {"s": "blue", "i": "red", "r": "gray", "e": "orange"}
options = {"node_size": 100, "arrowstyle": "-|>", "arrowsize": 12}

# %%
alpha = 0.25
beta = 0.50
gamma = 0.50
omega = 0.50
N = 10
random.seed(3)

# %%
G = nx.connected_caveman_graph(3, N)
for i in range(len(G.nodes)):
    G.nodes[i]["group"] = "s"
G.nodes[0]["group"] = "i"
G.nodes[0]["recovery_time_left"] = 1
G.nodes.data()

# %%
for node in G.nodes.data():
    if node[1]["group"] == "i":
        # each neighbor of an infected node can be exposed
        for other_node in G.neighbors(node[0]):
            G[node[0]][other_node]["weight"] = beta
G.edges.data()

# %%
Example #33
def generate_caveman_graph(cliques=5, size=4):
    """
    Returns a connected caveman graph of `cliques` cliques, each of `size` nodes.
    """
    return nx.connected_caveman_graph(cliques, size)
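
Since connected_caveman_graph(l, k) has l * k nodes, the defaults give a 20-node graph:

G = generate_caveman_graph()  # 5 cliques of 4 nodes
print(G.number_of_nodes())    # 20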
Example #34
            currentNode = nextNode
        X.append(currentNode)
    return CalculateCMu(X, G, C)


if __name__ == "__main__":
    ResultN = []
    ResultS = []
    C = [None] * 100
    for i in range(0, 50):
        C[i] = 1

    for j in range(50, 100):
        C[j] = 100

    G = nx.connected_caveman_graph(2, 50)
    # for node in G.nodes():
    # 	print G.neighbors(node)
    #True Mu is 50.5
    for k in range(1, 11):
        print('SRW run to')
        print(k)
        S = []
        tMu = 50.5
        for m in range(0, 10):
            S.append(runSRW(G, 1000 * k, C))
        RE = float(np.sum(abs(np.subtract(S, tMu)))) / 10
        ResultS.append(RE)
    for k in range(1, 11):
        print('RW run to')
        print(k)
Example #35
import networkx as nx
import copy
import operator
import random
import time
#G = nx.read_edgelist('networks/booksUSPolitics/polbooks_edges.txt')
#G = nx.karate_club_graph()
#G = nx.read_edgelist('networks/dolphin/dolphins_edges.txt')
G = nx.connected_caveman_graph(20,8)
nodeCommDict = {}
commNodeDict = {}
deltaQ = {}
a_i = {}
connected = {}
secAssTime = 0
firstAssTime = 0
beforeJoinTime = 0
resPars = [10,1,0.5,0.1,0.05,0.01,0.001,0.0001,0.00001,0.000001]

def initComms():
	global nodeCommDict
	global commNodeDict
	nodeCommDict = {}
	commNodeDict = {}
	nodes = nx.nodes(G)
	for node in nodes:
		nodeCommDict[node] = node
		commNodeDict[node] = [node]

def printInfo():
	print(nx.info(G))