Example #1
    def measure(self):
        # dict.values() is a view in Python 3; wrap in list() before np.array
        h = np.array(list(nx.get_node_attributes(self.network, "opinion").values()))
        m = h.mean()
        v = h.var()
        A = np.asarray(nx.attr_matrix(self.network, edge_attr="weight")[0])
        q = (A*h*h[:,None]).mean()
        Enemies = np.where(A > 1/2, 0, 1)
        n = Enemies.mean()
        data = pd.Series([m,v,q,n],index=["m","v","q","n"])
        return data
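For context, measure() above is a method of a simulation class; the pattern it relies on is nx.attr_matrix returning a (matrix, ordering) pair when rc_order is omitted. A minimal self-contained sketch of the same idea, with an invented graph and random attribute values:

import networkx as nx
import numpy as np
import pandas as pd

G = nx.complete_graph(4)
nx.set_node_attributes(G, {i: np.random.uniform(-1, 1) for i in G}, "opinion")
nx.set_edge_attributes(G, {e: np.random.rand() for e in G.edges}, "weight")

h = np.array(list(nx.get_node_attributes(G, "opinion").values()))
A = np.asarray(nx.attr_matrix(G, edge_attr="weight")[0])
print(pd.Series([h.mean(), h.var(), (A * h * h[:, None]).mean()],
                index=["m", "v", "q"]))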
Example #2
def normalize_matrix(g):

    # laplace
    # then build the W
    # node_size = len(g.nodes())
    # for i in range(0, node_size):
    #    for j in range(0, node_size):
    #        if row_idx[j] in g.neighbors(row_idx[i]):
    #            W[i, j] = g[row_idx[i]][row_idx[j]]["weight"]
    #        else:
    #            W[i, j] = 0
    # return numpy.asmatrix(csgraph.laplacian(W, normed=True))
    # row-normalized attribute matrix
    w, _ = nx.attr_matrix(g, edge_attr="weight", normalized=True)
    return w
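With normalized=True, attr_matrix divides each row by its row sum, so every node with at least one edge gets a right-stochastic row. A quick check on a toy weighted graph (invented for illustration):

import networkx as nx
import numpy as np

g = nx.Graph()
g.add_edge("a", "b", weight=2.0)
g.add_edge("a", "c", weight=6.0)

w, order = nx.attr_matrix(g, edge_attr="weight", normalized=True)
print(order)                      # row/column ordering of the nodes
print(np.asarray(w).sum(axis=1))  # each row sums to 1.0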
Example #3
def main():
    g = nx.DiGraph(nx.drawing.nx_agraph.read_dot('./ss.dot'))
    for s, t in g.edges():
        # edge labels are assumed to hold probability expressions; eval turns them into numbers
        g[s][t]['prob'] = eval(g[s][t]['label'])
    eAttrMat, cols = nx.attr_matrix(g, 'prob')
    print(cols)
    u = np.ravel((eAttrMat ** 100)[0,:])
    print(eAttrMat)
    print( '\nStationary distribution:' )
    print(cols)
    print(u)
    rate = 0.0
    for i, ui in enumerate(u):
        pis = np.ravel(eAttrMat[i,:])
        rate += ui * entropy(pis)
    print( f'\nRate is: {rate:.3f} bits per symbol' )
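Note that eAttrMat ** 100 computes a matrix power only because older attr_matrix returns np.matrix (entropy here is presumably scipy.stats.entropy). A sketch that works whether attr_matrix hands back np.matrix or a plain ndarray:

import numpy as np

def stationary_row(P, steps=100):
    # Approximate the stationary distribution of a row-stochastic matrix
    # by taking a large matrix power and reading off any row.
    P = np.asarray(P, dtype=float)
    return np.linalg.matrix_power(P, steps)[0, :]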
Example #4
def test_attr_matrix():
    G = nx.Graph()
    G.add_edge(0, 1, thickness=1, weight=3)
    G.add_edge(0, 2, thickness=2)
    G.add_edge(1, 2, thickness=3)

    def node_attr(u):
        return G.nodes[u].get("size", .5) * 3

    def edge_attr(u, v):
        return G[u][v].get("thickness", .5)

    M = nx.attr_matrix(G, edge_attr=edge_attr, node_attr=node_attr)
    npt.assert_equal(M[0], np.array([[6.]]))
    assert M[1] == [1.5]
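attr_matrix returns a (matrix, ordering) tuple. With node_attr given, rows and columns are indexed by distinct node-attribute values rather than by nodes, which is why the matrix above collapses to 1x1: all three nodes share the default size, so node_attr(u) is 1.5 for each, and the edge thicknesses 1 + 2 + 3 accumulate into the single cell. Reusing G, edge_attr, and node_attr from the test:

M, ordering = nx.attr_matrix(G, edge_attr=edge_attr, node_attr=node_attr)
print(M)         # [[6.]]
print(ordering)  # [1.5]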
Example #5
def nx_to_edge_features(graphs, keys=None, post_processing=None):
    """
    Converts a list of nx.Graphs to a rank 4 np.array of edge features matrices
    of shape `(num_graphs, num_nodes, num_nodes, num_features)`.
    Optionally applies a post-processing function to each attribute in the nx
    graphs.
    :param graphs: a nx.Graph, or a list of nx.Graphs;
    :param keys: a list of keys with which to index edge attributes. It's also
    possible to set this to `None` and the function will attempt to 
    automatically extract the correct keys from the first graph in the list.  
    :param post_processing: a list of functions with which to post process each
    attribute associated to a key. `None` can be passed as post-processing 
    function to leave the attribute unchanged.
    :return: a rank 4 np.array of edge feature matrices
    """
    if post_processing is not None:
        for i in range(len(post_processing)):
            if post_processing[i] is None:
                post_processing[i] = lambda x: x

    if isinstance(graphs, nx.Graph):
        graphs = [graphs]

    if keys is None:
        # Attempt to extract edge attribute keys from the data itself
        edge_key = next(iter(graphs[0].edges))
        keys = list(graphs[0].get_edge_data(*edge_key).keys())

    output = []
    for g in graphs:
        edge_features = []
        for key in keys:
            ef = np.array(nx.attr_matrix(g, edge_attr=key)[0])
            if ef.ndim == 2:
                ef = ef[..., None]  # Make it three dimensional to concatenate
            edge_features.append(ef)
        if post_processing is not None:
            edge_features = [
                op(_) for op, _ in zip(post_processing, edge_features)
            ]
        if len(edge_features) > 1:
            edge_features = np.concatenate(edge_features, axis=-1)
        else:
            edge_features = np.array(edge_features[0])
        output.append(edge_features)

    return np.array(output)
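A hypothetical call, assuming nx_to_edge_features above is in scope: two small graphs with a scalar 'weight' edge attribute yield an array of shape (num_graphs, num_nodes, num_nodes, 1).

import networkx as nx

g1, g2 = nx.complete_graph(3), nx.complete_graph(3)
for g in (g1, g2):
    nx.set_edge_attributes(g, {e: 1.0 for e in g.edges}, "weight")

ef = nx_to_edge_features([g1, g2], keys=["weight"])
print(ef.shape)  # (2, 3, 3, 1)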
Example #6
    def build(self):
        graph = self.create_graph()
        labels = sorted(graph.nodes())
        if not self.weighted:
            adjacency_matrix = nx.adjacency_matrix(
                graph, nodelist=labels).todense().tolist()
        else:
            adjacency_matrix = nx.attr_matrix(graph,
                                              edge_attr='weight',
                                              rc_order=labels).tolist()

        for i in range(len(adjacency_matrix)):
            adjacency_matrix[i].insert(0, labels[i])

        labels.insert(0, '')
        return self.get_matrix_name(), pd.DataFrame.from_records(
            adjacency_matrix, columns=labels)
Example #7
def nx_to_edge_features(graphs, keys, post_processing=None):
    """
    Converts a list of nx.Graphs to a rank 4 np.array of edge features matrices
    of shape `(num_graphs, num_nodes, num_nodes, num_features)`.
    Optionally applies a post-processing function to each attribute in the nx
    graphs.
    :param graphs: a nx.Graph, or a list of nx.Graphs;
    :param keys: a list of keys with which to index edge attributes.
    :param post_processing: a list of functions with which to post process each
    attribute associated to a key. `None` can be passed as post-processing 
    function to leave the attribute unchanged.
    :return: a rank 4 np.array of edge feature matrices
    """
    if post_processing is not None:
        if len(post_processing) != len(keys):
            raise ValueError(
                'post_processing must contain an element for each key')
        for i in range(len(post_processing)):
            if post_processing[i] is None:
                post_processing[i] = lambda x: x

    if isinstance(graphs, nx.Graph):
        graphs = [graphs]

    output = []
    for g in graphs:
        edge_features = []
        for key in keys:
            ef = np.array(nx.attr_matrix(g, edge_attr=key)[0])
            if ef.ndim == 2:
                ef = ef[..., None]  # Make it three dimensional to concatenate
            edge_features.append(ef)
        if post_processing is not None:
            edge_features = [
                op(_) for op, _ in zip(post_processing, edge_features)
            ]
        if len(edge_features) > 1:
            edge_features = np.concatenate(edge_features, axis=-1)
        else:
            edge_features = np.array(edge_features[0])
        output.append(edge_features)

    return np.array(output)
Example #8
def get_edge_matrix(G, return_tensor=False):
    if return_tensor:
        edge_value = lambda u, v: G[u][v]['edge_value']
        node_value = lambda u: u

        ordering = list(set([node_value(n) for n in G]))

        N = len(ordering)
        undirected = not G.is_directed()
        index = dict(zip(ordering, range(N)))
        M = torch.zeros((N, N))
        M_ = torch.zeros((N, N, 4))
        use_extended = False
        seen = set([])

        for u, nbrdict in G.adjacency():
            for v in nbrdict:
                # Obtain the node attribute values.
                i, j = index[node_value(u)], index[node_value(v)]
                if v not in seen:
                    ev = edge_value(u, v)
                    if isinstance(ev, torch.Tensor) and ev.shape[0] == 1:
                        M[i, j] += ev.item()
                    elif isinstance(ev, torch.Tensor) and ev.shape[0] == 4:
                        M_[i, j] = ev
                        use_extended = True
                    else:
                        M[i, j] += ev

                    if undirected:
                        M[j, i] = M[i, j]

            if undirected:
                seen.add(u)

        if use_extended:
            return M_
        else:
            return M
    else:
        return nx.attr_matrix(G, edge_attr='edge_value')[0]
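A minimal sketch of the scalar fallback branch (return_tensor=False), assuming get_edge_matrix above is importable; the graph and its 'edge_value' entries are invented:

import networkx as nx

G = nx.Graph()
G.add_edge(0, 1, edge_value=0.5)
G.add_edge(1, 2, edge_value=2.0)
print(get_edge_matrix(G))  # dense matrix of the 'edge_value' entries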
Example #9
def node_out_128(inp_dir, out_dir, out_coor):
    '''exclude the 128 and -128 values from the node attribute txt files and 
    write new node attribute files'''

    node_path = inp_dir
    f = [f for f in listdir(node_path) if isfile(join(node_path, f))]
    #f = f[0:10]
    for i in f:
        print(i)
        with open(node_path + i, 'r') as gph:
            cont = gph.readlines()
        ls_node, ls_edge = gphtols_view(cont, False)

        graph = make_graph(ls_node, ls_edge, range(len(ls_node)))
        nodes = np.asarray(ls_node)

        wh_128 = np.where(nodes == out_coor)
        wh_128 = list(wh_128[0])
        wh_128n = np.where(nodes == -out_coor)
        wh_128n = list(wh_128n[0])

        full_list = list(set(wh_128) | set(wh_128n))  #union of two lists

        for j in range(len(full_list)):
            graph.remove_n(full_list[j])

        adj = nx.attr_matrix(graph.get_graph())[0]
        edges = []

        for j in range(adj.shape[0]):
            for k in range(adj.shape[1]):
                if adj[j, k] == 1.:
                    edges.append([j, k])

        nodes = list(
            nx.get_node_attributes(graph.get_graph(), name='coor').values())

        #print(nodes,edges)

        write_gph(out_dir + i, nodes, edges)
Example #10
def deltaPredictorOrdered(path, graph, a, k1, k2, data, r):

    if (path is None and graph is None) or k1 < 0 or k2 < 0:
        return

    g = nx.read_gpickle(path) if path is not None else graph
    sorted_x = data[8]
    sorted_y = data[9]

    ratio = (r[1] / r[2])  # c_x*r_x / c_y*r_y
    print(r[1], r[2], ratio)

    min_k1 = min(k1, len(sorted_x))
    min_k2 = min(k2, len(sorted_y))

    dictio_delta = {}
    dictio_predictor = {}

    adj_mat = np.array(nx.attr_matrix(g)[0])

    for i in range(0, min_k1):
        for j in range(0, min_k2):
            if adj_mat[sorted_x[i][0]][sorted_y[j][0]] == 0:
                e = (sorted_x[i][0], sorted_y[j][0])
                dictio_delta.update({e: deltaRwc(None, g, a, data, e)})
                dictio_predictor.update({e: ut.AdamicAdarIndex(g, e)})

            if adj_mat[sorted_y[j][0]][sorted_x[i][0]] == 0:
                e = (sorted_y[j][0], sorted_x[i][0])
                dictio_delta.update({e: deltaRwc(None, g, a, data, e)})
                dictio_predictor.update({e: ut.AdamicAdarIndex(g, e)})

    dict_delta_sorted = sorted(dictio_delta.items(),
                               key=lambda kv: (kv[1], kv[0]))
    dict_predictor_sorted = sorted(dictio_predictor.items(),
                                   key=lambda kv: (kv[1], kv[0]),
                                   reverse=True)

    return (dict_delta_sorted, dictio_delta, dict_predictor_sorted,
            dictio_predictor)
Example #11
    def sample_links(self, graph, negative_edge_type,
                     percent_of_links_to_sample):
        graph_edges = graph.edges()
        num_of_graph_edges = len(graph_edges)
        number_of_links_to_sample = int(
            math.ceil(percent_of_links_to_sample * num_of_graph_edges))
        if graph.number_of_edges() > number_of_links_to_sample:
            # random.sample needs a sequence, not an EdgeView
            positive_edges = random.sample(list(graph.edges()),
                                           number_of_links_to_sample)
        else:
            positive_edges = list(graph.edges())

        negative_edges = []
        if negative_edge_type == 'easy':
            candidate_non_edges = list(nx.non_edges(graph))
            if len(candidate_non_edges) > number_of_links_to_sample:
                negative_edges = list(
                    random.sample(candidate_non_edges,
                                  number_of_links_to_sample))
            else:
                negative_edges = candidate_non_edges
        else:
            matrix = nx.attr_matrix(graph)
            neighbors_distance_two = matrix[0].dot(matrix[0])
            #new_graph = nx.from_numpy_matrix(neighbors_distance_two)

            #for source, dest in new_graph.edges():
            for (source,
                 dest), value in np.ndenumerate(neighbors_distance_two):
                source_name = matrix[1][source]
                dest_name = matrix[1][dest]
                if source == dest or value == 0 or graph.has_edge(
                        source_name, dest_name):
                    continue
                negative_edges.append((source_name, dest_name))
                if (len(negative_edges)) == number_of_links_to_sample:
                    break

        return positive_edges, negative_edges
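Why matrix[0].dot(matrix[0]) produces "hard" negatives: entry (i, j) of the squared adjacency matrix counts length-2 paths, so a nonzero off-diagonal entry without a direct edge marks a distance-2 pair. A small illustration on a path graph (invented for this note):

import networkx as nx
import numpy as np

G = nx.path_graph(3)              # edges 0-1 and 1-2
A, order = nx.attr_matrix(G)
A2 = np.asarray(A.dot(A))
print(A2[0, 2], G.has_edge(0, 2)) # 1.0 False -> (0, 2) is a hard negative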
Example #12
#query = "Suicide"
#query = "Nausea"
#query = "Diarrhoea"
#query = "Constipation"
query = "Anaemia"
#query = "Anaemia megaloblastic"
idx_query = side_effect_nodes.index(query)
GR_TR = matrix[idx_query, :]
#print idx
#print "Ground Truth:", len(matrix[idx_query,:])

Drug_Drug_Adj_mat = nx.adjacency_matrix(G, nodelist=drug_nodes, weight='none')

A = np.array(Drug_Drug_Adj_mat.todense(), dtype=np.float64)

weight_matrix = nx.attr_matrix(G, edge_attr='weight', rc_order=drug_nodes)
weight_matrix = np.array(weight_matrix)

heat_matrix = np.zeros([n, n])
#print "Heat Matrix Creation started:"
G = nx.from_numpy_matrix(A)

print "Heat Matrix filling started:"
for i in range(n):
    for j in range(n):
        if A[j, i] == 1.0:
            heat_matrix[i, j] = weight_matrix[j, i] / G.degree(j)
        if (i == j):
            if G.degree(i):
                heat_matrix[i, j] = (-1.0 / G.degree(i)) * sum(
                    weight_matrix[i, :])
Example #13
    return nodeToCheckOne in G.neighbors(nodeToCheckTwo)


anni = list(range(2000, 2010))
grafiPerAnno = createGraphPerYear(anni)
k = 10
finalTopicsList = dict()

for x in grafiPerAnno:
    G = grafiPerAnno[x]
    pr = nx.pagerank(G)
    #h,a = nx.hits(G,max_iter=10000)
    topKSorted = sorted(pr.items(), key=operator.itemgetter(1))[0:k]
    topKLabels = set(x[0] for x in topKSorted)  # use a set: membership tests are cheaper

    test = nx.attr_matrix(G)

    cluster = AgglomerativeClustering(n_clusters=2,
                                      affinity='euclidean',
                                      linkage='ward')
    #cluster.fit_predict(X)
    topics = linear_threshold(G, topKLabels)
    topicsAsList = np.array(list(topics.values())).flatten()
    if len(topicsAsList) > 1:
        subgraph = G.subgraph(topicsAsList).copy()
        #clusterized = cluster.fit_predict(nx.adjacency_matrix(subgraph).todense())
        clusterized = nx.clustering(subgraph)
        topicsToInsert = [key for key in clusterized if clusterized[key] > 0.3]
        finalTopicsList[x] = {
            'graph': G,
            'subgraph': subgraph,
Example #14
plt.show()

# In[9]:

#b = nx.betweenness_centrality(G , weight='capacity')
#fig = plt.figure(figsize=(20,5))
#plt.bar(range(len(b)) , list(b.values()) , align='center')
#plt.xticks(range(len(b)) , b.keys())
#plt.show()

# In[10]:

nodelist = sorted(G.nodes())  # NodeView has no .sort() in networkx 2.x
m = nx.attr_matrix(G, edge_attr='capacity', rc_order=nodelist)

# In[11]:

m = m * 10.0
print(nodelist[0], m[0])
print(nodelist[15], m[15])

# In[12]:

print(flows['STAR'])

# In[13]:

df = pd.DataFrame(flows)
df.fillna(0, inplace=True)
Example #15
def find_event_sequence(graph_file, start_sid, length, top=3):
    with open(graph_file, 'rb') as f:
        G = pickle.load(f)
    
    matrix, pos_sid = nx.attr_matrix(G, edge_attr='weight', normalized=True)
    
    sid_pos = {sid: position for (position, sid) in enumerate(pos_sid)}
    
    results = []
    find_event_sequence_r(start_sid, matrix, pos_sid, sid_pos, length, results, [], top)
    
    _results = np.reshape(np.array(results), (-1, length+1))
    
    ret_unsorted = [{'sequence':_results[i], 'probability':sequence_probability(_results[i], matrix, sid_pos)}
                    for i in range(_results.shape[0])]
    
    ret_sorted = [x for x in sorted(ret_unsorted, key=lambda x: x['probability'], reverse=True)]
    
    return ret_sorted
    #for i in range(_results.shape[0]):
    #    print _results[i], sequence_probability(_results[i], matrix, sid_pos)
    
    #print _results[0]
    #print len(results), _results.shape, _results.size
    #indices =  (np.argsort(matrix[start_sid, :]).T)[::-1][0:3]
    #print indices[::-1]
    #for i in indices:
    #    print i[0,0]
    
    #for i in itertools.permutations(order,length):
    #    print i
    #print node_map
    '''
    my_matrix = np.zeros((len(G), len(G)))
    
    node_map = {node: key for (key, node) in enumerate(G)}
    #for i, node in enumerate(G):
    
    for i, node in enumerate(G):
        for sid, info in G[node].iteritems():
            j = node_map[sid]
            my_matrix[i,j] = info['weight']
    
    n1 = 10
    n2 = 10
    
    total_weights = 0
    for sid, info in G[n1].iteritems():
        total_weights += info['weight']
    
    print my_matrix[n1, n2], matrix[n1, n2], total_weights
    '''
    '''
    for node in G:
        print node,
        for neighbor in G[node]:
            print neighbor,
        print
    '''
    
    
    #print '11,10', matrix[11, 10]
    #print '10,11', matrix[10, 11]
    #print order
    
    #it = np.nditer(matrix[start_sid, 0:20], flags=['c_index'])
    #while not it.finished:
    #    print it.index, order[it.index], it[0]
    #    it.iternext()
    '''
    # plot parameters
    imw = 1024.0 # the full image width
    imh = 1024.0
    lm = 40.0
    rm = 50.0
    tm = 50.0
    bm = 50.0
    res = 72.0
    cbh = 20.0
    cbs = 40.0
    
    #arial14 = mpl.font_manager.FontProperties(family='Arial', style='normal', variant='normal', size=14)
    #arial12 = mpl.font_manager.FontProperties(family='Arial', style='normal', variant='normal', size=12)
    #arial10 = mpl.font_manager.FontProperties(family='Arial', style='normal', variant='normal', size=10)
    #arial7_light = mpl.font_manager.FontProperties(family='Arial', style='normal', variant='normal', size=7, weight='light')
    
    imwi = imw/res
    imhi = imh/res
    fig = mplt.figure(figsize=(imwi, imhi), dpi=res)
    ph = imh - tm - bm - cbh - cbs # the height for both matricies
    pw = imw - lm - rm
    shear_ax = fig.add_axes((lm/imw, (bm+cbh+cbs)/imh, pw/imw, ph/imh))
    
    shear_ax.imshow(matrix.T, interpolation='none')
    #shear_ax.axis('tight')
    #shear_ax.set_ylim((15.5, 0.5))
    #shear_ax.set_xlim((0.5, 15.5))
    '''
    
    
    '''
    fig.savefig('local/graph_matrix.png', format='png')
    '''
    '''
Example #16
def find_event_sequence(graph_file, start_sid, length, top=3):
    with open(graph_file, 'rb') as f:
        G = pickle.load(f)

    matrix, pos_sid = nx.attr_matrix(G, edge_attr='weight', normalized=True)

    sid_pos = {sid: position for (position, sid) in enumerate(pos_sid)}

    results = []
    find_event_sequence_r(start_sid, matrix, pos_sid, sid_pos, length, results,
                          [], top)

    _results = np.reshape(np.array(results), (-1, length + 1))

    ret_unsorted = [{
        'sequence':
        _results[i],
        'probability':
        sequence_probability(_results[i], matrix, sid_pos)
    } for i in range(_results.shape[0])]

    ret_sorted = [
        x for x in sorted(
            ret_unsorted, key=lambda x: x['probability'], reverse=True)
    ]

    return ret_sorted
    #for i in range(_results.shape[0]):
    #    print _results[i], sequence_probability(_results[i], matrix, sid_pos)

    #print _results[0]
    #print len(results), _results.shape, _results.size
    #indices =  (np.argsort(matrix[start_sid, :]).T)[::-1][0:3]
    #print indices[::-1]
    #for i in indices:
    #    print i[0,0]

    #for i in itertools.permutations(order,length):
    #    print i
    #print node_map
    '''
    my_matrix = np.zeros((len(G), len(G)))
    
    node_map = {node: key for (key, node) in enumerate(G)}
    #for i, node in enumerate(G):
    
    for i, node in enumerate(G):
        for sid, info in G[node].iteritems():
            j = node_map[sid]
            my_matrix[i,j] = info['weight']
    
    n1 = 10
    n2 = 10
    
    total_weights = 0
    for sid, info in G[n1].iteritems():
        total_weights += info['weight']
    
    print my_matrix[n1, n2], matrix[n1, n2], total_weights
    '''
    '''
    for node in G:
        print node,
        for neighbor in G[node]:
            print neighbor,
        print
    '''

    #print '11,10', matrix[11, 10]
    #print '10,11', matrix[10, 11]
    #print order

    #it = np.nditer(matrix[start_sid, 0:20], flags=['c_index'])
    #while not it.finished:
    #    print it.index, order[it.index], it[0]
    #    it.iternext()
    '''
    # plot parameters
    imw = 1024.0 # the full image width
    imh = 1024.0
    lm = 40.0
    rm = 50.0
    tm = 50.0
    bm = 50.0
    res = 72.0
    cbh = 20.0
    cbs = 40.0
    
    #arial14 = mpl.font_manager.FontProperties(family='Arial', style='normal', variant='normal', size=14)
    #arial12 = mpl.font_manager.FontProperties(family='Arial', style='normal', variant='normal', size=12)
    #arial10 = mpl.font_manager.FontProperties(family='Arial', style='normal', variant='normal', size=10)
    #arial7_light = mpl.font_manager.FontProperties(family='Arial', style='normal', variant='normal', size=7, weight='light')
    
    imwi = imw/res
    imhi = imh/res
    fig = mplt.figure(figsize=(imwi, imhi), dpi=res)
    ph = imh - tm - bm - cbh - cbs # the height for both matricies
    pw = imw - lm - rm
    shear_ax = fig.add_axes((lm/imw, (bm+cbh+cbs)/imh, pw/imw, ph/imh))
    
    shear_ax.imshow(matrix.T, interpolation='none')
    #shear_ax.axis('tight')
    #shear_ax.set_ylim((15.5, 0.5))
    #shear_ax.set_xlim((0.5, 15.5))
    '''
    '''
    fig.savefig('local/graph_matrix.png', format='png')
    '''
    '''
Example #17
w=[i for i,x in enumerate(q) if x == 1]
##print w[0]

##Closeness centrality of each node

for v in nodes(g):
    c=closeness_centrality(g,v)
    print("Closeness centrality  of %s is %s" %(v,c))
    k.append(c)


 #obtain the adj. matrix for the graph

print('Adjacency Matrix of the graph')
T = nx.attr_matrix(g, rc_order=[0, 1, 2, 3, 4, 5, 6, 7])
print(T)

for i in range(8):
    q = np.sum(T[i])
    d.setdefault(i, []).append(q)
values = d.values()
l= list(d.values())
##print l
l1 = list(itertools.chain(*l))
##print l1
##for i in range(5):
##     print g.degree(i)


#Transition Probability Matrix T
Example #18
def mc(rho, beta, eps, config):
    N = config['N']
    D = config['D']
    dw = config['dw']
    dr = config['dr']
    sweeps = config['sweeps']

    sqrtD = math.sqrt(D)
    sqrtN = math.sqrt(N)
    norm = sqrtD
    Q = norm/sqrtD
    Gamma = math.sqrt(1 - rho*rho)/rho

    w = np.random.randn(N,D)
    w *= norm/np.sqrt((w*w).sum(axis=1))[:,None]

    z = np.ones(D)
    z *= norm/np.linalg.norm(z)
    h = z.dot(w.T)/sqrtD

    G = nx.complete_graph(N)
    # G = nx.connected_watts_strogatz_graph(N, 10, .1)
    A0 = np.asarray(nx.attr_matrix(G)[0])/2
    # A0 *= (1+ h*h[:,None]/D)/2
    # A0 += 0.4*np.random.randn(*A0.shape)
    # A0 -= np.diag(np.diag(A0))
    A = A0.copy()

    def measure():
        # h = z.dot(w.T)/D
        m = h.mean()/sqrtD
        v = h.var()
        q = np.abs(A*h*h[:,None]).mean()
        Enemies = np.where(A < A0, 1, 0)
        n = Enemies.mean()
        data = pd.Series([m,v,q,n],
                         index="m v q n".split(" "))
        return data

    trace = pd.DataFrame()
    measured = measure()
    measured["acceptance_ratio"] = 0
    trace = trace.append(measured, ignore_index=True)

    def energy(hi, hj, rij):
        x = hi*np.sign(hj)/Gamma/sqrt2
        # Jij = np.sign(rij - 1/2)
        Jij = 1.0
        a = Jij
        return -a*Gamma*Gamma*np.log(eps + (1-2*eps)*erfc(-x)/2)

    accepted = 0
    for t in range(sweeps*N):
        i = np.random.choice(N)
        ni = np.where(A[i] != 0, 1, 0)
        # ni = A[i]
        pij = ni/ni.sum()
        j = np.random.choice(N, p=pij)

        # h = z.dot(w.T)/sqrtD
        hi, hj = h[i], h[j]

        # rij0 = A[i,j]
        # rij = rij0 + rij0*(1-rij0)*np.sign(hi*hj)*dr
        # rij = rij + pI*(1-pI)*np.sign(hi*hj)*dr
        # A[i,j] = rij

        pI = (1+hi*hj/D)/2
        rij0 = A[i,j]
        rij = pI
        # pI = 1.0
        # pI = rij
        if np.random.rand() < pI:
            E0 = energy(hi, hj, rij0)
            u = np.random.randn(D)
            nw = dw*u/np.linalg.norm(u) + w[i]
            nw *= norm/np.linalg.norm(nw)
            nhi = z.dot(nw)/sqrtD
            E = energy(nhi, hj, rij)
            if E < E0 or np.random.rand() < math.exp(beta*(E0-E)):
                accepted += 1
                w[i] = nw.copy()
                h[i] = nhi
                A[i,j] = pI

        if t%N == 0:
            measured = measure()
            measured["acceptance_ratio"] = accepted/(t+1)
            trace = trace.append(measured, ignore_index=True)

    p = (eps, rho, beta)
    names = ("epsilon","rho","beta")
    stats = pd.Panel({p:trace})
    result = {"statistics":stats}
    result["agents"]= pd.Panel({p:pd.DataFrame(w)})
    result["network"] = pd.Panel({p:pd.DataFrame(A)})
    for k,v in result.items():
        v.items.set_names(names, inplace=True)
    return result
Example #19
def exhaustive_enumeration(graph,
                           source,
                           target,
                           edge_attr,
                           normalize=False,
                           rm_diag=False):
    """Calculate the probabiliy of all forward paths between source and target

    Parameters
    ----------
    graph : networkx.DiGraph.
        networkx.DiGraph. Can be built from a transition matrix (numpy matrix/array) using networkx.from_numpy_matrix.

    source : int.
        Starting node for all paths

    target : list (dtype=int).
        List of nodes at which the paths can end.

    edge_attr : str.
        Edge attribute that is used to build transition matrix. Only use 'weight' if it's an explicitly defined edge
        attribute, otherwise networkx will build an adjacency matrix instead of a transition matrix.

    normalize : bool.
        If True, normalize each path probability by the sum of all probabilities, so they sum to 1.

    rm_diag : bool.
        If True, the matrix diagonal, i.e. the probability of self-looping, is set to 0. This will skew the path
        probabilities.

    Returns
    -------
    path_probs : dict.
        Dictionary with paths (dtype=tuple) as dict. keys and probabilities as dict. values.
    """
    # Check arguments.
    if edge_attr == 'weight':
        print(
            "If edge_attr='weight', the transition matrix might be a simple adjacency matrix, unless 'weight' is an explicitly defined edge attribute of your DiGraph."
        )

    # Get all shortest paths from the source to every target node.
    all_paths = []
    pairs = [(source, t) for t in target]
    for pair in pairs:
        all_paths.extend(list(nx.all_shortest_paths(graph, pair[0], pair[1])))

    # Build transition matrix.
    try:
        P = np.array(nx.attr_matrix(graph, edge_attr)[0])
        T = add_self_probability(P)
    except KeyError:
        raise Exception(
            "Edge attribute %s does not exist. Add edge attribute or choose existing."
            % edge_attr)

    if rm_diag:
        T = rm_self_prob(T)

    # Calculate all path probabilities.
    path_probs = {}
    for path in all_paths:
        path_probs[tuple(path)] = path_prob(path, T)

    if normalize:
        # Normalize, i.e. divide by the sum of all path probabilities.
        p_sum = sum(path_probs.values())
        for path, prob in path_probs.items():
            path_probs[path] = prob / p_sum

    return path_probs
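A hypothetical run on a 3-state chain, assuming exhaustive_enumeration and its helpers (add_self_probability, path_prob) from the same module are in scope; the graph and the 'prob' attribute are invented for illustration:

import networkx as nx

T = nx.DiGraph()
T.add_edge(0, 1, prob=0.7)
T.add_edge(0, 2, prob=0.3)
T.add_edge(1, 2, prob=1.0)

probs = exhaustive_enumeration(T, source=0, target=[2], edge_attr="prob")
print(probs)  # maps path tuples to their probabilities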
Example #20
def mc(rho, beta, eps, config):
    N = config['N']
    D = config['D']
    dw = config['dw']
    dr = config['dr']
    sweeps = config['sweeps']

    sqrtD = math.sqrt(D)
    sqrtN = math.sqrt(N)
    norm = sqrtD
    Q = norm/sqrtD
    Gamma = math.sqrt(1 - rho*rho)/rho

    w = np.random.randn(N,D)
    w *= norm/np.sqrt((w*w).sum(axis=1))[:,None]

    z = np.ones(D)
    z *= norm/np.linalg.norm(z)
    h = z.dot(w.T)/sqrtD

    G = nx.complete_graph(N)
    A0 = np.asarray(nx.attr_matrix(G)[0])/2
    A = A0.copy()

    def measure():
        m = h.mean()/sqrtD
        r = np.abs(h).mean()/sqrtD
        v = h.var()
        q = np.abs(A*h*h[:,None]).mean()
        enemies = np.where(A>1/2,0,1)
        n = (enemies-np.diag(np.diag(enemies))).mean()
        data = pd.Series([m,v,q,r,n],
                         index="m v q r n".split(" "))
        return data

    trace = pd.DataFrame()
    measured = measure()
    measured["acceptance_ratio"] = 0
    trace = trace.append(measured, ignore_index=True)

    def energy(hi, hj):
        x = hi*np.sign(hj)/Gamma/sqrt2
        Jij = 1/D
        a = Jij
        return -a*Gamma*Gamma*np.log(eps + (1-2*eps)*erfc(-x)/2)

    accepted = 0
    for t in range(sweeps*N):
        i = np.random.choice(N)
        ni = A[i]
        pij = ni/ni.sum()
        j = np.random.choice(N, p=pij)

        hi, hj = h[i], h[j]
        rij = A[i,j]
        rij = rij + dr*rij*(1-rij)*np.sign(hi*hj)
        A[i,j] = rij

        E0 = energy(hi, hj)
        nw = np.random.multivariate_normal(w[i], dw*np.eye(D))
        # u = np.random.randn(D)
        # nw = dw*u/np.linalg.norm(u) + w[i]
        nw *= norm/np.linalg.norm(nw)
        nhi = z.dot(nw)/sqrtD
        E = energy(nhi, hj)
        if E < E0 or np.random.rand() < math.exp(beta*(E0-E)):
            accepted += 1
            w[i] = nw.copy()
            h[i] = nhi

        if t%N == 0:
            measured = measure()
            measured["acceptance_ratio"] = accepted/(t+1)
            trace = trace.append(measured, ignore_index=True)

    p = (eps, rho, beta)
    names = ("epsilon","rho","beta")
    stats = pd.Panel({p:trace})
    result = {"statistics":stats}
    result["agents"]= pd.Panel({p:pd.DataFrame(w)})
    result["network"] = pd.Panel({p:pd.DataFrame(A)})
    for k,v in result.items():
        v.items.set_names(names, inplace=True)
    return result
Example #21
    def state(self):
        w = np.vstack(list(nx.get_node_attributes(self.network, "vector").values()))
        A = np.asarray(nx.attr_matrix(self.network, edge_attr="weight")[0])
        return {"vectors": w, "adjacency": A}
Example #22
def main():
    # Read the retweet pickle files
    # Build a networkx graph from the loaded data
    with open(
            '../TweetOldSerialization/pickle/BiotestamentoGraph/Gennaio/retweetListBlue.pkl',
            'rb') as input:
        retweetListBlue = pickle.load(input)

    with open(
            '../TweetOldSerialization/pickle/BiotestamentoGraph/Gennaio/retweetListRed.pkl',
            'rb') as input:
        retweetListRed = pickle.load(input)

    with open(
            '../TweetOldSerialization/pickle/BiotestamentoGraph/Gennaio/retweetListYellow.pkl',
            'rb') as input:
        retweetListYellow = pickle.load(input)

    with open(
            '../TweetOldSerialization/pickle/BiotestamentoGraph/Gennaio/probRetBlue.pkl',
            'rb') as input:
        probRetBlue = pickle.load(input)

    with open(
            '../TweetOldSerialization/pickle/BiotestamentoGraph/Gennaio/probRetRed.pkl',
            'rb') as input:
        probRetRed = pickle.load(input)
    List = []

    for i in retweetListBlue:

        List.append(i)
    for i in retweetListRed:

        List.append(i)

    DizPesi = {}

    for i in probRetBlue:
        if i not in DizPesi:
            DizPesi[i] = probRetBlue[i]

    for i in probRetRed:
        if i not in DizPesi:
            DizPesi[i] = probRetRed[i]

    nodi_Blue = NodeDict(retweetListBlue)
    nodi_Red = NodeDict(retweetListRed)

    G = createGraph(List, DizPesi)
    size_node_degree = []

    print "Numero NODI", len(G.nodes)
    UpdateNode(retweetListYellow, nodi_Blue)
    UpdateNode(retweetListYellow, nodi_Red)
    #print(test)

    posizioneBlue = PosNode(G.nodes(), nodi_Blue)
    posizioneRed = PosNode(G.nodes(), nodi_Red)

    dizPosizioneBlue = PosNodeDizionario(G.nodes, nodi_Blue)
    dizPosizioneRed = PosNodeDizionario(G.nodes, nodi_Red)
    #dizPosizioneYellow = PosNodeDizionario(G.nodes,nodi_Yellow);

    #List of Polarization of Elite and Listener
    firstPolar = setFirstPolarization(G, dizPosizioneBlue, dizPosizioneRed)

    #print "Passo 0 di polarizzazione ",firstPolar

    dictFirstPol = {}
    x = 0
    for i in G.nodes():
        if i not in dictFirstPol:
            dictFirstPol[i] = firstPolar[x]
            x = x + 1

    node_list = []
    for i in G.nodes():
        node_list.append(i)

    # adjacency matrix built from the node list
    mat_attr = nx.attr_matrix(G, rc_order=node_list)

    at_array = np.array(mat_attr)

    newPol = opinionPolarization(G, at_array, firstPolar, node_list)

    dictPol = {}
    x = 0
    for i in G.nodes():
        if i not in dictPol:
            dictPol[i] = newPol[x]
            x = x + 1

    print(len(G.nodes))
    #size = float(len(set(partition.values())))

    # change node colors according to their degree
    #Polar = Polarization(p_array,posizioneRed,posizioneBlue,len(G.nodes),matriceProbRetweet)

    # works with the partition
    node_color = colorNode(G, nodi_Blue, nodi_Red)

    #node_colorPol= colorNodePol(len(G.nodes()),newPol)

    testdict = opinionPolarizationDict(G, at_array, firstPolar, node_list)

    print("testdict ", testdict)
    list_lastPol = testdict.get(len(testdict) - 1)
    #print(list_lastPol)
    #print(set(testdict[1]))
    node_colorPol = colorNodePol(len(G.nodes()), list_lastPol)

    test = {}
    x = 0
    for i in G.nodes():
        if i not in testdict:
            test[i] = testdict.get(len(testdict) - 1)[x]
            x = x + 1

    for i in range(0, len(testdict)):
        if i + 1 == (len(testdict) - 1):
            break
        print("i", i, "j", i + 1, " simili=",
              set(testdict[i]) == set(testdict[i + 1]))

    #labels= labelPolarization(Polar,G,nodi_Blue,nodi_Red)

    pos = nx.spring_layout(G)
    # For the partition:
    # list_nodes=[]
    # for com in set(partition.values()):
    #     count = count + 1.
    #     x=0
    #     for nodes in partition.keys():
    #        # print "nodes",nodes
    #         if partition[nodes] == com :
    #             list_nodes.append(nodes)

    # with the partition
    #nx.draw_networkx_nodes(G, pos, list_nodes, with_labels=False, node_color=node_color)

    nx.write_gpickle(G,
                     '../Test/Biotestamento/Gennaio/grafoBiotestVen.pickle',
                     protocol=pickle.HIGHEST_PROTOCOL)
    with open(
            '../Test/Biotestamento/Gennaio/dizionarioPolarizzazioneVenezuela.pickle',
            "wb") as output:
        pickle.dump(test, output, pickle.HIGHEST_PROTOCOL)
    with open(
            '../Test/Biotestamento/Gennaio/listaColoriPolarizzazioneVenezuela.pickle',
            "wb") as output:
        pickle.dump(node_colorPol, output, pickle.HIGHEST_PROTOCOL)

    nx.draw_networkx_nodes(G,
                           pos,
                           nodelist=G.nodes(),
                           node_color=node_colorPol)

    nx.draw_networkx_edges(G, pos, alpha=0.5, edge_color='b')

    nx.draw_networkx_labels(G, pos, test, font_size=8)

    plt.savefig("../Test/Biotestamento/Gennaio/PolarizzazioneVene.png",
                format="PNG")

    plt.show()
Example #23
def main():
    # Read the retweet pickle files
    # Build a networkx graph from the loaded data

    with open(
            '../TweetOldSerialization/pickle/ElezioniSiciliaGraph/Dicembre/retweetListBlue.pkl',
            'rb') as input:
        retweetListBlue = pickle.load(input)

    with open(
            '../TweetOldSerialization/pickle/ElezioniSiciliaGraph/Dicembre/retweetListRed.pkl',
            'rb') as input:
        retweetListRed = pickle.load(input)

    with open(
            '../TweetOldSerialization/pickle/ElezioniSiciliaGraph/Dicembre/retweetListYellow.pkl',
            'rb') as input:
        retweetListYellow = pickle.load(input)

    with open(
            '../TweetOldSerialization/pickle/ElezioniSiciliaGraph/Dicembre/probRetBlue.pkl',
            'rb') as input:
        probRetBlue = pickle.load(input)

    with open(
            '../TweetOldSerialization/pickle/ElezioniSiciliaGraph/Dicembre/probRetRed.pkl',
            'rb') as input:
        probRetRed = pickle.load(input)
    List = []

    for i in retweetListBlue:

        List.append(i)
    for i in retweetListRed:

        List.append(i)

    DizPesi = {}

    for i in probRetBlue:
        if i not in DizPesi:
            DizPesi[i] = probRetBlue[i]

    for i in probRetRed:
        if i not in DizPesi:
            DizPesi[i] = probRetRed[i]

    nodi_Blue = NodeDict(retweetListBlue)
    nodi_Red = NodeDict(retweetListRed)

    G = createGraph(List, DizPesi)

    size_node_degree = []

    UpdateNode(retweetListYellow, nodi_Blue)
    UpdateNode(retweetListYellow, nodi_Red)
    #print(test)

    posizioneBlue = PosNode(G.nodes(), nodi_Blue)
    posizioneRed = PosNode(G.nodes(), nodi_Red)

    dizPosizioneBlue = PosNodeDizionario(G.nodes, nodi_Blue)
    dizPosizioneRed = PosNodeDizionario(G.nodes, nodi_Red)

    #List of Polarization of Elite and Listener
    firstPolar = setFirstPolarization(G, dizPosizioneBlue, dizPosizioneRed)

    dictFirstPol = {}
    x = 0
    for i in G.nodes():
        if i not in dictFirstPol:
            dictFirstPol[i] = firstPolar[x]
            x = x + 1

    node_list = []
    for i in G.nodes():
        node_list.append(i)

    # adjacency matrix built from the node list
    mat_attr = nx.attr_matrix(G, rc_order=node_list)

    at_array = np.array(mat_attr)

    newPol = opinionPolarization(G, at_array, firstPolar, node_list)

    dictPol = {}
    x = 0
    for i in G.nodes():
        if i not in dictPol:
            dictPol[i] = newPol[x]
            x = x + 1

    print(len(G.nodes))

    # works with the partition
    node_color = colorNode(G, nodi_Blue, nodi_Red)

    testdict = opinionPolarizationDict(G, at_array, firstPolar, node_list)

    print("testdict ", testdict)
    list_lastPol = testdict.get(len(testdict) - 1)

    node_colorPol = colorNodePol(len(G.nodes()), list_lastPol)

    test = {}
    x = 0
    for i in G.nodes():
        if i not in testdict:
            test[i] = testdict.get(len(testdict) - 1)[x]
            x = x + 1

    for i in range(0, len(testdict)):
        if i + 1 == (len(testdict) - 1):
            break
        print("i", i, "j", i + 1, " simili=",
              set(testdict[i]) == set(testdict[i + 1]))

    pos = nx.spring_layout(G)

    nx.write_gpickle(G,
                     '../Test/Sicilia/Dicembre/grafoSiciliaVen.pickle',
                     protocol=pickle.HIGHEST_PROTOCOL)
    with open(
            '../Test/Sicilia/Dicembre/dizionarioPolarizzazioneVenezuela.pickle',
            "wb") as output:
        pickle.dump(test, output, pickle.HIGHEST_PROTOCOL)
    with open(
            '../Test/Sicilia/Dicembre/listaColoriPolarizzazioneVenezuela.pickle',
            "wb") as output:
        pickle.dump(node_colorPol, output, pickle.HIGHEST_PROTOCOL)

    nx.draw_networkx_nodes(G,
                           pos,
                           nodelist=G.nodes(),
                           node_color=node_colorPol)

    nx.draw_networkx_edges(G, pos, alpha=0.5, edge_color='b')

    nx.draw_networkx_labels(G, pos, test, font_size=8)

    plt.savefig("../Test/Sicilia/Dicembre/PolarizzazioneVene.png",
                format="PNG")

    plt.show()
Example #24
##no.of visits to each node is first assigned to 1

for i in range(r):
    q.append(1)
##print q

e=q

w=[i for i,x in enumerate(q) if x == 1]
##print w[0]


 #obtain the adj. matrix for the graph

print('Adjacency Matrix of the graph')
T = nx.attr_matrix(g, rc_order=z)
##print T

for i in range(r):
    q = np.sum(T[i])
    d.setdefault(i, []).append(q)
values = d.values()
l= list(d.values())
##print l
l1 = list(itertools.chain(*l))
##print l1
##for i in range(5):
##     print g.degree(i)


#Transition Probability Matrix T
Example #25
    
    def showGraph(self):
        nx.draw(self.G, with_labels=True, font_weight='bold')
        plt.show()

if __name__ == "__main__":
    n_nodes = 6
    edges = [(0,1),(0,2),(1,2),(0,3),(3,4),(3,5),(4,5)]

    graph = Graphx(n_nodes=n_nodes, edges=edges, nname_prefix="")
    print(f"\n==============\nGraph:\n {graph}")
    print(f"Nodes:\n {graph.G.nodes.data()}")
    
    # Obtain adjacency matrix A; attr_matrix returns a (matrix, ordering) pair
    A = np.array(nx.attr_matrix(graph.G)[0])
    X = np.array(nx.attr_matrix(graph.G)[1])  # the node ordering doubles as a scalar feature
    X = np.expand_dims(X, 1)
    print(f"\n==============\nAdj matrix:\n {A}")
    print(f"\n==============\nNode feature matrix:\n {X}")
    
    # Shift operator 
    print(f"A mul X: {A@X}")

    # Take the node feature into account
    graph.G = graph.G.copy() 
    self_loops = [] 
    for id, _ in graph.G.nodes.data():
        self_loops.append((id, id))
    print(self_loops)
    graph.addEdges(edges=self_loops)
Example #26
def generate_event_sequence(graph_file, start_sid, length=100, runs=1):
    with open(graph_file, 'rb') as f:
        G = pickle.load(f)
    
    matrix, pos_sid = nx.attr_matrix(G, edge_attr='weight', normalized=True)
    sid_pos = {sid: position for (position, sid) in enumerate(pos_sid)}
    
    duration_mean_matrix, pos_sid_mean = nx.attr_matrix(G, edge_attr='duration_mean')
    
    raw_output = np.empty((length,runs))
    output = np.empty(length)
    time = np.empty(length)
    
    current_run = 0
    while current_run < runs:
        current_step = 0
        current_time = 0.0
        current_node = start_sid
        while current_step < length:
            
            raw_output[current_step, current_run] = current_node
            time[current_step] = current_time
            
            out_probs = np.cumsum(matrix[sid_pos[current_node]], axis=1)
            
            #for out_node in G[current_node]:
            #    print out_node, matrix[sid_pos[current_node], sid_pos[out_node]], out_probs[0,sid_pos[out_node]]
            
            choice = np.random.random_sample()
            
            choice_index = np.argwhere(out_probs<choice)
            
            try:
                next_node = pos_sid[choice_index[-1,0,-1]+1]
            except IndexError:
                next_node = pos_sid[0]
            
            current_time += duration_mean_matrix[sid_pos[current_node], sid_pos[next_node]]
            
            current_node = next_node
            
            #try:
            #    print choice, choice_index[-1,0,-1], pos_sid[choice_index[-1,0,-1]+1]
            #except IndexError:
            #    print 0, pos_sid[0]
            
            #print choice, choice_index[-1,0,-1], pos_sid[choice_index[-1,0,-1]+1]
            
            current_step += 1
        current_run += 1


    print(raw_output.shape)

    for index in range(length):
        output[index] = np.mean(raw_output[index])

    xs = []
    ys = []

    for index in range(length):
        if index < length - 1:
            xs.append(output[index])
            ys.append(output[index+1])


    # plot parameters
    imw = 1024.0 # the full image width
    imh = 1024.0
    lm = 40.0
    rm = 50.0
    tm = 50.0
    bm = 50.0
    res = 72.0
    
    imwi = imw/res
    imhi = imh/res
    fig = mplt.figure(figsize=(imwi, imhi), dpi=res)
    ph = imh - tm - bm # the height for both matricies
    pw = imw - lm - rm
    ax = fig.add_axes((lm/imw, bm/imh, pw/imw, ph/imh))

    #ax.plot(time, output)
    #ax.set_ylim((1, max(pos_sid)))

    ax.scatter(xs,ys)
    ax.set_ylim((0.5, max(pos_sid)+0.5))
    ax.set_xlim((0.5, max(pos_sid)+0.5))
Example #27
def mc(rho, beta, eps, config):
    N = config['N']
    D = config['D']
    dw = config['dw']
    dr = config['dr']
    sweeps = config['sweeps']

    sqrtD = math.sqrt(D)
    sqrtN = math.sqrt(N)
    norm = sqrtD
    Q = norm / sqrtD
    Gamma = math.sqrt(1 - rho * rho) / rho

    w = np.random.randn(N, D)
    w *= norm / np.sqrt((w * w).sum(axis=1))[:, None]

    z = np.ones(D)
    z *= norm / np.linalg.norm(z)
    h = z.dot(w.T) / sqrtD

    G = nx.complete_graph(N)
    # G = nx.connected_watts_strogatz_graph(N, 10, .1)
    A0 = np.asarray(nx.attr_matrix(G)[0]) / 2
    # A0 *= (1+ h*h[:,None]/D)/2
    # A0 += 0.4*np.random.randn(*A0.shape)
    # A0 -= np.diag(np.diag(A0))
    A = A0.copy()

    def measure():
        # h = z.dot(w.T)/D
        m = h.mean() / sqrtD
        v = h.var()
        q = np.abs(A * h * h[:, None]).mean()
        Enemies = np.where(A < A0, 1, 0)
        n = Enemies.mean()
        data = pd.Series([m, v, q, n], index="m v q n".split(" "))
        return data

    trace = pd.DataFrame()
    measured = measure()
    measured["acceptance_ratio"] = 0
    trace = trace.append(measured, ignore_index=True)

    def energy(hi, hj, rij):
        x = hi * np.sign(hj) / Gamma / sqrt2
        # Jij = np.sign(rij - 1/2)
        Jij = 1.0
        a = Jij
        return -a * Gamma * Gamma * np.log(eps + (1 - 2 * eps) * erfc(-x) / 2)

    accepted = 0
    for t in range(sweeps * N):
        i = np.random.choice(N)
        ni = np.where(A[i] != 0, 1, 0)
        # ni = A[i]
        pij = ni / ni.sum()
        j = np.random.choice(N, p=pij)

        # h = z.dot(w.T)/sqrtD
        hi, hj = h[i], h[j]

        # rij0 = A[i,j]
        # rij = rij0 + rij0*(1-rij0)*np.sign(hi*hj)*dr
        # rij = rij + pI*(1-pI)*np.sign(hi*hj)*dr
        # A[i,j] = rij

        pI = (1 + hi * hj / D) / 2
        rij0 = A[i, j]
        rij = pI
        # pI = 1.0
        # pI = rij
        if np.random.rand() < pI:
            E0 = energy(hi, hj, rij0)
            u = np.random.randn(D)
            nw = dw * u / np.linalg.norm(u) + w[i]
            nw *= norm / np.linalg.norm(nw)
            nhi = z.dot(nw) / sqrtD
            E = energy(nhi, hj, rij)
            if E < E0 or np.random.rand() < math.exp(beta * (E0 - E)):
                accepted += 1
                w[i] = nw.copy()
                h[i] = nhi
                A[i, j] = pI

        if t % N == 0:
            measured = measure()
            measured["acceptance_ratio"] = accepted / (t + 1)
            trace = trace.append(measured, ignore_index=True)

    p = (eps, rho, beta)
    names = ("epsilon", "rho", "beta")
    stats = pd.Panel({p: trace})
    result = {"statistics": stats}
    result["agents"] = pd.Panel({p: pd.DataFrame(w)})
    result["network"] = pd.Panel({p: pd.DataFrame(A)})
    for k, v in result.items():
        v.items.set_names(names, inplace=True)
    return result
Example #28
def generate_event_sequence(graph_file, start_sid, length=100, runs=1):
    G = cPickle.load(open(graph_file, 'rb'))

    matrix, pos_sid = nx.attr_matrix(G, edge_attr='weight', normalized=True)
    sid_pos = {sid: position for (position, sid) in enumerate(pos_sid)}

    duration_mean_matrix, pos_sid_mean = nx.attr_matrix(
        G, edge_attr='duration_mean')

    raw_output = np.empty((length, runs))
    output = np.empty(length)
    time = np.empty(length)

    current_run = 0
    while current_run < runs:
        current_step = 0
        current_time = 0.0
        current_node = start_sid
        while current_step < length:

            raw_output[current_step, current_run] = current_node
            time[current_step] = current_time

            out_probs = np.cumsum(matrix[sid_pos[current_node]], axis=1)

            #for out_node in G[current_node]:
            #    print out_node, matrix[sid_pos[current_node], sid_pos[out_node]], out_probs[0,sid_pos[out_node]]

            choice = np.random.random_sample()

            choice_index = np.argwhere(out_probs < choice)

            try:
                next_node = pos_sid[choice_index[-1, 0, -1] + 1]
            except IndexError:
                next_node = pos_sid[0]

            current_time += duration_mean_matrix[sid_pos[current_node],
                                                 sid_pos[next_node]]

            current_node = next_node

            #try:
            #    print choice, choice_index[-1,0,-1], pos_sid[choice_index[-1,0,-1]+1]
            #except IndexError:
            #    print 0, pos_sid[0]

            #print choice, choice_index[-1,0,-1], pos_sid[choice_index[-1,0,-1]+1]

            current_step += 1
        current_run += 1

    print(raw_output.shape)

    for index in range(length):
        output[index] = np.mean(raw_output[index])

    xs = []
    ys = []

    for index in range(length):
        if index < length - 1:
            xs.append(output[index])
            ys.append(output[index + 1])

    # plot parameters
    imw = 1024.0  # the full image width
    imh = 1024.0
    lm = 40.0
    rm = 50.0
    tm = 50.0
    bm = 50.0
    res = 72.0

    imwi = imw / res
    imhi = imh / res
    fig = mplt.figure(figsize=(imwi, imhi), dpi=res)
    ph = imh - tm - bm  # the height for both matricies
    pw = imw - lm - rm
    ax = fig.add_axes((lm / imw, bm / imh, pw / imw, ph / imh))

    #ax.plot(time, output)
    #ax.set_ylim((1, max(pos_sid)))

    ax.scatter(xs, ys)
    ax.set_ylim((0.5, max(pos_sid) + 0.5))
    ax.set_xlim((0.5, max(pos_sid) + 0.5))
Example #29
    def state(self):
        w = np.vstack(list(nx.get_node_attributes(self.network, "vector").values()))
        A = np.asarray(nx.attr_matrix(self.network, edge_attr="weight")[0])
        return {"vectors": w, "adjacency": A}
Example #30
def main():
    # Read the retweet pickle files
    # Build a networkx graph from the loaded data
    # with open('../TweetOldSerialization/pickle/#EleSiciliaTestAWS/retweetBlue#EleSicilia_2017-09-01_2017-12-20_data.pkl', 'rb') as input:
    #     retweetList = pickle.load(input)
    # #List = retweetList
    # with open('../TweetOldSerialization/pickle/#EleSiciliaTestAWS/retweetRed#EleSicilia_2017-09-01_2017-12-20_data.pkl', 'rb') as input:
    #     retweetListRed = pickle.load(input)
    # with open ('../TweetOldSerialization/pickle/#EleSiciliaTestAWS/tweet#EleSicilia_2017-09-01_2017-12-20_dictionaryReTweetBlue.pkl', 'rb') as input:
    #     probRetBlue = pickle.load(input)
    # with open('../TweetOldSerialization/pickle/#EleSiciliaTestAWS/tweet#EleSicilia_2017-09-01_2017-12-20_dictionaryReTweetRed.pkl','rb') as input:
    #     probRetRed = pickle.load(input)
    #
    # with open('../TweetOldSerialization/pickle/#EleSiciliaTestAWS/tweet#EleSicilia_2017-09-01_2017-12-20_dictionaryReTweetYellow.pkl','rb') as input:
    #     probYellowGraph = pickle.load(input)
    #
    # with open('../TweetOldSerialization/pickle/#EleSiciliaTestAWS/retweetYellow#EleSicilia_2017-09-01_2017-12-20_data.pkl', 'rb') as input:
    #     retweetListYellow = pickle.load(input)

    with open(
            '../TweetOldSerialization/pickle/BiotestamentoGraph/Settembre/retweetListBlue.pkl',
            'rb') as input:
        retweetListBlue = pickle.load(input)

    with open(
            '../TweetOldSerialization/pickle/BiotestamentoGraph/Settembre/retweetListRed.pkl',
            'rb') as input:
        retweetListRed = pickle.load(input)

    with open(
            '../TweetOldSerialization/pickle/BiotestamentoGraph/Settembre/retweetListYellow.pkl',
            'rb') as input:
        retweetListYellow = pickle.load(input)

    with open(
            '../TweetOldSerialization/pickle/BiotestamentoGraph/Settembre/probRetBlue.pkl',
            'rb') as input:
        probRetBlue = pickle.load(input)

    with open(
            '../TweetOldSerialization/pickle/BiotestamentoGraph/Settembre/probRetRed.pkl',
            'rb') as input:
        probRetRed = pickle.load(input)
    List = []

    for i in retweetListBlue:
        #ret= Retweet(retweetList[i].user,retweetList[i].retweet, retweetList[i].date)
        #print("Blue",i.user,i.retweet, i.date)
        List.append(i)
    for i in retweetListRed:
        #print("Red",i.user,i.retweet, i.date)
        #ret= Retweet(retweetListRed[i].user,retweetListRed[i].retweet, retweetListRed[i].date)
        #print("RED",retweetListRed[i].user,retweetListRed[i].retweet, retweetListRed[i].date)
        List.append(i)

    # for i in retweetListYellow:
    #      #print("Red",i.user,i.retweet, i.date)
    #      List.append(i)

    DizPesi = {}

    for i in probRetBlue:
        #print(i.edge,i.count)
        #prob = countOccReTweet(probRetBlue[i].edge, probRetBlue[i].count, probRetBlue[i].date)
        #print("Blue",probRetBlue[i].edge, probRetBlue[i].count, probRetBlue[i].date)
        # if probRetBlue[i].edge not in DizPesi:
        #     DizPesi[probRetBlue[i].edge]= probRetBlue[i].count
        if i not in DizPesi:
            DizPesi[i] = probRetBlue[i]

    for i in probRetRed:
        #prob = countOccReTweet(probRetRed[i].edge, probRetRed[i].count, probRetRed[i].date)
        #print("RED",probRetRed[i].edge, probRetRed[i].count, probRetRed[i].date)
        # if probRetRed[i].edge not in DizPesi:
        #     DizPesi[probRetRed[i].edge]= probRetRed[i].count
        if i not in DizPesi:
            DizPesi[i] = probRetRed[i]

    # for i in probYellowGraph:
    #     if not DizPesi.has_key(probYellowGraph[i].edge):
    #         DizPesi[probYellowGraph[i].edge]= probYellowGraph[i].count
    #print(DizPesi)
    nodi_Blue = NodeDict(retweetListBlue)
    nodi_Red = NodeDict(retweetListRed)
    #nodi_Yellow = NodeDict(retweetListYellow)
    #print nodi_Blue
    #G = createUndirectGraph(List)
    G = createGraph(List, DizPesi)
    size_node_degree = []

    UpdateNode(retweetListYellow, nodi_Blue)
    UpdateNode(retweetListYellow, nodi_Red)
    #print(test)

    posizioneBlue = PosNode(G.nodes(), nodi_Blue)
    posizioneRed = PosNode(G.nodes(), nodi_Red)
    #posizioneYellow = PosNode(G.nodes(),nodi_Yellow)
    dizPosizioneBlue = PosNodeDizionario(G.nodes, nodi_Blue)
    dizPosizioneRed = PosNodeDizionario(G.nodes, nodi_Red)
    #dizPosizioneYellow = PosNodeDizionario(G.nodes,nodi_Yellow);
    # print("Nodi=",G.nodes())
    # print("DizPosBlue",dizPosizioneBlue)
    # print("DizPosRed",dizPosizioneRed)
    # print("DizPosYelloq",dizPosizioneYellow)
    #print("Edge=",G.edges(data='weight'))
    #print("posRed",posizioneRed)
    #print ("posBlue",posizioneBlue)
    #print(G.nodes())
    #print("Differenze All-blue",G.nodes()-posizioneBlue)

    #matriceProbRetweet= matrixProbRet(DizPesi,dizPosizioneRed,dizPosizioneBlue,List,G)

    #List of Polarization of Elite and Listener
    firstPolar = setFirstPolarization(G, dizPosizioneBlue, dizPosizioneRed)

    #print "Passo 0 di polarizzazione ",firstPolar

    dictFirstPol = {}
    x = 0
    for i in G.nodes():
        if i not in dictFirstPol:
            dictFirstPol[i] = firstPolar[x]
            x = x + 1

    node_list = []
    for i in G.nodes():
        node_list.append(i)

    # adjacency matrix built from the node list
    mat_attr = nx.attr_matrix(G, rc_order=node_list)
    #print(mat_attr[1])

    at_array = np.array(mat_attr)

    newPol = opinionPolarization(G, at_array, firstPolar, node_list)

    dictPol = {}
    x = 0
    for i in G.nodes():
        if i not in dictPol:
            dictPol[i] = newPol[x]
            x = x + 1

#set the dangling vertices
#matrice=nx.google_matrix(G,alpha=1)
#p_array = np.array(matrice)

#print matrice,len(matrice),matrice[131]
# sumBlue=0.
# sumRed = 0.
# sumYellow =0.
# count =0;
# for i in range(0,len(p_array)):
#     if i in posizioneBlue:
#         sumBlue = sumBlue + p_array[15][i]
#         count = count+1
#
#     elif i in posizioneRed:
#         sumRed = sumRed +  p_array[15][i]
#         # print "sumBlue=",sumBlue,"i=",i,"j",j
#     elif i in posizioneYellow:
#         sumYellow= sumYellow + p_array[15][i]

#print p_array[15],"sumBlue",sumBlue,"sumRed",sumRed,"sumYellow",sumYellow,count,len(posizioneBlue),mat_attr

#partition = community.best_partition(G.to_undirected())
#print(partition)

    print(len(G.nodes))
    #size = float(len(set(partition.values())))

    # change node colors according to their degree
    #Polar = Polarization(p_array,posizioneRed,posizioneBlue,len(G.nodes),matriceProbRetweet)

    # works with the partition
    node_color = colorNode(G, nodi_Blue, nodi_Red)

    #node_colorPol= colorNodePol(len(G.nodes()),newPol)

    testdict = opinionPolarizationDict(G, at_array, firstPolar, node_list)

    print("testdict ", testdict)
    list_lastPol = testdict.get(len(testdict) - 1)
    #print(list_lastPol)
    #print(set(testdict[1]))
    node_colorPol = colorNodePol(len(G.nodes()), list_lastPol)

    test = {}
    x = 0
    for i in G.nodes():
        if i not in testdict:
            test[i] = testdict.get(len(testdict) - 1)[x]
            x = x + 1

    for i in range(0, len(testdict)):
        if i + 1 == (len(testdict) - 1):
            break
        print("i", i, "j", i + 1, " simili=",
              set(testdict[i]) == set(testdict[i + 1]))

    #labels= labelPolarization(Polar,G,nodi_Blue,nodi_Red)

    pos = nx.spring_layout(G)
    # For the partition
    # list_nodes=[]
    # for com in set(partition.values()):
    #     count = count + 1.
    #     x=0
    #     for nodes in partition.keys():
    #        # print "nodes",nodes
    #         if partition[nodes] == com :
    #             list_nodes.append(nodes)

    # with the partition
    #nx.draw_networkx_nodes(G, pos, list_nodes, with_labels=False, node_color=node_color)

    nx.write_gpickle(G,
                     '../Test/Biotestamento/Settembre/grafoBiotestVen.pickle',
                     protocol=pickle.HIGHEST_PROTOCOL)
    with open(
            '../Test/Biotestamento/Settembre/dizionarioPolarizzazioneVenezuela.pickle',
            "wb") as output:
        pickle.dump(test, output, pickle.HIGHEST_PROTOCOL)
    with open(
            '../Test/Biotestamento/Settembre/listaColoriPolarizzazioneVenezuela.pickle',
            "wb") as output:
        pickle.dump(node_colorPol, output, pickle.HIGHEST_PROTOCOL)

    # draw_networkx_nodes has no with_labels parameter; labels are drawn separately below
    nx.draw_networkx_nodes(G,
                           pos,
                           nodelist=G.nodes(),
                           node_color=node_colorPol)

    nx.draw_networkx_edges(G, pos, alpha=0.5, edge_color='b')

    nx.draw_networkx_labels(G, pos, test, font_size=8)

    plt.savefig("../Test/Biotestamento/Settembre/PolarizzazioneVene.png",
                format="PNG")

    plt.show()
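The three node loops above (for dictFirstPol, dictPol, and test) each pair G.nodes() with a list of per-node values; a minimal equivalent sketch using dict(zip(...)), assuming the value lists follow the node iteration order as in the original:

# Sketch, not the original code: G.nodes() iterates in insertion order,
# which is the same order used to build the per-node value lists.
dictFirstPol = dict(zip(G.nodes(), firstPolar))
dictPol = dict(zip(G.nodes(), newPol))
test = dict(zip(G.nodes(), testdict[len(testdict) - 1]))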
Example #31
0
def AdjacencyMatrix(G):
    # nx.attr_matrix(G) returns (matrix, ordering); [0] keeps just the matrix
    return nx.attr_matrix(G)[0]
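A minimal usage sketch for the helper above (toy graph, illustrative only):

import networkx as nx
import numpy as np

# Small triangle graph, assumed purely for illustration.
G = nx.Graph([(0, 1), (1, 2), (0, 2)])
A = np.asarray(AdjacencyMatrix(G))
print(A)  # 3x3 symmetric 0/1 adjacency matrix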
Example #32
0
e = q

w = [i for i, x in enumerate(q) if x == 1]
##print w[0]

##Closeness centrality of each node

for v in g.nodes():
    c = nx.closeness_centrality(g, v)
    ##    print("Closeness centrality of %s is %s" % (v, c))
    k.append(c)

#obtain the adj. matrix for the graph

print('Adjacency Matrix of the graph')
T = nx.attr_matrix(g, rc_order=z)
##print T

for i in range(r):
    q = np.sum(T[i])
    d.setdefault(i, []).append(q)
l = list(d.values())
##print l
l1 = list(itertools.chain(*l))
##print l1
##for i in range(5):
##     print g.degree(i)

#Transition Probability Matrix T
for i in range(r):
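The snippet breaks off at the transition-matrix loop. As a hypothetical completion sketch (reusing T from above and keeping the stated intent; this is not the original code), row-normalizing the adjacency matrix yields the transition probability matrix:

# Hypothetical sketch: divide each row of the adjacency matrix by its row sum,
# so row i holds the transition probabilities out of node i.
import numpy as np

T = np.asarray(T, dtype=float)
row_sums = T.sum(axis=1, keepdims=True)
P = np.divide(T, row_sums, out=np.zeros_like(T), where=row_sums != 0)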
Example #33
0
def run_samples_threshold(N, B, n1, nnode1, k1, n2, nnode2, k2):
    import myKernels.RandomWalk as rw
    test_info = pd.DataFrame()
    thresholds = np.linspace(0.8, 1, 20)
    for sample in tqdm.tqdm(range(N)):

        g1 = mg.BinomialGraphs(n1,
                               nnode1,
                               k1,
                               fullyConnected=True,
                               e='random_edge_weights',
                               ul=0.8,
                               uu=0.2)
        g2 = mg.BinomialGraphs(n2,
                               nnode2,
                               k2,
                               fullyConnected=True,
                               e='random_edge_weights',
                               ul=0.8,
                               uu=0.2)
        g1.Generate()
        g2.Generate()
        Gs = g1.Gs + g2.Gs
        for dist_from_disconnection_point in [-1, 0, 1, 2, 3, 20]:
            new_Gs = []
            isconnected = []

            for i in range(len(Gs)):
                A = nx.attr_matrix(Gs[i], edge_attr='weight')[0]
                G = mg.gen_fullyconnected_threshold(
                    A,
                    thresholds=thresholds,
                    dist_from_disconnection_point=dist_from_disconnection_point
                )
                isconnected.append(nx.is_connected(G))

                new_Gs.append(G.copy())

            rw_kernel = rw.RandomWalk(new_Gs, c=0.01, normalize=0)
            K = rw_kernel.fit_ARKU_plus(r=6,
                                        normalize_adj=False,
                                        edge_attr=None,
                                        verbose=False)

            MMD_functions = [mg.MMD_b, mg.MMD_u]

            kernel_hypothesis = mg.BoostrapMethods(MMD_functions)
            function_arguments = [dict(n=g1.n, m=g2.n), dict(n=g1.n, m=g2.n)]
            kernel_hypothesis.Bootstrap(K, function_arguments, B=B)
            #print(f'p_value {kernel_hypothesis.p_values}')
            #print(f"MMD_u {kernel_hypothesis.sample_test_statistic['MMD_u']}")

            test_info = pd.concat(
                (test_info,
                 pd.DataFrame(
                     {
                         'p_val': kernel_hypothesis.p_values['MMD_u'],
                         'dist_from_critical': dist_from_disconnection_point,
                         'sample': sample
                     },
                     index=[0])),
                ignore_index=True)

    return test_info
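A hypothetical invocation sketch (argument values are illustrative; mg and myKernels are the author's local modules, as in the snippet):

# Illustrative parameters only; not taken from the original.
df = run_samples_threshold(N=10, B=500,
                           n1=20, nnode1=30, k1=4,
                           n2=20, nnode2=30, k2=4)
print(df.groupby('dist_from_critical')['p_val'].mean())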
Example #34
0
#See graph info
print('Graph Info:\n', nx.info(G))

#Inspect the node features
print('\nGraph Nodes: ', G.nodes.data())

#Plot the graph
nx.draw(G, with_labels=True, font_weight='bold')
plt.show()

# =============================================================================
# Inserting Adjacency Matrix (A) to Forward Pass Equation
# =============================================================================

# Get the Adjacency Matrix (A) and Node Features Matrix (X) as numpy array
A = np.array(nx.attr_matrix(G, node_attr='name')[0])
X = np.array(nx.attr_matrix(G, node_attr='name')[1])
X = np.expand_dims(X, axis=1)

print('Shape of A: ', A.shape)
print('\nShape of X: ', X.shape)
print('\nAdjacency Matrix (A):\n', A)
print('\nNode Features Matrix (X):\n', X)

#Dot product Adjacency Matrix (A) and Node Features (X)
# The dot product of Adjacency Matrix and Node Features Matrix represents the sum of neighboring node features.
# AX sums up the adjacent node features, but it does not take into account the features of the node itself.
AX = np.dot(A, X)
print("Dot product of A and X (AX):\n", AX)

# =============================================================================
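The comment above notes that AX leaves out each node's own features. A common remedy, shown here only as a sketch on top of the snippet's A and X (not part of the original), is to add self-loops before taking the product:

# Sketch: add self-loops so the aggregation includes each node's own features.
import numpy as np

A_hat = A + np.eye(A.shape[0])  # assumes A and X from the snippet above
AX_hat = np.dot(A_hat, X)
print("Dot product of A_hat and X (AX_hat):\n", AX_hat)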
Example #35
0
def mc(rho, beta, eps, config):
    N = config['N']
    D = config['D']
    dw = config['dw']
    sweeps = config['sweeps']

    sqrtD = math.sqrt(D)
    sqrtN = math.sqrt(N)
    norm = sqrtD
    Q = norm/sqrtD
    Gamma = math.sqrt(1 - rho*rho)/rho

    w = np.random.randn(N,D)
    w *= norm/np.sqrt((w*w).sum(axis=1))[:,None]

    z = np.ones(D)
    z *= norm/np.linalg.norm(z)
    h = z.dot(w.T)/sqrtD

    G = nx.complete_graph(N)
    A = np.asarray(nx.attr_matrix(G)[0])

    def measure():
        m = h.mean()/sqrtD
        r = np.abs(h).mean()/sqrtD
        v = h.var()
        q = np.abs(A*h*h[:,None]).mean()
        data = pd.Series([m,v,q,r],
                         index="m v q r".split(" "))
        return data

    trace = pd.DataFrame()
    measured = measure()
    measured["acceptance_ratio"] = 0
    # DataFrame.append and pd.Panel (used below) assume an older pandas release
    trace = trace.append(measured, ignore_index=True)

    def energy(hi, hj):
        # erfc is assumed to come from scipy.special
        x = hi*np.sign(hj)/Gamma/math.sqrt(2)
        Jij = 1/D
        a = Jij
        return -a*Gamma*Gamma*np.log(eps + (1-2*eps)*erfc(-x)/2)

    accepted = 0
    for t in range(sweeps*N):
        i = np.random.choice(N)
        ni = A[i]
        pij = ni/ni.sum()
        j = np.random.choice(N, p=pij)

        hi, hj = h[i], h[j]

        E0 = energy(hi, hj)
        nw = np.random.multivariate_normal(w[i], dw*np.eye(D))
        # u = np.random.randn(D)
        # nw = dw*u/np.linalg.norm(u) + w[i]
        nw *= norm/np.linalg.norm(nw)
        nhi = z.dot(nw)/sqrtD
        E = energy(nhi, hj)
        if E < E0 or np.random.rand() < math.exp(beta*(E0-E)):
            accepted += 1
            w[i] = nw.copy()
            h[i] = nhi

        if t%N == 0:
            measured = measure()
            measured["acceptance_ratio"] = accepted/(t+1)
            trace = trace.append(measured, ignore_index=True)

    p = (eps, rho, beta)
    names = ("epsilon","rho","beta")
    stats = pd.Panel({p:trace})
    result = {"statistics":stats}
    result["agents"]= pd.Panel({p:pd.DataFrame(w)})
    result["network"] = pd.Panel({p:pd.DataFrame(A)})
    for k,v in result.items():
        v.items.set_names(names, inplace=True)
    return result
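A hypothetical invocation sketch (the config values are illustrative, not from the original):

# Illustrative parameters only.
config = {'N': 50, 'D': 10, 'dw': 0.1, 'sweeps': 100}
result = mc(rho=0.8, beta=2.0, eps=0.01, config=config)
print(result['statistics'])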