Code Example #1
 def test_edges_iter(self):
     assert_equal(list(self.G.edges_iter()), list(nx.edges_iter(self.G)))
     assert_equal(list(self.DG.edges_iter()), list(nx.edges_iter(self.DG)))
     assert_equal(list(self.G.edges_iter(nbunch=[0, 1, 3])),
                  list(nx.edges_iter(self.G, nbunch=[0, 1, 3])))
     assert_equal(list(self.DG.edges_iter(nbunch=[0, 1, 3])),
                  list(nx.edges_iter(self.DG, nbunch=[0, 1, 3])))
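Note: nx.edges_iter is a NetworkX 1.x function and was removed in NetworkX 2.0, where G.edges() itself returns an iterable view. A minimal compatibility sketch, assuming NetworkX >= 2.0:

import networkx as nx

def edges_iter(G, nbunch=None):
    # Mirror the old nx.edges_iter(G, nbunch) call; on 2.x, G.edges(nbunch)
    # returns an iterable EdgeView.
    return iter(G.edges(nbunch))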
Code Example #2
def edge_count_pruner_2(graph, k, nodes=0):
    # Count, for each node, the distinct endpoints reachable within two hops,
    # and collect the nodes whose count exceeds k.
    if nodes == 0:
        nodes = graph.nodes()
    start = time.time()
    pruned = []
    for n in nodes:
        found = []
        count = 0
        edges = nx.edges_iter(graph, nbunch=n)
        for x in edges:
            if x[1] not in found:
                count += 1
                found.append(x[1])
            edges2 = nx.edges_iter(graph,nbunch=x[1])
            for y in edges2:
                if y[1] not in found:
                    count += 1
                    found.append(y[1])
        if count > k:
            pruned.append(n)
    print time.time() - start
    
    print float(len(pruned))/(float(len(graph.nodes())))  
    return pruned
Code Example #3
File: charmmmatcher.py Project: drorlab/dabble
def _define_bond(graph, node1, node2, patch):
    """
    Processes a bond defined in a psf file and adds it to the graph.
    Checks for + or - in the bonded atom name and sets the node "residue"
    attribute accordingly if one is present.

    Args:
      graph (networkx graph): Graph to add bond to
      node1 (str): Atom name from psf file of first atom
      node2 (str): Atom name from psf file of second atom
      patch (bool): If this bond is defined by a patch

    Returns:
      (bool) True if the bond could be defined

    Raises:
        ValueError if a non +- atom name is not defined in the MASS
          line dictionary
    """

    # Sanity check and process first atom name
    if "+" in node1:
        graph.add_node(node1, type="", residue="+", patched=patch)
    elif "-" in node1:
        graph.add_node(node1, type="", residue="-", patched=patch)
    elif node1 not in graph.nodes():
        return False

    # Now sanity check and process second atom name
    if "+" in node2:
        graph.add_node(node2, type="", residue="+", patched=patch)
    elif "-" in node2:
        graph.add_node(node2, type="", residue="-", patched=patch)
    elif node2 not in graph.nodes():
        return False

    # If we are applying a patch and there are _join atoms attached
    # to the atom we are applying a bond to, delete the _join atom.
    # It can be added back later if it was actually needed.
    if graph.node[node1]["patched"] and not graph.node[node2]["patched"]:
        neighbor_joins = [e[1] for e in nx.edges_iter(graph, nbunch=[node2]) \
                          if graph.node[e[1]]["residue"] != "self" and \
                          not graph.node[e[1]]["patched"]]
        graph.remove_nodes_from(neighbor_joins)
    elif graph.node[node2]["patched"] and not graph.node[node1]["patched"]:
        neighbor_joins = [e[1] for e in nx.edges_iter(graph, nbunch=[node1]) \
                          if graph.node[e[1]]["residue"] != "self" and \
                          not graph.node[e[1]]["patched"]]
        graph.remove_nodes_from(neighbor_joins)

    graph.add_edge(node1, node2, patched=patch)
    return True
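A usage sketch for this function (the atom names are hypothetical; assumes a NetworkX 1.x graph whose nodes carry the "type", "residue", and "patched" attributes used above):

import networkx as nx

g = nx.Graph()
g.add_node("CA", type="CT1", residue="self", patched=False)
# "+N" names an atom in the next residue, so a join node is created for it
assert _define_bond(g, "CA", "+N", patch=False)
assert g.node["+N"]["residue"] == "+"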
Code Example #4
File: smt.py Project: ptsankov/spctl
def encodeUntilAccessConstraint(accessConstraint, resource, visited):
    if accessConstraint[0] == 'EU':
        accessConstraint1 = accessConstraint[1]
        accessConstraint2 = accessConstraint[2]
        
        accessConstraint2Encoded = encodeAccessConstraint(accessConstraint2, resource)
        accessConstraint1Encoded = encodeAccessConstraint(accessConstraint1, resource)
        
        successorConstraints = []
        for PEP in networkx.edges_iter(conf.resourceStructure, resource):
            successor = PEP[1]            
            if successor in visited:
                continue
            succVisited = set(visited)
            succVisited.add(resource)
            successorAccessConstraintEncoded = encodeUntilAccessConstraint(accessConstraint, successor, succVisited)
            pepTemplate = template.PEPTemplate(PEP)
            successorConstraints.append(And(pepTemplate, successorAccessConstraintEncoded))
        return Or(accessConstraint2Encoded, And(accessConstraint1Encoded, Or(successorConstraints)))        
            
    elif accessConstraint[0] == 'AU':
        accessConstraint1 = accessConstraint[1]
        accessConstraint2 = accessConstraint[2]
        
        accessConstraint2Encoded = encodeAccessConstraint(accessConstraint2, resource)
        accessConstraint1Encoded = encodeAccessConstraint(accessConstraint1, resource)
                        
        successorConstraints = []
        noBackEdgesConstraints = []
        existsSuccessorConstraints = []
        
        for PEP in networkx.edges_iter(conf.resourceStructure, resource):
            successor = PEP[1]            
            if successor in visited:
                noBackEdgesConstraints.append(Not(template.PEPTemplate(PEP)))
                continue
            
            existsSuccessorConstraints.append(template.PEPTemplate(PEP))
                                    
            succVisited = set(visited)
            succVisited.add(resource)
            successorAccessConstraintEncoded = encodeUntilAccessConstraint(accessConstraint, successor, succVisited)
            pepTemplate = template.PEPTemplate(PEP)
            successorConstraints.append(Implies(pepTemplate, successorAccessConstraintEncoded))
            
        noBackEdges = And(noBackEdgesConstraints)
        existsSuccessor = Or(existsSuccessorConstraints)
            
        return Or(accessConstraint2Encoded, And(accessConstraint1Encoded, noBackEdges, existsSuccessor, And(successorConstraints)))
    else:
        raise NameError('Not an until access constraint:', accessConstraint)
Code Example #5
File: dsd.py Project: EdwardBetts/matching-metrics
def rewire(M,p):
    R = nx.Graph(M)
    rewireSet = [i for i in nx.edges_iter(R) if random.random() < p]
    R.remove_edges_from(rewireSet)
    for i in rewireSet:
        R.add_edge(i[0],random.sample([k for k in nx.non_neighbors(R,i[0])],1)[0])
    return R
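A usage sketch (assumes NetworkX 1.x, where nx.edges_iter and nx.non_neighbors exist): each selected edge is removed and replaced by an edge from the same source to a random non-neighbor, so the edge count is preserved.

import networkx as nx

M = nx.karate_club_graph()
R = rewire(M, 0.1)  # rewire roughly 10% of the edges
assert R.number_of_edges() == M.number_of_edges()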
Code Example #6
    def community_bridge(self, n):
        N = set(self.G.neighbors(n))
        communities = list(fx.read_communities())
        if self.p is None:
            complete = 0
            for pair in nx.edges_iter(self.G):
                if fx.same_community(communities, pair[0], pair[1]):
                    #Pair is complete
                    complete += 1
            #Probability two linked nodes belong to same community
            self.p = complete / float(nx.number_of_edges(self.G))
            print("p =", self.p)

        if self.q is None:
            pure = 0
            nonlinked = 0
            for pair in nx.non_edges(self.G):
                nonlinked += 1
                if not fx.same_community(communities, pair[0], pair[1]):
                    #Pair is pure
                    pure += 1
            #Probability two non-linked nodes do not belong to same community
            self.q = pure / float(nonlinked)
            print("q =", self.q)

        total = 0
        for vj in N:
            #Number of common neighbors
            n1 = len(N.intersection(set(self.G.neighbors(vj))))
            n2 = len(N) - n1
            total += 1 / (1 + n1 * self.p + n2 * (1 - self.q))
        return total
Code Example #7
def N_W_S(graph, beta, nodes_add_rate):
    network = graph
    edges_add = []
    num_of_nodes_add = int(nx.number_of_nodes(network) * nodes_add_rate)
    num = 1
    while num <= num_of_nodes_add:
        node_selected = random.randint(0, nx.number_of_nodes(network) - 1)
        node_1 = node_selected - 1
        node_2 = node_selected + 1
        node_3 = node_selected + 2
        num += 1  # advance the counter; otherwise this loop never terminates

    for edge in nx.edges_iter(network):
        x = random.uniform(0, 1)
        if x < beta:
            node_random_selected = random.randint(
                0,
                nx.number_of_nodes(network) - 1)
            #print 'node_random_selected',node_random_selected
            #print network.nodes()[node_random_selected]
            if node_random_selected != edge[0] and node_random_selected != edge[1] \
                    and (not ((edge[0], network.nodes()[node_random_selected]) in edges_add))\
                    and (not ((edge[0], network.nodes()[node_random_selected]) in network.edges())):
                edges_add.append(
                    (edge[0], network.nodes()[node_random_selected]))
    network.add_edges_from(edges_add)
Code Example #8
File: RwpIterative.py Project: dfeng808/multiplex
 def draw_connection_at_node(self, network, node, visited):
     result=None
     neighbs = [edge for edge in nx.edges_iter(network, node)]
     neighbs = filter(lambda x : x not in visited, neighbs)
     if len(neighbs)>0:
         result = random.choice(neighbs)
     return result
Code Example #9
def ind_cascade(node_id,nc,activated,q):
    edges = nx.edges_iter(nc,nbunch=node_id)
    for x in edges:
        #print "Edge " + str(x)
        if x[1] not in activated and edge_activate(nc[x[0]][x[1]]['weight'],nc.node[x[1]]['review_count']):
            activated.add(x[1])
            q.put(x[1])
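A driver sketch for this function, in the spirit of init_full_cascade in Code Example #18 below (seeds is a hypothetical iterable of starting nodes; nc must carry the 'weight' edge and 'review_count' node attributes the function reads):

import Queue

activated = set(seeds)
q = Queue.Queue()
for s in seeds:
    q.put(s)
while not q.empty():
    ind_cascade(q.get(), nc, activated, q)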
Code Example #10
File: dsd.py Project: mcrovella/matching-metrics
def rewire(M, p):
    R = nx.Graph(M)
    rewireSet = [i for i in nx.edges_iter(R) if random.random() < p]
    R.remove_edges_from(rewireSet)
    for i in rewireSet:
        R.add_edge(i[0],
                   random.sample([k for k in nx.non_neighbors(R, i[0])], 1)[0])
    return R
Code Example #11
File: dsd.py Project: EdwardBetts/matching-metrics
def scramble(M,p):
    R = nx.Graph(M)
    removeSet = [i for i in nx.edges_iter(R) if random.random() < p]
    R.remove_edges_from(removeSet)
    nodes = R.nodes()
    addFromNodes = [random.sample(nodes,1)[0] for i in removeSet]
    for node in addFromNodes:
        R.add_edge(node,random.sample([k for k in nx.non_neighbors(R,node)],1)[0])
    return R
Code Example #12
File: dsd.py Project: mcrovella/matching-metrics
def scramble(M, p):
    R = nx.Graph(M)
    removeSet = [i for i in nx.edges_iter(R) if random.random() < p]
    R.remove_edges_from(removeSet)
    nodes = R.nodes()
    addFromNodes = [random.sample(nodes, 1)[0] for i in removeSet]
    for node in addFromNodes:
        R.add_edge(node,
                   random.sample([k for k in nx.non_neighbors(R, node)], 1)[0])
    return R
Code Example #13
 def run(self):
     self.logging.debug("Start monitor-thread.")
     link_usage = {}
     link_usage.clear()
     self.logging.debug("LinkUsage calc IN")
     for src,dst in nx.edges_iter(self.topology):
         #self.logging.debug("LinkUsage calc %s -> %s",src,dst)
         f_list_ = self.bin_content[src][dst]
         contention_ = 0
         for f in f_list_:
             contention_ += self.flow_map[f]
         link_usage[(src,dst)]=contention_
     self.logging.info("LinkUsage %s", link_usage)
Code Example #14
File: charmmmatcher.py Project: drorlab/dabble
def _prune_joins(graph):
    """
    Prunes _join elements that have been fulfilled by the addition of
    this patch.

    DEPRECATED! But a useful function for detecting fulfilled +- joins
                that match by element so I'm keeping it.
                Pruning now done in _define_bond

    Args:
       graph (networkx graph): The residue to prune
    """

    unpatched = [n for n in graph.nodes() if not graph.node[n]["patched"]]
    for uun in unpatched:
        neighbor_joins = [e[1] for e in nx.edges_iter(graph, nbunch=[uun]) if \
                          graph.node[e[1]]["residue"] != "self" and \
                                  not graph.node[e[1]]["patched"]]
        for nei in neighbor_joins:
            if any(graph.node[e[1]]["element"] == graph.node[nei]["element"] for \
                   e in nx.edges_iter(graph, nbunch=[uun]) if \
                   graph.node[e[1]]["patched"]):
                graph.remove_node(nei)
Code Example #15
File: RwpIterative.py Project: dfeng808/multiplex
    def calculate_accuracy(self, net_original, results):
        #results dictionary {node_id: class_result}
        counter=0
        good=0
        areResults = False

        for item in results.iteritems():
            areResults = True
            if(item[0].label==item[1]):
                good+=1
            neighbs = [edge for edge in nx.edges_iter(net_original, item[0])]
            if(len(neighbs)>0):
                counter+=1

        return good/float(counter) if areResults else -1
Code Example #16
def volume(nodes, graph):
    """
    Compute the volume for a list of nodes, which
    is the number of edges in `graph` with at least one end in
    nodes.
    Params:
      nodes...a list of strings for the nodes to compute the volume of.
      graph...a networkx graph
    >>> volume(['A', 'B', 'C'], example_graph())
    4
    """
    return len(list(nx.edges_iter(graph, nodes)))
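example_graph() is not part of this snippet; one hypothetical definition consistent with the doctest above (exactly four edges touch {'A', 'B', 'C'}):

import networkx as nx

def example_graph():
    G = nx.Graph()
    G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')])
    return G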
Code Example #17
    def getFolloweesAndFollowers(self, graph):
        followees = {}
        followers = {}
        
        for edge in networkx.edges_iter(graph):
            if edge[0] in followees:
                followees[edge[0]].append(edge[1])
            else:
                followees[edge[0]] = [edge[1]]

            if edge[1] in followers:
                followers[edge[1]].append(edge[0])
            else:
                followers[edge[1]] = [edge[0]]

        return followees, followers
Code Example #18
def init_full_cascade(nodes,nc,max_iterations= float("inf")):
    activated = set(nodes)
    q = Queue.Queue()
    for node in nodes:
        q.put((node, 0))
    
    while not q.empty():
        node, iteration = q.get()
        if iteration <= max_iterations:
            edges = nx.edges_iter(nc, nbunch=node)
            for x in edges:
                #if x[1] not in activated and edge_activate(nc[x[0]][x[1]]['weight'],nc.node[x[1]]['review_count']):
                if x[1] not in activated and True:
                    activated.add(x[1])
                    q.put((x[1], iteration+1))
        else:
            return activated
        #ind_cascade(node,nc,activated,q)
        
    return activated
Code Example #19
File: _write.py Project: mulkieran/pydevDAG
    def _rewrite(cls, graph, stringize):
        """
        Rewrite objects in graph.

        :param graph: the graph
        :param bool stringize: if True, stringize, otherwise destringize
        """
        if stringize:
            node_methods = [r.stringize for r in cls._NODE_REWRITERS]
            edge_methods = [r.stringize for r in cls._EDGE_REWRITERS]
        else:
            node_methods = [r.destringize for r in cls._NODE_REWRITERS]
            edge_methods = [r.destringize for r in cls._EDGE_REWRITERS]

        for node in nx.nodes_iter(graph):
            for rewriter in node_methods:
                rewriter(graph, node)

        for edge in nx.edges_iter(graph):
            for rewriter in edge_methods:
                rewriter(graph, edge)
Code Example #20
File: universal.py Project: andersjo/nlpkit
 def relation_counts(self):
     edges = chain.from_iterable(self.G[src_n][target_n].values() for src_n, target_n in nx.edges_iter(self.G))
     types = [e['type'] for e in edges]
     grouped = groupby(sorted(types))
     return dict((name, len(list(vals))) for name, vals in grouped)
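An equivalent sketch using collections.Counter (assumes, as above, a multigraph whose edge data dicts carry a 'type' key):

from collections import Counter
from itertools import chain

def relation_counts(G):
    edges = chain.from_iterable(G[u][v].values() for u, v in G.edges())
    return dict(Counter(e['type'] for e in edges))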
Code Example #21
if __name__ == "__main__":

    TG = nx.MultiDiGraph()

    TG.add_edge('a', 'b', startTime=1, duraTime=1)
    TG.add_edge('a', 'b', startTime=2, duraTime=1)
    TG.add_edge('a', 'c', startTime=2, duraTime=1)
    TG.add_edge('a', 'c', startTime=4, duraTime=1)
    TG.add_edge('b', 'f', startTime=5, duraTime=1)
    TG.add_edge('c', 'f', startTime=6, duraTime=1)
    TG.add_edge('c', 'g', startTime=7, duraTime=1)

    quad_tuple_edges = []
    memo = []
    for edge in nx.edges_iter(TG):
        if edge not in memo:
            memo.append(edge)
            r = quad_tuple(edge)
            quad_tuple_edges.extend(r)
        else:
            continue

    edge_stream = sorted(quad_tuple_edges, key=lambda item: item[2])

    TDG = nx.DiGraph()

    # add edge where duraTime = 1
    for e in edge_stream:
        TDG.add_edge((e[0], e[2]), (e[1], e[2] + e[3]), duraTime=1)
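quad_tuple is not defined in this snippet; a plausible sketch consistent with its use here (one 4-tuple per parallel edge of the MultiDiGraph, later sorted on startTime at index 2):

def quad_tuple(edge):
    u, v = edge
    return [(u, v, d['startTime'], d['duraTime'])
            for d in TG[u][v].values()]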
Code Example #22
 values = ex.predict_feats(W, b_arr, vect)
 
 # Part 1:
 # Get all neighbour feats of instance feats
 start_feats = set()
 expanded_feats = {} # first-level neighbs
 
 for feat in feats:
     for node in G.nodes_iter():
         # if feat is found in the graph, then we get its neighbours and then break to
         # next feat
         if node == feat:
             start_feats.add(feat)
             
             # first level neighbours
             for edge in nx.edges_iter(G, [node]):
                 nodea = edge[0]
                 nodeb = edge[1]
                 ew = G[nodea][nodeb]["weight"]
                 
                 if nodeb not in start_feats:
                     expanded_feats[nodeb] = ew
             
             # go to next feature
             break
 
 # Dict to hold all new feats
 new_feats = {}
 
 # We add all neighbs
 for key, value in expanded_feats.iteritems():
Code Example #23
def augmentEdges(g):
    r1 = nx.edge_betweenness_centrality(g, normalized=True, weight='weight')
    for x in nx.edges_iter(g):
        g[x[0]][x[1]]['edge_betweenness_centrality'] = r1[x]
    return g
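A usage sketch: after the call, every edge of the weighted graph carries its betweenness centrality as an edge attribute.

import networkx as nx

g = nx.Graph()
g.add_edge('a', 'b', weight=1.0)
g.add_edge('b', 'c', weight=2.0)
g = augmentEdges(g)
assert 'edge_betweenness_centrality' in g['a']['b']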
Code Example #24
 def LocationLinks(self):
     return list(nx.edges_iter(self.graph))
Code Example #25
File: defacto-network.py Project: amyrbrown/assets
  for s in range(runs-1):
    chance = np.random.random_sample()
    if chance < 0.1: #5*GG.number_of_nodes()/nodes:
      ofile.write(str(s+1) + ',')
      if i in scheduled:
        temp = scheduled[i]
        temp.append(s)
        scheduled[i] = temp
      else:
        temp = list()
        temp.append(s)
        scheduled[i] = temp
      centrality = nx.betweenness_centrality(GG)
      for node in GG:
        #print centrality[node]
        wins[node, (s)*winsize + int(centrality[node]*200)-1] = i+1
        for ad in nx.edges_iter(GG, node):
          offset = np.random.randint(1, 3, [1, 1])
          wins[ad, (s)*winsize + int(centrality[node]*200) + offset] = i+1
  ofile.seek(-1, os.SEEK_CUR)
  ofile.write(',-')
  ofile.write('\n')
  print i



sampled = np.random.randint(0, nodes-1, [1, 100])
sio.savemat('raster-' + timestamp + '.mat', {'wins': wins[sampled, :]})


Code Example #26
File: network3.py Project: imuhata8ri/kemonotag
def jsonloader(time="day",centrality="eigenvector",percent="100"):
    print "test"
    #-------------------------Define Edges-------------------------:
    
    
    #allLines = open('merged.csv').read().encode('utf-8')
    allLines = open('edges5.Csv').read().encode('utf-8')
    #allLines = open(/csv).read().encode('utf-8')
    
    data = StringIO.StringIO(allLines)
    G = nx.Graph()
    edges = nx.read_edgelist(data, delimiter=',', nodetype=unicode)
    
    for e in edges.edges():
        G.add_edge(*e)
    
    N,K = edges.order(), edges.size()
    print "Nodes: ", N
    print "Edges: ", K
    
    avg_deg = int(math.ceil(float(K)/N))
    print "Average degree: ", avg_deg
    
    degree = G.degree()
    degreelist = []
    for n in degree:
        degreelist.append(degree[n])
    degreelist = filter(lambda x: x>1, degreelist)
    
    #set min %
    degreelist.sort()
    topp = 90
    topn = 50
    toplist = degreelist[int(len(degreelist) * topp/100) : int(len(degreelist))]
    median=sorted(degreelist)[len(degreelist)/2]
    mediantopp=sorted(toplist)[len(toplist)/2]
    print "Degree Median: ", median
    print "Degree Median(top %): ", mediantopp
    
    Range = (max(degreelist)-min(degreelist))
    Rangetopp = (max(toplist)-min(toplist))
    
    print "Degree Range: ", Range
    print "Degree Range(top %): ", Rangetopp
    
    
    #'Fan-boy' trimmer
    def remove_edges(g, in_degree):
        g2=g.copy()
        #d_in=g2.in_degree(g2)
        #d_out=g2.out_degree(g2)
        #print(d_in)
        #print(d_out)
        d = g2.degree(g2)
        for n in g2.nodes():
            #if d_in[n]==in_degree and d_out[n] == out_degree: 
            if d[n] <= in_degree:
                g2.remove_node(n)
        return g2
    
    def remove_minoredges(g, topn):
        import heapq
        g3=g.copy()
        d = g3.degree(g3)
        #d.most_common()
        a = sorted(d, key=d.get, reverse=False)[:int(len(d)-topn)]
        for item in a:
            g3.remove_node(item)
        return g3
    
    
    #G = remove_edges(G,mediantopp)
    G = remove_minoredges(G,topn)
    
    #-------------------------Finding Community-------------------------
    import community
    Gc = community.best_partition(G)
    for n, m in Gc.items():
        Gc[n] = int(m)
    
    color = []
    for nodes in nx.nodes_iter(G):
        value = int(Gc[nodes]*100)
        color.append(value)
    #print "color: ",color
    nx.set_node_attributes(G,'group',Gc)
    #-------------------------Finding Centrality-------------------------
    
    #Build the list that defines node sizes
    #bb=nx.degree_centrality(G)
    #bb=nx.betweenness_centrality(G)
    #bb=nx.closeness_centrality(G)
    bb=nx.eigenvector_centrality(G)
    for n, m in bb.items():
        bb[n] = int(math.ceil(m*25))
    
    size = []
    for nodes in nx.nodes_iter(G):
        value = int(bb[nodes]*100)
        #print value
        size.append(value)
    #print len(size)
    #print bb
    #for n in len(size):
        #nx.set_node_attributes(G, 'betweenness',
    nx.set_node_attributes(G,'betweenness',bb)
    #-------------------------Finding edge Centrality-------------------------
    
    #Build the list that defines edge sizes
    cc=nx.edge_betweenness_centrality(G)
    for n, m in cc.items():
        cc[n] = int(math.ceil(m*500))
    
    edgesize = []
    for edges in nx.edges_iter(G):
        value = int(cc[edges]*500)
        edgesize.append(value)
    nx.set_edge_attributes(G,'length',cc)
    #--------------------------------------------------
    G = remove_minoredges(G,topn)
    
    size = np.asarray(size)
    pos = nx.spring_layout(G)
    
    
    
    #nx.draw_networkx_nodes(G,pos,node_color=color,alpha=0.8,node_size=size)
    nx.draw_networkx_nodes(G,pos,alpha=0.8,node_size=size)
    nx.draw_networkx_edges(G,pos,alpha=0.2,edge_size=edgesize)
    nx.draw_networkx_labels(G,pos,font_size=10,font_color='black')
    
    def save(G, fname):
        from networkx.readwrite import json_graph
        data = json_graph.dumps(G, sort_keys=True,indent=2)
        f = open(fname, 'w')
        f.write(data)
    
    save(G, "./d3/graph.json")
    plt.savefig('./d3/graph_merged.png')
Code Example #27
File: nx_note.py Project: keganshen/networkx
# Graph-level methods:
nx.density(G)     #returns the density of the graph
nx.degree_histogram(G) #returns a list with the frequency of each degree value
nx.info(G)      #returns summary information about the graph: number of nodes, edges, average degree, etc.
P=nx.create_empty_copy(G)  #returns a copy of graph G with all edges removed
nx.is_directed(DG)       #returns whether the graph is directed

#Node-level methods
nx.nodes(G)     #returns all nodes as a list
nx.number_of_nodes(G)    #returns the number of nodes
nx.all_neighbors(G,'tank')    #returns all neighbors of a node as an iterator
nx.non_neighbors(G,'tank')    #returns all non-neighbors of a node as an iterator
nx.common_neighbors(G,'','') #returns the common neighbors of two nodes as an iterator
nx.nodes_iter(G)              #returns all nodes as an iterator
g = nx.compose(G,DG)    #returns the two graphs composed, merging shared nodes???
sorted(G.degree().values())

#Edge-level methods
nx.edges(G)     #returns all edges as a list
nx.number_of_edges(G)   #returns the number of edges
nx.non_edges(G)         #returns all nonexistent edges as an iterator
nx.edges_iter(G)        #returns all edges as an iterator

#MultiDiGraph() methods
G.successors(node)  #returns the successors of a node
G.predecessors(node) #returns the predecessors of a node
G.number_of_edges(node,node)    #returns the number of edges between two nodes
G.size()    #returns the number of edges in the graph
G.nodes_with_selfloops()    #returns the nodes that have self-loops, as a list
G.out_degree()  #returns the out-degree; pass a node to get that node's value, or omit it for a dict over all nodes
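The two *_iter calls in this cheat sheet exist only in NetworkX 1.x. A sketch of the 2.x equivalents (assuming NetworkX >= 2.0):

list(G.nodes())  # replaces nx.nodes_iter(G); the view is already iterable
list(G.edges())  # replaces nx.edges_iter(G)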
Code Example #28
File: network.py Project: imuhata8ri/kemonotag
def createjson(datas):
    #-------------------------Define Edges-------------------------:
    
    #allLines = open('edges5.Csv').read().encode('utf-8')
    allLines = datas
    #allLines = open(/csv).read().encode('utf-8')
    lines = allLines.splitlines()
    datalist = []
    for n in lines:
        if n.count(",") > 1:
            logging.info(n)
            # keep only the first comma as the delimiter; protect the rest by
            # swapping them to full-width commas, then restoring the first one
            n = n.replace(",", u"，").replace(u"，", ",", 1)
        datalist.append(n)
    datalist = '\n'.join(datalist)
    
    datalist = StringIO.StringIO(datalist)
    G = nx.Graph()
    edges = nx.read_edgelist(datalist, delimiter=',', nodetype=unicode)
    for e in edges.edges():
        G.add_edge(*e)
    
    #-------------------------Calculate statistics-------------------------
    N,K = edges.order(), edges.size()
    logging.info("Nodes: %s", N)
    logging.debug("Edges: %s", K)
    
    avg_deg = int(math.ceil(float(K)/N))
    logging.debug("Average degree: %s", avg_deg)
    
    degree = G.degree()
    degreelist = []
    for n in degree:
        degreelist.append(degree[n])
    degreelist = filter(lambda x: x>1, degreelist)
    
    #set min %
    degreelist.sort()
    topp = 90
    topn = 50
    toplist = degreelist[int(len(degreelist) * topp/100) : int(len(degreelist))]
    median=sorted(degreelist)[len(degreelist)/2]
    mediantopp=sorted(toplist)[len(toplist)/2]
    logging.debug( "Degree Median: ", median)
    logging.debug( "Degree Median(top %): ", mediantopp)
    
    Range = (max(degreelist)-min(degreelist))
    Rangetopp = (max(toplist)-min(toplist))
    
    logging.debug( "Degree Range: ", Range)
    logging.debug( "Degree Range(top %): ", Rangetopp)
    
    
    #-------------------------Trim down tagalongs-------------------------
    #'Fan-boy' trimmer
    def remove_edges(g, in_degree):
        g2=g.copy()
        #d_in=g2.in_degree(g2)
        #d_out=g2.out_degree(g2)
        d = g2.degree(g2)
        for n in g2.nodes():
            #if d_in[n]==in_degree and d_out[n] == out_degree: 
            if d[n] <= in_degree:
                g2.remove_node(n)
        return g2
    
    def remove_minoredges(g, topn):
        import heapq
        g3=g.copy()
        d = g3.degree(g3)
        #d.most_common()
        a = sorted(d, key=d.get, reverse=False)[:int(len(d)-topn)]
        for item in a:
            g3.remove_node(item)
        return g3
    
    
    #G = remove_edges(G,mediantopp)
    G = remove_minoredges(G,topn)
    
    #-------------------------Finding Community-------------------------
    import community
    Gc = community.best_partition(G)
    for n, m in Gc.items():
        Gc[n] = int(m)
    
    color = []
    for nodes in nx.nodes_iter(G):
        value = int(Gc[nodes]*100)
        color.append(value)
    nx.set_node_attributes(G,'group',Gc)
    #-------------------------Finding Centrality-------------------------
    #Build the list that defines node sizes
    #bb=nx.degree_centrality(G)
    #bb=nx.betweenness_centrality(G)
    #bb=nx.closeness_centrality(G)
    bb=nx.eigenvector_centrality(G)
    for n, m in bb.items():
        bb[n] = int(math.ceil(m*25))
    
    size = []
    for nodes in nx.nodes_iter(G):
        value = int(bb[nodes]*100)
        size.append(value)
    nx.set_node_attributes(G,'betweenness',bb)
    #-------------------------Finding edge Centrality-------------------------
    #Build the list that defines edge sizes
    cc=nx.edge_betweenness_centrality(G)
    for n, m in cc.items():
        cc[n] = int(math.ceil(m*500))
    
    edgesize = []
    for edges in nx.edges_iter(G):
        value = int(cc[edges]*500)
        edgesize.append(value)
    nx.set_edge_attributes(G,'length',cc)
    #--------------------------------------------------
    G = remove_minoredges(G,topn)
    
    from networkx.readwrite import json_graph
    data = json_graph.dumps(G, sort_keys=True,indent=2)
    
    return data
Code Example #29
File: ambermatcher.py Project: drorlab/dabble
    def get_lipid_head(self, selection):
        """
        Obtains a name mapping for a lipid head group given a selection
        describing a possible lipid.

        Args:
            selection (VMD atomsel): Selection to set names for

        Returns:
            (dict int->str) Atom index to resname matched
            (dict int->str) Atom index to atom name matched up
            (int) Atom index corresponding to - direction tail

        Raises:
            KeyError: if no matching possible
        """

        resname = selection.get('resname')[0]
        rgraph = self.parse_vmd_graph(selection)[0]

        # Check if a lipid head group is part of this selection.
        # Remove _join residues from the head so that subgraph match can
        # be successfully completed
        matches = {}
        for matchname in (_ for _ in self.lipid_heads if self.known_res.get(_)):
            graph = self.known_res.get(matchname)
            truncated = nx.Graph(graph)
            truncated.remove_nodes_from([n for n in graph.nodes() if \
                                         graph.node[n]["residue"] != "self"])
            matcher = isomorphism.GraphMatcher(rgraph, truncated,
                                               node_match=self._check_atom_match)
            if matcher.subgraph_is_isomorphic():
                matches[matchname] = next(matcher.match())

        if not matches:
            return (None, None, None)
        matchname = max(matches.keys(), key=(lambda x: len(self.known_res[x])))
        match = matches[matchname]
        graph = self.known_res.get(matchname)

        # Generate naming dictionaries to return
        nammatch = dict((i, graph.node[match[i]].get("atomname")) \
                        for i in match.keys() if \
                        graph.node[match[i]].get("residue") == "self")
        resmatch = dict((i, graph.node[match[i]].get("resname")) \
                        for i in match.keys() if \
                        graph.node[match[i]].get("residue") == "self")

        # Find atom index on non-truncated graph that corresponds to the
        # - direction join atom. Necessary to figure out the order in which
        # to list the tails.
        minusbnded = [_ for _ in match.keys() if match[_] in \
                      [e[1] for e in nx.edges_iter(graph, nbunch=["-"])]]
        if len(minusbnded) != 1:
            raise ValueError("Could not identify tail attached to lipid %s:%s!"
                             % (resname, selection.get('resid')[0]))
        minusidx = [_ for _ in atomsel("index %s" % minusbnded[0]).bonds[0] \
                    if _ not in match.keys()]
        if len(minusidx) != 1:
            raise ValueError("Could not identify tail attached to lipid %s:%s!"
                             % (resname, selection.get('resid')[0]))

        return (resmatch, nammatch, minusidx[0])
Code Example #30
 path_feats = {}
 threshold = 0.95
 
 # go through values and save those above threshold into expanded_feats dict
 for j in xrange(len(values)):
     this_target_word = target_words[j]
     value = values[j]
     # only continue if not in feats already
     if this_target_word not in feats:
         # if predicted value exceeds threshold, we continue with it
         if value >= threshold:
             # add to expanded feats
             predicted_feats[this_target_word] = float("{0:.4f}".format(value))
             
             # first-level path feats
             for edge in nx.edges_iter(G, [this_target_word]):
                 nodea = edge[0]
                 nodeb = edge[1]
                 edge_weight = G[nodea][nodeb]["weight"]
                 feat_val = value * edge_weight
                 #print "*", nodea, nodeb, edge_weight
                 
                 if nodeb not in feats and nodeb not in predicted_feats:
                     path_feats[nodeb] = float("{0:.4f}".format(feat_val))
 
 # Add predicted feats
 for key, value in predicted_feats.iteritems():
     if key not in feats:
         sentences[i].append(key + ":" + str(value))
 
 # Add path feats with weighted edges
Code Example #31
File: smt.py Project: ptsankov/spctl
def encodeAccessConstraint(accessConstraint, resource):
    ###
    ### TRUE/FALSE
    ### 
    if accessConstraint == 'true':
        return True
    elif accessConstraint == 'false':
        return False
    ###
    ### ATTRIBUTE CONSTRAINT
    ###
    elif len(accessConstraint) == 3 and accessConstraint[1] == 'in':
        attrName = accessConstraint[0]
        attrVals = accessConstraint[2]
        attrVal = conf.resourceStructure.node[resource][attrName]
        return any(attrVal == x for x in attrVals)
    ###
    ### UNARY: NOT
    ###
    elif accessConstraint[0] == 'not':
        constraint = encodeAccessConstraint(accessConstraint[1], resource)
        return Not(constraint)
    ###
    ### BINARY: =>, AND, OR
    ###    
    elif any(accessConstraint[0] == x for x in ['and', 'or', '=>']):
        constraintLeft = encodeAccessConstraint(accessConstraint[1], resource)
        constraintRight = encodeAccessConstraint(accessConstraint[2], resource)
        if accessConstraint[0] == 'and':
            return And(constraintLeft, constraintRight)
        elif accessConstraint[0] == 'or':
            return Or(constraintLeft, constraintRight)
        elif accessConstraint[0] == '=>':
            return Implies(constraintLeft, constraintRight)
    ###
    ### EX
    ###
    elif accessConstraint[0] == 'EX':
        successorConstraints = []
        for PEP in networkx.edges_iter(conf.resourceStructure, resource):
            successor = PEP[1]
            constraint = encodeAccessConstraint(accessConstraint, successor)
            successorConstraints.append(And(template.PEPTemplate(PEP), constraint))
        return Or(successorConstraints)
    ###
    ### AX
    ###
    elif accessConstraint[0] == 'AX':
        successorConstraints = []
        for PEP in networkx.edges_iter(conf.resourceStructure, resource):
            successor = PEP[1]
            constraint = encodeAccessConstraint(accessConstraint, successor)
            successorConstraints.append(Implies(template.PEPTemplate(PEP), constraint))
        return And(successorConstraints)
    ###
    ### EU
    ###
    elif accessConstraint[0] == 'EU':
        return encodeUntilAccessConstraint(accessConstraint, resource, set())        
    ###
    ### AU
    ###
    elif accessConstraint[0] == 'AU':
        return encodeUntilAccessConstraint(accessConstraint, resource, set())
    ###
    ### SYNTACTIC SHORTHANDS
    ###
    elif accessConstraint[0] == 'AR':
        return encodeAccessConstraint(['not', ['EU', ['not', accessConstraint[1]], ['not', accessConstraint[2]]]], resource)
    elif accessConstraint[0] == 'AG':
        return encodeAccessConstraint(['not', ['EU', 'true', ['not', accessConstraint[1]]]], resource)
    elif accessConstraint[0] == 'EF':                            
        return encodeAccessConstraint(['EU', 'true', accessConstraint[1]], resource)
    elif accessConstraint[0] == 'AF':
        return encodeAccessConstraint(['AU', 'true', accessConstraint[1]], resource)
    else:
        raise NameError('Could not encodeAccessConstraint access constraint: ' + str(accessConstraint))
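The And/Or/Not/Implies constructors used here are consistent with the z3 Python bindings (an assumption; the snippet's imports are not shown). z3's And and Or accept either several arguments or a single list, which is why Or(successorConstraints) works on a plain Python list:

from z3 import And, Or, Not, Bool

p, q = Bool('p'), Bool('q')
expr = Or([p, And(q, Not(p))])  # list form, as used above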
Code Example #32
                if bib_graph.get_edge_data(clean_author_handle,
                                           clean_next_author_handle,
                                           default=0):
                    bib_graph[clean_author_handle][clean_next_author_handle][
                        'weight'] = bib_graph[clean_author_handle][
                            clean_next_author_handle]['weight'] + 1
                else:
                    bib_graph.add_edge(clean_author_handle,
                                       clean_next_author_handle,
                                       weight=1)

    node_weight = []
    edge_weight = [
        data.values()[0] for a, b, data in bib_graph.edges(data=True)
    ]
    for n in range(0, len(bib_graph)):
        node_weight.append(0)
        for e in nx.edges_iter(bib_graph, bib_graph.nodes()[n]):
            node_weight[n] = node_weight[n] + bib_graph.get_edge_data(
                e[0], e[1]).values()[0]
    node_weight = [n * 2.5 for n in node_weight]

    fig = plt.figure(1, figsize=(8, 8))
    if year_iter == max_year_input:
        init_pos = nx.pydot_layout(bib_graph)
    for node in bib_graph:
        pos[node] = init_pos[node]
    nx.draw_networkx(bib_graph,
                     pos,
                     node_size=node_weight,
                     node_color=node_weight,
                     edge_color=edge_weight,
                     cmap=plt.cm.OrRd,
                     font_size=9)
Code Example #33
        #values = ex.predict_feats(W, b_arr, vect)

        # Part 1:
        # Get all neighbour feats of instance feats
        start_feats = set()
        expanded_feats = {}  # first-level neighbs

        for feat in feats:
            for node in G.nodes_iter():
                # if feat is found in the graph, then we get its neighbours and then break to
                # next feat
                if node == feat:
                    start_feats.add(feat)

                    # first level neighbours
                    for edge in nx.edges_iter(G, [node]):
                        nodea = edge[0]
                        nodeb = edge[1]
                        ew = G[nodea][nodeb]["weight"]

                        if nodeb not in start_feats:
                            expanded_feats[nodeb] = ew

                    # go to next feature
                    break

        # Dict to hold all new feats
        new_feats = {}

        # We add all neighbs
        for key, value in expanded_feats.iteritems():
Code Example #34
File: bib-graph.py Project: samerlahoud/bibviz
			continue
			
		for author_index, author_handle in enumerate(item_handle.get_authorsList()[:len(item_handle.get_authorsList())-1]):
			clean_author_handle = str(author_handle)
			for next_author_index, next_author_handle in enumerate(item_handle.get_authorsList()[author_index+1:]):
				clean_next_author_handle = str(next_author_handle)
				if bib_graph.get_edge_data(clean_author_handle, clean_next_author_handle, default=0):
					bib_graph[clean_author_handle][clean_next_author_handle]['weight'] = bib_graph[clean_author_handle][clean_next_author_handle]['weight'] + 1
				else:
					bib_graph.add_edge(clean_author_handle, clean_next_author_handle, weight = 1)
		
	node_weight=[]
	edge_weight = [data.values()[0] for a,b,data in bib_graph.edges(data=True)]
	for n in range(0,len(bib_graph)):
		node_weight.append(0)
		for e in nx.edges_iter(bib_graph, bib_graph.nodes()[n]):
			node_weight[n] = node_weight[n] + bib_graph.get_edge_data(e[0],e[1]).values()[0]
	node_weight = [n*2.5 for n in node_weight]
	
	fig = plt.figure(1, figsize=(8, 8))
	if year_iter == max_year_input:
		init_pos=nx.pydot_layout(bib_graph)
	for node in bib_graph:
		pos[node] = init_pos[node]
	nx.draw_networkx(bib_graph, pos, node_size=node_weight, node_color=node_weight, edge_color=edge_weight, cmap=plt.cm.OrRd, font_size=9)
	
	if year_iter == max_year_input:
		xmax=max(xx for xx,yy in pos.values())
		xmin=min(xx for xx,yy in pos.values())
		ymax=max(yy for xx,yy in pos.values())
		ymin=min(yy for xx,yy in pos.values())
Code Example #35
File: SNA.py Project: winterProf/BIA_Fall2013
# <codecell>

nx.draw_spring(G2000)
plt.show()

# <codecell>

G2000.degree('michael')

# <codecell>

nx.write_adjlist(karate, 'karateclub_adj.csv')

# <codecell>

for edge in nx.edges_iter(karate):
    print edge

# <codecell>

karate_json = {"nodes":[],"links":[]}
for node in nx.nodes_iter(karate):
    n = {"name":node, "group":1}
    karate_json["nodes"].append(n)
for edge in nx.edges_iter(karate):
    e = {"source":edge[0], "target":edge[1], "value":1}
    karate_json["links"].append(e)

# <codecell>

print karate_json
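A sketch of writing the structure out as d3-ready JSON (assumes only the standard json module):

import json

with open('karate.json', 'w') as f:
    json.dump(karate_json, f, indent=2)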
Code Example #36
File: smt.py Project: ptsankov/spctl
def encodeAccessConstraint(accessConstraint, resource):
    ###
    ### TRUE/FALSE
    ###
    if accessConstraint == 'true':
        return True
    elif accessConstraint == 'false':
        return False
    ###
    ### ATTRIBUTE CONSTRAINT
    ###
    elif len(accessConstraint) == 3 and accessConstraint[1] == 'in':
        attrName = accessConstraint[0]
        attrVals = accessConstraint[2]
        attrVal = conf.resourceStructure.node[resource][attrName]
        return any(attrVal == x for x in attrVals)
    ###
    ### UNARY: NOT
    ###
    elif accessConstraint[0] == 'not':
        constraint = encodeAccessConstraint(accessConstraint[1], resource)
        return Not(constraint)
    ###
    ### BINARY: =>, AND, OR
    ###
    elif any(accessConstraint[0] == x for x in ['and', 'or', '=>']):
        constraintLeft = encodeAccessConstraint(accessConstraint[1], resource)
        constraintRight = encodeAccessConstraint(accessConstraint[2], resource)
        if accessConstraint[0] == 'and':
            return And(constraintLeft, constraintRight)
        elif accessConstraint[0] == 'or':
            return Or(constraintLeft, constraintRight)
        elif accessConstraint[0] == '=>':
            return Implies(constraintLeft, constraintRight)
    ###
    ### EX
    ###
    elif accessConstraint[0] == 'EX':
        successorConstraints = []
        for PEP in networkx.edges_iter(conf.resourceStructure, resource):
            successor = PEP[1]
            constraint = encodeAccessConstraint(accessConstraint, successor)
            successorConstraints.append(
                And(template.PEPTemplate(PEP), constraint))
        return Or(successorConstraints)
    ###
    ### AX
    ###
    elif accessConstraint[0] == 'AX':
        successorConstraints = []
        for PEP in networkx.edges_iter(conf.resourceStructure, resource):
            successor = PEP[1]
            constraint = encodeAccessConstraint(accessConstraint, successor)
            successorConstraints.append(
                Implies(template.PEPTemplate(PEP), constraint))
        return And(successorConstraints)
    ###
    ### EU
    ###
    elif accessConstraint[0] == 'EU':
        return encodeUntilAccessConstraint(accessConstraint, resource, set())
    ###
    ### AU
    ###
    elif accessConstraint[0] == 'AU':
        return encodeUntilAccessConstraint(accessConstraint, resource, set())
    ###
    ### SYNTACTIC SHORTHANDS
    ###
    elif accessConstraint[0] == 'AR':
        return encodeAccessConstraint([
            'not',
            ['EU', ['not', accessConstraint[1]], ['not', accessConstraint[2]]]
        ], resource)
    elif accessConstraint[0] == 'AG':
        return encodeAccessConstraint(
            ['not', ['EU', 'true', ['not', accessConstraint[1]]]], resource)
    elif accessConstraint[0] == 'EF':
        return encodeAccessConstraint(['EU', 'true', accessConstraint[1]],
                                      resource)
    elif accessConstraint[0] == 'AF':
        return encodeAccessConstraint(['AU', 'true', accessConstraint[1]],
                                      resource)
    else:
        raise NameError(
            'Could not encodeAccessConstraint access constraint: ' +
            str(accessConstraint))
Code Example #37
        # Get all neighbour feats of instance feats
        start_feats = set()
        expanded_feats = {}  # first-level neighbs
        # to add only mutual neighbours, we record the set of nodes each one is a
        # neighbour of; at the end we can check its neighbour set and see if it has more than 1
        counts = {}

        # instance feats
        for feat in feats:
            for node in G.nodes_iter():
                # if feat is found in the graph, then we get its neighbours and then break to next feat
                if node == feat:
                    start_feats.add(feat)

                    # first level neighbours
                    for edge in nx.edges_iter(G, [node]):
                        nodeb = edge[1]
                        ew = G[node][nodeb]["weight"]

                        if nodeb not in start_feats:
                            # add weight to dict
                            expanded_feats[nodeb] = ew

                            # and also log node in set of neighbs
                            try:
                                n = counts[nodeb]
                                n.add(node)
                                counts[nodeb] = n
                            except KeyError:
                                counts[nodeb] = set([node])
Code Example #38
def lbp(g):

#initial guess for messages (set them all to 1)    
    for (u,v) in g.edges():
        g[u][v]['mssd_h']=float(1)
        g[u][v]['mssd_a']=float(1)
        g[u][v]['mssd_f']=float(1)
        g[u][v]['msds_h']=float(1)
        g[u][v]['msds_a']=float(1)
        g[u][v]['msds_f']=float(1)
        g[u][v]['mssdO_h']=float(1)
        g[u][v]['mssdO_a']=float(1)
        g[u][v]['mssdO_f']=float(1)
        g[u][v]['msdsO_h']=float(1)
        g[u][v]['msdsO_a']=float(1)
        g[u][v]['msdsO_f']=float(1)
    
#compute degree of each node
    for u in g.nodes():
        g.node[u]['neighbours']=list(set(nx.edges_iter(g,nbunch=u)))
        g.node[u]['degree']=float(len(g.node[u]['neighbours']))
        
#compute sum of the weight.
    for u in g.nodes():
        n=g.node[u]['neighbours']
        suma=0
        for (h,w) in n:
            suma=suma+g[h][w]['weight']
        g.node[u]['sumweights']=float(suma)
    
# the prior for all the users (based on weight and degree)
    for u in g.nodes():
        g.node[u]['prior_h']=float(1)/(g.node[u]['sumweights']/g.node[u]['degree'])
        g.node[u]['prior_a']=g.node[u]['prior_f']=float((float(1)-g.node[u]['prior_h'])/2)
#         g.node[u]['prior_h']=g.node[u]['prior_a']=g.node[u]['prior_f']=float(0.333)

#compute propagation matrix values
    for (u,v) in g.edges():
        comp=auxi.propagation(g[u][v]['weight'],0.05)
        g[u][v]['c_ff']=float(comp[0])
        g[u][v]['c_fa']=float(comp[1])
        g[u][v]['c_fh']=float(comp[2])
        g[u][v]['c_af']=float(comp[3])
        g[u][v]['c_aa']=float(comp[4])
        g[u][v]['c_ah']=float(comp[5])
        g[u][v]['c_hf']=float(comp[6])
        g[u][v]['c_ha']=float(comp[7])
        g[u][v]['c_hh']=float(comp[8])

#set who is the source and who is the sink.
    for (u,v) in g.edges():
        g[u][v]['source']=u
        g[u][v]['dest']=v

# save the neighbours.
    for (u,v) in g.edges():
        g[v][u]['ns']=list(set(nx.edges_iter(g,nbunch=g[u][v]['source']))-set([(u,v)]))
        g[u][v]['nd']=list(set(nx.edges_iter(g,nbunch=g[u][v]['dest']))-set([(v,u)]))
        
#main loop: iterate a fixed number of passes, tracking the L-2 norm of the change in the messages as tol.
    tol=1
    numedges=len(g.edges())
    vector0=[float(10)]*numedges*6
    j=1
    
    while j<5:    
        
        vector=[]
        
        #message update from source to dest
        for (u,v) in g.edges():
            a=auxi.prods((u,v),'h',g)
            b=auxi.prods((u,v),'a',g)
            c=auxi.prods((u,v),'f',g)
            g[u][v]['mssd_h']=g[u][v]['c_hh']*g.node[g[u][v]['source']]['prior_h']*a+g[u][v]['c_ah']*g.node[g[u][v]['source']]['prior_a']*b+g[u][v]['c_fh']*g.node[g[u][v]['source']]['prior_f']*c
            g[u][v]['mssd_a']=g[u][v]['c_ha']*g.node[g[u][v]['source']]['prior_h']*a+g[u][v]['c_aa']*g.node[g[u][v]['source']]['prior_a']*b+g[u][v]['c_fa']*g.node[g[u][v]['source']]['prior_f']*c
            g[u][v]['mssd_f']=g[u][v]['c_hf']*g.node[g[u][v]['source']]['prior_h']*a+g[u][v]['c_af']*g.node[g[u][v]['source']]['prior_a']*b+g[u][v]['c_ff']*g.node[g[u][v]['source']]['prior_f']*c
            alpha=g[u][v]['mssd_h']+g[u][v]['mssd_a']+g[u][v]['mssd_f']
            g[u][v]['mssd_h']=g[u][v]['mssd_h']/alpha
            g[u][v]['mssd_a']=g[u][v]['mssd_a']/alpha
            g[u][v]['mssd_f']=g[u][v]['mssd_f']/alpha
            vector.append(g[u][v]['mssd_h'])
            vector.append(g[u][v]['mssd_a'])
            vector.append(g[u][v]['mssd_f'])
            
        #message update from dest to source
        for (u,v) in g.edges():
            a=auxi.prodd((u,v),'h',g)
            b=auxi.prodd((u,v),'a',g)
            c=auxi.prodd((u,v),'f',g)
            g[u][v]['msds_h']=g[u][v]['c_hh']*g.node[g[u][v]['dest']]['prior_h']*a+g[u][v]['c_ha']*g.node[g[u][v]['dest']]['prior_a']*b+g[u][v]['c_hf']*g.node[g[u][v]['dest']]['prior_f']*c
            g[u][v]['msds_a']=g[u][v]['c_ah']*g.node[g[u][v]['dest']]['prior_h']*a+g[u][v]['c_aa']*g.node[g[u][v]['dest']]['prior_a']*b+g[u][v]['c_af']*g.node[g[u][v]['dest']]['prior_f']*c
            g[u][v]['msds_f']=g[u][v]['c_fh']*g.node[g[u][v]['dest']]['prior_h']*a+g[u][v]['c_fa']*g.node[g[u][v]['dest']]['prior_a']*b+g[u][v]['c_ff']*g.node[g[u][v]['dest']]['prior_f']*c
            alpha=g[u][v]['msds_h']+g[u][v]['msds_a']+g[u][v]['msds_f']
            g[u][v]['msds_h']=g[u][v]['msds_h']/alpha
            g[u][v]['msds_a']=g[u][v]['msds_a']/alpha
            g[u][v]['msds_f']=g[u][v]['msds_f']/alpha
            vector.append(g[u][v]['msds_h'])
            vector.append(g[u][v]['msds_a'])
            vector.append(g[u][v]['msds_f'])
        
        #update old messages            
        for (u,v) in g.edges():
            g[u][v]['mssdO_h']=g[u][v]['mssd_h']
            g[u][v]['mssdO_a']=g[u][v]['mssd_a']
            g[u][v]['mssdO_f']=g[u][v]['mssd_f']
            g[u][v]['msdsO_h']=g[u][v]['msds_h']
            g[u][v]['msdsO_a']=g[u][v]['msds_a']
            g[u][v]['msdsO_f']=g[u][v]['msds_f']

        for u in g.nodes():
            g.node[u]['belief_h']=g.node[u]['prior_h']*auxi.prodnode(u,'h',g)
            g.node[u]['belief_a']=g.node[u]['prior_a']*auxi.prodnode(u,'a',g)
            g.node[u]['belief_f']=g.node[u]['prior_f']*auxi.prodnode(u,'f',g)
            alpha=g.node[u]['belief_h']+g.node[u]['belief_a']+g.node[u]['belief_f']
            g.node[u]['belief_h']=g.node[u]['belief_h']/alpha
            g.node[u]['belief_a']=g.node[u]['belief_a']/alpha
            g.node[u]['belief_f']=g.node[u]['belief_f']/alpha
            g.node[u]['prior_h']=g.node[u]['belief_h']
            g.node[u]['prior_a']=g.node[u]['belief_a']
            g.node[u]['prior_f']=g.node[u]['belief_f']
                  
        tol=np.linalg.norm(np.array(vector)-np.array(vector0),ord=2)
        vector0=vector    
        
        
        print j
        print tol
        j=j+1    


#compute final beliefs:    
    for u in g.nodes():
        g.node[u]['belief_h']=g.node[u]['prior_h']*auxi.prodnode(u,'h',g)
        g.node[u]['belief_a']=g.node[u]['prior_a']*auxi.prodnode(u,'a',g)
        g.node[u]['belief_f']=g.node[u]['prior_f']*auxi.prodnode(u,'f',g)
        alpha=g.node[u]['belief_h']+g.node[u]['belief_a']+g.node[u]['belief_f']
        g.node[u]['belief_h']=g.node[u]['belief_h']/alpha
        g.node[u]['belief_a']=g.node[u]['belief_a']/alpha
        g.node[u]['belief_f']=g.node[u]['belief_f']/alpha
              
#convert back to float:

    for u in g.nodes():
        g.node[u]['belief_h']=float(g.node[u]['belief_h'])
        g.node[u]['belief_a']=float(g.node[u]['belief_a'])
        g.node[u]['belief_f']=float(g.node[u]['belief_f'])
        g.node[u]['prior_h']=float(g.node[u]['prior_h'])
        g.node[u]['prior_a']=float(g.node[u]['prior_a'])
        g.node[u]['prior_f']=float(g.node[u]['prior_f'])
        
    for (u,v) in g.edges():
         g[u][v]['mssd_h']=float(g[u][v]['mssd_h'])
         g[u][v]['mssd_a']=float(g[u][v]['mssd_a'])
         g[u][v]['mssd_f']=float(g[u][v]['mssd_f'])
         g[u][v]['msds_h']=float(g[u][v]['msds_h'])
         g[u][v]['msds_a']=float(g[u][v]['msds_a'])
         g[u][v]['msds_f']=float(g[u][v]['msds_f'])       
         g[u][v]['c_ff']=float(g[u][v]['c_ff'])
         g[u][v]['c_fa']=float(g[u][v]['c_fa'])
         g[u][v]['c_fh']=float(g[u][v]['c_fh'])
         g[u][v]['c_af']=float(g[u][v]['c_af'])
         g[u][v]['c_aa']=float(g[u][v]['c_aa'])
         g[u][v]['c_ah']=float(g[u][v]['c_ah'])
         g[u][v]['c_hf']=float(g[u][v]['c_hf'])
         g[u][v]['c_ha']=float(g[u][v]['c_ha'])
         g[u][v]['c_hh']=float(g[u][v]['c_hh'])


    fraud_belief={}
    
    for u in g.nodes():
        fraud_belief[u]={'belief_f':g.node[u]['belief_f'], 'degree':len(g.node[u]['neighbours']),'sumweight':g.node[u]['sumweights']}

    users=pd.DataFrame.from_dict(fraud_belief,orient='index')
    users=users.reset_index()
    users.columns=['userid','belief_f','sumweight','degree']
    users = users.sort(['userid'], ascending=False)
    users[['userid']] = users[['userid']].astype(str)

    return users, j
Code Example #39
        for key in sorted(predicted_feats,
                          key=predicted_feats.get,
                          reverse=True):
            ranked_by_pred.append(key)

        # Get instance feats that are in ClassiNet
        instance_feats = []
        for feat in feats:
            for node in G.nodes_iter():
                if node == feat:
                    instance_feats.append(feat)

        # Get neighbouring nodes in graph from starting points given by instance feats and predicted feats
        neighb_feats = {}
        for feat in instance_feats:
            for edge in nx.edges_iter(G, [feat]):
                nodea = edge[0]
                nodeb = edge[1]
                #edge_weight = G[nodea][nodeb]
                #print nodea, nodeb
                if nodeb not in feats:
                    try:
                        neighb_feats[nodeb] += 1
                    except KeyError:
                        neighb_feats[nodeb] = 1
        for feat in predicted_feats:
            for edge in nx.edges_iter(G, [feat]):
                nodea = edge[0]
                nodeb = edge[1]
                #edge_weight = G[nodea][nodeb]
                #print nodea, nodeb
Code Example #40
# <codecell>

nx.draw_spring(G2000)
plt.show()

# <codecell>

G2000.degree('michael')

# <codecell>

nx.write_adjlist(karate, 'karateclub_adj.csv')

# <codecell>

for edge in nx.edges_iter(karate):
    print edge

# <codecell>

karate_json = {"nodes": [], "links": []}
for node in nx.nodes_iter(karate):
    n = {"name": node, "group": 1}
    karate_json["nodes"].append(n)
for edge in nx.edges_iter(karate):
    e = {"source": edge[0], "target": edge[1], "value": 1}
    karate_json["links"].append(e)

# <codecell>

print karate_json
Code Example #41
File: dsd.py Project: EdwardBetts/matching-metrics
def thin(G,p):
    R = nx.Graph(G)
    removeSet = [i for i in nx.edges_iter(R) if random.random() < p]
    R.remove_edges_from(removeSet)
    return R
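A usage sketch: thinning keeps each edge independently with probability 1 - p and leaves the node set intact.

import networkx as nx

G = nx.complete_graph(20)
R = thin(G, 0.25)  # drops roughly a quarter of the edges
assert R.number_of_nodes() == G.number_of_nodes()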
Code Example #43
 threshold = 0.9
 min_value = 0.5
 
 # go through values and save those above threshold into expanded_feats dict
 for j in xrange(len(values)):
     this_target_word = target_words[j]
     value = values[j]
     # only continue if not in feats already
     if this_target_word not in feats:
         # if predicted value exceeds threshold, we continue with it
         if value >= threshold:
             # add to expanded feats
             predicted_feats[this_target_word] = float("{0:.4f}".format(value))
             
             # first-level path feats
             for edge in nx.edges_iter(G, [this_target_word]):
                 nodea = edge[0]
                 nodeb = edge[1]
                 edge_weight = G[nodea][nodeb]["weight"]
                 feat_val = value * edge_weight
                 print "*", nodea, nodeb, edge_weight
                 
                 # only continue if value is reasonable
                 if feat_val >= min_value:
                     if nodeb not in feats and nodeb not in predicted_feats:
                         path_feats[nodeb] = float("{0:.4f}".format(feat_val))
                     
                     # include second-level path feats if set
                     if n == 2:
                         for edge2 in nx.edges_iter(G, [nodeb]):
                             nodea2 = edge2[0]
Code Example #44
File: test_function.py Project: Bludge0n/AREsoft
 def test_edges_iter(self):
     assert_equal(list(self.G.edges_iter()),list(networkx.edges_iter(self.G)))
     assert_equal(list(self.DG.edges_iter()),list(networkx.edges_iter(self.DG)))
     assert_equal(list(self.G.edges_iter(nbunch=[0,1,3])),list(networkx.edges_iter(self.G,nbunch=[0,1,3])))
     assert_equal(list(self.DG.edges_iter(nbunch=[0,1,3])),list(networkx.edges_iter(self.DG,nbunch=[0,1,3])))
Code Example #45
        threshold = 0.95

        # go through values and save those above threshold into expanded_feats dict
        for j in xrange(len(values)):
            this_target_word = target_words[j]
            value = values[j]
            # only continue if not in feats already
            if this_target_word not in feats:
                # if predicted value exceeds threshold, we continue with it
                if value >= threshold:
                    # add to expanded feats
                    predicted_feats[this_target_word] = float(
                        "{0:.4f}".format(value))

                    # first-level path feats
                    for edge in nx.edges_iter(G, [this_target_word]):
                        nodea = edge[0]
                        nodeb = edge[1]
                        edge_weight = G[nodea][nodeb]["weight"]
                        feat_val = value * edge_weight
                        #print "*", nodea, nodeb, edge_weight

                        if nodeb not in feats and nodeb not in predicted_feats:
                            path_feats[nodeb] = float(
                                "{0:.4f}".format(feat_val))

        # Add predicted feats
        for key, value in predicted_feats.iteritems():
            if key not in feats:
                sentences[i].append(key + ":" + str(value))
Code example #46
import itertools

import networkx as nx
import numpy as np


def count_triad_motifs(network, directed=None):
    """
    Counts the occurrences of triad motifs in a network.

    Arguments:
        network => The input network.
        directed => Whether or not the network is directed.

    Returns:
        A fixed-size array with indices representing unique triad motifs and the values
        representing their number of occurrences within the network.
    """

    if directed or nx.is_directed(network):

        # Initialize an array for storing our motif counts. Each index represents the following motif:
        # 0 => a <- b -> c
        # 1 => a -> b <- c
        # 2 => b -> a -> c
        # 3 => a -> b <-> c
        # 4 => c <-> a -> b
        # 5 => a <-> b <-> c
        # 6 => a <- b -> c <- a
        # 7 => a -> b -> c -> a
        # 8 => c <-> a -> b <- c
        # 9 => c <- a -> b <-> c
        # 10 => a <-> c -> b -> a
        # 11 => a <-> b <-> c -> a
        # 12 => a <-> b <-> c <-> a
        motif_counts = np.zeros(shape=(13,), dtype=np.int)

        # Track visited node triplets (as sorted tuples) so repeats don't occur.
        visited_triplets = set()

        # Iterate through the valid edges.
        for a, b in sorted(nx.edges_iter(network)):

            # Take all unique c nodes that form valid a, b, c triplets.
            # (Uniqueness of node triplets is enforced below by the
            # visited_triplets check.)
            c_neighbors = set(
                [
                    neighbor
                    for neighbor in itertools.chain(nx.all_neighbors(network, a), nx.all_neighbors(network, b))
                    if neighbor != a and neighbor != b
                ]
            )

            # Iterate through the valid a, b, c triplets.
            for c in c_neighbors:

                # Make sure unique node triplets aren't repeated.
                sorted_abc = tuple(sorted((a, b, c)))
                if sorted_abc in visited_triplets:
                    continue
                visited_triplets.add(sorted_abc)

                # a <-------> b
                # a     c     b
                if network.has_edge(b, a):

                    # a <-------> b
                    # a --> c     b
                    if network.has_edge(a, c):

                        # a <-------> b
                        # a <-> c     b
                        if network.has_edge(c, a):

                            # a <-------> b
                            # a <-> c --> b
                            if network.has_edge(c, b):

                                # a <-------> b
                                # a <-> c <-> b
                                if network.has_edge(b, c):
                                    motif_counts[12] += 1

                                # a <-------> b
                                # a <-> c --> b
                                else:
                                    motif_counts[11] += 1

                            # a <-------> b
                            # a <-> c <-- b
                            elif network.has_edge(b, c):
                                motif_counts[11] += 1

                            # a <-------> b
                            # a <-> c     b
                            else:
                                motif_counts[5] += 1

                        # a <-------> b
                        # a --> c     b
                        else:

                            # a <-------> b
                            # a --> c --> b
                            if network.has_edge(c, b):

                                # a <-------> b
                                # a --> c <-> b
                                if network.has_edge(b, c):
                                    motif_counts[11] += 1

                                # a <-------> b
                                # a --> c --> b
                                else:
                                    motif_counts[10] += 1

                            # a <-------> b
                            # a --> c <-- b
                            elif network.has_edge(b, c):
                                motif_counts[8] += 1

                            # a <-------> b
                            # a --> c     b
                            else:
                                motif_counts[4] += 1

                    # a <-------> b
                    # a <-- c     b
                    elif network.has_edge(c, a):

                        # a <-------> b
                        # a <-- c --> b
                        if network.has_edge(c, b):

                            # a <-------> b
                            # a <-- c <-> b
                            if network.has_edge(b, c):
                                motif_counts[11] += 1

                            # a <-------> b
                            # a <-- c --> b
                            else:
                                motif_counts[9] += 1

                        # a <-------> b
                        # a <-- c <-- b
                        elif network.has_edge(b, c):
                            motif_counts[10] += 1

                        # a <-------> b
                        # a <-- c     b
                        else:
                            motif_counts[3] += 1

                    # a <-------> b
                    # a     c <-- b
                    elif network.has_edge(b, c):

                        # a <-------> b
                        # a     c <-> b
                        if network.has_edge(c, b):
                            motif_counts[5] += 1

                        # a <-------> b
                        # a     c <-- b
                        else:
                            motif_counts[4] += 1

                    # a <-------> b
                    # a     c --> b
                    else:
                        motif_counts[3] += 1

                # a --------> b
                # a     c     b
                else:

                    # a --------> b
                    # a --> c     b
                    if network.has_edge(a, c):

                        # a --------> b
                        # a <-> c     b
                        if network.has_edge(c, a):

                            # a --------> b
                            # a <-> c --> b
                            if network.has_edge(c, b):

                                # a --------> b
                                # a <-> c <-> b
                                if network.has_edge(b, c):
                                    motif_counts[11] += 1

                                # a --------> b
                                # a <-> c --> b
                                else:
                                    motif_counts[8] += 1

                            # a --------> b
                            # a <-> c <-- b
                            elif network.has_edge(b, c):
                                motif_counts[10] += 1

                            # a --------> b
                            # a <-> c     b
                            else:
                                motif_counts[4] += 1

                        # a --------> b
                        # a --> c     b
                        else:

                            # a --------> b
                            # a --> c --> b
                            if network.has_edge(c, b):

                                # a --------> b
                                # a --> c <-> b
                                if network.has_edge(b, c):
                                    motif_counts[9] += 1

                                # a --------> b
                                # a --> c --> b
                                else:
                                    motif_counts[6] += 1

                            # a --------> b
                            # a --> c <-- b
                            elif network.has_edge(b, c):
                                motif_counts[6] += 1

                            # a --------> b
                            # a --> c     b
                            else:
                                motif_counts[0] += 1

                    # a --------> b
                    # a <-- c     b
                    elif network.has_edge(c, a):

                        # a --------> b
                        # a <-- c --> b
                        if network.has_edge(c, b):

                            # a --------> b
                            # a <-- c <-> b
                            if network.has_edge(b, c):
                                motif_counts[10] += 1

                            # a --------> b
                            # a <-- c --> b
                            else:
                                motif_counts[6] += 1

                        # a --------> b
                        # a <-- c <-- b
                        elif network.has_edge(b, c):
                            motif_counts[7] += 1

                        # a --------> b
                        # a <-- c     b
                        else:
                            motif_counts[2] += 1

                    # a --------> b
                    # a     c <-- b
                    elif network.has_edge(b, c):

                        # a --------> b
                        # a     c <-> b
                        if network.has_edge(c, b):
                            motif_counts[3] += 1

                        # a --------> b
                        # a     c <-- b
                        else:
                            motif_counts[2] += 1

                    # a --------> b
                    # a     c --> b
                    else:
                        motif_counts[1] += 1

    else:

        # Initialize an array for storing our motif counts. Each index represents the following motif:
        # 0 => a - b - c - a
        # 1 => a - b - c
        motif_counts = np.zeros(shape=(2,), dtype=np.int)

        # Iterate through the edges in the network.
        for a, b in sorted(nx.edges_iter(network)):

            # Find all node a neighbors such that their node ID is greater than node b (which, by the sorted
            # nature of nx.edges_iter, will always be greater than node a).
            a_neighbors = set([neighbor for neighbor in network[a] if neighbor > b])

            # Find all node b neighbors such that their node ID is greater than node b. This
            # prevents repeated consideration of node triplets.
            b_neighbors = set([neighbor for neighbor in network[b] if neighbor > b])

            # The number of triangle motifs is the number of common neighbors of a and b.
            motif_counts[0] += len(a_neighbors.intersection(b_neighbors))

            # The number of chain motifs is the number of unshared neighbors of a and b.
            motif_counts[1] += len(a_neighbors.symmetric_difference(b_neighbors))

    return motif_counts
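
As a sanity check, a minimal usage sketch (the directed 3-cycle is an illustrative assumption; nx.edges_iter requires NetworkX 1.x):

import networkx as nx

G = nx.DiGraph()
G.add_edges_from([(1, 2), (2, 3), (3, 1)])   # a -> b -> c -> a
print count_triad_motifs(G)                   # expect a 1 at index 7, zeros elsewhere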
Code example #47
from decimal import Decimal

import networkx as nx
import numpy as np
import pandas as pd

import auxi  # helper module from the source project (not reproduced here)


def lbp(g, delta, df_1):
    # initial guess for messages, taken from the per-edge probabilities in
    # df_1 (the column spellings follow the source data)
    arr1 = g.edges()
    arr2 = range(len(df_1))
    for (u, v), i in zip(arr1, arr2):
        g[u][v]['mssd_h'] = Decimal(df_1['honest_propabiliy'][i])
        g[u][v]['mssd_s'] = Decimal(df_1['fruad_propabiliy'][i])
        g[u][v]['msds_h'] = Decimal(df_1['honest_propabiliy'][i])
        g[u][v]['msds_s'] = Decimal(df_1['fruad_propabiliy'][i])
        g[u][v]['mssdO_h'] = Decimal(df_1['honest_propabiliy'][i])
        g[u][v]['mssdO_s'] = Decimal(df_1['fruad_propabiliy'][i])
        g[u][v]['msdsO_h'] = Decimal(df_1['honest_propabiliy'][i])
        g[u][v]['msdsO_s'] = Decimal(df_1['fruad_propabiliy'][i])

    # compute degree of each node
    for u in g.nodes():
        g.node[u]['neighbours'] = list(set(nx.edges_iter(g, nbunch=u)))
        g.node[u]['degree'] = Decimal(len(g.node[u]['neighbours']))

    # compute sum of the weight.
    for u in g.nodes():
        n = g.node[u]['neighbours']
        suma = 0
        for (h, w) in n:
            suma = suma + g[h][w]['weight']
        g.node[u]['sumweights'] = Decimal(suma)

    # the prior for all the users (based on weight and degree)
    for u in g.nodes():
        g.node[u]['prior_h'] = Decimal(1) / (g.node[u]['sumweights'] /
                                             g.node[u]['degree'])
        g.node[u]['prior_s'] = Decimal(1) - g.node[u]['prior_h']

    # compute compatibility potentials
    for (u, v) in g.edges():
        comp = auxi.compatibility(g[u][v]['weight'], delta)
        g[u][v]['c_hh'] = comp[0]
        g[u][v]['c_sh'] = comp[1]
        g[u][v]['c_hs'] = comp[2]
        g[u][v]['c_ss'] = comp[3]

    # set who is the source and who is the sink.
    for (u, v) in g.edges():
        g[u][v]['source'] = u
        g[u][v]['dest'] = v

    # save the neighbours.
    for (u, v) in g.edges():
        g[v][u]['ns'] = list(
            set(nx.edges_iter(g, nbunch=g[u][v]['source'])) - set([(u, v)]))
        g[u][v]['nd'] = list(
            set(nx.edges_iter(g, nbunch=g[u][v]['dest'])) - set([(v, u)]))

    # main loop: iterate until the stopping criterion on the L2 norm of the messages is reached

    eps = 0.0001  # convergence tolerance (distinct from the delta argument)
    tol = 1
    numedges = len(g.edges())
    vector0 = [Decimal(10)] * numedges * 4
    j = 1
    str2 = "'"  # apostrophe prefixed to user ids below

    while tol > eps:

        vector = []

        # message update from source to dest
        for (u, v) in g.edges():
            a = auxi.prods(u, v, 'h', g)
            b = auxi.prods(u, v, 's', g)
            g[u][v]['mssd_h'] = g[u][v]['c_hh'] * g.node[
                g[u][v]['source']]['prior_h'] * a + g[u][v]['c_sh'] * g.node[
                    g[u][v]['source']]['prior_s'] * b
            g[u][v]['mssd_s'] = g[u][v]['c_hs'] * g.node[
                g[u][v]['source']]['prior_h'] * a + g[u][v]['c_ss'] * g.node[
                    g[u][v]['source']]['prior_s'] * b
            alpha = g[u][v]['mssd_h'] + g[u][v]['mssd_s']
            g[u][v]['mssd_h'] = g[u][v]['mssd_h'] / alpha
            g[u][v]['mssd_s'] = g[u][v]['mssd_s'] / alpha
            vector.append(g[u][v]['mssd_h'])
            vector.append(g[u][v]['mssd_s'])

        # message update from dest to source
        for (u, v) in g.edges():
            a = auxi.prodd(u, v, 'h', g)
            b = auxi.prodd(u, v, 's', g)
            g[u][v]['msds_h'] = g[u][v]['c_hh'] * g.node[
                g[u][v]['dest']]['prior_h'] * a + g[u][v]['c_hs'] * g.node[
                    g[u][v]['dest']]['prior_s'] * b
            g[u][v]['msds_s'] = g[u][v]['c_sh'] * g.node[
                g[u][v]['dest']]['prior_h'] * a + g[u][v]['c_ss'] * g.node[
                    g[u][v]['dest']]['prior_s'] * b
            alpha = g[u][v]['msds_h'] + g[u][v]['msds_s']
            g[u][v]['msds_h'] = g[u][v]['msds_h'] / alpha
            g[u][v]['msds_s'] = g[u][v]['msds_s'] / alpha
            vector.append(g[u][v]['msds_h'])
            vector.append(g[u][v]['msds_s'])

        # update old messages
        for (u, v) in g.edges():
            g[u][v]['mssdO_h'] = g[u][v]['mssd_h']
            g[u][v]['mssdO_s'] = g[u][v]['mssd_s']
            g[u][v]['msdsO_h'] = g[u][v]['msds_h']
            g[u][v]['msdsO_s'] = g[u][v]['msds_s']

        tol = np.linalg.norm(np.array(vector) - np.array(vector0), ord=2)
        vector0 = vector

        print(j)
        print(tol)
        j = j + 1

    # compute final beliefs:

    for u in g.nodes():
        g.node[u]['belief_h'] = g.node[u]['prior_h'] * auxi.prodnode(u, 'h', g)
        g.node[u]['belief_s'] = g.node[u]['prior_s'] * auxi.prodnode(u, 's', g)
        alpha = g.node[u]['belief_h'] + g.node[u]['belief_s']
        g.node[u]['belief_h'] = g.node[u]['belief_h'] / alpha
        g.node[u]['belief_s'] = g.node[u]['belief_s'] / alpha

    # convert back to float:

    for u in g.nodes():
        g.node[u]['belief_h'] = float(g.node[u]['belief_h'])
        g.node[u]['belief_s'] = float(g.node[u]['belief_s'])
        g.node[u]['prior_h'] = float(g.node[u]['prior_h'])
        g.node[u]['prior_s'] = float(g.node[u]['prior_s'])

    for (u, v) in g.edges():
        g[u][v]['mssd_h'] = float(g[u][v]['mssd_h'])
        g[u][v]['mssd_s'] = float(g[u][v]['mssd_s'])
        g[u][v]['msds_h'] = float(g[u][v]['msds_h'])
        g[u][v]['msds_s'] = float(g[u][v]['msds_s'])
        g[u][v]['c_hh'] = float(g[u][v]['c_hh'])
        g[u][v]['c_hs'] = float(g[u][v]['c_hs'])
        g[u][v]['c_sh'] = float(g[u][v]['c_sh'])
        g[u][v]['c_ss'] = float(g[u][v]['c_ss'])

    sybil_belief = {}

    for u in g.nodes():
        sybil_belief[u] = {
            'belief_s': g.node[u]['belief_s'],
            'degree': len(g.node[u]['neighbours']),
            'sumweight': g.node[u]['sumweights']
        }

    users = pd.DataFrame.from_dict(sybil_belief, orient='index')
    users = users.reset_index()
    users.columns = ['userid', 'belief_s', 'degree', 'sumweight']
    users = users.sort(['userid'], ascending=False)
    users[['userid']] = str2 + users[['userid']].astype(str)

    return users, j
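
For orientation, a minimal sketch of the inputs lbp expects (the column names, including their spellings, are taken from the code above; the graph, weights and probabilities are illustrative, and running the call needs the project's auxi helpers):

import networkx as nx
import pandas as pd

g = nx.Graph()
g.add_edge('u1', 'u2', weight=1.0)
g.add_edge('u2', 'u3', weight=2.0)
df_1 = pd.DataFrame({'honest_propabiliy': [0.9, 0.8],
                     'fruad_propabiliy': [0.1, 0.2]})  # one row per edge of g
# users, iterations = lbp(g, delta=0.5, df_1=df_1)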
Code example #48
        min_value = 0.5

        # go through values and save those above threshold into the predicted_feats dict
        for j in xrange(len(values)):
            this_target_word = target_words[j]
            value = values[j]
            # only continue if not in feats already
            if this_target_word not in feats:
                # if predicted value exceeds threshold, we continue with it
                if value >= threshold:
                    # add to expanded feats
                    predicted_feats[this_target_word] = float(
                        "{0:.4f}".format(value))

                    # first-level path feats
                    for edge in nx.edges_iter(G, [this_target_word]):
                        nodea = edge[0]
                        nodeb = edge[1]
                        edge_weight = G[nodea][nodeb]["weight"]
                        feat_val = value * edge_weight
                        print "*", nodea, nodeb, edge_weight

                        # only continue if value is reasonable
                        if feat_val >= min_value:
                            if nodeb not in feats and nodeb not in predicted_feats:
                                path_feats[nodeb] = float(
                                    "{0:.4f}".format(feat_val))

                            # include second-level path feats if set
                            if n == 2:
                                for edge2 in nx.edges_iter(G, [nodeb]):
Code example #49
File: smt.py Project: ptsankov/spctl
import networkx
from z3 import And, Implies, Not, Or

# conf, template and encodeAccessConstraint come from the source project
# and are not reproduced here.


def encodeUntilAccessConstraint(accessConstraint, resource, visited):
    if accessConstraint[0] == 'EU':
        accessConstraint1 = accessConstraint[1]
        accessConstraint2 = accessConstraint[2]

        accessConstraint2Encoded = encodeAccessConstraint(
            accessConstraint2, resource)
        accessConstraint1Encoded = encodeAccessConstraint(
            accessConstraint1, resource)

        successorConstraints = []
        for PEP in networkx.edges_iter(conf.resourceStructure, resource):
            successor = PEP[1]
            if successor in visited:
                continue
            succVisited = set(visited)
            succVisited.add(resource)
            successorAccessConstraintEncoded = encodeUntilAccessConstraint(
                accessConstraint, successor, succVisited)
            pepTemplate = template.PEPTemplate(PEP)
            successorConstraints.append(
                And(pepTemplate, successorAccessConstraintEncoded))
        return Or(accessConstraint2Encoded,
                  And(accessConstraint1Encoded, Or(successorConstraints)))

    elif accessConstraint[0] == 'AU':
        accessConstraint1 = accessConstraint[1]
        accessConstraint2 = accessConstraint[2]

        accessConstraint2Encoded = encodeAccessConstraint(
            accessConstraint2, resource)
        accessConstraint1Encoded = encodeAccessConstraint(
            accessConstraint1, resource)

        successorConstraints = []
        noBackEdgesConstraints = []
        existsSuccessorConstraints = []

        for PEP in networkx.edges_iter(conf.resourceStructure, resource):
            successor = PEP[1]
            if successor in visited:
                noBackEdgesConstraints.append(Not(template.PEPTemplate(PEP)))
                continue

            existsSuccessorConstraints.append(template.PEPTemplate(PEP))

            succVisited = set(visited)
            succVisited.add(resource)
            successorAccessConstraintEncoded = encodeUntilAccessConstraint(
                accessConstraint, successor, succVisited)
            pepTemplate = template.PEPTemplate(PEP)
            successorConstraints.append(
                Implies(pepTemplate, successorAccessConstraintEncoded))

        noBackEdges = And(noBackEdgesConstraints)
        existsSuccessor = Or(existsSuccessorConstraints)

        return Or(
            accessConstraint2Encoded,
            And(accessConstraint1Encoded, noBackEdges, existsSuccessor,
                And(successorConstraints)))
    else:
        raise NameError('Not an until access constraint:', accessConstraint)
Code example #51
 # Compile the prediction rank list by adding predicted feats in descending order of prediction value
 ranked_by_pred = []
 for key in sorted(predicted_feats, key=predicted_feats.get, reverse=True):
     ranked_by_pred.append(key)
 
 # Get instance feats that are in ClassiNet
 instance_feats = []
 for feat in feats:
     for node in G.nodes_iter():
         if node == feat:
             instance_feats.append(feat)
 
 # Get neighbouring nodes in graph from starting points given by instance feats and predicted feats
 neighb_feats = {}
 for feat in instance_feats:
     for edge in nx.edges_iter(G, [feat]):
         nodea = edge[0]
         nodeb = edge[1]
         #edge_weight = G[nodea][nodeb]
         #print nodea, nodeb
         if nodeb not in feats:
             try:
                 neighb_feats[nodeb] += 1
             except KeyError:
                 neighb_feats[nodeb] = 1
 for feat in predicted_feats:
     for edge in nx.edges_iter(G, [feat]):
         nodea = edge[0]
         nodeb = edge[1]
         #edge_weight = G[nodea][nodeb]
         #print nodea, nodeb
Code example #52
File: lp.py Project: FiveKilogram/LinkPrediction
import datetime
import math
import random

import networkx as nx

# pair, similarities, AUC, Ranking_score, Precision and stats are helpers
# defined elsewhere in the source project.


def LP(graph_file, out_file, sim_method, t, p):

    G = nx.read_edgelist(graph_file, nodetype=int)
    #G = G.to_undirected()
    #G = nx.convert_node_labels_to_integers(G)

    # for debug
    # print(nx.nodes(G))

    node_num = nx.number_of_nodes(G)
    edge_num = nx.number_of_edges(G)

    # List all non-existent links and store them in non_edge_list
    # non_edge_num = (node_num * (node_num - 1)) / 2 - edge_num
    non_edge_list = [pair(u, v) for u, v in nx.non_edges(G)]
    non_edge_num = len(non_edge_list)

    # for debug
    print("V: %d\tE: %d\tNon: %d" % (node_num, edge_num, non_edge_num))

    # for debug
    #    print(len(non_edge_list))
    # print(non_edge_list)

    # Run t independent trials; each time select p*100% of the links in G as the test set and use the rest as the training set
    test_num = int(edge_num * p)
    pre_num = 0

    for l in range(2, 101, 2):
        if l < 20:
            pre_num += 1
        else:
            break
        # end if
    # end for
    pre_num += 1

    # for debug
    print('test_edge_num: %d' % test_num)

    # Arrays to hold the performance values
    auc_list = []
    rs_list = []
    time_list = []
    pre_matrix = [[0 for it in range(t)] for num in range(pre_num)]

    # Run the t test iterations
    for it in range(t):
        if it % 10 == 0:
            print('turn: %d' % it)
        # end if

        # First generate a batch of random numbers
        seed = math.sqrt(edge_num * node_num) + math.pow(
            (1 + it) * 10, 3)  # random-number seed
        random.seed(seed)
        rand_set = set(random.sample(range(edge_num), test_num))

        # rand_set = set()
        # i = 0
        # while (i < test_num):
        # 	r = random.randint(0, edge_num - 1)
        # 	if (r not in rand_set):
        # 		rand_set.add(r)
        # 		i += 1
        # 	# end if
        # # end while

        # for debug
        # print(rand_set)
        # print(len(rand_set))

        # Traverse the links in G and split them into training and test sets according to rand_set
        training_graph = nx.Graph()
        training_graph.add_nodes_from(range(node_num))
        test_edge_list = []

        r = 0
        for u, v in nx.edges_iter(G):
            u, v = pair(u, v)
            # for debug
            # print(u, v)
            if r in rand_set:  # test link
                test_edge_list.append((u, v))
            else:
                training_graph.add_edge(u, v)  # training network
            # end if
            r += 1
        # end for

        # for debug
        # print(len(test_edge_list))
        # print(test_edge_list)
        # print(nx.number_of_edges(training_graph))
        # print(nx.number_of_nodes(training_graph))
        # print(nx.nodes(training_graph))
        # print(nx.edges(training_graph))

        # Compute similarities
        # if (it % 10 == 0):
        #     print('computing similarities')

        start = datetime.datetime.now()
        sim_dict = similarities(training_graph, sim_method)
        end = datetime.datetime.now()

        # 0. Timing
        time_list.append((end - start).microseconds)

        # 1. Compute the AUC
        auc_value = AUC(sim_dict, test_edge_list, non_edge_list)
        auc_list.append(auc_value)
        # for debug
        # print(auc_value)

        # Build a list holding the similarity of each node pair
        sim_list = [((u, v), s) for (u, v), s in sim_dict.items()]

        # sim_dict is no longer needed
        sim_dict.clear()

        # Sort sim_list in descending order of similarity
        sim_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

        # 2. Compute the ranking score
        rank_score = Ranking_score(sim_list, test_edge_list, non_edge_num)
        rs_list.append(rank_score)
        # for debug
        # print(rank_score)

        # 3. Compute the precision list
        pre_list = Precision(sim_list, test_edge_list, test_num)

        for num in range(pre_num):
            pre_matrix[num][it] = pre_list[num]
        # end for
    # end for

    # Compute means and standard deviations, and write the results to the output file
    auc_avg, auc_std = stats(auc_list)

    print('AUC: %.4f(%.4f)' % (auc_avg, auc_std))
    out_file.write('%.4f(%.4f)\t' % (auc_avg, auc_std))

    rs_avg, rs_std = stats(rs_list)

    print('Ranking_Score: %.4f(%.4f)' % (rs_avg, rs_std))
    out_file.write('%.4f(%.4f)\t' % (rs_avg, rs_std))

    time_avg, time_std = stats(time_list)

    print('Time: %.4f(%.4f)' % (time_avg, time_std))
    out_file.write('%.4f(%.4f)\t' % (time_avg, time_std))

    pre_avg_list = []
    pre_std_list = []
    for num in range(pre_num):
        pre_avg, pre_std = stats(pre_matrix[num])
        pre_avg_list.append(pre_avg)
        pre_std_list.append(pre_std)
    # end for

    print('Precision: ')
    # out_file.write('\nPrecision: ')
    for num in range(pre_num):
        print('%.4f(%.4f)\t' % (pre_avg_list[num], pre_std_list[num]))
        out_file.write('%.4f(%.4f)\t' % (pre_avg_list[num], pre_std_list[num]))
    # end for

    out_file.write('%d\n' % test_num)
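
Of the project helpers used above, pair is assumed here only to canonicalize the endpoints of an undirected edge; a plausible sketch:

def pair(u, v):
    # assumed behaviour: order the endpoints so that each undirected
    # edge has a single canonical representation
    return (u, v) if u < v else (v, u)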
Code example #53
 # Get all neighbour feats of instance feats
 start_feats = set()
 expanded_feats = {} # first-level neighbs
 # to add only mutual neighbours, we record for each node the set of nodes it
 # neighbours; at the end we can check each entry and keep nodes with more than one
 counts = {}
 
 # instance feats
 for feat in feats:
     for node in G.nodes_iter():
         # if feat is found in the graph, then we get its neighbours and then break to next feat
         if node == feat:
             start_feats.add(feat)
             
             # first level neighbours
             for edge in nx.edges_iter(G, [node]):
                 nodeb = edge[1]
                 ew = G[node][nodeb]["weight"]
                 
                 if nodeb not in start_feats:
                     # add weight to dict
                     expanded_feats[nodeb] = ew
                     
                     # and also log node in set of neighbs
                     try:
                         n = counts[nodeb]
                         n.add(node)
                         counts[nodeb] = n
                     except KeyError:
                         counts[nodeb] = set([node])