Example #1
def disjoint_union(G, H):
    """ Return the disjoint union of graphs G and H.

    This algorithm forces distinct integer node labels.

    Parameters
    ----------
    G,H : graph
       A NetworkX graph

    Returns
    -------
    U : A union graph with the same type as G.

    Notes
    -----
    A new graph is created, of the same class as G.  It is recommended
    that G and H be either both directed or both undirected.

    The nodes of G are relabeled 0 to len(G)-1, and the nodes of H are
    relabeled len(G) to len(G)+len(H)-1.

    Graph, edge, and node attributes are propagated from G and H
    to the union graph.  If a graph attribute is present in both
    G and H the value from H is used.
    """
    R1 = nx.convert_node_labels_to_integers(G)
    R2 = nx.convert_node_labels_to_integers(H, first_label=len(R1))
    R = union(R1, R2)
    R.graph.update(G.graph)
    R.graph.update(H.graph)
    return R
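A quick usage sketch of the relabeling described in the Notes, using the public nx.disjoint_union that this function implements:

import networkx as nx

G = nx.path_graph(3)          # nodes 0, 1, 2
H = nx.path_graph(2)          # nodes 0, 1
U = nx.disjoint_union(G, H)
print(sorted(U))              # [0, 1, 2, 3, 4]: H's nodes were shifted by len(G)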
Example #2
def disjoint_union(G, H):
    """ Return the disjoint union of graphs G and H,
    forcing distinct integer node labels.

    Parameters
    ----------
    G,H : graph
       A NetworkX graph

    Returns
    -------
    U : A union graph with the same type as G.

    Notes
    -----
    A new graph is created, of the same class as G.  It is recommended
    that G and H be either both directed or both undirected.

    The nodes of G are relabeled 0 to len(G)-1, and the nodes of H are
    relabeled len(G) to len(G)+len(H)-1.
    """
    R1 = nx.convert_node_labels_to_integers(G)
    R2 = nx.convert_node_labels_to_integers(H, first_label=len(R1))
    R = union(R1, R2)
    R.name = "disjoint_union( %s, %s )" % (G.name, H.name)
    return R
Example #3
    def setUp(self):
        # G is the example graph in Figure 1 from Batagelj and
        # Zaversnik's paper titled An O(m) Algorithm for Cores
        # Decomposition of Networks, 2003,
        # http://arXiv.org/abs/cs/0310049.  With nodes labeled as
        # shown, the 3-core is given by nodes 1-8, the 2-core by nodes
        # 9-16, the 1-core by nodes 17-20 and node 21 is in the
        # 0-core.
        t1 = nx.convert_node_labels_to_integers(nx.tetrahedral_graph(), 1)
        t2 = nx.convert_node_labels_to_integers(t1, 5)
        G = nx.union(t1, t2)
        G.add_edges_from([(3, 7), (2, 11), (11, 5), (11, 12), (5, 12),
                          (12, 19), (12, 18), (3, 9), (7, 9), (7, 10),
                          (9, 10), (9, 20), (17, 13), (13, 14), (14, 15),
                          (15, 16), (16, 13)])
        G.add_node(21)
        self.G = G

        # Create the graph H resulting from the degree sequence
        # [0, 1, 2, 2, 2, 2, 3] when using the Havel-Hakimi algorithm.

        degseq = [0, 1, 2, 2, 2, 2, 3]
        H = nx.havel_hakimi_graph(degseq)
        mapping = {6: 0, 0: 1, 4: 3, 5: 6, 3: 4, 1: 2, 2: 5}
        self.H = nx.relabel_nodes(H, mapping)
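A short sanity check of the structure described in the comment above (a sketch, assuming G was built as in setUp; nx.core_number maps each node to the largest k such that the node belongs to the k-core):

import networkx as nx

core = nx.core_number(G)
assert all(core[n] == 3 for n in range(1, 9))   # the 3-core: nodes 1-8
assert core[21] == 0                            # isolated node 21 is in the 0-core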
def null_model(version):
    net = nx.DiGraph(globals()["TRN"][version])
    flips = globals()["flip_num"]
    # convert_node_labels_to_integers returns a new graph, so re-assign it
    net = nx.convert_node_labels_to_integers(net, ordering="sorted",
            label_attribute="element")
    rewirer = NetworkRewiring()
    (rnd_net, flip_rate) = rewirer.randomise(net, flip=flips, copy=False)
    return (rnd_net, flip_rate)
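The re-assignment above matters because convert_node_labels_to_integers returns a new graph rather than relabeling in place; a minimal sketch:

import networkx as nx

g = nx.Graph([("a", "b")])
nx.convert_node_labels_to_integers(g)       # result discarded: g is unchanged
g = nx.convert_node_labels_to_integers(g)   # g now has nodes 0 and 1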
def TheAlgorithm(G):
    dColor = Dcolor(G)
    partialColoring = list()
     
    #Compute chi(G) (using brute force)
    k = len(dColor.color())
    
    thisStableSet = FindStrongStableSet(G)
    hasStrongStableSet = thisStableSet is not None
    while hasStrongStableSet:
        #thisStableSet = FindStrongStableSet(G)
        partialColoring.append(list(thisStableSet))
        #Remove this stable set from the graph
        for thisStableVertex in thisStableSet:
            G.remove_node(thisStableVertex)
            
        thisStableSet = FindStrongStableSet(G)
        hasStrongStableSet = thisStableSet is not None
              
    #check for induced C7
    graphToTest = convert_node_labels_to_integers(G, 0, ordering='default', label_attribute=None)
    stillHasInducedC7 = induced_subgraph(graphToTest, make_cycle(CYCLE_LENGTH)) is not None
    graphToTest.clear()

    while stillHasInducedC7:
        thisStableSet = FindSimpleStableSet(G)
        partialColoring.append(thisStableSet)
        for thisStableVertex in thisStableSet:
            G.remove_node(thisStableVertex)
            
        graphToTest = convert_node_labels_to_integers(G, 0, ordering='default', label_attribute=None)
        if induced_subgraph(graphToTest, make_cycle(CYCLE_LENGTH)) is None:
            stillHasInducedC7 = False
        graphToTest.clear()
            
    """        
    At this point, there does not exist a strong stable set of size 3, because there is no C7.
    This means that G is now a perfect graph.
    """
    t = chromatic_number(G)
 
    #Find the chromatic number of our partial graph of stable sets
    s = len(partialColoring)
     
    return k == (s + t)
def generate_random_slice(slice_id,num_populations,edgeweight,max_x_coord,max_y_coord):
    """
    Generate a NetworkX graph object with num_populations nodes, located in random places around the coordinate
    represented by the centroid tuple, and cluster spread as variance.  Nodes are wired together in a complete
    graph.  All of the nodes belong to lineage #1 by default.

    :param num_populations:
    :param edgeweight:
    :param centroid_range_tuple:
    :param cluster_spread:
    :return:
    """
    g = nx.empty_graph(num_populations)
    start_node_id = (slice_id - 1) * num_populations
    log.debug("nodes labeled: %s", range(start_node_id, start_node_id + num_populations))
    # convert_node_labels_to_integers returns a new graph; re-assign it
    g = nx.convert_node_labels_to_integers(g, first_label=start_node_id)

    for id in g.nodes():
        xcoord = 0.0
        ycoord = 0.0

        while True:
            xcoord = np.random.random_integers(0, max_x_coord, size=1).astype(np.int64)[0]
            ycoord = np.random.random_integers(0, max_y_coord, size=1).astype(np.int64)[0]

            location = (int(xcoord), int(ycoord))
            if location not in location_cache:
                location_cache.add(location)
                break

        # log.debug("node %s at %s,%s",id,xcoord,ycoord)
        g.node[id]['xcoord'] = str(xcoord)
        g.node[id]['ycoord'] = str(ycoord)
        lab = "assemblage-"
        lab += str(xcoord)
        lab += "-"
        lab += str(ycoord)
        g.node[id]['label'] = lab
        g.node[id]['level'] = "None"
        g.node[id]['cluster_id'] = 1
        g.node[id]['lineage_id'] = 1
        g.node[id]['appears_in_slice'] = slice_id
        if slice_id == 1:
            g.node[id]['parent_node'] = 'initial'

    # now we wire up the edges in the slice
    assign_distance_weighted_edges_to_slice(g, max_x_coord,max_y_coord)

    assign_uniform_intracluster_weights(g, edgeweight)
    assign_node_distances(g)

    return g
Example #7
def isomorphic(xmrs1, xmrs2):
    g1 = nx.convert_node_labels_to_integers(
        xmrs1._graph, label_attribute='node_label'
    )
    g2 = nx.convert_node_labels_to_integers(
        xmrs2._graph, label_attribute='node_label'
    )
    return nx.is_isomorphic(
        g1,
        g2,
        node_match=xmrs_node_match,
        edge_match=xmrs_edge_match
    )
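A hedged sketch of the relabel-then-match pattern used above, with a hypothetical matcher standing in for xmrs_node_match:

import networkx as nx

def node_label_match(a, b):
    # compare the original labels preserved by label_attribute
    return a['node_label'] == b['node_label']

g1 = nx.convert_node_labels_to_integers(nx.path_graph(["a", "b", "c"]),
                                        label_attribute='node_label')
g2 = nx.convert_node_labels_to_integers(nx.path_graph(["c", "b", "a"]),
                                        label_attribute='node_label')
print(nx.is_isomorphic(g1, g2, node_match=node_label_match))  # True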
def enumerate_all_subgraphs_upto_size_k_parallel(document_graph, k, num_of_workers=4):
    """
    returns all subgraphs of a DiscourseDocumentGraph (i.e. a MultiDiGraph)
    with up to k nodes. This is a trivially parallelized version of
    enumerate_all_subgraphs_upto_size_k()
    """
    document_nodes = len(document_graph)
    if k > document_nodes:
        k = document_nodes

    int_graph = nx.convert_node_labels_to_integers(nx.DiGraph(document_graph),
                                                   first_label=1,
                                                   label_attribute='node_id')

    pool = Pool(processes=num_of_workers) # number of CPUs
    results = [pool.apply_async(enumerate_all_size_k_subgraphs, args=(int_graph, i))
                for i in xrange(1, k+1)]
    pool.close()
    pool.join()

    subgraphs = []
    for result in results:
        tmp_result = result.get()
        if isinstance(tmp_result, list):
            subgraphs.extend(tmp_result)
        else:
            subgraphs.append(tmp_result)
    return subgraphs
Example #9
def main(args):
    """
    Entry point.
    """
    if len(args) != 2:
        sys.exit(__doc__ %{"script_name" : args[0].split("/")[-1]})

    # Load the simulation parameters.
    params = json.load(open(args[1], "r"))
    network_params = params["network_params"]

    # Setup the network.
    G = networkx.read_graphml(network_params["args"]["path"])
    G = networkx.convert_node_labels_to_integers(G)

    # Load the attack sequences.
    fname = network_params["args"]["path"].replace(".graphml", ".pkl")
    attack_sequences = pickle.load(open(fname, "rb"))

    # Carry out the requested number of trials of the disease dynamics and 
    # compute basic statistics of the results.
    Sm, Im, Rm = numpy.array([0.0]), numpy.array([0.0]), numpy.array([0.0])
    for t in range(1, params["trials"] + 1):
        S, I, R = single_trial(G, params, attack_sequences)
        Sm, S = extend(Sm, S)
        Im, I = extend(Im, I)
        Rm, R = extend(Rm, R)
        Sm += (S - Sm) / t
        Im += (I - Im) / t
        Rm += (R - Rm) / t

    # Print the averaged results to STDOUT.
    for i in range(len(Sm)):
        print "%.3f\t%.3f\t%.3f" %(Sm[i], Im[i], Rm[i])
Example #10
def mergenodes(a, b):
    global G
    
    #Iterate through haplotypes on second node
    for key, value in G.node[b]['hap'].items():
        #If haplotype exists in dictionary of node a. Add weight to dictionary
        if key in G.node[a]['hap']:
            G.node[a]['hap'][key] += value
        #Otherwise add key and value to dictionary of node a
        else:
            G.node[a]['hap'].update({key:value})

    #Move all incoming edges of b to a
    for i in G.in_edges(b, data=True, keys=True):
        G.add_edge(i[0],a,a=i[3]['a'],weight=i[3]['weight'])
        G.remove_edge(i[0],i[1],key=i[2])

    #Remove node b from G
    G.remove_node(b)
        
    #Remove node from gl
    for i in gl:
        if b <= i:
            gl[gl.index(i)] = gl[gl.index(i)] - 1

    #Relabel nodes so that they are consecutive integers
    G = nx.convert_node_labels_to_integers(G, first_label=1, ordering="sorted")
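The 4-tuples indexed above come from in_edges with keys=True on a multigraph; a small sketch of that shape:

import networkx as nx

g = nx.MultiDiGraph()
g.add_edge("x", "b", a="A", weight=1.0)
for u, v, key, data in g.in_edges("b", data=True, keys=True):
    print(u, v, key, data["weight"])   # x b 0 1.0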
def random_shell_graph(constructor, create_using=None, seed=None):
    """Return a random shell graph for the constructor given.

    Parameters
    ----------
    constructor: a list of three-tuples 
        (n,m,d) for each shell starting at the center shell.
    n : int
        The number of nodes in the shell
    m : int
        The number or edges in the shell
    d : float
        The ratio of inter-shell (next) edges to intra-shell edges.
        d=0 means no intra-shell edges; use d=1 for the last shell.
    create_using : graph, optional (default Graph)
        The graph instance used to build the graph.
    seed : int, optional
        Seed for random number generator (default=None).   
      
    Examples
    --------
    >>> constructor = [(10, 20, 0.8), (20, 40, 0.8)]
    >>> G = nx.random_shell_graph(constructor)

    """
    if create_using is not None and create_using.is_directed():
        raise nx.NetworkXError("Directed Graph not supported")
    G = empty_graph(0, create_using)
    G.name = "random_shell_graph(constructor)"

    if seed is not None:
        random.seed(seed)

    glist = []
    intra_edges = []
    nnodes = 0
    # create gnm graphs for each shell
    for (n, m, d) in constructor:
        inter_edges = int(m * d)
        intra_edges.append(m - inter_edges)
        g = nx.convert_node_labels_to_integers(gnm_random_graph(n, inter_edges), first_label=nnodes)
        glist.append(g)
        nnodes += n
        G = nx.operators.union(G, g)

    # connect the shells randomly
    for gi in range(len(glist) - 1):
        nlist1 = glist[gi].nodes()
        nlist2 = glist[gi + 1].nodes()
        total_edges = intra_edges[gi]
        edge_count = 0
        while edge_count < total_edges:
            u = random.choice(nlist1)
            v = random.choice(nlist2)
            if u == v or G.has_edge(u, v):
                continue
            else:
                G.add_edge(u, v)
                edge_count = edge_count + 1
    return G
Example #12
def graph_example_1():
    G = nx.convert_node_labels_to_integers(nx.grid_graph([5, 5]),
                                           label_attribute='labels')
    rlabels = nx.get_node_attributes(G, 'labels')
    labels = {v: k for k, v in rlabels.items()}

    for nodes in [(labels[(0, 0)], labels[(1, 0)]),
                  (labels[(0, 4)], labels[(1, 4)]),
                  (labels[(3, 0)], labels[(4, 0)]),
                  (labels[(3, 4)], labels[(4, 4)])]:
        new_node = G.order() + 1
        # Petersen graph is triconnected
        P = nx.petersen_graph()
        G = nx.disjoint_union(G, P)
        # Add two edges between the grid and P
        G.add_edge(new_node + 1, nodes[0])
        G.add_edge(new_node, nodes[1])
        # K5 is 4-connected
        K = nx.complete_graph(5)
        G = nx.disjoint_union(G, K)
        # Add three edges between P and K5
        G.add_edge(new_node + 2, new_node + 11)
        G.add_edge(new_node + 3, new_node + 12)
        G.add_edge(new_node + 4, new_node + 13)
        # Add another K5 sharing a node
        G = nx.disjoint_union(G, K)
        nbrs = G[new_node + 10]
        G.remove_node(new_node + 10)
        for nbr in nbrs:
            G.add_edge(new_node + 17, nbr)
        G.add_edge(new_node + 16, new_node + 5)

    G.name = 'Example graph for connectivity'
    return G
    def __init__(self, graph_file, n_cascades=1, p_init=0.1):
        """Set up a SIR simulation class.
        """

        if isinstance(graph_file, str):
            super(SIRSim, self).__init__(graph_file, n_cascades)
            # Use integer labels to index into the infection times vector.
            self.G = nx.convert_node_labels_to_integers(self.G)
        elif isinstance(graph_file, (nx.DiGraph, nx.Graph)):
            self.G = graph_file
            self.n_cascades = n_cascades

        # Every node starts off susceptible.
        self.susceptible = set(self.G.nodes())
        self.infected = set()
        self.recovered = set()

        self.last_infected = set()

        # Mark the timesteps.
        self.t = 0

        # Array of infection times for each node.
        self.infection_times = np.ndarray(len(self.G))
        self.infection_times.fill(np.inf)

        self.p_init = p_init
        self.DEBUG = False
        self.initialize_graph()
Example #14
	def __update_structure(self):
		imported_graph = nx.read_gexf(self.file_path)

		# nx.DiGraph subclasses nx.Graph, so an isinstance check cannot
		# detect directedness; ask the graph directly instead
		if imported_graph.is_directed():
			raise Exception("Imported graph is not undirected")

		self.structure = nx.convert_node_labels_to_integers(imported_graph)
Example #15
def main(args):
    """
    Entry point.
    """
    if len(args) == 0:
        print "Usage: python disease.py <params file>"
        sys.exit(1)

    # Load the simulation parameters.
    params = json.load(open(args[0], "r"))
    network_params = params["network_params"]

    # Setup the network.
    if network_params["name"] == "read_graphml":
        G = networkx.read_graphml(network_params["args"]["path"])
        G = networkx.convert_node_labels_to_integers(G)
    else:
        G = getattr(networkx, network_params["name"])(**network_params["args"])

    # Carry out the requested number of trials of the disease dynamics and 
    # average the results.
    Sm, Im, Rm, Rv = 0.0, 0.0, 0.0, 0.0
    for t in range(1, params["trials"] + 1):
        S, I, R = single_trial(G, params)
        Rm_prev = Rm
        Sm += (S - Sm) / t
        Im += (I - Im) / t
        Rm += (R - Rm) / t
        Rv += (R - Rm) * (R - Rm_prev)

    # Print the average
    print("%.3f\t%.3f\t%.3f\t%.3f" \
          %(Sm, Im, Rm, (Rv / params["trials"]) ** 0.5))
def extract_triad_motif_significance_profile(network, num_rand_instances=10, num_rewirings=None):
    """
    Computes the triad motif significance profile of the input network.

    Arguments:
        network => The input network (can be directed or undirected).
        num_rand_instances => The number of randomly-rewired network instances used when computing
        z-score values.
        num_rewirings => The number of edge rewirings performed when randomizing the network.

    Returns:
        A fixed-size numpy array where each index corresponds to a predefined unique triad motif
        and where the value at each index represents the normalized z-score for the average
        over- or underexpression of a triad motif in the network.
    """

    # Make sure the network labels are encoded as integer IDs (makes everything easier).
    network = nx.convert_node_labels_to_integers(network)

    # Build an array of normalized motif expression z-scores (indices indicate unique motifs).
    significance_profile = compute_normalized_triad_motif_z_scores(
        network, num_rand_instances=num_rand_instances, num_rewirings=num_rewirings
    )

    return significance_profile
Example #17
    def _retrieve_skycoords(V):
        coords_l = []
        # Accessing the borders one by one. At this step, V_subgraphs contains a list of cycles
        # (i.e. one describing the external border of the MOC component and several describing the holes
        # found in the MOC component).
        V_subgraphs = nx.connected_component_subgraphs(V)
        for v in V_subgraphs:
            # Compute the MST for each cycle
            v = nx.convert_node_labels_to_integers(v)
            mst = nx.minimum_spanning_tree(v)
            # Get one end of the span tree by looping over its node and checking if the degree is one
            src = None
            for (node, deg) in mst.degree():
                if deg == 1:
                    src = node
                    break

            # Get the unordered lon and lat
            ra = np.asarray(list(nx.get_node_attributes(v, 'ra').values()))
            dec = np.asarray(list(nx.get_node_attributes(v, 'dec').values()))
            coords = np.vstack((ra, dec)).T
            # Get the ordering from the MST
            ordering = np.asarray(list(nx.dfs_preorder_nodes(mst, src)))
            # Order the coords
            coords = coords[ordering]
            # Get a skycoord containing N coordinates computed from the Nx2 `coords` array
            coords = SkyCoord(coords, unit="deg")
            coords_l.append(coords)

        return coords_l
def flatten_graph(graph, allow_loops, allow_multi):
    '''
    
    Takes an input graph and returns a version w/ or w/o loops and 
    multiedges.
    
    | Args:
    |     G (networkx_class): The original graph.
    |     allow_loops (bool): True only if loops are allowed in output graph.
    |     allow_multi (bool): True only if multiedges are allowed in output graph.
    
    | Returns:
    |     A graph with or without multiedges and/or self-loops, as specified. 
        
    '''
    
    graph = nx.convert_node_labels_to_integers(graph)  
    
    if allow_loops and allow_multi:
        return nx.MultiGraph(graph)
    elif allow_loops and not allow_multi:
        return nx.Graph(graph)
    elif not allow_multi and not allow_loops:
        G = nx.Graph(graph)
    else:
        G = nx.MultiGraph(graph)
        
        
    for e in G.selfloop_edges():
        G.remove_edge(*e)
    if G.selfloop_edges() != []:
        print "Cannot remove all self-loops"

    return G        
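A hedged usage sketch (assuming a NetworkX version where selfloop_edges() is still a graph method, as the function above requires):

import networkx as nx

m = nx.MultiGraph([(0, 1), (0, 1), (1, 1)])      # parallel edge plus a self-loop
flat = flatten_graph(m, allow_loops=False, allow_multi=False)
print(flat.edges())                              # [(0, 1)]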
Example #19
def load_shp(shp_path):
    """ loads a shapefile into a networkx based GeoGraph object

    Args:
        shp_path:  string path to a line or point shapefile

    Returns:
        geograph:  GeoGraph

    """

    # NOTE:  if shp_path is unicode io doesn't work for some reason
    shp_path = shp_path.encode('ascii', 'ignore')
    g = nx.read_shp(shp_path)
    coords = dict(enumerate(g.nodes()))

    driver = ogr.GetDriverByName('ESRI Shapefile')
    shp = driver.Open(shp_path)
    layer = shp.GetLayer()

    spatial_ref = layer.GetSpatialRef()
    proj4 = None
    if not spatial_ref:
        if gm.is_in_lon_lat(coords):
            proj4 = gm.PROJ4_LATLONG
        else:
            warnings.warn("Spatial Reference could not be set for {}".
                format(shp_path))

    else:
        proj4 = spatial_ref.ExportToProj4()

    g = nx.convert_node_labels_to_integers(g)

    return GeoGraph(srs=proj4, coords=coords, data=g)
Example #20
def main(args):
    """
    Entry point.
    """
    if len(args) != 2:
        sys.exit(__doc__ %{"script_name" : args[0].split("/")[-1]})

    # Load the simulation parameters.
    params = json.load(open(args[1], "r"))
    network_params = params["network_params"]

    # Setup the network.
    G = networkx.read_graphml(network_params["args"]["path"])
    G = networkx.convert_node_labels_to_integers(G)

    # Load the attack sequences.
    fname = network_params["args"]["path"].replace(".graphml", ".pkl")
    attack_sequences = pickle.load(open(fname, "rb"))
    
    # Carry out the requested number of trials of the disease dynamics and 
    # average the results.
    Sm, Im, Rm, Rv = 0.0, 0.0, 0.0, 0.0
    for t in range(1, params["trials"] + 1):
        S, I, R = single_trial(G, params, attack_sequences)
        Rm_prev = Rm
        Sm += (S - Sm) / t
        Im += (I - Im) / t
        Rm += (R - Rm) / t
        Rv += (R - Rm) * (R - Rm_prev)

    # Print the average
    print("%.3f\t%.3f\t%.3f\t%.3f" \
          %(Sm, Im, Rm, (Rv / params["trials"]) ** 0.5))
Example #21
def getNodeIntDict(graph):
	ints = nx.convert_node_labels_to_integers(graph, label_attribute='old_name')
	nodeIntList = ints.nodes()
	nodeIntDict = {}	
	for n in nodeIntList:
		nodeIntDict[(ints.node[n]['old_name'])] = n
	return nodeIntDict
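A minimal sketch of the round-trip this helper performs (shown in the newer NetworkX attribute style, ints.nodes[n], where the function above uses the older ints.node[n]):

import networkx as nx

g = nx.Graph([("alice", "bob")])
ints = nx.convert_node_labels_to_integers(g, label_attribute='old_name')
mapping = {ints.nodes[n]['old_name']: n for n in ints.nodes()}
print(mapping)   # e.g. {'alice': 0, 'bob': 1}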
Example #22
 def plot_celltype_graph_3d(self, filename="celltypes_graph_3d.png"):
     """Some eyecandi useful for presentations."""
     if not has_mayavi:
         return
     numeric_graph = nx.convert_node_labels_to_integers(self.__celltype_graph)
     pos = nx.spring_layout(numeric_graph, dim=3)
     xyz = numpy.array([pos[v] for v in numeric_graph])
     scalars = [self.__celltype_graph.node[vertex]["count"] for vertex in self.__celltype_graph]
     fig = mlab.figure(1, bgcolor=(0, 0, 0))
     mlab.clf()
     points = mlab.points3d(
         xyz[:, 0],
         xyz[:, 1],
         xyz[:, 2],
         scalars,
         scale_factor=0.1,
         scale_mode="none",
         colormap="summer",
         opacity=0.4,
         transparent=True,
         resolution=20,
     )
     points.mlab_source.dataset.lines = numpy.array(numeric_graph.edges())
     points.mlab_source.update()
     # mlab.pipeline.surface(points, color=(1,1,1),
     #                       representation='wireframe',
     #                       line_width=2,
     #                       name='synapses')
     tube = mlab.pipeline.tube(points, tube_radius=0.01)
     mlab.pipeline.surface(tube, color=(0.8, 0.8, 0.8))
     mlab.savefig(filename, size=(1280, 800), figure=fig)
     print "Mayavi celltype graph saved in", filename
     mlab.show()
Example #23
    def save_graph(self, graphname, fmt='edgelist'):
        """
        Saves the graph to disk

        **Positional Arguments:**

                graphname:
                    - Filename for the graph

        **Optional Arguments:**

                fmt:
                    - Output graph format
        """
        self.g.graph['ecount'] = nx.number_of_edges(self.g)
        g = nx.convert_node_labels_to_integers(self.g, first_label=1)
        if fmt == 'edgelist':
            nx.write_weighted_edgelist(g, graphname, encoding='utf-8')
        elif fmt == 'gpickle':
            nx.write_gpickle(g, graphname)
        elif fmt == 'graphml':
            nx.write_graphml(g, graphname)
        else:
            raise ValueError('edgelist, gpickle, and graphml currently supported')
Example #24
def draw_tree(H):  # plots a graph H with its different spanning trees
    G = NX.convert_node_labels_to_integers(H)
    T1 = generate_BFS_spanning_tree(G, 0)
    T2 = generate_DFS_spanning_tree(G, 0)
    T3 = make_random_spanning_tree(G)
    T4 = makeNormalTree(G.size())
    
    global number_of_figs
    figure(number_of_figs)
    number_of_figs += 1
    
    P.subplot(2, 3, 1)
    title("The graph")
    NX.draw(G, node_color='y', node_size=100)
    P.subplot(2, 3, 2)
    title("BFS")
    NX.draw(T1, node_color='g', node_size=100)
    P.subplot(2, 3, 3)
    title("DFS")
    NX.draw(T2, node_color='m', node_size=100)
    P.subplot(2, 3, 4)
    title("Random")
    NX.draw(T3, node_color='m', node_size=100)
    P.subplot(2, 3, 5)
    title("Normal")
    NX.draw(T4, node_color='y', node_size=150)
    def importGexf(self, url):

        # TODO once files are stored in a standard upload directory this will need to be changed
        import platform
        if platform.system() == 'Windows':
            PATH = 'c:\\inetpub\\wwwroot\\pydev\\systemshock\\modellingengine\\fincat\\parameters\\'
        else:
            PATH = '/var/lib/geonode/src/GeoNodePy/geonode/modellingengine/fincat/parameters/'

        G = nx.read_gexf(PATH + url)

        # ensure the nodes are labelled with integers starting from 0
        # TODO might need to start from current number of nodes in G
        G = nx.convert_node_labels_to_integers(G, first_label=0)

        for node in G.nodes(data=True):
            nodeid = node[0] #node array index 0 is the node id, index 1 is the attribute list
            attributes = node[1]
            attributes['guid'] = nodeid
            if 'wkt' in attributes:
                attributes['geometry'] = self.WKTtoGeoJSON(attributes['wkt'])

        for edge in G.edges(data=True):
            edgeid = unicode(edge[0]) + '-' + unicode(edge[1])
            attributes = edge[2]
            attributes['guid'] = edgeid

        self.layergraphs.append(G)  # add the new layer graph to the overall network
        return True
Example #26
 def postprocess_transformer_out(graph):
     # 1. the original graph needs to be directed
     ograph = _edge_to_vertex_transform(graph.graph['original'])
     graph.graph['original'] = rna.expanded_rna_graph_to_digraph(ograph)
     # 2. our convention is that node ids must not overlap; comply by
     #    optimistically renaming them to a high integer range
     graph = nx.convert_node_labels_to_integers(graph, first_label=1000)
     return graph
Example #27
def generateGraphFunction(H):
    G = NX.convert_node_labels_to_integers(H)
    func = [0 for i in G.nodes()]
    which_used = 0
    if (generation_mode & __DFS == __DFS):
        which_used += 1
        for i in G.nodes():
            lf = generate_DFS_level_function(G, i)
            for j in range(len(lf)):
                func[j] += lf[j]
                
    if (generation_mode & __BFS == __BFS):
        which_used += 1
        for i in G.nodes():
            lf = generate_BFS_level_function(G, i)
            for j in range(len(lf)):
                func[j] += lf[j]

    if (generation_mode & __RANDOM == __RANDOM):
        which_used += 1
        for i in G.nodes():
            lf = generate_random_level_function(G)
            for j in range(len(lf)):
                func[j] += lf[j]

    # average over every level function that was generated; with no mode
    # bits set nothing was accumulated, so divide by the order alone
    if generation_mode == 0:
        ordr = float(G.order())
    else:
        ordr = float(which_used * G.order())
        
    for i in G.nodes():
        func[i] /= ordr

    return func
Example #28
def main():
    # First half of the script calculates the partitions, and the metric used
    # to identify good candidate partitions for using in the exploded view; 
    # the ratio of extern tie-internal ties/total network edges
    
    # G=NX.generators.barabasi_albert_graph(550,2)
    G = NX.read_edgelist('test_network.edgelist', create_using=NX.Graph())
    G = NX.convert_node_labels_to_integers(G, first_label=0)  # for consistent record keeping
    partitions = generate_network_clusters(G)
    external, internal, ei_ratio = external_internal_ties(G, partitions)
    # Plot E-I ratio and save
    P.plot(ei_ratio.values(),ls='-',marker='.',color='r')
    P.savefig('ei_plot.png')
    #P.show()
    #P.savefig('ei_plot.png',dpi=100)
    # Looking for large jumps in the graph; those will be candidate partitions
    time.sleep(10)
    # Once the candidate partitions have been identified, we use UbiGraph to 
    # display exploded view
    S=open_ubigraph_server()    # Open connection to XML-RPC UbiGraph Server
    edges=build_ubigraph(G,S)         # Build network in UbiGraph
    time.sleep(20)
    edge_ref,external_edges=exploded_view(G,S,edges,partitions[20],repulsion=0.20738) # Choose partition and display 'exploded view'
    time.sleep(20)
    rebuild(S,edge_ref,external_edges)
def GenerateAllGraphsWithManyXNoY(xCardinalityUpperBound):

    t = range(1, xCardinalityUpperBound)
    graphConfigSet = set(product(set(t), repeat=CYCLE_LENGTH))

    for thisGraphConfiguration in graphConfigSet:
        myGraph = ConstructBaseGraph()
        for thisSetIndex in range(0,CYCLE_LENGTH):
            if thisGraphConfiguration[thisSetIndex] >= 2:
                myGraph = AddXSet(myGraph, thisGraphConfiguration[thisSetIndex] - 1, False, thisSetIndex)
                       
        if not (GIsHFree(myGraph, FORBIDDEN_SUBGRAPHS)):
            print("ERROR!")
            f = File(DIRECTORY, G = myGraph, logger = MY_LOGGER, base="C5-")
            f.save()
            exit()
            
        result = TheAlgorithm(myGraph)
        
        if result:
            print("Conjecture Holds.")
        else:
            print("Conjecture Fails!")
            myGraph = convert_node_labels_to_integers(myGraph, 0, ordering='default', label_attribute=None)
            f = File(DIRECTORY, G = myGraph, logger = MY_LOGGER, base="C5-")
            f.save()
            
        myGraph.clear()

    return
    def __init__(self, graph_file, n_cascades=1, mu=0.1):
        """Set up a
        """
        super(SISim, self).__init__(graph_file, n_cascades)

        # Use integer labels to index into the infection times vector.
        self.G = nx.convert_node_labels_to_integers(self.G)

        # Every node starts off susceptible.
        self.susceptible = set(self.G.nodes())
        self.infected = set()
        self.last_infected = set()
        self.stable_count = 0

        # Mark the timesteps.
        self.t = 0

        # Array of infection times for each node.
        self.infection_times = np.ndarray(len(self.G))
        self.infection_times.fill(np.inf)

        self.mu = mu
        self.DEBUG = False

        self.initialize_graph()
Example #31
def load_network(filename):
    path = Path(__file__).parent / "./data/{}".format(filename)
    return nx.convert_node_labels_to_integers(nx.read_gml(str(path), label='id')) # map node names to integers (0:n-1) [because indexing]

g_metabolism = load_network("metabolism_afulgidus.gml")
metabolism_in_degree = [degree for node, degree in g_metabolism.in_degree]
mean_in_degree = sum(metabolism_in_degree) / g_metabolism.number_of_nodes()

metabolism_null = nx.fast_gnp_random_graph(g_metabolism.number_of_nodes(), mean_in_degree / g_metabolism.number_of_nodes(), directed=True)

g_karate = load_network("karate.gml")
g_yeast = load_network("yeast_spliceosome.gml")
g_grass = load_network("grass_web.gml")
path = Path(__file__).parent / "./data/p.pacificus_neural.synaptic_1.graphml"
g_multi = nx.convert_node_labels_to_integers(nx.read_graphml(str(path))) # map node names to integers (0:n-1) [because indexing]
g_neural = nx.Graph()                     # G will be a simple graph
g_neural.add_edges_from(g_multi.edges())        # G is now a simplified Gmulti (tricky :)

G_30 = nx.fast_gnp_random_graph(30, 3 / 30, directed=True)

G_300 = nx.fast_gnp_random_graph(300, 3 / 300)

block_nodes = [10, 15, 10, 15]

# Create an ordered structure
block_matrix = [
    [10 / block_nodes[0], 5 / block_nodes[0], 1 / block_nodes[0], 0],
    [5 / block_nodes[0], 10 / block_nodes[1], 5 / block_nodes[1], 1 / block_nodes[1]],
    [1 / block_nodes[0], 5 / block_nodes[1], 10 / block_nodes[2], 5 / block_nodes[2]],
    [0, 1 / block_nodes[1], 5 / block_nodes[2], 10 / block_nodes[3]]
]
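The sizes/probability-matrix pair above matches the inputs expected by nx.stochastic_block_model; a hedged sketch of how it could be consumed (the original snippet is truncated here, so this continuation is an assumption):

import networkx as nx

# assumption: the block structure above is meant for a stochastic block model
G_blocks = nx.stochastic_block_model(block_nodes, block_matrix, seed=42)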
Example #32
# -*- coding: utf-8 -*-
"""
Created on Wed Jan  8 11:02:23 2020

@author: BrianFrench
"""
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
from gurobipy import *
Grid = nx.read_gml("Bus30WithData.gml")
Grid = nx.convert_node_labels_to_integers(Grid)
PowerSub = nx.read_gml("Bus30WithData.gml")
PowerSub = nx.convert_node_labels_to_integers(PowerSub)
for i in PowerSub.nodes:
    for j in PowerSub.nodes:
        if PowerSub.has_edge(i, j, 1):
            PowerSub.remove_edge(i, j, 1)
Edges = list(range(0,len(PowerSub.edges)))
Nodes = list(range(0,len(Grid.nodes)))
model = Model("mip1")
#define Variables
#Decision Variables
PG = model.addVars(Nodes,vtype=GRB.CONTINUOUS, name = "PG",lb = 0)
##State Variables
W_l = model.addVars(Edges, vtype = GRB.BINARY, name = "W_l")
W_n = model.addVars(Nodes, vtype = GRB.BINARY, lb = 0, ub=1, name = "W_n")
Theta = model.addVars(Nodes, vtype = GRB.CONTINUOUS, name = "Theta")
PowerIJ = model.addVars(Edges, vtype = GRB.CONTINUOUS, name = "PowerIJ")
Example #33
def load_graphs(dataset_str):
    node_labels = [None]
    edge_labels = [None]
    idx_train = [None]
    idx_val = [None]
    idx_test = [None]

    if dataset_str == 'grid':
        graphs = []
        features = []
        for _ in range(1):
            graph = nx.grid_2d_graph(20, 20)
            graph = nx.convert_node_labels_to_integers(graph)

            feature = np.identity(graph.number_of_nodes())
            graphs.append(graph)
            features.append(feature)

    elif dataset_str == 'communities':
        graphs = []
        features = []
        node_labels = []
        edge_labels = []
        for i in range(1):
            community_size = 20
            community_num = 20
            p = 0.01

            graph = nx.connected_caveman_graph(community_num, community_size)

            count = 0

            for (u, v) in graph.edges():
                if random.random() < p:  # rewire the edge
                    x = random.choice(list(graph.nodes))
                    if graph.has_edge(u, x):
                        continue
                    graph.remove_edge(u, v)
                    graph.add_edge(u, x)
                    count += 1
            print('rewire:', count)

            n = graph.number_of_nodes()
            label = np.zeros((n, n), dtype=int)
            for u in list(graph.nodes):
                for v in list(graph.nodes):
                    if u // community_size == v // community_size and u > v:
                        label[u, v] = 1
            rand_order = np.random.permutation(graph.number_of_nodes())
            feature = np.identity(graph.number_of_nodes())[:, rand_order]
            graphs.append(graph)
            features.append(feature)
            edge_labels.append(label)

    elif dataset_str == 'protein':

        graphs_all, features_all, labels_all = Graph_load_batch(
            name='PROTEINS_full')
        features_all = (features_all - np.mean(
            features_all, axis=-1, keepdims=True)) / np.std(
                features_all, axis=-1, keepdims=True)
        graphs = []
        features = []
        edge_labels = []
        for graph in graphs_all:
            n = graph.number_of_nodes()
            label = np.zeros((n, n), dtype=int)
            for i, u in enumerate(graph.nodes()):
                for j, v in enumerate(graph.nodes()):
                    if labels_all[u - 1] == labels_all[v - 1] and u > v:
                        label[i, j] = 1
            if label.sum() > n * n / 4:
                continue

            graphs.append(graph)
            edge_labels.append(label)

            idx = [node - 1 for node in graph.nodes()]
            feature = features_all[idx, :]
            features.append(feature)

        print('final num', len(graphs))

    elif dataset_str == 'email':

        with open('data/email.txt', 'rb') as f:
            graph = nx.read_edgelist(f)

        label_all = np.loadtxt('data/email_labels.txt')
        graph_label_all = label_all.copy()
        graph_label_all[:, 1] = graph_label_all[:, 1] // 6

        for edge in list(graph.edges()):
            if graph_label_all[int(edge[0])][1] != graph_label_all[int(
                    edge[1])][1]:
                graph.remove_edge(edge[0], edge[1])

        comps = [
            comp for comp in nx.connected_components(graph) if len(comp) > 10
        ]
        graphs = [graph.subgraph(comp) for comp in comps]

        edge_labels = []
        features = []

        for g in graphs:
            n = g.number_of_nodes()
            feature = np.ones((n, 1))
            features.append(feature)

            label = np.zeros((n, n), dtype=int)
            for i, u in enumerate(g.nodes()):
                for j, v in enumerate(g.nodes()):
                    if label_all[int(u)][1] == label_all[int(v)][1] and i > j:
                        label[i, j] = 1
            edge_labels.append(label)

    elif dataset_str == 'ppi':
        dataset_dir = 'data/ppi'
        print("Loading data...")
        G = json_graph.node_link_graph(
            json.load(open(dataset_dir + "/ppi-G.json")))
        edge_labels_internal = json.load(
            open(dataset_dir + "/ppi-class_map.json"))
        edge_labels_internal = {
            int(i): l
            for i, l in edge_labels_internal.items()
        }

        train_ids = [n for n in G.nodes()]
        train_labels = np.array([edge_labels_internal[i] for i in train_ids])
        if train_labels.ndim == 1:
            train_labels = np.expand_dims(train_labels, 1)

        print("Using only features..")
        feats = np.load(dataset_dir + "/ppi-feats.npy")
        ## Logistic gets thrown off by big counts, so log transform num comments and score
        feats[:, 0] = np.log(feats[:, 0] + 1.0)
        feats[:, 1] = np.log(feats[:, 1] - min(np.min(feats[:, 1]), -1))
        feat_id_map = json.load(open(dataset_dir + "/ppi-id_map.json"))
        feat_id_map = {int(id): val for id, val in feat_id_map.items()}
        train_feats = feats[[feat_id_map[id] for id in train_ids]]

        node_dict = {}
        for id, node in enumerate(G.nodes()):
            node_dict[node] = id

        comps = [comp for comp in nx.connected_components(G) if len(comp) > 10]
        graphs = [G.subgraph(comp) for comp in comps]

        id_all = []
        for comp in comps:
            id_temp = []
            for node in comp:
                id = node_dict[node]
                id_temp.append(id)
            id_all.append(np.array(id_temp))

        features = [train_feats[id_temp, :] + 0.1 for id_temp in id_all]

    else:
        raise NotImplementedError

    return graphs, features, edge_labels, node_labels, idx_train, idx_val, idx_test
Example #34
                                 distance=1500,
                                 project_utm=False,
                                 return_crs=False)
south = station_box[1]
north = station_box[0]
east = station_box[2]
west = station_box[3]

# Get graph
G = ox.graph_from_bbox(north,
                       south,
                       east,
                       west,
                       network_type='drive',
                       retain_all=True)
G = nx.convert_node_labels_to_integers(G)
nodes, edges = ox.graph_to_gdfs(G)

# Concatenate lats and longs to save time
lats = queries.start_lat
lats = lats.append(queries.end_lat, ignore_index=True)
longs = queries.start_long
longs = longs.append(queries.end_long, ignore_index=True)

# Find nearest nodes
nearest_node_ids = ox.get_nearest_nodes(G, X=longs, Y=lats, method='balltree')

# Extract nearest node ids
start_nn = nearest_node_ids[0:nRecords]
end_nn = nearest_node_ids[nRecords:2 * nRecords]
Example #35
def graph_cluster_EM(graph, bipartite=False, use_rev_len=False, **kwarg):
    """
    Returns a graph where nodes have cluster attribute

    Arguments:
    N_CLUSTERS -- Number of clusters to find
    EM_ITER -- How long the EM iterations should continue
    EM_REP -- How many times should EM restart with random initialization?
    bipartite --
    """

    if bipartite: N_CLUSTERS_DFLT = (2, 2)
    else: N_CLUSTERS_DFLT = 2
    N_CLUSTERS = kwarg.get('N_CLUSTERS', N_CLUSTERS_DFLT)
    EM_ITER = kwarg.get('EM_ITER', 10)
    EM_REP = kwarg.get('EM_REP', 6)

    graph_orig = graph.copy()
    # remove singleton nodes
    _gd = graph.degree()
    singletons = set([n for n in graph.nodes() if _gd[n] == 0])
    graph.remove_nodes_from(singletons)

    adj_mat_N = len(graph)

    if bipartite:
        part1 = filter(lambda n: graph.node[n]['part'] == 1, graph.nodes())
        part2 = filter(lambda n: graph.node[n]['part'] == 2, graph.nodes())
        # labels to integers which puts part1 before part2
        node_seq = part1 + part2
        nx.relabel_nodes(graph,
                         dict(zip(node_seq, range(adj_mat_N))),
                         copy=False)
    else:
        node_seq = graph.nodes()
        # convert_node_labels_to_integers returns a new graph; re-assign it
        graph = nx.convert_node_labels_to_integers(graph)

    # graph.adjacency list returns in order of graph.nodes() which are
    # not necessarily sorted, so we make the adj_list_r sorted by node ids
    adj_list_r, adj_list_c = zeros(adj_mat_N,
                                   dtype=object), zeros(adj_mat_N,
                                                        dtype=object)
    for n, adj_l in zip(graph.nodes(), graph.adjacency_list()):
        adj_list_r[n] = adj_l
    if isinstance(graph, nx.DiGraph):
        for n, adj_l in zip(graph.nodes(), graph.reverse().adjacency_list()):
            adj_list_c[n] = adj_l
    else:
        for n, adj_l in zip(graph.nodes(), graph.adjacency_list()):
            adj_list_c[n] = adj_l

    # If review length are taken into account
    if use_rev_len:
        logging.info('Computing log(review length) lists')
        revlen = zeros(adj_mat_N, dtype=object)
        for i in range(adj_mat_N):
            rvl_n = map(lambda x: len(x[2]['reviewTxt']),
                        graph.edges(i, data=True))
            rvl = log(array(rvl_n))
            revlen[i] = rvl
    else:
        revlen = None

    logging.info('Started cluster detection')
    if bipartite:
        ghat = _graph_cluster_EM_bipartite(adj_lists=(adj_list_r, adj_list_c),
                                           Cs=N_CLUSTERS,
                                           EM_ITER=EM_ITER,
                                           EM_REP=EM_REP,
                                           parts_sizes=(len(part1),
                                                        len(part2)),
                                           revlen=revlen)
    else:
        ghat = _graph_cluster_EM(adj_lists=(adj_list_r, adj_list_c),
                                 C=N_CLUSTERS,
                                 EM_ITER=EM_ITER,
                                 EM_REP=EM_REP,
                                 revlen=revlen)
    logging.info('Finished cluster detection')

    for i, n in enumerate(node_seq):
        graph_orig.node[n]['cluster'] = int(ghat[i])

    return graph_orig
Example #36
def identify_balanced_rearrangements(H):

    # Create matching graph
    #  - duplicate nodes, one set red, one set blue
    #  - add transverse edges (v_red, v_blue)
    #  - for each original edge:
    #    - add (u_red, v_red) for red edges
    #    - add (u_blue, v_blue) for blue edges
    #    - replicate edge costs

    transverse_edge_cost = 1.

    M = networkx.Graph()
    for node in H.nodes_iter():
        transverse_edge = []
        for color in (1, -1):
            colored_node = node + (color, )
            M.add_node(colored_node)
            transverse_edge.append(colored_node)
        M.add_edge(*transverse_edge, cost=transverse_edge_cost)

    for edge in H.edges_iter():
        for multi_edge_idx, edge_attr in H[edge[0]][edge[1]].iteritems():
            color = edge_attr['color']
            colored_node_1 = edge[0] + (color, )
            colored_node_2 = edge[1] + (color, )
            M.add_edge(colored_node_1,
                       colored_node_2,
                       attr_dict=edge_attr,
                       cost=0.)

    M1 = networkx.convert_node_labels_to_integers(M,
                                                  label_attribute='node_tuple')

    # Min cost perfect matching
    edges = networkx.get_edge_attributes(M1, 'cost')
    for edge in edges.keys():
        if edge[0] == edge[1]:
            raise Exception('self loop {}'.format(M1[edge[0]][edge[1]]))
    min_cost_edges = blossomv.blossomv.min_weight_perfect_matching(edges)

    # Remove unselected edges
    assert set(min_cost_edges).issubset(edges.keys())
    remove_edges = set(edges.keys()).difference(min_cost_edges)
    M2 = M1.copy()
    M2.remove_edges_from(remove_edges)

    # Re-create original graph with matched edges
    M3 = networkx.relabel_nodes(M2,
                                mapping=networkx.get_node_attributes(
                                    M2, 'node_tuple'))

    # Create subgraph of H with only selected edges
    H1 = networkx.Graph()
    for edge in M3.edges_iter():
        edge_attr = M3[edge[0]][edge[1]]
        node_1 = edge[0][:-1]
        node_2 = edge[1][:-1]
        if node_1 == node_2:
            continue
        if H1.has_edge(node_1, node_2):
            H1.remove_edge(node_1, node_2)
        else:
            H1.add_edge(node_1, node_2, attr_dict=edge_attr)

    return H1
def torrents_and_ferraro_graph():
    G = nx.convert_node_labels_to_integers(nx.grid_graph([5, 5]),
                                           label_attribute='labels')
    rlabels = nx.get_node_attributes(G, 'labels')
    labels = dict((v, k) for k, v in rlabels.items())

    for nodes in [(labels[(0, 4)], labels[(1, 4)]),
                  (labels[(3, 4)], labels[(4, 4)])]:
        new_node = G.order() + 1
        # Petersen graph is triconnected
        P = nx.petersen_graph()
        G = nx.disjoint_union(G, P)
        # Add two edges between the grid and P
        G.add_edge(new_node + 1, nodes[0])
        G.add_edge(new_node, nodes[1])
        # K5 is 4-connected
        K = nx.complete_graph(5)
        G = nx.disjoint_union(G, K)
        # Add three edges between P and K5
        G.add_edge(new_node + 2, new_node + 11)
        G.add_edge(new_node + 3, new_node + 12)
        G.add_edge(new_node + 4, new_node + 13)
        # Add another K5 sharing a node
        G = nx.disjoint_union(G, K)
        nbrs = G[new_node + 10]
        G.remove_node(new_node + 10)
        for nbr in nbrs:
            G.add_edge(new_node + 17, nbr)
        # Commenting this out makes the graph not biconnected !!
        # This stupid mistake made one reviewer very angry :P
        G.add_edge(new_node + 16, new_node + 8)

    for nodes in [(labels[(0, 0)], labels[(1, 0)]),
                  (labels[(3, 0)], labels[(4, 0)])]:
        new_node = G.order() + 1
        # Petersen graph is triconnected
        P = nx.petersen_graph()
        G = nx.disjoint_union(G, P)
        # Add two edges between the grid and P
        G.add_edge(new_node + 1, nodes[0])
        G.add_edge(new_node, nodes[1])
        # K5 is 4-connected
        K = nx.complete_graph(5)
        G = nx.disjoint_union(G, K)
        # Add three edges between P and K5
        G.add_edge(new_node + 2, new_node + 11)
        G.add_edge(new_node + 3, new_node + 12)
        G.add_edge(new_node + 4, new_node + 13)
        # Add another K5 sharing two nodes
        G = nx.disjoint_union(G, K)
        nbrs = G[new_node + 10]
        G.remove_node(new_node + 10)
        for nbr in nbrs:
            G.add_edge(new_node + 17, nbr)
        nbrs2 = G[new_node + 9]
        G.remove_node(new_node + 9)
        for nbr in nbrs2:
            G.add_edge(new_node + 18, nbr)

    G.name = 'Example graph for connectivity'
    return G
Example #38
import networkx as nx
from networkx.algorithms.community import k_clique_communities
G = nx.complete_graph(5)
K5 = nx.convert_node_labels_to_integers(G,first_label=2)
G.add_edges_from(K5.edges())

print (G.nodes)
print (G.edges)
c = list(k_clique_communities(G, 5))
print (list(c[0]))

print (list(k_clique_communities(G, 6)))
Example #39
def get_connectivity_graph(qubits, topology='grid', param=None):
    attempt = 0
    while attempt < 10:
        if topology == 'grid':
            # assume square grid
            side = int(np.sqrt(qubits))
            G = nx.grid_2d_graph(side, side)
        elif topology == 'erdosrenyi':
            if param is None:
                print("Erdos Renyi graph needs parameter p.")
            G = nx.fast_gnp_random_graph(qubits, param)
        elif topology == 'turan':
            if param is None:
                print("Turan graph needs parameter r.")
            G = nx.turan_graph(qubits, param)
        elif topology == 'regular':
            if param is None:
                print("d-regular graph needs parameter d.")
            G = nx.random_regular_graph(param, qubits)
        elif topology == 'cycle':
            G = nx.cycle_graph(qubits)
        elif topology == 'wheel':
            G = nx.wheel_graph(qubits)
        elif topology == 'complete':
            G = nx.complete_graph(qubits)
        elif topology == 'hexagonal':
            # assume square hexagonal grid, node = 2(m+1)**2-2
            side = int(np.sqrt((qubits + 2) / 2)) - 1
            G = nx.hexagonal_lattice_graph(side, side)
        elif topology == 'path':
            G = nx.path_graph(qubits)
        elif topology == 'ibm_falcon':
            # https://www.ibm.com/blogs/research/2020/07/qv32-performance/
            # 27 qubits
            G = nx.empty_graph(27)
            G.name = "ibm_falcon"
            G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (3, 5), (5, 6),
                              (6, 7), (7, 8), (8, 9), (8, 10), (10, 11),
                              (1, 12), (6, 13), (11, 14), (12, 15), (15, 16),
                              (16, 17), (17, 18), (17, 19), (19, 20), (13, 20),
                              (20, 21), (21, 22), (22, 23), (22, 24), (24, 25),
                              (14, 25), (25, 26)])
        elif topology == 'ibm_penguin':
            # 20 qubits
            G = nx.empty_graph(20)
            G.name = "ibm_penguin"
            G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (0, 5), (5, 6),
                              (6, 7), (7, 8), (8, 9), (4, 9), (5, 10),
                              (10, 11), (11, 12), (7, 12), (12, 13), (13, 14),
                              (9, 14), (10, 15), (15, 16), (16, 17), (17, 18),
                              (18, 19), (14, 19)])
        elif topology == '1express':  # path with express channels
            G = nx.convert_node_labels_to_integers(nx.path_graph(qubits))
            G.add_edges_from([(s, s + param)
                              for s in range(0, qubits - param, param // 2)])
        elif topology == '2express':  # grid with express channels
            side = int(np.sqrt(qubits))
            G = nx.convert_node_labels_to_integers(nx.grid_2d_graph(
                side, side))
            G.add_edges_from([
                (s, s + param) for x in range(side)
                for s in range(x * side, x * side + side - param, param // 2)
            ])  # rows
            G.add_edges_from([
                (s, s + param * side) for y in range(side)
                for s in range(y, y + side * (side - param), param // 2 * side)
            ])  # cols
        else:
            print("Topology %s not recognized; use empty graph instead." %
                  topology)
            G = nx.empty_graph(qubits)
        if nx.is_connected(G) or nx.is_empty(G):
            break
        else:
            attempt += 1

    return nx.convert_node_labels_to_integers(G)
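A small sketch of what the final conversion does: grid_2d_graph (and hexagonal_lattice_graph) label nodes with (row, col) tuples, and convert_node_labels_to_integers flattens them to 0..n-1:

import networkx as nx

G = nx.grid_2d_graph(2, 2)
print(sorted(G.nodes()))     # [(0, 0), (0, 1), (1, 0), (1, 1)]
G = nx.convert_node_labels_to_integers(G)
print(sorted(G.nodes()))     # [0, 1, 2, 3]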
Example #40
def read_file(graph_file,
              directed=False,
              rescale=False,
              return_edgearray=False,
              relabel_nodes=True,
              logger=None,
              **kwargs):
    edgearray = np.loadtxt(graph_file)

    edgearray = edgearray[
        edgearray[:, -1].argsort()]  # IMPORTANT: sort timestamps
    edgearray[:, -1] = edgearray[:, -1] - min(
        edgearray[:, -1])  # set earliest timestamp as 0

    edges = edgearray[:, :2].astype(np.int)
    mask = edges[:, 0] != edges[:, 1]  # self->self is not allowed
    edges = edges[mask]
    edgearray = edgearray[mask]
    edges = edges.tolist()  # must be a list, or an adjacency np array
    timestamps = edgearray[:, -1].astype(np.float)

    if directed:
        G = nx.DiGraph(edges)
    else:
        G = nx.Graph(edges)

    if rescale:
        assert kwargs.get('scale', None) is not None
        scale = kwargs['scale']
        timestamps = timestamps / scale

    # add timestamps
    for i, edge in enumerate(edges):
        if G[edge[0]][edge[1]].get('timestamp', None) is None:
            G[edge[0]][edge[1]]['timestamp'] = [timestamps[i]]
        else:
            if timestamps[i] > G[edge[0]][edge[1]]['timestamp'][-1]:
                G[edge[0]][edge[1]]['timestamp'].append(timestamps[i])

    # relabel all nodes using integers from 0 to N-1
    old_new_dict = dict(zip(list(G.nodes), range(G.number_of_nodes())))
    if relabel_nodes:
        G = nx.convert_node_labels_to_integers(G,
                                               first_label=0,
                                               ordering='default')
    G.maxt = timestamps.max()

    if logger is not None:
        logger.info('# nodes: {}, # edges: {}, # inters: {}'.format(
            G.number_of_nodes(), G.number_of_edges(), len(edgearray)))

    # read embeddings
    if kwargs.get('emb_file', None) is not None:
        emb_file = kwargs['emb_file']
        embeddings = np.loadtxt(emb_file, skiprows=1)
        nodeids = embeddings[:, 0].astype(np.int)
        embeddings = embeddings[:, 1:]
        embedding_matrix = np.zeros_like(embeddings)
        for i, nid in enumerate(nodeids):
            embedding_matrix[old_new_dict[nid]] = embeddings[i]
    else:
        embedding_matrix = None

    if return_edgearray:
        return G, embedding_matrix, edgearray
    else:
        return G, embedding_matrix
Example #41
def get_structure_components(bonded_structure,
                             inc_orientation=False,
                             inc_site_ids=False,
                             inc_molecule_graph=False):
    """
    Gets information on the components in a bonded structure.

    Correctly determines the dimensionality of all structures, regardless of
    structure type or improper connections due to periodic boundary conditions.

    Requires a StructureGraph object as input. This can be generated using one
    of the NearNeighbor classes. For example, using the CrystalNN class::

        bonded_structure = CrystalNN().get_bonded_structure(structure)

    Based on the modified breadth-first-search algorithm described in:

    P. Larsen, M. Pandey, M. Strange, K. W. Jacobsen, 2018, arXiv:1808.02114

    Args:
        bonded_structure (StructureGraph): A structure with bonds, represented
            as a pymatgen structure graph. For example, generated using the
            CrystalNN.get_bonded_structure() method.
        inc_orientation (bool, optional): Whether to include the orientation
            of the structure component. For surfaces, the Miller index is given,
            for one-dimensional structures, the direction of the chain is given.
        inc_site_ids (bool, optional): Whether to include the site indices
            of the sites in the structure component.
        inc_molecule_graph (bool, optional): Whether to include MoleculeGraph
            objects for zero-dimensional components.

    Returns:
        (list of dict): Information on the components in a structure as a list
        of dictionaries with the keys:

        - "structure_graph": A pymatgen StructureGraph object for the
            component.
        - "dimensionality": The dimensionality of the structure component as an
            int.
        - "orientation": If inc_orientation is `True`, the orientation of the
            component as a tuple. E.g. (1, 1, 1)
        - "site_ids": If inc_site_ids is `True`, the site indices of the
            sites in the component as a tuple.
        - "molecule_graph": If inc_molecule_graph is `True`, the site a
            MoleculeGraph object for zero-dimensional components.
    """
    import networkx as nx  # optional dependency therefore not top level import

    comp_graphs = (
        bonded_structure.graph.subgraph(c)
        for c in nx.weakly_connected_components(bonded_structure.graph))

    components = []
    for graph in comp_graphs:
        dimensionality, vertices = calculate_dimensionality_of_site(
            bonded_structure, list(graph.nodes())[0], inc_vertices=True)

        component = {'dimensionality': dimensionality}

        if inc_orientation:
            if dimensionality in [1, 2]:
                vertices = np.array(vertices)

                g = vertices.sum(axis=0) / vertices.shape[0]

                # run singular value decomposition
                _, _, vh = np.linalg.svd(vertices - g)

                # get direction (the first row of vh is the best-fit line;
                # the third row is the normal to the best-fit plane)
                index = 2 if dimensionality == 2 else 0
                orientation = get_integer_index(vh[index, :])
            else:
                orientation = None

            component['orientation'] = orientation

        if inc_site_ids:
            component['site_ids'] = tuple(graph.nodes())

        if inc_molecule_graph and dimensionality == 0:
            component['molecule_graph'] = zero_d_graph_to_molecule_graph(
                bonded_structure, graph)

        component_structure = Structure.from_sites(
            [bonded_structure.structure[n] for n in sorted(graph.nodes())])

        sorted_graph = nx.convert_node_labels_to_integers(graph,
                                                          ordering="sorted")
        component_graph = StructureGraph(
            component_structure,
            graph_data=json_graph.adjacency_data(sorted_graph))
        component['structure_graph'] = component_graph

        components.append(component)
    return components
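# A minimal usage sketch, assuming pymatgen is installed; the input file name
# and the printed keys are illustrative only.
from pymatgen.core import Structure
from pymatgen.analysis.local_env import CrystalNN

structure = Structure.from_file("POSCAR")  # hypothetical structure file
bonded_structure = CrystalNN().get_bonded_structure(structure)
components = get_structure_components(bonded_structure, inc_orientation=True)
for comp in components:
    print(comp["dimensionality"], comp["orientation"])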
Example #42
def create_from_networkx(nx_graph,
                         ntype,
                         etype,
                         edge_id_attr_name='id',
                         node_attrs=None,
                         edge_attrs=None,
                         restrict_format='any'):
    """Create a heterograph that has only one set of nodes and edges.

    Parameters
    ----------
    nx_graph : NetworkX graph
    ntype : str
        Type name for both source and destination nodes
    etype : str
        Type name for edges
    edge_id_attr_name : str, optional
        Key name for edge ids in the NetworkX graph. If not found, we
        will consider the graph not to have pre-specified edge ids. (Default: 'id')
    node_attrs : list of str
        Names for node features to retrieve from the NetworkX graph (Default: None)
    edge_attrs : list of str
        Names for edge features to retrieve from the NetworkX graph (Default: None)
    restrict_format : 'any', 'coo', 'csr', 'csc', optional
        Force the storage format.  Default: 'any' (i.e. let DGL decide what to use).

    Returns
    -------
    g : DGLHeteroGraph
    """
    if not nx_graph.is_directed():
        nx_graph = nx_graph.to_directed()

    # Relabel nodes using consecutive integers
    nx_graph = nx.convert_node_labels_to_integers(nx_graph, ordering='sorted')

    # nx_graph.edges(data=True) returns src, dst, attr_dict
    if nx_graph.number_of_edges() > 0:
        has_edge_id = edge_id_attr_name in next(iter(
            nx_graph.edges(data=True)))[-1]
    else:
        has_edge_id = False

    if has_edge_id:
        num_edges = nx_graph.number_of_edges()
        src = np.zeros((num_edges, ), dtype=np.int64)
        dst = np.zeros((num_edges, ), dtype=np.int64)
        for u, v, attr in nx_graph.edges(data=True):
            eid = attr[edge_id_attr_name]
            src[eid] = u
            dst[eid] = v
    else:
        src = []
        dst = []
        for e in nx_graph.edges:
            src.append(e[0])
            dst.append(e[1])
    src = utils.toindex(src)
    dst = utils.toindex(dst)
    num_nodes = nx_graph.number_of_nodes()
    g = create_from_edges(src,
                          dst,
                          ntype,
                          etype,
                          ntype,
                          num_nodes,
                          num_nodes,
                          validate=False,
                          restrict_format=restrict_format)

    # handle features
    # copy attributes
    def _batcher(lst):
        if F.is_tensor(lst[0]):
            return F.cat([F.unsqueeze(x, 0) for x in lst], dim=0)
        else:
            return F.tensor(lst)

    if node_attrs is not None:
        # mapping from feature name to a list of tensors to be concatenated
        attr_dict = defaultdict(list)
        for nid in range(g.number_of_nodes()):
            for attr in node_attrs:
                attr_dict[attr].append(nx_graph.nodes[nid][attr])
        for attr in node_attrs:
            g.ndata[attr] = _batcher(attr_dict[attr])

    if edge_attrs is not None:
        # mapping from feature name to a list of tensors to be concatenated
        attr_dict = defaultdict(lambda: [None] * g.number_of_edges())
        # each defaultdict value is initialized to be a list of None
        # None here serves as placeholder to be replaced by feature with
        # corresponding edge id
        if has_edge_id:
            num_edges = g.number_of_edges()
            for _, _, attrs in nx_graph.edges(data=True):
                if attrs[edge_id_attr_name] >= num_edges:
                    raise DGLError('Expect the pre-specified edge ids to be'
                                   ' smaller than the number of edges --'
                                   ' {}, got {}.'.format(
                                       num_edges, attrs[edge_id_attr_name]))
                for key in edge_attrs:
                    attr_dict[key][attrs[edge_id_attr_name]] = attrs[key]
        else:
            # XXX: assuming networkx iteration order is deterministic
            #      so the order is the same as graph_index.from_networkx
            for eid, (_, _, attrs) in enumerate(nx_graph.edges(data=True)):
                for key in edge_attrs:
                    attr_dict[key][eid] = attrs[key]
        for attr in edge_attrs:
            for val in attr_dict[attr]:
                if val is None:
                    raise DGLError(
                        'Not all edges have attribute {}.'.format(attr))
            g.edata[attr] = _batcher(attr_dict[attr])

    return g
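# Illustrative call (hedged: assumes the DGL internals imported above, such as
# utils and create_from_edges, are in scope; ntype/etype names are arbitrary).
import networkx as nx

nxg = nx.path_graph(3)  # undirected; to_directed() doubles each edge
g = create_from_networkx(nxg, ntype='_N', etype='_E')
print(g.number_of_nodes(), g.number_of_edges())  # 3 nodes, 4 directed edges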
# per-ratio accumulators
list_of_mean_triangular_clustering_coefficient = []
list_of_mean_square_clustering_coefficient = []
list_of_mean_number_of_cliques = []
list_of_mean_average_shortest_pathlen = []
list_of_edge_triangle_ratio = [i * 0.02 for i in range(51)]

for edge_triangle_ratio in list_of_edge_triangle_ratio:
    list_of_triangular_clustering_coefficient = []
    list_of_number_of_cliques = []
    list_of_average_shortest_pathlen = []
    list_of_square_clustering_coefficient = []
    for i in range(100):
        deg_seq = [pd.poisson_random_variable(6) for i in range(1000)]
        G = nt.newman_clustering_configuration(deg_seq, edge_triangle_ratio)
        # connected_component_subgraphs was removed in networkx 2.4; take the
        # largest connected component explicitly
        Gcc = G.subgraph(max(nx.connected_components(G), key=len))
        Gcc = nx.convert_node_labels_to_integers(Gcc, first_label=0)

        list_of_square_clustering_coefficient.append(
            np.mean(list(nx.square_clustering(Gcc).values())))
        list_of_triangular_clustering_coefficient.append(
            np.mean(list(nx.clustering(G).values())))
        list_of_number_of_cliques.append(
            len([i for i in nx.find_cliques(G) if len(i) >= 4]))
        list_of_average_shortest_pathlen.append(
            nx.average_shortest_path_length(Gcc))

    list_of_mean_triangular_clustering_coefficient.append(
        float(np.mean(list_of_triangular_clustering_coefficient)))
    list_of_mean_square_clustering_coefficient.append(
        float(np.mean(list_of_square_clustering_coefficient)))
    list_of_mean_number_of_cliques.append(
        float(np.mean(list_of_number_of_cliques)))
    list_of_mean_average_shortest_pathlen.append(
        float(np.mean(list_of_average_shortest_pathlen)))
G = nx.Graph()
edgelist_with_weight = [(edge[0], edge[1], {
    "weight": weight
}) for edge, weight in edgelist.items()]
G.add_edges_from(edgelist_with_weight)

S = max(nx.connected_components(G), key=len)

Gc = G.subgraph(S).copy()

#%%
## Now we will save the undirected largest connected component as a numbered edgelist to do the mercator embedding.

## We will first create the numbered nodes to send to mercator.
Gc_integer_labeled = nx.convert_node_labels_to_integers(Gc,
                                                        first_label=0,
                                                        label_attribute="name")

output_dir = "../outputs/mercator/input"
output_basename = "comention_giant_connected_component"
output_full_fname = "%s/%s_%s.edge" % (output_dir, output_code,
                                       output_basename)
nx.readwrite.edgelist.write_edgelist(Gc_integer_labeled,
                                     output_full_fname,
                                     data=False)
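## As an illustrative aside: label_attribute="name" stores each original label
## on the relabeled node, so the original identifiers can be recovered from
## the integer-labeled graph later (name_of is a hypothetical variable name).
name_of = nx.get_node_attributes(Gc_integer_labeled, "name")
print(name_of[0])  # original label of the node written as 0 in the edgelist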

#%%
## Now we are taking the output edgelist as an input for the mercator embedding and create the embedding
## with metadata as a json file
custom_seed = 42
input_dir = output_dir
def torrents_and_ferraro_graph():
    # Graph from http://arxiv.org/pdf/1503.04476v1 p.26
    G = nx.convert_node_labels_to_integers(
        nx.grid_graph([5, 5]),
        label_attribute='labels',
    )
    rlabels = nx.get_node_attributes(G, 'labels')
    labels = {v: k for k, v in rlabels.items()}

    for nodes in [(labels[(0, 4)], labels[(1, 4)]),
                  (labels[(3, 4)], labels[(4, 4)])]:
        new_node = G.order() + 1
        # Petersen graph is triconnected
        P = nx.petersen_graph()
        G = nx.disjoint_union(G, P)
        # Add two edges between the grid and P
        G.add_edge(new_node + 1, nodes[0])
        G.add_edge(new_node, nodes[1])
        # K5 is 4-connected
        K = nx.complete_graph(5)
        G = nx.disjoint_union(G, K)
        # Add three edges between P and K5
        G.add_edge(new_node + 2, new_node + 11)
        G.add_edge(new_node + 3, new_node + 12)
        G.add_edge(new_node + 4, new_node + 13)
        # Add another K5 sharing a node
        G = nx.disjoint_union(G, K)
        nbrs = G[new_node + 10]
        G.remove_node(new_node + 10)
        for nbr in nbrs:
            G.add_edge(new_node + 17, nbr)
        # This edge makes the graph biconnected; it's
        # needed because K5s share only one node.
        G.add_edge(new_node + 16, new_node + 8)

    for nodes in [(labels[(0, 0)], labels[(1, 0)]),
                  (labels[(3, 0)], labels[(4, 0)])]:
        new_node = G.order() + 1
        # Petersen graph is triconnected
        P = nx.petersen_graph()
        G = nx.disjoint_union(G, P)
        # Add two edges between the grid and P
        G.add_edge(new_node + 1, nodes[0])
        G.add_edge(new_node, nodes[1])
        # K5 is 4-connected
        K = nx.complete_graph(5)
        G = nx.disjoint_union(G, K)
        # Add three edges between P and K5
        G.add_edge(new_node + 2, new_node + 11)
        G.add_edge(new_node + 3, new_node + 12)
        G.add_edge(new_node + 4, new_node + 13)
        # Add another K5 sharing two nodes
        G = nx.disjoint_union(G, K)
        nbrs = G[new_node + 10]
        G.remove_node(new_node + 10)
        for nbr in nbrs:
            G.add_edge(new_node + 17, nbr)
        nbrs2 = G[new_node + 9]
        G.remove_node(new_node + 9)
        for nbr in nbrs2:
            G.add_edge(new_node + 18, nbr)

    G.name = 'Example graph for connectivity'
    return G
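# Quick illustrative sanity check of the fixture above: report its size and
# vertex connectivity (values not asserted here).
G = torrents_and_ferraro_graph()
print(G.order(), G.size())
print(nx.node_connectivity(G))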
from spreading_CR import SpreadingProcess
from timeit import default_timer
import networkx as nx
from numpy.random import randint
from numpy import inf

nsample = 100000

rates = [0.01, 0.02, 0.04, 0.1, 0.2, 0.4, 1, 2, 4]

G = nx.convert_node_labels_to_integers(nx.read_edgelist('nwk/brazil.lnk'))

for j in range(len(rates)):
    sp = SpreadingProcess(list(G.edges()), rates[j], 1, 0)

    s0 = 0
    start = default_timer()
    for i in range(nsample):
        sources = randint(G.number_of_nodes())
        sp.initialize([sources], i)
        sp.evolve(inf)

        s0 += sp.get_Rnode_number_vector()[-1]
        sp.reset()
    stop = default_timer()

    print(rates[j])
    print(s0 / float(nsample) / G.number_of_nodes())
    print('time per outbreak (s)', (stop - start) / nsample)
    print()
Example #47
    def get(self):
        G = self._get()
        G = nx.convert_node_labels_to_integers(G)
        return G
Example #48
    def _crossover(self, ga, gb):
        gs = []

        ga = nx.convert_node_labels_to_integers(ga)
        edges_a = self._get_valid_edges(ga, n_double_edges=self.n_double_edges)
        if edges_a is None:
            return gs
        edges_a = self._get_selected_edges(
            ga,
            edges_a,
            fraction=self.double_edges_fraction_selected_by_importance)
        if edges_a is None:
            return gs

        gb = nx.convert_node_labels_to_integers(gb)
        edges_b = self._get_valid_edges(gb, n_double_edges=self.n_double_edges)
        if edges_b is None:
            return gs
        edges_b = self._get_selected_edges(
            gb,
            edges_b,
            fraction=self.double_edges_fraction_selected_by_importance)
        if edges_b is None:
            return gs

        for ea in edges_a:
            for eb in edges_b:
                if ea is not None and eb is not None:
                    ia, ja = ea[0]
                    ib, jb = eb[0]
                    ibu, jbu = ib + len(ga), jb + len(ga)

                    ka, la = ea[1]
                    kb, lb = eb[1]
                    kbu, lbu = kb + len(ga), lb + len(ga)

                    gu0 = nx.disjoint_union(ga, gb)
                    self._swap_edge(gu0, (ia, ja), (ibu, jbu), mode=0)
                    self._swap_edge(gu0, (ka, la), (kbu, lbu), mode=0)
                    components = list(nx.connected_components(gu0))
                    gs.extend([
                        nx.subgraph(gu0, component).copy()
                        for component in components
                    ])

                    gu0 = nx.disjoint_union(ga, gb)
                    self._swap_edge(gu0, (ia, ja), (ibu, jbu), mode=0)
                    self._swap_edge(gu0, (ka, la), (kbu, lbu), mode=1)
                    components = list(nx.connected_components(gu0))
                    gs.extend([
                        nx.subgraph(gu0, component).copy()
                        for component in components
                    ])

                    gu1 = nx.disjoint_union(ga, gb)
                    self._swap_edge(gu1, (ia, ja), (ibu, jbu), mode=1)
                    self._swap_edge(gu1, (ka, la), (kbu, lbu), mode=1)
                    components = list(nx.connected_components(gu1))
                    gs.extend([
                        nx.subgraph(gu1, component).copy()
                        for component in components
                    ])

                    gu1 = nx.disjoint_union(ga, gb)
                    self._swap_edge(gu1, (ia, ja), (ibu, jbu), mode=1)
                    self._swap_edge(gu1, (ka, la), (kbu, lbu), mode=0)
                    components = list(nx.connected_components(gu1))
                    gs.extend([
                        nx.subgraph(gu1, component).copy()
                        for component in components
                    ])

        return gs
Example #49
def initA(
    varr: xr.DataArray,
    seeds: pd.DataFrame,
    thres_corr=0.8,
    wnd=10,
    noise_freq: Optional[float] = None,
) -> xr.DataArray:
    """
    Initialize spatial footprints from seeds.

    For each input seed, this function computes the correlation between the
    fluorescence activity of the seed and that of each neighboring pixel up to
    `wnd` pixels away. It then sets all correlations below `thres_corr` to
    zero and uses the resulting correlation image as the spatial footprint of
    the seed.

    Parameters
    ----------
    varr : xr.DataArray
        Input movie data. Should have dimension "height", "width" and "frame".
    seeds : pd.DataFrame
        Dataframe of seeds.
    thres_corr : float, optional
        Threshold of correlation, below which the values will be set to zero in
        the resulting spatial footprints. By default `0.8`.
    wnd : int, optional
        Radius (in pixels) of a disk window within which correlation will be
        computed for each seed. By default `10`.
    noise_freq : float, optional
        Cut-off frequency for optional smoothing of activities before computing
        the correlation. If `None` then no smoothing will be done. By default
        `None`.

    Returns
    -------
    A : xr.DataArray
        The initial estimation of spatial footprint for each cell. Should have
        dimensions ("unit_id", "height", "width").

    See Also
    --------
    minian.cnmf.graph_optimize_corr :
        for how the correlations are computed in an out-of-core fashion
    """
    print("optimizing computation graph")
    nod_df = pd.DataFrame(
        np.array(
            list(
                itt.product(varr.coords["height"].values,
                            varr.coords["width"].values))),
        columns=["height", "width"],
    ).merge(seeds.reset_index(), how="outer", on=["height", "width"])
    seed_df = nod_df[nod_df["index"].notnull()]
    nn_tree = KDTree(nod_df[["height", "width"]], leaf_size=(2 * wnd)**2)
    nns_arr = nn_tree.query_radius(seed_df[["height", "width"]], r=wnd)
    sdg = nx.Graph()
    sdg.add_nodes_from([(i, d) for i, d in enumerate(nod_df[
        ["height", "width", "index"]].to_dict("records"))])
    for isd, nns in enumerate(nns_arr):
        cur_sd = seed_df.index[isd]
        sdg.add_edges_from([(cur_sd, n) for n in nns if n != cur_sd])
    sdg.remove_nodes_from(list(nx.isolates(sdg)))
    sdg = nx.convert_node_labels_to_integers(sdg)
    corr_df = graph_optimize_corr(varr, sdg, noise_freq)
    print("building spatial matrix")
    corr_df = corr_df[corr_df["corr"] > thres_corr]
    nod_df = pd.DataFrame.from_dict(dict(sdg.nodes(data=True)), orient="index")
    seed_df = nod_df[nod_df["index"].notnull()].astype({"index": int})
    A_ls = []
    ih_dict = (varr.coords["height"].to_series().reset_index(
        drop=True).reset_index().set_index("height")["index"].to_dict())
    iw_dict = (varr.coords["width"].to_series().reset_index(
        drop=True).reset_index().set_index("width")["index"].to_dict())
    Ashape = (varr.sizes["height"], varr.sizes["width"])
    for seed_id, sd in seed_df.iterrows():
        src_corr = corr_df[corr_df["target"] == seed_id].copy()
        src_nods = nod_df.loc[src_corr["source"]]
        src_corr["height"], src_corr["width"] = (
            src_nods["height"].values,
            src_nods["width"].values,
        )
        tgt_corr = corr_df[corr_df["source"] == seed_id].copy()
        tgt_nods = nod_df.loc[tgt_corr["target"]]
        tgt_corr["height"], tgt_corr["width"] = (
            tgt_nods["height"].values,
            tgt_nods["width"].values,
        )
        # DataFrame.append was removed in pandas 2.0; concatenate the seed's
        # own unit-correlation row instead
        cur_corr = pd.concat(
            [
                src_corr,
                tgt_corr,
                pd.DataFrame([{
                    "corr": 1,
                    "height": sd["height"],
                    "width": sd["width"]
                }]),
            ],
            ignore_index=True)
        cur_corr["iheight"] = cur_corr["height"].map(ih_dict)
        cur_corr["iwidth"] = cur_corr["width"].map(iw_dict)
        cur_A = darr.array(
            sparse.COO(cur_corr[["iheight", "iwidth"]].T,
                       cur_corr["corr"],
                       shape=Ashape))
        A_ls.append(cur_A)
    A = xr.DataArray(
        darr.stack(A_ls).map_blocks(lambda a: a.todense(), dtype=float),
        dims=["unit_id", "height", "width"],
        coords={
            "unit_id": seed_df["index"].values,
            "height": varr.coords["height"].values,
            "width": varr.coords["width"].values,
        },
    )
    return A
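# Illustrative call, assuming a movie `varr` (an xr.DataArray with dims
# "height", "width", "frame") and a `seeds` DataFrame with "height"/"width"
# columns already exist; the noise_freq value here is arbitrary.
A = initA(varr, seeds, thres_corr=0.8, wnd=10, noise_freq=0.06)
print(A.dims)  # ('unit_id', 'height', 'width'), one unit per seed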
Example #50
    def connect(self, Frag, connect_dict):
        Frag_G = deepcopy(Frag.G)
        Frag_top = deepcopy(Frag.top)

        if len(connect_dict) != 2:
            raise ValueError(
                f"size of connect dict != 2 ({len(connect_dict)})")
        # Get conditions for connection
        condition_0, condition_1 = connect_dict

        # Find nodes meeting condition
        nodes_0 = [
            node for node, node_dict in self.G.nodes(data=True)
            if dict_compare(condition_0, node_dict)
        ]
        nodes_1 = [
            node for node, node_dict in Frag_G.nodes(data=True)
            if dict_compare(condition_1, node_dict)
        ]

        # Find connecting nodes
        connect_node_0 = [[n for n in self.G[node] if n not in nodes_0]
                          for node in nodes_0]
        connect_node_0 = flatten_list(connect_node_0)
        if len(connect_node_0) != 1:
            raise ValueError(
                f"Number of connect nodes not 1 ({len(connect_node_0)})")
        connect_node_0 = connect_node_0[0]

        connect_node_1 = [[n for n in Frag_G[node] if n not in nodes_1]
                          for node in nodes_1]
        connect_node_1 = flatten_list(connect_node_1)
        if len(connect_node_1) != 1:
            raise ValueError(
                f"Number of connect nodes not 1 ({len(connect_node_1)})")
        connect_node_1 = connect_node_1[0]

        # Find connecting edges
        connect_edges_0 = [[edge for edge in self.G.edges(node)]
                           for node in nodes_0]
        connect_edges_0 = flatten_list(connect_edges_0)
        connect_edges_0 = [
            edge for edge in connect_edges_0
            if ((edge[0] not in nodes_0) or (edge[1] not in nodes_0))
        ]
        connect_dist_0 = [
            self.G[e0][e1]["distance"] for e0, e1 in connect_edges_0
        ]
        connect_dist_0 = sum(connect_dist_0) / len(connect_dist_0)

        connect_edges_1 = [[edge for edge in Frag_G.edges(node)]
                           for node in nodes_1]
        connect_edges_1 = flatten_list(connect_edges_1)
        connect_edges_1 = [
            edge for edge in connect_edges_1
            if ((edge[0] not in nodes_1) or (edge[1] not in nodes_1))
        ]
        connect_dist_1 = [
            Frag_G[e0][e1]["distance"] for e0, e1 in connect_edges_1
        ]
        connect_dist_1 = sum(connect_dist_1) / len(connect_dist_1)

        connect_dist = (connect_dist_0 + connect_dist_1) / 2.0

        A = [node["xyz"] for i, node in self.G.nodes(data=True)]
        A = np.vstack(A)

        B = [node["xyz"] for i, node in Frag_G.nodes(data=True)]
        B = np.vstack(B)

        Bprime = correct_xyz(A, B, (connect_node_0, connect_node_1),
                             connect_dist)
        xyz_mapping = {node: xyz for node, xyz in zip(Frag_G.nodes, Bprime)}
        nx.set_node_attributes(Frag_G, xyz_mapping, "xyz")

        number_mapping = {
            i: node["fragment_number"] + self.n_fragments
            for i, node in Frag_G.nodes(data=True)
        }
        nx.set_node_attributes(Frag_G, number_mapping, "fragment_number")
        self.fragments.append(Frag_G)

        n_G_nodes = self.G.number_of_nodes()
        self.G = nx.disjoint_union(self.G, Frag_G)
        self.G.add_edge(
            connect_node_0,
            connect_node_1 + n_G_nodes,
            distance=np.linalg.norm(A[connect_node_0] -
                                    Bprime[connect_node_1]),
        )

        at0 = [
            atom for atom in self.top.atoms if atom.index == connect_node_0
        ][0]
        self.top = self.top.subset([
            atom.index for atom in self.top.atoms if atom.index not in nodes_0
        ])

        at1 = [
            atom for atom in Frag_top.atoms if atom.index == connect_node_1
        ][0]
        Frag_top = Frag_top.subset([
            atom.index for atom in Frag_top.atoms if atom.index not in nodes_1
        ])

        self.top = self.top.join(Frag_top)
        self.top.add_bond(at0, at1)

        # Remove nodes
        remove_nodes = nodes_0 + [n + n_G_nodes for n in nodes_1]
        self.G.remove_nodes_from(remove_nodes)
        self.G = nx.convert_node_labels_to_integers(self.G)

        return self
Example #51
graphpath = "data/6_70_com_amazon.gml"
graph = nx.read_gml(graphpath)
edge_f = 'data/6_70_com_amazon.edgelist'
nx.write_edgelist(graph, edge_f, data=False)

edge_f = 'data/karate.edgelist'

# Specify whether the edges are directed
isDirected = False

# Load graph
G = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=isDirected)
G = G.to_directed()

G = nx.convert_node_labels_to_integers(G,
                                       first_label=0,
                                       ordering='default',
                                       label_attribute="original_label")

models = []
# models.append(GraphFactorization(d=2, max_iter=100000, eta=1*10**-4, regu=1.0))
models.append(HOPE(d=4, beta=0.01))
# models.append(LaplacianEigenmaps(d=2))
# models.append(LocallyLinearEmbedding(d=2))

for embedding in models:
    print('Num nodes: %d, num edges: %d' %
          (G.number_of_nodes(), G.number_of_edges()))
    t1 = time()
    # Learn embedding - accepts a networkx graph or file with edge list
    Y, t = embedding.learn_embedding(graph=G,
                                     edge_f=None,
Example #52
        loadpath = res_list[img_idx]['img_path'] + '_%.2d_%.2d' % (
            args.win_size, args.edge_geo_dist_thresh) + '.graph_res'
        temp_graph = nx.read_gpickle(loadpath)
        res_list[img_idx]['graph'] = temp_graph

    # make final results
    for img_idx in range(len(res_list)):

        cur_img = res_list[img_idx]['img']
        cur_conv_feats = res_list[img_idx]['conv_feats']
        cur_cnn_feat = res_list[img_idx]['cnn_feat']
        cur_cnn_feat_spatial_sizes = res_list[img_idx][
            'cnn_feat_spatial_sizes']
        cur_graph = res_list[img_idx]['graph']

        cur_graph = nx.convert_node_labels_to_integers(cur_graph)
        node_byxs = util.get_node_byx_from_graph(cur_graph,
                                                 [cur_graph.number_of_nodes()])

        if 'geo_dist_weighted' in args.edge_type:
            adj = nx.adjacency_matrix(cur_graph)
        else:
            adj = nx.adjacency_matrix(cur_graph, weight=None).astype(float)

        adj_norm = util.preprocess_graph_gat(adj)

        cur_feed_dict = \
        {
        network.imgs: cur_img,
        network.conv_feats: cur_conv_feats,
        network.node_byxs: node_byxs,
def create(args):
    ### load datasets
    graphs = []
    # synthetic graphs
    if args.graph_type == 'ladder':
        graphs = []
        for i in range(100, 201):
            graphs.append(nx.ladder_graph(i))
        args.max_prev_node = 10
    elif args.graph_type == 'ladder_small':
        graphs = []
        for i in range(2, 11):
            graphs.append(nx.ladder_graph(i))
        args.max_prev_node = 10
    elif args.graph_type == 'ladder_extra':
        graphs = []
        for i in range(1000):
            # 50 nodes in all graphs
            graphs.append(ladder_extra(6, 10))

        # Have to see what max_prev nodes is
        args.max_prev_node = 28  # Just for 6,10
        return graphs
    elif args.graph_type == 'ladder_extra_circular':
        graphs = []
        for i in range(1000):
            # 50 nodes in all graphs
            graphs.append(ladder_extra_circular(6, 10))

        # Have to see what max_prev nodes is!!
        args.max_prev_node = 28  # Just for 6,10
        return graphs
    elif args.graph_type == 'ladder_extra_full_circular':
        graphs = []
        for i in range(1000):
            # 50 nodes in all graphs
            graphs.append(ladder_extra_full_circular(6, 10))

        # Have to see what max_prev nodes is
        args.max_prev_node = 28  # Just for 6,10
        return graphs
    elif args.graph_type.startswith('layer_tree'):
        graphs = []
        width = 6
        branch = 3
        height_indx = args.graph_type.rfind('_') + 1
        height = int(args.graph_type[height_indx:])
        for i in range(1000):
            G = layered_tree(width, height, branch_factor=branch)
            graphs.append(G)

        # Max prev nodes????
        args.max_prev_node = 31  # width = 6, branch = 3
        return graphs
    elif args.graph_type.startswith('ladder_tree'):
        graphs = []
        width = 6
        branch = 2
        height_indx = args.graph_type.rfind('_') + 1
        height = int(args.graph_type[height_indx:])
        for i in range(1000):
            G = ladder_tree(width, height, branch_factor=branch)
            graphs.append(G)

        args.max_prev_node = 28  # width = 6, branch = 2
        return graphs
    elif args.graph_type.startswith('random'):
        indx_degree = int(args.graph_type.find('_')) + 1
        indx_nodes = int(args.graph_type.find('_', indx_degree))

        degree = int(args.graph_type[indx_degree:indx_nodes])
        nodes = int(args.graph_type[indx_nodes + 1:])

        graphs = []
        for i in range(1000):
            graphs.append(nx.random_regular_graph(degree, nodes))

        # Note we are only using these for testing so shouldn't need this
        return graphs
    elif args.graph_type == 'tree':
        print('Creating tree graphs')
        graphs = []
        for i in range(2, 5):
            for j in range(3, 5):
                graphs.append(nx.balanced_tree(i, j))
        args.max_prev_node = 256
    elif args.graph_type == 'tree_adversarial':
        graphs = []
        # Trees that have not been seen before
        # in the standard 'trees' dataset

        # The first set includes trees
        # that have different heights than
        # those in the training data
        heights = [2, 5, 6]
        for i in range(2, 5):
            for h in heights:
                graphs.append(nx.balanced_tree(i, h))

        args.max_prev_node = 256
        return graphs
    elif args.graph_type.startswith('tree_r_edge'):
        num_edges_removed = int(args.graph_type[-1])
        # Generate full balanced trees
        print('here')
        for i in range(2, 5):
            for j in range(3, 5):
                graph = nx.balanced_tree(i, j)
                # Get the edges so we can randomly
                # remove num_edges_removed edges
                for x in range(num_edges_removed):
                    edges = list(graph.edges())  # EdgeView is not indexable
                    edge = np.random.randint(len(edges))
                    graph.remove_edge(edges[edge][0], edges[edge][1])

                graphs.append(graph)

        args.max_prev_node = 256
        return graphs
    elif args.graph_type.startswith('rary-tree'):
        # Generate all rary-trees for a given
        # r and height of tree.
        r = int(args.graph_type[-1])  # nx.full_rary_tree expects an int
        h = 4
        graphs = []
        for n in range(1, 2**h):
            graphs.append(nx.full_rary_tree(r, n))

        args.max_prev_node = 256  # This doesn't super matter
        return graphs
    elif args.graph_type.startswith('tree_r_node'):
        # Remove n nodes from the graph
        n = int(args.graph_type[-1])
        graphs = []
        for i in range(2, 5):
            for j in range(3, 5):
                graph = nx.balanced_tree(i, j)

                for x in range(n):
                    nodes = list(graph.nodes())  # NodeView is not positionally indexable
                    node = np.random.randint(len(nodes))
                    graph.remove_node(nodes[node])

                graphs.append(graph)

        args.max_prev_node = 256
        return graphs
    elif args.graph_type == 'caveman':
        # graphs = []
        # for i in range(5,10):
        #     for j in range(5,25):
        #         for k in range(5):
        #             graphs.append(nx.relaxed_caveman_graph(i, j, p=0.1))
        graphs = []
        for i in range(2, 3):
            for j in range(30, 81):
                for k in range(10):
                    graphs.append(caveman_special(i, j, p_edge=0.3))
        args.max_prev_node = 100
    elif args.graph_type == 'caveman_small':
        # graphs = []
        # for i in range(2,5):
        #     for j in range(2,6):
        #         for k in range(10):
        #             graphs.append(nx.relaxed_caveman_graph(i, j, p=0.1))
        graphs = []
        for i in range(2, 3):
            for j in range(6, 11):
                for k in range(20):
                    graphs.append(caveman_special(i, j,
                                                  p_edge=0.8))  # default 0.8
        args.max_prev_node = 20
    elif args.graph_type == 'caveman_small_single':
        # graphs = []
        # for i in range(2,5):
        #     for j in range(2,6):
        #         for k in range(10):
        #             graphs.append(nx.relaxed_caveman_graph(i, j, p=0.1))
        graphs = []
        for i in range(2, 3):
            for j in range(8, 9):
                for k in range(100):
                    graphs.append(caveman_special(i, j, p_edge=0.5))
        args.max_prev_node = 20
    elif args.graph_type.startswith('community'):
        num_communities = int(args.graph_type[-1])
        print('Creating dataset with ', num_communities, ' communities')
        c_sizes = np.random.choice([12, 13, 14, 15, 16, 17], num_communities)
        #c_sizes = [15] * num_communities
        for k in range(3000):
            graphs.append(n_community(c_sizes, p_inter=0.01))
        args.max_prev_node = 80
    elif args.graph_type == 'grid':
        graphs = []
        for i in range(10, 20):
            for j in range(10, 20):
                graphs.append(nx.grid_2d_graph(i, j))
        args.max_prev_node = 40
    elif args.graph_type == 'grid_small':
        graphs = []
        for i in range(2, 5):
            for j in range(2, 6):
                graphs.append(nx.grid_2d_graph(i, j))
        args.max_prev_node = 15
    elif args.graph_type == 'barabasi':
        graphs = []
        for i in range(100, 200):
            for j in range(4, 5):
                for k in range(5):
                    graphs.append(nx.barabasi_albert_graph(i, j))
        args.max_prev_node = 130
    elif args.graph_type == 'barabasi_small':
        graphs = []
        for i in range(4, 21):
            for j in range(3, 4):
                for k in range(10):
                    graphs.append(nx.barabasi_albert_graph(i, j))
        args.max_prev_node = 20
    elif args.graph_type == 'grid_big':
        graphs = []
        for i in range(36, 46):
            for j in range(36, 46):
                graphs.append(nx.grid_2d_graph(i, j))
        args.max_prev_node = 90

    elif 'barabasi_noise' in args.graph_type:
        graphs = []
        for i in range(100, 101):
            for j in range(4, 5):
                for k in range(500):
                    graphs.append(nx.barabasi_albert_graph(i, j))
        graphs = perturb_new(graphs, p=args.noise / 10.0)
        args.max_prev_node = 99

    # real graphs
    elif args.graph_type == 'enzymes':
        graphs = Graph_load_batch(min_num_nodes=10,
                                  name='ENZYMES',
                                  node_attributes=True,
                                  node_labels=True)
        args.max_prev_node = 25
    elif args.graph_type == 'enzymes_small':
        graphs_raw = Graph_load_batch(min_num_nodes=10, name='ENZYMES')
        graphs = []
        for G in graphs_raw:
            if G.number_of_nodes() <= 20:
                graphs.append(G)
        args.max_prev_node = 15
    elif args.graph_type.startswith('enzymes'):
        graph_label = int(args.graph_type[-1])
        graphs = Graph_load_label(min_num_nodes=10,
                                  name='ENZYMES',
                                  node_attributes=True,
                                  node_labels=True,
                                  graph_label=graph_label)
        args.max_prev_node = 25

    elif args.graph_type == 'protein':
        graphs = Graph_load_batch(min_num_nodes=20, name='PROTEINS_full')
        args.max_prev_node = 80

    elif args.graph_type == 'DD':
        graphs = Graph_load_batch(min_num_nodes=100,
                                  max_num_nodes=500,
                                  name='DD',
                                  node_attributes=False,
                                  node_labels=True)
        args.max_prev_node = 230
    elif args.graph_type.startswith('DD'):
        graph_label = int(args.graph_type[-1])
        graphs = Graph_load_label(min_num_nodes=100,
                                  max_num_nodes=500,
                                  name='DD',
                                  node_attributes=False,
                                  node_labels=True,
                                  graph_label=graph_label)
        args.max_prev_node = 230

    elif args.graph_type == 'AIDS':  # Definitely check! Maybe train on inactive and test on active so train on 1!
        # Gotta calc this!
        graphs = Graph_load_batch(min_num_nodes=2,
                                  max_num_nodes=100,
                                  name='AIDS',
                                  node_attributes=False,
                                  node_labels=True)
        args.max_prev_node = 16
    elif args.graph_type.startswith('AIDS'):
        graph_label = int(args.graph_type[-1])
        graphs = Graph_load_label(min_num_nodes=2,
                                  max_num_nodes=100,
                                  name='AIDS',
                                  node_attributes=False,
                                  node_labels=True,
                                  graph_label=graph_label)
        args.max_prev_node = 16

    elif args.graph_type == 'Fingerprint':  # Not so great to train on because the graph classes include several graph labels
        # Gotta calc this!
        graphs = Graph_load_batch(min_num_nodes=2,
                                  max_num_nodes=26,
                                  name='Fingerprint',
                                  node_attributes=True,
                                  node_labels=False)
        args.max_prev_node = 6
    elif args.graph_type.startswith('Fingerprint'):
        graph_label = int(args.graph_type[-1])
        graphs = Graph_load_label(min_num_nodes=2,
                                  max_num_nodes=26,
                                  name='Fingerprint',
                                  node_attributes=True,
                                  node_labels=False,
                                  graph_label=graph_label)
        args.max_prev_node = 6

    elif args.graph_type == 'COLLAB':  # Interesting to try but may be quite slow
        # Gotta calc this!
        graphs = Graph_load_batch(min_num_nodes=32,
                                  max_num_nodes=492,
                                  name='COLLAB',
                                  node_attributes=False,
                                  node_labels=False)
        args.max_prev_node = 480
    elif args.graph_type.startswith('COLLAB'):
        graph_label = int(args.graph_type[-1])
        graphs = Graph_load_label(min_num_nodes=32,
                                  max_num_nodes=492,
                                  name='COLLAB',
                                  node_attributes=False,
                                  node_labels=False,
                                  graph_label=graph_label)
        args.max_prev_node = 480

    elif args.graph_type == 'IMDB-MULTI':  #Definitely try!!
        # Gotta calc this!
        graphs = Graph_load_batch(min_num_nodes=7,
                                  max_num_nodes=89,
                                  name='IMDB-MULTI',
                                  node_attributes=False,
                                  node_labels=False)
        args.max_prev_node = 86
    elif args.graph_type.startswith('IMDB-MULTI'):
        graph_label = int(args.graph_type[-1])
        graphs = Graph_load_label(min_num_nodes=7,
                                  max_num_nodes=89,
                                  name='IMDB-MULTI',
                                  node_attributes=False,
                                  node_labels=False,
                                  graph_label=graph_label)
        args.max_prev_node = 86

    elif args.graph_type == 'REDDIT-MULTI-12K':  # Not done yet may be too large to really try, Maybe want to limit max
        # Gotta calc this! # Try max = 500
        graphs = Graph_load_batch(min_num_nodes=2,
                                  max_num_nodes=500,
                                  name='REDDIT-MULTI-12K',
                                  node_attributes=False,
                                  node_labels=False)
        args.max_prev_node = 3061
    elif args.graph_type.startswith('REDDIT-MULTI-12K'):
        graph_label = int(args.graph_type[-1])
        graphs = Graph_load_label(min_num_nodes=2,
                                  max_num_nodes=3782,
                                  name='REDDIT-MULTI-12K',
                                  node_attributes=False,
                                  node_labels=False,
                                  graph_label=graph_label)
        args.max_prev_node = 3061

    elif args.graph_type == 'Letter-high':  # Could be quite interesting to look at. For example train on low distortion and test on med/high and diff letters
        # Gotta calc this!
        graphs = Graph_load_batch(min_num_nodes=2,
                                  max_num_nodes=9,
                                  name='Letter-high',
                                  node_attributes=True,
                                  node_labels=False)
        args.max_prev_node = 6
    elif args.graph_type.startswith('Letter-high'):
        graph_label = int(args.graph_type[-1])
        graphs = Graph_load_label(min_num_nodes=2,
                                  max_num_nodes=9,
                                  name='Letter-high',
                                  node_attributes=True,
                                  node_labels=False,
                                  graph_label=graph_label)
        args.max_prev_node = 6

    elif args.graph_type == 'Letter-med':
        # Gotta calc this!
        graphs = Graph_load_batch(min_num_nodes=2,
                                  max_num_nodes=9,
                                  name='Letter-med',
                                  node_attributes=True,
                                  node_labels=False)
        args.max_prev_node = 5
    elif args.graph_type.startswith('Letter-med'):
        graph_label = int(args.graph_type[-1])
        graphs = Graph_load_label(min_num_nodes=2,
                                  max_num_nodes=9,
                                  name='Letter-med',
                                  node_attributes=True,
                                  node_labels=False,
                                  graph_label=graph_label)
        args.max_prev_node = 5

    elif args.graph_type == 'Letter-low':  # For a specific letter may want to try for example the letter N
        # Gotta calc this!
        graphs = Graph_load_batch(min_num_nodes=2,
                                  max_num_nodes=8,
                                  name='Letter-low',
                                  node_attributes=True,
                                  node_labels=False)
        args.max_prev_node = 5
    elif args.graph_type.startswith('Letter-low'):
        graph_label = int(args.graph_type[-1])
        graphs = Graph_load_label(min_num_nodes=2,
                                  max_num_nodes=8,
                                  name='Letter-low',
                                  node_attributes=True,
                                  node_labels=False,
                                  graph_label=graph_label)
        args.max_prev_node = 5

    elif args.graph_type == 'citeseer':
        _, _, G = Graph_load(dataset='citeseer')
        # connected_component_subgraphs was removed in networkx 2.4
        G = G.subgraph(max(nx.connected_components(G), key=len)).copy()
        G = nx.convert_node_labels_to_integers(G)
        graphs = []
        for i in range(G.number_of_nodes()):
            G_ego = nx.ego_graph(G, i, radius=3)
            if G_ego.number_of_nodes() >= 50 and (G_ego.number_of_nodes() <=
                                                  400):
                graphs.append(G_ego)
        args.max_prev_node = 250
    elif args.graph_type == 'citeseer_small':
        _, _, G = Graph_load(dataset='citeseer')
        G = G.subgraph(max(nx.connected_components(G), key=len)).copy()
        G = nx.convert_node_labels_to_integers(G)
        graphs = []
        for i in range(G.number_of_nodes()):
            G_ego = nx.ego_graph(G, i, radius=1)
            if (G_ego.number_of_nodes() >= 4) and (G_ego.number_of_nodes() <=
                                                   20):
                graphs.append(G_ego)
        shuffle(graphs)
        graphs = graphs[0:200]
        args.max_prev_node = 15

    return graphs
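# Minimal illustrative driver; SimpleNamespace stands in for the project's
# argparse namespace, and the field names match those used above.
from types import SimpleNamespace

args = SimpleNamespace(graph_type='grid_small', max_prev_node=None, noise=0)
graphs = create(args)
print(len(graphs), args.max_prev_node)  # 12 grid graphs, max_prev_node == 15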
Example #54
def create(args):
    ### load datasets
    graphs=[]
    # synthetic graphs
    if args.graph_type=='ladder':
        graphs = []
        print("num nodes: ", 100, 201)
        for i in range(100, 201):
            graphs.append(nx.ladder_graph(i))
        args.max_prev_node = 10
    elif args.graph_type=='ladder_small':
        graphs = []
        for i in range(2, 11):
            graphs.append(nx.ladder_graph(i))
            print("len: ", len(graphs[-1]))
        args.max_prev_node = 10
    elif args.graph_type=='tree':
        graphs = []
        #print("num nodes: ", "i: ", 2,5, " j: ", 3,5)
        for i in range(2,5):
            for j in range(3,5):
                graphs.append(nx.balanced_tree(i,j))
        args.max_prev_node = 256
    elif args.graph_type=='caveman':
        # graphs = []
        # for i in range(5,10):
        #     for j in range(5,25):
        #         for k in range(5):
        #             graphs.append(nx.relaxed_caveman_graph(i, j, p=0.1))
        graphs = []
        for i in range(2, 3):
            for j in range(30, 81):
                j = 80
                for k in range(10):
                    graphs.append(caveman_special(i,j, p_edge=0.3))
        args.max_prev_node = 100
    elif args.graph_type=='caveman_2':
        graphs = []
        i = 2
        j = 20
        for k in range(300):
            graphs.append(caveman_special(c=i, k=j, p_edge=0.3))
        args.max_prev_node = 100
    elif args.graph_type=='caveman_4':
        graphs = []
        i = 4
        j = 20
        for k in range(500):
            graphs.append(caveman_special(c=i, k=j, p_edge=0.3))
        args.max_prev_node = 100
    elif args.graph_type=='caveman_small':
        # graphs = []
        # for i in range(2,5):
        #     for j in range(2,6):
        #         for k in range(10):
        #             graphs.append(nx.relaxed_caveman_graph(i, j, p=0.1))
        graphs = []
        for i in range(2, 3):
            for j in range(6, 11):
                j = 10
                for k in range(20):
                    graphs.append(caveman_special(i, 20, p_edge=0.8)) # default 0.8
        args.max_prev_node = 20
    elif args.graph_type=='caveman_small_single':
        # graphs = []
        # for i in range(2,5):
        #     for j in range(2,6):
        #         for k in range(10):
        #             graphs.append(nx.relaxed_caveman_graph(i, j, p=0.1))
        graphs = []
        for i in range(2, 3):
            for j in range(8, 9):
                for k in range(100):
                    graphs.append(caveman_special(i, j, p_edge=0.5))
        args.max_prev_node = 20
    elif args.graph_type.startswith('community'):
        num_communities = int(args.graph_type[-1])
        print('Creating dataset with ', num_communities, ' communities')
        c_sizes = np.random.choice([12, 13, 14, 15, 16, 17], num_communities)
        #c_sizes = [15] * num_communities
        for k in range(3000):
            graphs.append(n_community(c_sizes, p_inter=0.01))
        args.max_prev_node = 80
    elif args.graph_type=='grid':
        graphs = []
        for i in range(10,20):
            i = 16 #19
            for j in range(10,20):
                j = 16 #19
                graphs.append(nx.grid_2d_graph(i,j))
        args.max_prev_node = 40
    elif args.graph_type=='grid_small':
        graphs = []
        for i in range(2,5):
            for j in range(2,6):
                graphs.append(nx.grid_2d_graph(i,j))
        args.max_prev_node = 15
    elif args.graph_type=='barabasi':
        graphs = []
        for i in range(100,200):
             i = 200
             for j in range(4,5):
                 for k in range(5):
                    graphs.append(nx.barabasi_albert_graph(i,j))
        args.max_prev_node = 130
    elif args.graph_type=='barabasi_small':
        graphs = []
        for i in range(4,21):
             i = 20
             for j in range(3,4):
                 for k in range(40): #10
                    graphs.append(nx.barabasi_albert_graph(i,j))
        args.max_prev_node = 20
    elif args.graph_type=='grid_big':
        graphs = []
        for i in range(36, 46):
            for j in range(36, 46):
                graphs.append(nx.grid_2d_graph(i, j))
        args.max_prev_node = 90

    elif 'barabasi_noise' in args.graph_type:
        graphs = []
        for i in range(100,101):
            for j in range(4,5):
                for k in range(500):
                    graphs.append(nx.barabasi_albert_graph(i,j))
        graphs = perturb_new(graphs,p=args.noise/10.0)
        args.max_prev_node = 99

    # real graphs
    elif args.graph_type == 'enzymes':
        graphs= Graph_load_batch(min_num_nodes=10, name='ENZYMES')
        args.max_prev_node = 25
    elif args.graph_type == 'enzymes_small':
        graphs_raw = Graph_load_batch(min_num_nodes=10, name='ENZYMES')
        graphs = []
        for G in graphs_raw:
            if G.number_of_nodes()<=20:
                graphs.append(G)
        args.max_prev_node = 15
    elif args.graph_type == 'protein':
        graphs = Graph_load_batch(min_num_nodes=20, name='PROTEINS_full')
        args.max_prev_node = 80
    elif args.graph_type == 'DD':
        graphs = Graph_load_batch(min_num_nodes=250, max_num_nodes=500, name='DD',node_attributes=False,graph_labels=True) #min:100,max:500
        args.max_prev_node = 230
    elif args.graph_type == 'citeseer':
        adj, features, G = Graph_load(dataset='citeseer')
        # connected_component_subgraphs was removed in networkx 2.4
        G = G.subgraph(max(nx.connected_components(G), key=len)).copy()
        G = nx.convert_node_labels_to_integers(G)
        graphs = []
        #tmp_100=0; tmp_200=0; tmp_300=0; tmp_400=0;
        for i in range(G.number_of_nodes()):
            G_ego = nx.ego_graph(G, i, radius=3)
            '''
            if G_ego.number_of_nodes() >= 50 and (G_ego.number_of_nodes() <= 400):
                if G_ego.number_of_nodes() >= 100:
                  tmp_100 += 1
                if G_ego.number_of_nodes() >= 200:
                  tmp_200 += 1
                if G_ego.number_of_nodes() >= 300:
                  tmp_300 += 1
                if G_ego.number_of_nodes() >= 400:
                  tmp_400 += 1
            '''
            num_nodes = 200
            if G_ego.number_of_nodes() >= num_nodes:
                degrees = list(G_ego.degree())
                sort_by_degree = sorted(degrees, key=lambda x: x[1])
                # drop the lowest-degree nodes until num_nodes remain
                for k in range(len(sort_by_degree) - num_nodes):
                    G_ego.remove_node(sort_by_degree[k][0])
                # keep the highest-degree nodes and attach their features
                for k in range(len(sort_by_degree) - num_nodes, len(sort_by_degree)):
                    G_ego.add_node(sort_by_degree[k][0],
                                   feature=features[sort_by_degree[k][0]])
                graphs.append(G_ego)
        args.max_prev_node = 250
        #print("tmp_100: ", tmp_100);print("tmp_200: ", tmp_200);print("tmp_300: ", tmp_300);print("tmp_400: ", tmp_400);
    elif args.graph_type == 'citeseer_small':
        _, _, G = Graph_load(dataset='citeseer')
        G = G.subgraph(max(nx.connected_components(G), key=len)).copy()
        G = nx.convert_node_labels_to_integers(G)
        graphs = []
        for i in range(G.number_of_nodes()):
            G_ego = nx.ego_graph(G, i, radius=1)
            if (G_ego.number_of_nodes() >= 4) and (G_ego.number_of_nodes() <= 20):
                graphs.append(G_ego)
        shuffle(graphs)
        graphs = graphs[0:200]
        args.max_prev_node = 15

    return graphs
Example #55
print("\n Ruta mas corta")
# Método que nos permite calcular la ruta más corta del grafo usando Dijkstra
ruta_mas_corta = nx.dijkstra_path(GRAFO, origen, destino)
print(' -> '.join(ruta_mas_corta))

print("Longitud de la ruta mas corta")
# Método que nos permite calcular la longitud de la ruta más corta del grafo
print(nx.dijkstra_path_length(GRAFO, origen, destino))

# Métodos para dibujar el grafo en 2D con los nombres de cada nodo
nx.draw(GRAFO, with_labels=True)
plt.show()

# reorder nodes from 0,len(G)-1
# Convertirmos los nodos del grafo en enteros para poder realizar la impresión en 3D
GRAFO_ENTEROS = nx.convert_node_labels_to_integers(GRAFO)

pos = nx.spring_layout(GRAFO_ENTEROS, dim=3)

xyz = np.array([pos[v] for v in sorted(GRAFO_ENTEROS)])

scalars = np.array(list(GRAFO_ENTEROS.nodes())) + 5

mlab.figure(1, bgcolor=(0, 0, 0))
mlab.clf()

# Diseño y presentación del grafo en 3D
pts = mlab.points3d(xyz[:, 0],
                    xyz[:, 1],
                    xyz[:, 2],
                    scalars,
Example #56
def draw_graphviz(tree,
                  label_func=str,
                  prog='twopi',
                  args='',
                  node_color='#c0deff',
                  **kwargs):
    """Display a tree or clade as a graph, using the graphviz engine.

    Requires NetworkX, matplotlib, Graphviz and either PyGraphviz or pydot.

    The third and fourth parameters apply to Graphviz, and the remaining
    arbitrary keyword arguments are passed directly to networkx.draw(), which
    in turn mostly wraps matplotlib/pylab.  See the documentation for Graphviz
    and networkx for detailed explanations.

    The NetworkX/matplotlib parameters are described in the docstrings for
    networkx.draw() and pylab.scatter(), but the most reasonable options to try
    are: *alpha, node_color, node_size, node_shape, edge_color, style,
    font_size, font_color, font_weight, font_family*

    :Parameters:

        label_func : callable
            A function to extract a label from a node. By default this is str(),
            but you can use a different function to select another string
            associated with each node. If this function returns None for a node,
            no label will be shown for that node.

            The label will also be silently skipped if label_func throws an exception
            related to ordinary attribute access (LookupError, AttributeError,
            ValueError); all other exception types will still be raised. This
            means you can use a lambda expression that simply attempts to look
            up the desired value without checking if the intermediate attributes
            are available:

                >>> Phylo.draw_graphviz(tree, lambda n: n.taxonomies[0].code)

        prog : string
            The Graphviz program to use when rendering the graph. 'twopi'
            behaves the best for large graphs, reliably avoiding crossing edges,
            but for moderate graphs 'neato' looks a bit nicer.  For small
            directed graphs, 'dot' may produce a normal-looking cladogram, but
            will cross and distort edges in larger graphs. (The programs 'circo'
            and 'fdp' are not recommended.)
        args : string
            Options passed to the external graphviz program.  Normally not
            needed, but offered here for completeness.

    Example
    -------

    >>> import pylab
    >>> from Bio import Phylo
    >>> tree = Phylo.read('ex/apaf.xml', 'phyloxml')
    >>> Phylo.draw_graphviz(tree)
    >>> pylab.show()
    >>> pylab.savefig('apaf.png')
    """
    try:
        import networkx
    except ImportError:
        raise MissingPythonDependencyError(
            "Install NetworkX if you want to use draw_graphviz.")

    G = to_networkx(tree)
    try:
        # NetworkX version 1.8 or later (2013-01-20)
        Gi = networkx.convert_node_labels_to_integers(G,
                                                      label_attribute='label')
        int_labels = {}
        for integer, nodeattrs in Gi.node.items():
            int_labels[nodeattrs['label']] = integer
    except TypeError:
        # Older NetworkX versions (before 1.8)
        Gi = networkx.convert_node_labels_to_integers(G,
                                                      discard_old_labels=False)
        int_labels = Gi.node_labels

    try:
        posi = networkx.graphviz_layout(Gi, prog, args=args)
    except ImportError:
        raise MissingPythonDependencyError(
            "Install PyGraphviz or pydot if you want to use draw_graphviz.")

    def get_label_mapping(G, selection):
        """Apply the user-specified node relabeling."""
        for node in G.nodes():
            if (selection is None) or (node in selection):
                try:
                    label = label_func(node)
                    if label not in (None, node.__class__.__name__):
                        yield (node, label)
                except (LookupError, AttributeError, ValueError):
                    pass

    if 'nodelist' in kwargs:
        labels = dict(get_label_mapping(G, set(kwargs['nodelist'])))
    else:
        labels = dict(get_label_mapping(G, None))
    kwargs['nodelist'] = list(labels.keys())
    if 'edge_color' not in kwargs:
        kwargs['edge_color'] = [
            isinstance(e[2], dict) and e[2].get('color', 'k') or 'k'
            for e in G.edges(data=True)
        ]
    if 'width' not in kwargs:
        kwargs['width'] = [
            isinstance(e[2], dict) and e[2].get('width', 1.0) or 1.0
            for e in G.edges(data=True)
        ]

    posn = dict((n, posi[int_labels[n]]) for n in G)
    networkx.draw(G,
                  posn,
                  labels=labels,
                  with_labels=True,
                  node_color=node_color,
                  **kwargs)
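The try/except shim above exists because the relabeling keyword changed across NetworkX releases; on a modern NetworkX (2.x/3.x, an assumption here) the label_attribute round trip looks like this:

import networkx as nx

G = nx.path_graph(["alpha", "beta", "gamma"])
Gi = nx.convert_node_labels_to_integers(G, label_attribute="label")

# Rebuild the original-label -> integer mapping (the modern API spells node
# attribute access Gi.nodes[...] rather than the old Gi.node[...]):
int_labels = {data["label"]: n for n, data in Gi.nodes(data=True)}
assert int_labels["beta"] == 1  # holds under the default insertion ordering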
Beispiel #57
0
def create_named(name):
    ### load datasets
    graphs = []
    max_prev_node = 0
    # synthetic graphs
    if name == 'ladder':
        graphs = []
        for i in range(100, 201):
            graphs.append(nx.ladder_graph(i))
        max_prev_node = 10
    elif name == 'ladder_small':
        graphs = []
        for i in range(2, 11):
            graphs.append(nx.ladder_graph(i))
        max_prev_node = 10
    elif name == 'tree':
        print('Creating tree graphs')
        graphs = []
        for i in range(2, 5):
            for j in range(3, 5):
                graphs.append(nx.balanced_tree(i, j))
        max_prev_node = 256
    elif name == 'tree_adversarial':
        graphs = []
        # Trees that have not been seen before
        # in the standard 'trees' dataset

        # The first set includes trees
        # that have different heights than
        # those in the training data
        heights = [2, 5, 6]
        for i in range(2, 5):
            for h in heights:
                graphs.append(nx.balanced_tree(i, h))

        # More later!

    elif name == 'caveman':
        # graphs = []
        # for i in range(5,10):
        #     for j in range(5,25):
        #         for k in range(5):
        #             graphs.append(nx.relaxed_caveman_graph(i, j, p=0.1))
        graphs = []
        for i in range(2, 3):
            for j in range(30, 81):
                for k in range(10):
                    graphs.append(caveman_special(i, j, p_edge=0.3))
        max_prev_node = 100
    elif name == 'caveman_small':
        # graphs = []
        # for i in range(2,5):
        #     for j in range(2,6):
        #         for k in range(10):
        #             graphs.append(nx.relaxed_caveman_graph(i, j, p=0.1))
        graphs = []
        for i in range(2, 3):
            for j in range(6, 11):
                for k in range(20):
                    graphs.append(caveman_special(i, j,
                                                  p_edge=0.8))  # default 0.8
        max_prev_node = 20
    elif name == 'caveman_small_single':
        # graphs = []
        # for i in range(2,5):
        #     for j in range(2,6):
        #         for k in range(10):
        #             graphs.append(nx.relaxed_caveman_graph(i, j, p=0.1))
        graphs = []
        for i in range(2, 3):
            for j in range(8, 9):
                for k in range(100):
                    graphs.append(caveman_special(i, j, p_edge=0.5))
        max_prev_node = 20
    elif name.startswith('community'):
        num_communities = int(name[-1])
        print('Creating dataset with ', num_communities, ' communities')
        c_sizes = np.random.choice([12, 13, 14, 15, 16, 17], num_communities)
        #c_sizes = [15] * num_communities
        for k in range(3000):
            graphs.append(n_community(c_sizes, p_inter=0.01))
        max_prev_node = 80
    elif name == 'grid':
        graphs = []
        for i in range(10, 20):
            for j in range(10, 20):
                graphs.append(nx.grid_2d_graph(i, j))
        max_prev_node = 40
    elif name == 'grid_small':
        graphs = []
        for i in range(2, 5):
            for j in range(2, 6):
                graphs.append(nx.grid_2d_graph(i, j))
        max_prev_node = 15
    elif name == 'barabasi':
        graphs = []
        for i in range(100, 200):
            for j in range(4, 5):
                for k in range(5):
                    graphs.append(nx.barabasi_albert_graph(i, j))
        max_prev_node = 130
    elif name == 'barabasi_small':
        graphs = []
        for i in range(4, 21):
            for j in range(3, 4):
                for k in range(10):
                    graphs.append(nx.barabasi_albert_graph(i, j))
        max_prev_node = 20
    elif name == 'grid_big':
        graphs = []
        for i in range(36, 46):
            for j in range(36, 46):
                graphs.append(nx.grid_2d_graph(i, j))
        max_prev_node = 90

    elif 'barabasi_noise' in name:
        # Broken!
        graphs = []
        for i in range(100, 101):
            for j in range(4, 5):
                for k in range(500):
                    graphs.append(nx.barabasi_albert_graph(i, j))
        #graphs = perturb_new(graphs,p=args.noise/10.0)
        max_prev_node = 99

    # real graphs
    elif name == 'enzymes':
        graphs = Graph_load_batch(min_num_nodes=10, name='ENZYMES')
        max_prev_node = 25
    elif name == 'enzymes_small':
        graphs_raw = Graph_load_batch(min_num_nodes=10, name='ENZYMES')
        graphs = []
        for G in graphs_raw:
            if G.number_of_nodes() <= 20:
                graphs.append(G)
        max_prev_node = 15
    elif name == 'protein':
        graphs = Graph_load_batch(min_num_nodes=20, name='PROTEINS_full')
        max_prev_node = 80
    elif name == 'DD':
        graphs = Graph_load_batch(min_num_nodes=100,
                                  max_num_nodes=500,
                                  name='DD',
                                  node_attributes=False,
                                  graph_labels=True)
        max_prev_node = 230
    elif name == 'citeseer':
        _, _, G = Graph_load(dataset='citeseer')
        G = max(nx.connected_component_subgraphs(G), key=len)
        G = nx.convert_node_labels_to_integers(G)
        graphs = []
        for i in range(G.number_of_nodes()):
            G_ego = nx.ego_graph(G, i, radius=3)
            if G_ego.number_of_nodes() >= 50 and (G_ego.number_of_nodes() <=
                                                  400):
                graphs.append(G_ego)
        max_prev_node = 250
    elif name == 'citeseer_small':
        _, _, G = Graph_load(dataset='citeseer')
        G = max(nx.connected_component_subgraphs(G), key=len)
        G = nx.convert_node_labels_to_integers(G)
        graphs = []
        for i in range(G.number_of_nodes()):
            G_ego = nx.ego_graph(G, i, radius=1)
            if (G_ego.number_of_nodes() >= 4) and (G_ego.number_of_nodes() <=
                                                   20):
                graphs.append(G_ego)
        shuffle(graphs)
        graphs = graphs[0:200]
        max_prev_node = 15

    return graphs
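Note that nx.grid_2d_graph labels nodes with (row, col) tuples; models that need dense integer ids usually pass such graphs through convert_node_labels_to_integers first, as the citeseer branches above do. A small sketch:

import networkx as nx

G = nx.grid_2d_graph(3, 4)                  # nodes are (row, col) tuples
Gi = nx.convert_node_labels_to_integers(G, ordering="sorted")
assert set(Gi.nodes()) == set(range(12))    # dense 0..n-1 labels, ready for array indexing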
Beispiel #58
0
def Liaison_model3(N_init, sampleN):
    clique_size = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    # clique sizes
    clique_edges = np.asarray(clique_size)
    clique_edges = np.divide(np.multiply(clique_edges, clique_edges + 1), 2)
    ep = 0.1

    total1 = 0
    clique_num = np.zeros(len(clique_size))  # number of cliques of given size
    for w in range(100):
        for i in range(len(clique_size)):
            clique_num[i] = clique_num[i] + (2.5321 * (float(N_init - total1) /
                                                       (clique_size[i])**3))
        clique_num = map(int, clique_num)
        total1 = sum(np.multiply(clique_size, clique_num))
        #print total1
    # comment this next line to generate a small sample graph easy to visualize
    #clique_num = [0,7,3,1,0,0,0,0]
    #6/(np.pi)^2

    ###########################################################

    def l_num(size):  # number of leaders given clique size
        return 1

    ###########################################################

    # almost cliques are generated
    Gclique, l, l_cliqeuwise, clique_sizes, rep_numbers_cliquewise, clique_ordered_cliquewise, clique_start = almost_clique(
        clique_size, clique_num, l_num)
    #print(len(rep_numbers_cliquewise)), 'cliques formed'

    ################### Spanning Tree with Power Law #####################
    '''
    global T
    T=nx.random_powerlaw_tree(len(rep_numbers_cliquewise), tries=1000000) 

    plt.figure(2)
    nx.draw(T, pos=nx.circular_layout(T))
    '''
    ####### MST of Randomly Weighted Undirected Complete Graph on n nodes ########

    GT = {}
    for u in range(len(rep_numbers_cliquewise)):
        GT[u] = {}
    for u in range(len(rep_numbers_cliquewise)):
        for v in range(u):
            r = random()
            GT[u][v] = r
            GT[v][u] = r
    T2 = mst(GT)
    #print "T=", T2
    mst_weight = sum([GT[u][v] for u, v in T2])

    T2G = nx.Graph()
    for u in range(len(rep_numbers_cliquewise)):
        for v in range(len(rep_numbers_cliquewise)):
            if (u, v) in T2:
                #print "(u,v)=", u, v
                T2G.add_edge(u, v, weight=1)

    plt.figure(3)
    nx.draw(T2G, pos=nx.circular_layout(T2G))

    #############################################################################

    L = nx.empty_graph(len(rep_numbers_cliquewise) - 1)

    n1 = len(Gclique.nodes())

    def mapping(x):
        return x + len(Gclique.nodes())

    L = nx.relabel_nodes(L, mapping)

    for u in range(len(l)):
        for v in range(len(l)):
            if u in T2G[v]:
                #Gclique.add_edge(l[u],l[v])
                Gclique.add_nodes_from(L.nodes())

    #print "Gclique.nodes()",Gclique.nodes()

    ########################### Creating Liaison Graph ##########################

    #print l
    l = np.array(l).tolist()
    #print "l = ",l

    for u in range(len(T2G.edges())):
        Gclique.add_edge(l[T2G.edges()[u][0]], n1 + u)
        Gclique.add_edge(l[T2G.edges()[u][1]], n1 + u)

    Liaisons_num = np.random.randint(1, len(T2G.edges()))
    #print "Liaisons_num =", Liaisons_num

    #iterate= len(L.nodes())-Liaisons_num

    for i in range(len(L.nodes())):
        j = np.random.choice(L.nodes(), 2, replace=False)
        #print j
        #print "j[0] = ", j[0], "j[1] = ", j[1]
        #print Gclique.neighbors(j[0]), Gclique.neighbors(j[1])
        #print "Gclique.degree(j[0])", Gclique.degree(j[0]), "Gclique.degree(j[1])", Gclique.degree(j[1])
        #if Gclique.degree(j[0])+Gclique.degree(j[1])<8 and Gclique.degree(j[0])+Gclique.degree(j[1])>2:
        if Gclique.degree(j[0]) + Gclique.degree(j[1]) < 7 and Gclique.degree(
                j[0]) + Gclique.degree(j[1]) > 2:
            for n in Gclique.neighbors(j[1]):
                Gclique.add_edge(j[0], n)
            L.remove_node(j[1])
            Gclique.remove_node(j[1])
    #print "L.nodes() = ", L.nodes()
    #print "Gclique.degree(L.nodes())", Gclique.degree(L.nodes())
    #print "Gclique.degree(L.nodes())", sorted(Gclique.degree(L.nodes()).values())

    Gclique = nx.convert_node_labels_to_integers(Gclique, first_label=0)

    #print L.nodes()
    #print Gclique.nodes()

    #############################################################################

    #print sorted(nx.degree(Gclique).values())
    degreeList = sorted(nx.degree(Gclique).values())

    counter = collections.Counter(degreeList)
    ''' 
    plt.figure(1)
    
    plt.scatter(counter.keys(),counter.values(),c='r',marker='o',s=100,alpha=0.5)
    plt.plot(counter.keys(),counter.values(),linewidth=2,c='r') 
    plt.xlabel('node degree',fontsize=10)
    plt.ylabel('number of nodes',fontsize=10)
    plt.axis([0, 10, 0, 35])
    
    clique_list = []
    for i in range(len(clique_size)):
        dum =  np.linspace(clique_size[i],clique_size[i], clique_num[i])
        clique_list = np.hstack((clique_list,dum ))
    
    colors = []; c = 0
    for i in range(len(clique_list)):
        colors.extend(np.linspace(c,c,clique_list[i]))
        c = c + 1
 
    #pos=nx.spring_layout(Gclique,iterations=200)
    print "clique_list= ", clique_list
    posx = []; posy = [];
    for i in range(len(clique_list)):
        centerx = np.cos(2*np.pi*i/len(clique_list)) 
        centery = np.sin(2*np.pi*i/len(clique_list))
        x1 = []; y1 = []; 
        for j in range(int(clique_list[i])):
            x1.append(centerx  + 0.2*np.cos(2*np.pi*j/clique_list[i]))
            y1.append(centery  + 0.2*np.sin(2*np.pi*j/clique_list[i]))
        posx.extend(x1); posy.extend(y1);
    
    x1 = []; y1 = []; 
    print "len(L.nodes())=", len(L.nodes())
    for j in range(len(L.nodes())):
        x1.append(0.5*np.cos(2*np.pi*j/len(L.nodes())))
        y1.append(0.5*np.sin(2*np.pi*j/len(L.nodes())))
    posx.extend(x1); posy.extend(y1); 
   
    pos = np.transpose(np.vstack((posx,posy)))
    
    plt.figure(4)
    #nx.draw(Gclique,pos,node_color=colors,node_size=800,cmap=plt.cm.Reds)
    nx.draw(Gclique,pos,node_size=800,cmap=plt.cm.Reds)
    #nx.draw(Gclique,pos=nx.spring_layout(Gclique), node_size=800,cmap=plt.cm.Reds)

    plt.show()
    '''
    A1 = nx.to_numpy_matrix(Gclique)
    A = np.array(A1)
    A = np.trunc(A)

    np.savetxt('AdjMatrices/L/L' + str(N_init) + '_' + str(sampleN) + '.txt',
               A,
               fmt='%10.f',
               delimiter='\t')

    with open(
            'StructuralProperties/ClusteringCoeff/L/L' + str(N_init) + '.txt',
            "a") as text_file:
        text_file.write(str(nx.average_clustering(Gclique)) + "\n")

    with open(
            'StructuralProperties/AveShortestPath/L/L' + str(N_init) + '.txt',
            "a") as text_file:
        text_file.write(str(nx.average_shortest_path_length(Gclique)) + "\n")

    with open('StructuralProperties/Density/L/L' + str(N_init) + '.txt',
              "a") as text_file:
        text_file.write(str(nx.density(Gclique)) + "\n")

    xdata, ydata = np.log10(counter.keys()), np.log10(counter.values())
    polycoef = np.polyfit(xdata, ydata, 1)
    yfit = 10**(polycoef[0] * xdata + polycoef[1])

    with open('StructuralProperties/DegreeDist/L/L' + str(N_init) + '.txt',
              "a") as text_file:
        text_file.write(str(polycoef[0]) + "\n")
    '''
    print polycoef[0]
    plt.subplot(211)
    plt.plot(xdata,ydata,'.k',xdata,yfit,'-r')
    plt.subplot(212)
    plt.loglog(xdata,ydata,'.k',xdata,yfit,'-r')
    plt.show()        
    
    print "number of nodes: ", math.sqrt(np.size(A))
    '''
    return Gclique
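The first_label=0 call near the end of Liaison_model3 compacts the node labels after the liaison merges delete nodes; a minimal sketch of that effect:

import networkx as nx

G = nx.path_graph(5)
G.remove_node(2)                            # labels are now 0, 1, 3, 4 -- a gap appears
G = nx.convert_node_labels_to_integers(G, first_label=0)
assert sorted(G) == [0, 1, 2, 3]            # gap closed before exporting the adjacency matrix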
Beispiel #59
0
def main(args):

    if len(args) != 1:
        sys.exit("Usage: python origianl.py <graphml file>")
    
    C=3
    L=6  # loss value
    select_strength=1  # selection strength
    fname = args[0]
    G = networkx.read_graphml(fname)
    G = networkx.convert_node_labels_to_integers(G)
    
    '''
        Initialize each participant's investment, reusing the earlier initial_investment.npy file
    '''
    a=[]    
    defender = [Defender(i) for i in range(0, len(G))]
    for i in range(len(G)):
        defender[i].set_degree(G.degree()[i])
    for i in range(len(G)):
        if defender[i].get_degree()<2:
            defender[i].set_investment(random.uniform(0,0.2))
        elif defender[i].get_degree()<4 and defender[i].get_degree()>=2:
            defender[i].set_investment(random.uniform(0.2,0.4))
        elif defender[i].get_degree()<6 and defender[i].get_degree()>=4:
            defender[i].set_investment(random.uniform(0.4,0.6))
        elif defender[i].get_degree()<8 and defender[i].get_degree()>=6:
            defender[i].set_investment(random.uniform(0.6,0.8))
        elif defender[i].get_degree()>=8:
            defender[i].set_investment(random.uniform(0.8,1))
        a.append(defender[i].get_investment())
    numpy.save('initial_investment.npy',a)
    
    
    for i in range(len(G)):
        sum_weight=0.0+defender[i].get_degree()
        for j in neighbors(G,i):
            sum_weight+=defender[j].get_degree()
        defender[i].set_weight((defender[i].get_degree())/sum_weight)
    
    
    #pdb.set_trace() 
    '''Start the evolutionary game'''
    for i in range(300):    
        # compute each participant's risk and payoff
        for j in range(0,len(G)):
            sum=0
            for k in neighbors(G,j):
                sum = sum+defender[k].get_weight()*defender[k].get_investment()
            defender[j].set_risk(math.exp(-sum-defender[j].get_weight()*defender[j].get_investment()))
            defender[j].set_payoff(-C*defender[j].get_investment()-L*defender[j].get_risk())
        
        if i==0:
            pp=0
            for jo in range(0,len(G)):
                pp=pp+defender[jo].get_payoff()
                print(pp)
        
        '''Update strategies'''
        for jj in range(0,len(G)):
            jjj=random_neighbor(G, jj)          
            if defender[jj].get_payoff() < defender[jjj].get_payoff():
                imitation_probabily=1.0/(1+math.exp((-select_strength)*(defender[jjj].get_payoff()-defender[jj].get_payoff())))
                if random.random() <= imitation_probabily:
                    defender[jj].set_investment_update(defender[jjj].get_investment())
                    #defender[jj].set_investment_update(((defender[jj].get_degree()+0.1)/defender[jjj].get_degree()+0.1)*defender[jjj].get_investment())    
                else:
                    defender[jj].set_investment_update((defender[jj]).get_investment())
            else:
                (defender[jj]).set_investment_update((defender[jj]).get_investment())
        
        
        for jjjj in range(0,len(G)):
            defender[jjjj].set_investment(defender[jjjj].get_investment_update())
            
        '''
    for j in range(0,len(G)): 
        print defender[j].get_investment()
        
        '''
        
        p=0
        for j in range(0,len(G)):
            p=p+defender[j].get_payoff()
        print(p)
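The update step above is the standard Fermi (pairwise-comparison) imitation rule; a standalone sketch of that rule under the same selection-strength parameter (names here are illustrative):

import math
import random

def fermi_imitates(my_payoff, neighbor_payoff, select_strength=1.0):
    """Return True if the focal player should copy the neighbor's strategy."""
    if neighbor_payoff <= my_payoff:
        return False                        # only better-off neighbors are imitated
    gap = neighbor_payoff - my_payoff
    return random.random() <= 1.0 / (1.0 + math.exp(-select_strength * gap))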
Beispiel #60
0
def load_network(filename):
    path = Path(__file__).parent / "./data/{}".format(filename)
    # map node names to integers 0..n-1 (they are used as array indices)
    return nx.convert_node_labels_to_integers(nx.read_gml(str(path), label='id'))
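Usage sketch (the filename is hypothetical; any GML file placed under ./data would do):

G = load_network("example.gml")             # hypothetical file under ./data
assert sorted(G) == list(range(G.number_of_nodes()))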