import networkx as nx
import matplotlib.pyplot as plt

def draw_graph(graphDic, nodesStatus, imageName):
    # Pick a color for each node according to its status.
    node_colors = []
    for element in graphDic.keys():
        status = nodesStatus[element[0] - 1]
        if status == "INACTIVE":
            node_colors += ['white']
        elif status == "ACTIVE":
            node_colors += ['red']
        elif status == "SELECTED":
            node_colors += ['green']
    # Generate the graph from the adjacency dictionary.
    G = nx.from_dict_of_lists(graphDic)
    nx.draw_circular(G, node_color=node_colors, with_labels=True, node_size=50)
    # Save the result.
    print("image name is " + imageName)
    plt.savefig(imageName)
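A minimal usage sketch with hypothetical inputs: the `element[0] - 1` lookup implies the dictionary keys are sequences whose first entry is a 1-based node id, so tuple keys are assumed here.

# Hypothetical inputs for draw_graph: keys are (id,) tuples so that
# element[0] - 1 indexes into nodesStatus.
graphDic = {(1,): [(2,), (3,)], (2,): [(3,)], (3,): []}
nodesStatus = ["ACTIVE", "SELECTED", "INACTIVE"]
draw_graph(graphDic, nodesStatus, "demo.png")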
Example #2
    def shortest_path(self, target):
        # Checkmark 1: seed the adjacency dict with the start page.
        # url[24:] strips the 24-character prefix 'https://en.wikipedia.org'.
        d0 = time.perf_counter()
        dict_links = {self.url[24:]: WikiWeb(self.url).links()}
        links = WikiWeb(self.url).links()
        wiki = 'https://en.wikipedia.org'
        print(time.perf_counter() - d0)

        # Checkmark 2: breadth-first expansion until the target page is seen.
        d0 = time.perf_counter()
        count = 0
        while target[24:] not in links:
            link = links[count]
            dict_links.update({link: WikiWeb(wiki + link).links()})
            for link1 in dict_links[link]:
                if link1 not in links:
                    links.append(link1)
            count += 1
        print(time.perf_counter() - d0)

        # Checkmark 3: build the graph and extract the shortest path.
        d0 = time.perf_counter()
        gr = nx.from_dict_of_lists(dict_links)
        sp = nx.shortest_path(gr, self.url[24:], target[24:])
        print(time.perf_counter() - d0)

        return sp
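A hedged usage sketch: the snippet shows only the method, so an enclosing class (here called `WikiPath`, a hypothetical name) storing the start URL as `self.url` is assumed, along with the external `WikiWeb` helper.

# WikiPath is a hypothetical wrapper; only the method body appears above.
finder = WikiPath('https://en.wikipedia.org/wiki/Python_(programming_language)')
path = finder.shortest_path('https://en.wikipedia.org/wiki/Graph_theory')
print(path)  # list of '/wiki/...' page names from start to target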
Example #3
def load_pdata(dataset_str):
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("./data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            objects.append(pkl.load(f, encoding='latin1'))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file("./data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset_str == 'citeseer':
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]

    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    
    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))

    train_mask = sample_mask(idx_train, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])

    y_train = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]

    train_out = []
    for i in idx_train:
        ll = y_train[i].tolist()
        ll = ll.index(1) + 1
        train_out.append([i, ll])
    train_out = np.array(train_out)
    np.random.shuffle(train_out)

    test_out = []
    for i in idx_test:
        ll = y_test[i].tolist()
        ll = ll.index(1) + 1
        test_out.append([i, ll])
    test_out = np.array(test_out)
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
    adj.eliminate_zeros()
    # Check that diag is zero:
    assert np.diag(adj.todense()).sum() == 0

    adj_triu = sp.triu(adj)
    adj_tuple = sparse_to_tuple(adj_triu)
    edges = adj_tuple[0]
    edges_all = sparse_to_tuple(adj)[0]
    num_mask = int(np.floor(edges.shape[0] / 10.))

    return graph, features, train_out, test_out
Example #4
def skeleton_to_nx_graph(skeleton):
  """Converts a binary skeleton image to a networkx graph

  Arguments:
    skeleton (array): 2d/3d binary skeleton image

  Returns:
    networkx.Graph: graph whose nodes are skeleton pixel coordinates and
    whose edges connect neighbouring pixels
  """

  ids, nh = skeleton_to_list(skeleton, with_neighborhoods=True)
  print('ids done...')

  if len(ids) == 0:
    return nx.Graph()
  elif len(ids) == 1:
    adj = {}
    adj[tuple(ids[0])] = []
    return nx.from_dict_of_lists(adj)
  else:
    g = nx.Graph()
    for i, pos in enumerate(ids):
      if i % 500 == 0:
        print('%d/%d nodes constructed...' % (i, len(ids)))
      p = tuple(pos)
      g.add_node(p)
      posnh = np.where(nh[i])
      for pp in np.transpose(posnh):
        # Neighbourhood offsets are in {0, 1, 2}; shift by -1 to get
        # coordinates relative to pos.
        g.add_edge(p, tuple(pp + pos - 1))
    return g
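A hedged call sketch; `skeleton_to_list` is an external helper assumed importable alongside the function, so this only illustrates the expected input and output shapes.

import numpy as np

skel = np.zeros((5, 5), dtype=bool)
skel[2, 1:4] = True  # a 3-pixel horizontal line
g = skeleton_to_nx_graph(skel)
print(g.number_of_nodes(), g.number_of_edges())  # expected: 3 nodes, 2 edges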
Example #5
def from_ajacency_map(amap, directed = False):
    """
        Turns a map of adjacencies into a graph.

        amap: adjacency dict
        directed: if set to True, a directed graph will be created.
    """
    return nx.from_dict_of_lists(amap, nx.DiGraph() if directed else nx.Graph())
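A quick usage example showing how the `directed` flag changes the result:

amap = {0: [1], 1: [0, 2], 2: []}
G_undirected = from_ajacency_map(amap)               # 0-1 and 1-2: 2 edges
G_directed = from_ajacency_map(amap, directed=True)  # 0->1, 1->0, 1->2: 3 arcs
print(G_undirected.number_of_edges(), G_directed.number_of_edges())  # 2 3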
Example #6
def parse_graph(file_string):
    # Open the file and decode the json information
    with open(file_string, 'r') as f:
        data = json.load(f)

    # Create a networkx graph from our adjacency list data
    G = nx.from_dict_of_lists(data)

    return G
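A small end-to-end check (hypothetical file name): write an adjacency-list JSON file, then parse it back.

import json

with open("toy_graph.json", "w") as f:
    json.dump({"a": ["b", "c"], "b": ["c"]}, f)

G = parse_graph("toy_graph.json")
print(sorted(G.edges()))  # [('a', 'b'), ('a', 'c'), ('b', 'c')]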
Example #7
def neighbourhoods(distribution, areal_units, classes=None):
    """ Return the neighbourhoods where different classes gather

    Parameters
    ----------

    distribution: nested dictionaries
        Number of people per class, per areal unit as given in the raw data
        (ungrouped). The dictionary must have the following formatting:
        > {areal_id: {class_id: number}}

    areal_units: dictionary
        Dictionary of areal unit ids with shapely polygon objects representing
        the unit's geometry as values.

    classes: dictionary of lists
        When the original categories need to be aggregated into different
        classes. 
        > {class: [categories belonging to this class]}
        This can be arbitrarily imposed, or computed with uncover_classes
        function of this package.

    Returns
    -------

    neighbourhoods: dictionary
        Dictionary of classes names with list of neighbourhoods (that are
        each represented by a list of areal unit)
        > {'class': [ [areal units in cluster i], ...]}
    """

    # Regroup into classes if specified. Otherwise return categories indicated
    # in the data
    if not classes:
        classes = return_categories(distribution)

    ## Find the areal units where classes are overrepresented
    or_units = overrepresented_units(distribution, classes)
    
    ## Compute the adjacency list
    adjacency = _adjacency(areal_units)

    ## Extract neighbourhooods as connected components
    G = nx.from_dict_of_lists(adjacency) # Graph from adjacency
    neighbourhoods = {cl: [list(component) for component in
                            nx.connected_components(G.subgraph(or_units[cl]))]
                        for cl in classes}

    return neighbourhoods
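The core step (connected components within the subgraph of overrepresented units) can be seen in isolation; a minimal sketch with hand-made stand-ins for `_adjacency(...)` and `overrepresented_units(...)`:

import networkx as nx

adjacency = {1: [2], 2: [1, 3], 3: [2, 4], 4: [3]}  # stand-in for _adjacency(areal_units)
or_units = {"rich": [1, 2, 4]}                      # stand-in for overrepresented_units(...)

G = nx.from_dict_of_lists(adjacency)
clusters = [list(c) for c in nx.connected_components(G.subgraph(or_units["rich"]))]
print(clusters)  # e.g. [[1, 2], [4]] -- two separate neighbourhoods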
Example #8
def load_data(dataset_str):
    """Load data."""
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))

    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)

    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended

    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))

    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]

    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    idx_val = range(len(y), len(y)+500)

    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])

    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]

    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
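A typical call, assuming the Planetoid files `data/ind.cora.*` and the `parse_index_file`/`sample_mask` helpers are available:

adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data('cora')
print(adj.shape, features.shape)  # e.g. (2708, 2708) (2708, 1433) for cora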
Example #9
import numpy as np
import networkx as nx

def edges_to_matrix(filename):
    data = np.loadtxt(filename)

    adjdict = dict()

    # Build the adjacency dict from the two-column edge list.
    data = data.astype(int)
    for i in range(data.shape[0]):
        if data[i, 0] in adjdict:
            adjdict[data[i, 0]].append(data[i, 1])
        else:
            adjdict[data[i, 0]] = [data[i, 1]]
    for i in range(data.shape[0]):
        if data[i, 1] not in adjdict:
            adjdict[data[i, 1]] = []

    # Map the original node labels onto contiguous indices 0..n-1.
    nodes_set = set()
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            nodes_set.add(data[i, j])
    nodes_list = list(nodes_set)
    values = dict()
    for i in range(len(nodes_list)):
        values[nodes_list[i]] = i

    refactored = dict()

    for node in adjdict.keys():
        edges = adjdict[node]
        refactored_edges = []
        for e in edges:
            refactored_edges.append(values[e])
        refactored[values[node]] = refactored_edges

    G = nx.from_dict_of_lists(refactored, create_using=nx.DiGraph())

    # Dense adjacency matrix of the relabeled directed graph.
    n = len(G.nodes())
    A = np.zeros((n, n))
    for u in G.nodes():
        for v in refactored[u]:
            A[u, v] = 1

    return A
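A small round trip with a temporary edge list (hypothetical file name):

np.savetxt("edges.txt", [[10, 20], [20, 30], [10, 30]], fmt="%d")
A = edges_to_matrix("edges.txt")
print(A.shape, int(A.sum()))  # (3, 3) 3 -- one row/column per node, one 1 per arc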
Example #10
def load_data(dataset):
    names = ['x', 'tx', 'allx', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset, names[i]), 'rb') as f:
            objects.append(pkl.load(f, encoding='latin1'))
    x, tx, allx, graph = tuple(objects)
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset))
    test_idx_range = np.sort(test_idx_reorder)

    test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
    tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
    tx_extended[test_idx_range-min(test_idx_range), :] = tx
    tx = tx_extended

    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))

    return adj, features
Example #11
def game_from_file(filename):

	game = Game()
	with open(filename) as f:
		game.network = nx.from_dict_of_lists(json.loads(f.read()))

	# We split up the graph name to get
	# - the number of players,
	# - the number of seeds per player,
	# - the graph id.
	basename = os.path.basename(filename)

	num_list = list(map(int, basename.split(".")[:3]))

	game.num_players = num_list[0]
	game.num_seeds = num_list[1]
	game.id = num_list[2]

	return game
Example #12
def get_networkx_graph_from_array(binary_arr):
    """
    Return a networkx graph from a binary numpy array
    Parameters
    ----------
    binary_arr : numpy array
        binary numpy array can only be 2D Or 3D

    Returns
    -------
    networkx_graph : Networkx graph
        graphical representation of the input array after clique removal
    """
    assert np.max(binary_arr) in [0, 1], "input must always be a binary array"
    start = time.time()
    dict_of_indices_and_adjacent_coordinates = _set_adjacency_list(binary_arr)
    networkx_graph = nx.from_dict_of_lists(dict_of_indices_and_adjacent_coordinates)
    _remove_clique_edges(networkx_graph)
    print("time taken to obtain networkxgraph is %0.3f seconds" % (time.time() - start))
    return networkx_graph
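A hedged call sketch; `_set_adjacency_list` and `_remove_clique_edges` are module-internal helpers assumed to be in scope:

import numpy as np

arr = np.zeros((5, 5), dtype=np.uint8)
arr[2, 1:4] = 1  # a short horizontal segment of foreground pixels
g = get_networkx_graph_from_array(arr)
print(g.number_of_nodes(), g.number_of_edges())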
Example #13
def load_data(dataset):
    # load the data: x, tx, allx, graph
    names = ['x', 'tx', 'allx', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset, names[i]), 'rb') as f:
            objects.append(pkl.load(f, encoding='latin1'))
    x, tx, allx, graph = tuple(objects)

    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset))
    test_idx_range = np.sort(test_idx_reorder)

    if dataset == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended

    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))

    return adj, features
Example #14
def isolate_all(xyt_filename, BINS=6, force=False, sparse=False):

    filaments_filename = filament_filename_from_xyt_filename(xyt_filename)  # Assertions inside function
    if not SILENT:
        print("Isolating filaments from:", xyt_filename)

    if not force and os.path.isfile(filaments_filename):
        if SILENT:
            return filaments_filename
        else:
            print("Filaments already saved as", filaments_filename)
            if "y" not in input("Run isolate_all() anyway? ([no]/yes):  "):
                print("Aborted: isolate_all()")
                return filaments_filename

    hdu_list = config.default_open(xyt_filename)
    ntheta = hdu_list[0].header["NTHETA"]
    wlen = hdu_list[0].header["WLEN"]
    frac = hdu_list[0].header["FRAC"]
    naxis1 = hdu_list[0].header["NAXIS1"]
    naxis2 = hdu_list[0].header["NAXIS2"]
    original = hdu_list[0].header["ORIGINAL"]
    Hi = hdu_list[1].data["hi"]
    Hj = hdu_list[1].data["hj"]

    # Compute ThetaRHT for all pixels given, then bin by theta
    B = list(map(rht.theta_rht, hdu_list[1].data["hthets"]))  # List of theta_rht values
    C = np.multiply(np.asarray(B), BINS / np.pi).astype(np.int_)
    del B

    # Ready the output HDUList and close the input HDUList
    output_hdulist = fits.HDUList(hdu_list[0].copy())  # , open(filaments_filename, 'w')) #Overwrites
    hdu_list.close()

    # Set Assignment
    # unprocessed = list()
    list_of_HDUs = list()
    search_pattern = [
        (-1, -1),
        (-1, 0),
        (-1, 1),
        (0, -1),
    ]  # [(-1, 1), (-1,-1), (-1, 0), (0, -1), (-2, -2), (-2, -1), (-2, 0), (-2, 1), (-2, 2), (-1, -2), (-1, 2), (0,-2)]
    for bin in range(BINS):
        delimiter = np.nonzero(C == bin)[0]
        raw_points = list(zip(Hi[delimiter], Hj[delimiter]))
        del delimiter
        problem_size = len(raw_points)
        # message='Step '+str(bin+1)+'/'+str(BINS)+': (N='+str(problem_size)+')'
        # progress_bar = Progress(problem_size, message=message, incrementing=True)

        point_dict = dict([x[::-1] for x in enumerate(raw_points)])
        set_dict = collections.defaultdict(list)
        # theta_dict = dict()

        for coord in raw_points:
            # rht.update_progress(0.3*(i/problem_size), message=message)
            # progress_bar.update()
            # theta_dict[coord] = B[point_dict[coord]]
            for rel_coord in search_pattern:
                try:
                    j = point_dict[config.rel_add(coord, rel_coord)]
                    set_dict[point_dict[coord]].append(j)
                except Exception:
                    continue

        G = nx.from_dict_of_lists(set_dict)  # Undirected graph made using set_dict as an adjacency list
        del set_dict

        # progress_bar = Progress(problem_size, message=message, incrementing=False)
        sources = list(range(problem_size))
        flags = np.ones((problem_size), dtype=np.int_)
        while len(sources) > 0:
            source = sources.pop()
            if not flags[source]:
                continue
            else:
                # rht.update_progress(0.3+0.3*(1.0-len(sources)/problem_size), message=message)
                # progress_bar.update(len(sources))
                try:
                    for member in nx.descendants(G, source):
                        flags[member] = False
                        point_dict[raw_points[member]] = source
                        G.remove_node(member)  # TODO Remove members from G if that would speed up subsequent calls?
                except nx.NetworkXError:
                    # Assume we hit an isolated pixel (never made it into G) and move on
                    pass
        del sources, flags, G

        histogram = np.bincount([point_dict[p] for p in raw_points])
        mask = np.nonzero(histogram >= int(frac * wlen))[0]
        del histogram

        # progress_bar = Progress(problem_size, message=message, incrementing=False)
        mask_dict = dict([x[::-1] for x in enumerate(mask)])
        out_clouds = collections.defaultdict(list)

        while len(point_dict) > 0:
            temp = point_dict.popitem()
            try:
                # Keying into mask_dict is the only operation that ought to throw an exception
                out_clouds[mask_dict[temp[1]]].append(temp[0])
                # progress_bar.update(len(point_dict))
                # rht.update_progress(0.6+0.399*(1.0-len(point_dict)/problem_size), message=message)
            except Exception:
                continue

        while len(out_clouds) > 0:
            cloud = out_clouds.popitem()[1]
            # unprocessed.append(cloud)
            list_of_HDUs.append(config.Cloud(cloud).as_HDU(sparse=sparse))  # TODO Incorporate theta_dict

        # rht.update_progress(1.0, final_message='Finished joining '+str(problem_size)+' points! Time Elapsed:')

    # Convert lists of two-integer tuples into ImageHDUs
    # unprocessed.sort(key=len, reverse=True)
    # output_hdulist = fits.HDUList(map(config.Cloud.as_ImageHDU, map(config.Cloud, unprocessed)))
    # del unprocessed

    list_of_HDUs.sort(key=lambda h: h.header["DIAG"], reverse=False)
    while len(list_of_HDUs) > 0:
        output_hdulist.append(list_of_HDUs.pop())

    # Output HDUList to File
    output_hdulist.writeto(filaments_filename, output_verify="silentfix", overwrite=True, checksum=True)  # 'clobber' was renamed 'overwrite' in astropy
    try:
        output_hdulist.flush()
    except Exception:
        pass
    try:
        output_hdulist.close()
    except Exception:
        pass

    if not SILENT:
        print("Results successfully output to " + filaments_filename)
    return filaments_filename
Example #15
def graph_to_json():

    json_file = {}

    # urllib.URLopener was removed in Python 3; use urllib.request instead,
    # and ast.literal_eval rather than eval for untrusted text
    # (requires: import urllib.request, ast).
    position_file = 'https://s3-us-west-2.amazonaws.com/pollstr/visuals/dataBrexit.txt'
    with urllib.request.urlopen(position_file) as open_s3:
        position = ast.literal_eval(open_s3.read().decode())

    neighborhood_file = 'https://s3-us-west-2.amazonaws.com/pollstr/visuals/net4.txt'
    with urllib.request.urlopen(neighborhood_file) as open_s3:
        neighborhood_dict = ast.literal_eval(open_s3.read().decode())

    Graph = nx.from_dict_of_lists(neighborhood_dict)

    nodes = Graph.nodes()
    list_of_nodes = []

    id_of_nodes = {}
    i = 0
    for node in nodes:
        id_of_nodes[node] = i
        i += 1

    node_info_dict = {}

    for node in nodes:
        node_info = {}
        node_info['name'] = str(node)
        try:
            if position[node]['position'] == 'leave':
                node_info['color'] = 'blue'
                node_info['followers'] = position[node]['followers']
                node_info['logFollowers'] = position[node]['log']

            elif position[node]['position'] == 'remain':
                node_info['color'] = 'yellow'
                node_info['followers'] = position[node]['followers']
                node_info['logFollowers'] = position[node]['log']
            else:
                node_info['color'] = '#e7e7e7'
                node_info['followers'] = position[node]['followers']
                node_info['logFollowers'] = position[node]['log']
        except Exception:
            node_info['color'] = '#e7e7e7'
            node_info['followers'] = 'DK'
            node_info['logFollowers'] = 3
        node_info_dict[str(node)] = node_info

        list_of_nodes.append(node_info)

    edges = Graph.edges()
    list_of_edges = []
    for node in nodes:
        neighbors = Graph.neighbors(node)
        for neighbor in neighbors:

            edge_info = {}
            edge_info['source'] = id_of_nodes[node]
            edge_info['target'] = id_of_nodes[neighbor]
            edge_info['value'] = 1
            try:
                edge_info['color'] = node_info_dict[node]['color']
            except Exception:
                edge_info['color'] = '#e7e7e7'
            list_of_edges.append(edge_info)

    json_file['nodes'] = list_of_nodes
    json_file['links'] = list_of_edges

    json_file = json.dumps(json_file)

    return json_file
Example #16
connectionlist = []
for i in targetlist:
    memberlist = data['Source'][data.Target == i].tolist()
    print(memberlist)
    cxlist = list(itertools.permutations(memberlist, 2))
    for n in cxlist:
        mytuple = tuple([n, i])
        connectionlist += [mytuple]

resultdict = collections.defaultdict(list)
for x in connectionlist:
    resultdict[x[0]].append(x[1])

G = networkx.from_dict_of_lists(resultdict)

mymatrix = networkx.to_numpy_matrix(G)

print(mymatrix.shape)
print(mymatrix[0, :])

label = [memberlist, memberlist]
mylarry = la.larry(mymatrix, label, dtype=float)

Example #17
for i, city in enumerate(msa):
    print("Compute the number of neighbourhoods for %s (%s/%s)" % (msa[city],
                                                i + 1,
                                                len(msa)))

    ## Import adjacency matrix
    adjacency = {}
    with open('extr/adjacency_bg/msa/%s.csv' % city, 'r') as source:
        reader = csv.reader(source, delimiter='\t')
        next(reader)
        for rows in reader:
            adjacency[rows[0]] = rows[1:]


    ## Transform into graph
    G = nx.from_dict_of_lists(adjacency)


    ## Import list of bg where each class is overrepresented
    over_bg = {cl:[] for cl in classes}
    with open('extr/neighbourhoods/classes/msa/%s.csv'%city, 'r') as source:
        reader = csv.reader(source, delimiter='\t')
        for rows in reader:
            over_bg[rows[0]].append(rows[1])


    ## Extract neighbourhoods (the connected components of the subgraph
    ## constituted of the areal units where the class is overrepresented)
    neighbourhoods = {cl: list(nx.connected_components(G.subgraph(over_bg[cl])))
                        for cl in classes}
    neigh_num[city] = {cl: len(neighbourhoods[cl]) for cl in classes}
Example #18
# In[123]:

word_list = ['halloween', 'love', 'follow', 'happi', 'night', 'bihday', 'dress']
word_ass_dict = {}
for x in word_list:
    word_ass_dict[x] = count_ass(x)


# In[124]:

word_ass_dict


# In[125]:

Gword=nx.from_dict_of_lists(word_ass_dict)


# In[156]:

pos = nx.shell_layout(Gword)
nx.draw_networkx(Gword, pos, node_size=1500, node_color='w', font_color='b', font_size=12)
plt.axis('off')
plt.title('Graph of associations between most frequent words', fontsize=20)
plt.show()


# In[ ]:


Example #19
from pprint import pprint
import networkx as nx
import matplotlib.pyplot as plt

a, b, c, d, e, f, g, h = range(8)

N = {
     a: [b, c, d, e, f],
     b: [c, e],
     c: [d],
     d: [e],
     e: [f],
     f: [c, g, h],
     g: [f, h],
     h: [f, g]
    }

G = nx.from_dict_of_lists(N)
nx.draw(G)
plt.show()
print(type(N))
pprint(N)
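Note that from_dict_of_lists builds an undirected graph here, so reciprocal entries such as f in g's list and g in f's list collapse into a single edge:

print(G.number_of_nodes(), G.number_of_edges())  # 8 14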

Example #20
import simplejson as json
import networkx as nx
from networkx.readwrite import json_graph
import matplotlib.pyplot as plt
from numpy import cumsum

print('Running Graph Properties script')

with open('net_sci_coauthorships.txt', 'r') as f:
    js_graph = json.load(f) # Dictionary of key-value pairs
G = nx.from_dict_of_lists(js_graph)

#### Plot histogram #### 
# Get degrees of all nodes, create a list
degrees = [d for _, d in nx.degree(G)]
plt.hist(degrees, bins=10, log=True)
plt.title('Degree Histogram')
plt.ylabel('Number of Nodes')
plt.xlabel('Degree')
# plt.show()
plt.savefig('degree_histogram.png')
plt.clf()

#### Plot cumulative distribution function #### 
cumsums = cumsum(degrees)
plt.plot(cumsums)
plt.title('Cumulative Node Degrees')
plt.ylabel('Cumulative Node Degree')
plt.xlabel('Number of Nodes')
# plt.show()
plt.savefig('degree_cumsum.png')
Example #21
def adj_lists_to_directed_graph(adjacency_lists):
    """Turns a dict of lists of nodes to a directed graph"""
    return nx.from_dict_of_lists(adjacency_lists, create_using=nx.DiGraph())
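For example, a three-node cycle keeps its arc directions:

G = adj_lists_to_directed_graph({'a': ['b'], 'b': ['c'], 'c': ['a']})
print(list(G.edges()))  # [('a', 'b'), ('b', 'c'), ('c', 'a')]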
Example #22
import sys
import json
import heapq as heap
from operator import itemgetter
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import sim
import betweenness_centrality


# Load data from file given by command line argument
filename = sys.argv[1]
N = int(filename.split('.')[-3])
with open(filename) as f:
    graph_data = json.load(f)

G = nx.from_dict_of_lists(graph_data)

def save_graph(graph, save_name):
    '''
    Saves networkx graph "graph" as pdf named "save_name"
    Source: http://stackoverflow.com/a/17388676
    '''

    # initialize figure
    plt.figure(num=None, figsize=(20, 20), dpi=80)
    plt.axis('off')
    fig = plt.figure(1)
    pos = nx.spring_layout(graph)
    nx.draw_networkx_nodes(graph, pos)
    nx.draw_networkx_edges(graph, pos)
    nx.draw_networkx_labels(graph, pos)
    # The original snippet ends here; saving the figure, as the docstring
    # describes, would be:
    plt.savefig(save_name, bbox_inches="tight")
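Hypothetical usage on the graph loaded above:

save_graph(G, 'graph.pdf')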
Example #23
def construct_tree_from_graph(adjacency_list, density, prune_threshold=None,
                              num_levels=None, verbose=False):
    """
    Construct a level set tree from a similarity graph and a density estimate.

    Parameters
    ----------
    adjacency_list : list [list]
        Adjacency list of the k-nearest neighbors graph on the data. Each entry
        contains the indices of the `k` closest neighbors to the data point at
        the same row index.

    density : list [float]
        Estimate of the density function, evaluated at the data points
        represented by the keys in `adjacency_list`.

    prune_threshold : int, optional
        Leaf nodes with fewer than this number of members are recursively
        merged into larger nodes. If 'None' (the default), then no pruning
        is performed.

    num_levels : list int, optional
        Number of density levels in the constructed tree. If None (default),
        `num_levels` is internally set to be the number of rows in `X`.

    verbose : bool, optional
        If True, a progress indicator is printed at every 100th level of tree
        construction.

    Returns
    -------
    T : levelSetTree
        See the LevelSetTree class for attributes and method definitions.

    See Also
    --------
    construct_tree, LevelSetTree

    Examples
    --------
    >>> X = numpy.random.rand(100, 2)
    >>> knn_graph, radii = debacl.utils.knn_graph(X, k=8)
    >>> density = debacl.utils.knn_density(radii, n=100, p=2, k=8)
    >>> tree = debacl.construct_tree_from_graph(knn_graph, density,
    ...                                         prune_threshold=5)
    >>> print(tree)
    +----+-------------+-----------+------------+----------+------+--------+----------+
    | id | start_level | end_level | start_mass | end_mass | size | parent | children |
    +----+-------------+-----------+------------+----------+------+--------+----------+
    | 0  |    0.000    |   0.768   |   0.000    |  0.390   | 100  |  None  |  [1, 2]  |
    | 1  |    0.768    |   1.494   |   0.390    |  0.790   |  30  |   0    |  [7, 8]  |
    | 2  |    0.768    |   4.812   |   0.390    |  1.000   |  31  |   0    |    []    |
    | 7  |    1.494    |   2.375   |   0.790    |  0.950   |  6   |   1    |    []    |
    | 8  |    1.494    |   2.308   |   0.790    |  0.940   |  5   |   1    |    []    |
    +----+-------------+-----------+------------+----------+------+--------+----------+
    """

    ## Initialize the graph and cluster tree
    levels = _utl.define_density_mass_grid(density, num_levels=num_levels)

    G = _nx.from_dict_of_lists(
        {i: neighbors for i, neighbors in enumerate(adjacency_list)})

    T = LevelSetTree(density, levels)

    ## Figure out roots of the tree
    cc0 = _nx.connected_components(G)

    for i, c in enumerate(cc0):  # c is only the vertex list, not the subgraph
        # .copy() makes the stored subgraph mutable; G.subgraph returns a
        # read-only view in NetworkX >= 2.
        T._subgraphs[i] = G.subgraph(c).copy()
        T.nodes[i] = ConnectedComponent(
            i, parent=None, children=[], start_level=0., end_level=None,
            start_mass=0., end_mass=None, members=c)

    # Loop through the removal grid
    previous_level = 0.
    n = float(len(adjacency_list))

    for i, level in enumerate(levels):
        if verbose and i % 100 == 0:
            _logging.info("iteration {}".format(i))

        ## figure out which points to remove, i.e. the background set.
        bg = _np.where((_np.asarray(density) > previous_level) &
                       (_np.asarray(density) <= level))[0]
        previous_level = level

        ## compute the mass after the current bg set is removed
        old_vcount = sum([x.number_of_nodes()
                          for x in T._subgraphs.values()])
        current_mass = 1. - ((old_vcount - len(bg)) / n)

        # loop through active components, i.e. subgraphs
        deactivate_keys = []     # subgraphs to deactivate at the iter end
        activate_subgraphs = {}  # new subgraphs to add at the end of the iter

        for (k, H) in list(T._subgraphs.items()):

            ## remove nodes at the current level
            H.remove_nodes_from(bg)

            ## check if subgraph has vanished
            if H.number_of_nodes() == 0:
                T.nodes[k].end_level = level
                T.nodes[k].end_mass = current_mass
                deactivate_keys.append(k)

            else:  # subgraph hasn't vanished

                ## check if subgraph now has multiple connected components
                # NOTE: this is *the* bottleneck
                if not _nx.is_connected(H):

                    ## deactivate the parent subgraph
                    T.nodes[k].end_level = level
                    T.nodes[k].end_mass = current_mass
                    deactivate_keys.append(k)

                    ## start a new subgraph & node for each child component
                    cc = _nx.connected_components(H)

                    for c in cc:
                        new_key = max(T.nodes.keys()) + 1
                        T.nodes[k].children.append(new_key)
                        activate_subgraphs[new_key] = H.subgraph(c).copy()

                        T.nodes[new_key] = ConnectedComponent(
                            new_key, parent=k, children=[], start_level=level,
                            end_level=None, start_mass=current_mass,
                            end_mass=None, members=c)

        # update active components
        for k in deactivate_keys:
            del T._subgraphs[k]

        T._subgraphs.update(activate_subgraphs)

    ## Prune the tree
    if prune_threshold is not None:
        T = T.prune(threshold=prune_threshold)

    return T
Example #24
    # get D1 Friends, the Rest will be iterable
    # Get the top five reciprocal friends and put them into the list of tuples
    list_of_tuples = []
    list_of_ids = []


    final_graph = {}
    client = pymongo.MongoClient()
    db = client.final
    for val in db.collection_names():
        final_graph[int(val)] = load_from_mongo('final', str(val))[0]['reciprocal_friends']
    final_graph[24551258] = intersection


    # Start creating the graph piece by piece.
    G=nx.from_dict_of_lists(final_graph)
    pos=nx.spring_layout(G) # positions for all nodes
    nx.draw_networkx_nodes(G,pos,node_size=3)
    nx.draw_networkx_edges(G,pos,width=1)
    nx.draw_networkx_labels(G,pos,font_size=2,font_family='sans-serif')
    plt.show()

    
    print("the number of nodes of the network is " + str(G.size()))
    print("The diameter of the network is: " + str(nx.diameter(G)))
    print("The average distance of the network is " + str(nx.center(G)))




Example #25
		else:
			print('path')  # original printed the Polish word 'sciezka' ('path')
		print(q)
		if once:
			return True
	else:
		visited[v] = True
		for x in g[v]:
			if not visited[x]:
				found = hamiltonian_backend(g, x, visited, once)
				if found:
					return True
		visited[v] = False
	q.pop(-1)


def hamiltonian(g,once=False):
	v = [False] * len(g)
	return hamiltonian_backend(g, 1, v,once)



if __name__ == "__main__":
	hamiltonian(graph,once=True)
	G = nx.from_dict_of_lists(graph, create_using=nx.MultiDiGraph())
	nx.draw(G)
	plt.show()