def strong_stable_set(G):
    '''
    checks if G contains a strong stable set
    Parameters:
        G: the graph to check (networkx)
    Returns:
        a subgraph which forms a strong stable set (networkx)
    '''
    clique = graph_clique_number(G)
    result = None
    for stable in stable_set(G):
        g = G.copy()
        for node in stable:
            g.remove_node(node)
        if clique != graph_clique_number(g):
            result = G.subgraph(stable)
            break
    return result
Example #2
def findAnswer():
    sql = "create or replace view vcast as select mid,pid from 12CS10034_Movie Natural Join 12CS10034_M_Cast"
    cur.execute(sql)

    sql = "select distinct A.mid,B.mid from vcast A,vcast B where A.pid = B.pid and A.mid>B.mid"
    cur.execute(sql)

    M = nx.Graph()

    for v1, v2 in cur.fetchall():
        M.add_edge(v1, v2)  # undirected graph: adding (v2, v1) as well would be redundant

    x = nx.graph_clique_number(M)
    print "The size of largest mutually similar group is: ",x
Example #3
File: views.py Project: freyley/nets
def netstats_simple(graph):
    G = graph
    if nx.is_connected(G): 
        d = nx.diameter(G)
        r = nx.radius(G)
    else: 
        d = 'NA - graph is not connected' #should be calculable on an unconnected graph - see example code for a hack
        r = 'NA - graph is not connected'
   
#using a dictionary to pack values and variables
    result = {#"""single value measures"""  
              'nn': G.number_of_nodes(),
              'ne': G.number_of_edges(),
              'd': d,
              'r': r,
              'conn': nx.number_connected_components(G),
              'asp': nx.average_shortest_path_length(G) if nx.is_connected(G) else 'NA - graph is not connected', 
#              """number of the largest clique"""
              'cn': nx.graph_clique_number(G),
#              """number of maximal cliques"""
              'mcn': nx.graph_number_of_cliques(G),
#              """transitivity - """
              'tr': nx.transitivity(G),
              #cc = nx.clustering(G) """clustering coefficient"""
              'avgcc': nx.average_clustering(G) } 
#    result['d'] = nx.diameter(G)
    print result
    return result
Example #4
def is_independent(graph, nodes):
    """Decides whether of not the subgraph of 'graph' induced by nodes in 
    'nodes' is an independent set or not."""
    if len(nodes) == 0:
        return True
    else:
        return graph_clique_number(graph.subgraph(nodes)) == 1
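A minimal usage sketch for the helper above (assuming NetworkX < 3.0, where graph_clique_number is importable as in these examples; the toy graph is illustrative):

from networkx import Graph
from networkx.algorithms.clique import graph_clique_number

G = Graph([(1, 2), (2, 3), (3, 1), (3, 4)])
print(is_independent(G, [1, 4]))  # True: 1 and 4 are not adjacent
print(is_independent(G, [1, 3]))  # False: 1-3 is an edge, so the induced clique number is 2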
Example #5
 def free(self):
     '''
     a method to determine if the graph is ISK4-free
     Parameters:
         None
     Returns:
         sub: list of vertices that form an ISK4;
                 None if the graph does not contain one (list)
     '''
     # check if omega is at least four
     sub = None
     if nx.graph_clique_number(self.g) > 3:
         sub = induced_subgraph(self.g, make_clique(4))
     else:
         # loop through all possible subdivisions between 4 and n
         n = len(self.g.nodes())
         i = 0
         while i < n - 3 and sub is None:
             for ball in unlabeled_balls_in_unlabeled_boxes(i,
                                                            [i]*6):
                 graph = self.create_subdivions(ball)
                 sub = induced_subgraph(self.g, graph)
                 if sub is not None:
                     break
             i += 1
     return sub
Example #6
def compute_singlevalued_measures(ntwk, weighted=True, calculate_cliques=False):
    """
    Returns a single value per network
    """
    iflogger.info("Computing single valued measures:")
    measures = {}
    iflogger.info("...Computing degree assortativity (pearson number) ...")
    try:
        measures["degree_pearsonr"] = nx.degree_pearsonr(ntwk)
    except AttributeError:  # For NetworkX 1.6
        measures["degree_pearsonr"] = nx.degree_pearson_correlation_coefficient(ntwk)
    iflogger.info("...Computing degree assortativity...")
    try:
        measures["degree_assortativity"] = nx.degree_assortativity(ntwk)
    except AttributeError:
        measures["degree_assortativity"] = nx.degree_assortativity_coefficient(ntwk)
    iflogger.info("...Computing transitivity...")
    measures["transitivity"] = nx.transitivity(ntwk)
    iflogger.info("...Computing number of connected_components...")
    measures["number_connected_components"] = nx.number_connected_components(ntwk)
    iflogger.info("...Computing average clustering...")
    measures["average_clustering"] = nx.average_clustering(ntwk)
    if nx.is_connected(ntwk):
        iflogger.info("...Calculating average shortest path length...")
        measures["average_shortest_path_length"] = nx.average_shortest_path_length(ntwk, weighted)
    if calculate_cliques:
        iflogger.info("...Computing graph clique number...")
        measures["graph_clique_number"] = nx.graph_clique_number(ntwk)  # out of memory error
    return measures
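The trailing `# out of memory error` comment flags how costly clique enumeration can get on dense networks. Note also that graph_clique_number was deprecated in NetworkX 2.6 and removed in 3.0; the documented replacement streams maximal cliques one at a time:

import networkx as nx

G = nx.erdos_renyi_graph(100, 0.1, seed=42)
# equivalent to nx.graph_clique_number(G) on older NetworkX releases
print(max(len(c) for c in nx.find_cliques(G)))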
Example #7
def properties_full_graph(full_g, most_central_characters):
    """
	:param full_g: full entity graph
	:param most_important_entities: list of book's main characters
	Retrieves properties of the graphs
	"""
    print('-- FULL ENTITY GRAPH PROPERTIES --')

    # Number of nodes and edges
    print('number of entities: ', full_g.number_of_nodes())
    print('number of interactions: ', full_g.number_of_edges())

    # Connectedness
    print('is connected ?', nx.is_connected(full_g))
    if nx.is_connected(full_g):
        print(nx.diameter(full_g))

    # Degree graph - connectedness
    count = 0
    for edge in full_g.edges(data=True):
        count += edge[2]['weight']
    print('average weighted degree of the graph',
          count / full_g.number_of_edges())

    # Isolated nodes - connectedness
    isolated_nodes = list(nx.isolates(full_g))
    print('number of isolated nodes', len(isolated_nodes))

    # Cliques - connectedness
    print('size largest clique: ', nx.graph_clique_number(full_g))
    cliques = list(nx.find_cliques(full_g))
    max_element = max(cliques, key=len)
    print('largest clique: ', max_element)
    # Visualise clique
    # subgraph_entities = most_central_characters[:50]
    # clique_visu(full_g.subgraph(subgraph_entities), max_element)

    # Clustering
    most_important_entities = list(most_central_characters.keys())
    print('clustering coef', nx.average_clustering(full_g))
    cc = sorted(
        nx.clustering(full_g, nodes=most_important_entities, weight='weight').items(),
        key=lambda kv: kv[1], reverse=True)
    print('characters with highest clustering coef', cc)
    # k core
    k_core = list(nx.k_core(full_g))
    # k_core_visu(full_g, nx.k_core(full_g))
    print('k core', k_core)

    # Distance between first and second most important characters
    m = most_central_characters[most_important_entities[0]]
    s = most_central_characters[most_important_entities[1]]
    dist_importance = (m - s) / m
    if dist_importance > 1 / 3:
        print('one-main-character type novel')
    else:
        print('several main characters type novel')
Example #8
    def induced_width(self, elimination_order):
        """
        Returns the width (integer) of the induced graph formed by running Variable Elimination on the network.
        The width is defined as the number of nodes in the largest clique in the graph minus 1.

        Parameters
        ----------
        elimination_order: list, array like
            List of variables in the order in which they are to be eliminated.

        Examples
        --------
        >>> import numpy as np
        >>> import pandas as pd
        >>> from pgmpy.models import BayesianModel
        >>> from pgmpy.inference import VariableElimination
        >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
        ...                       columns=['A', 'B', 'C', 'D', 'E'])
        >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
        >>> model.fit(values)
        >>> inference = VariableElimination(model)
        >>> inference.induced_width(['C', 'D', 'A', 'B', 'E'])
        3
        """
        induced_graph = self.induced_graph(elimination_order)
        return nx.graph_clique_number(induced_graph) - 1
Example #9
def full_sim_group_size(comm_prop, arms, n_time=1000, alg=discounted_thompson, gamma = 1, comm_init=False, const = True):
    """Full simulation - NOTE: ADD VARIOUS VALUES OF THE PROPENSITY TO SHARE INFORMATION"""
    G = nx.MultiDiGraph()
    A = comm_graph(G)
    A.initialise(arms.n, arms.n, alg, gamma, True, const, comm_prop, comm_init)
    A.select_arm(arms, alg, gamma)
    data = np.empty(0)
    reciprocity = np.empty(0)
    clique_numbers = np.empty(0)
    avg_clustering = np.empty(0)
    for i in range(2,K+1,5):
        H = copy.deepcopy(A)
        network = H.subgraph([j for j in range(i)])
        network.init_info_further(arms.n)
        network.init_sharing_vector(const,comm_prop)
        #print (sim_group_size(network,n_time))
        sim_data, network = sim_group_size(network, arms, n_time, alg, gamma)  # run the simulation once and reuse both outputs
        data = np.append(data, sim_data)
        reciprocity = np.append(reciprocity,reciprocity_weighted_graph(network))
        F = transform_di_weight_simple(network,0.99)
        clique_numbers = np.append(clique_numbers,nx.graph_clique_number(F))
        avg_clustering = np.append(avg_clustering,nx.average_clustering(F))
        #   network.plot_high_sharing(0.1)
        # draw_cliques(F)
    return data, reciprocity, clique_numbers, avg_clustering
Example #10
    def induced_width(self, elimination_order):
        """
        Returns the width (integer) of the induced graph formed by running Variable Elimination on the network.
        The width is defined as the number of nodes in the largest clique in the graph minus 1.

        Parameters
        ----------
        elimination_order: list, array like
            List of variables in the order in which they are to be eliminated.

        Examples
        --------
        >>> import numpy as np
        >>> import pandas as pd
        >>> from pgmpy.models import BayesianModel
        >>> from pgmpy.inference import VariableElimination
        >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
        ...                       columns=['A', 'B', 'C', 'D', 'E'])
        >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
        >>> model.fit(values)
        >>> inference = VariableElimination(model)
        >>> inference.induced_width(['C', 'D', 'A', 'B', 'E'])
        3
        """
        induced_graph = self.induced_graph(elimination_order)
        return nx.graph_clique_number(induced_graph) - 1
Example #11
def is_independent(graph, nodes):
    """Decides whether of not the subgraph of 'graph' induced by nodes in 
    'nodes' is an independent set or not."""
    if len(nodes) == 0:
        return True
    else:
        return graph_clique_number(graph.subgraph(nodes)) == 1
Example #12
File: Cliques.py Project: rvf0068/vgraph
def Cliques(P, tipo, ruta):
    RUTA = ruta + '/NetWX/files/'
    path = Path(RUTA)
    path.mkdir(parents=True, exist_ok=True)

    graph_clique_num = []
    graph_number_of_cliqs = []
    graph_find_cliques = []
    for i in range(len(P)):
        graph_clique_num.append(graph_clique_number(P[i]))
        graph_number_of_cliqs.append(graph_number_of_cliques(P[i]))
        graph_find_cliques.append(list(find_cliques(P[i])))  # materialize the generator before building a DataFrame
    graph_clique_num = DataFrame(graph_clique_num)
    graph_number_of_cliqs = DataFrame(graph_number_of_cliqs)
    graph_find_cliques = DataFrame(graph_find_cliques)
    graph_clique_num.to_csv(RUTA + tipo + " - clique num.txt",
                            sep='\t',
                            header=None,
                            index=False)
    graph_number_of_cliqs.to_csv(RUTA + tipo + " - number of maxcliques.txt",
                                 sep='\t',
                                 header=None,
                                 index=False)
    graph_find_cliques.to_csv(RUTA + tipo + " - find cliques.txt",
                              sep='\t',
                              header=None,
                              index=False)
Example #13
def calculate_networks_indicators(graph):
    """计算基本网络指标"""
    degree_centrality = nx.degree_centrality(graph)
    nodes = list(degree_centrality.keys())
    betweenness_centrality = nx.betweenness_centrality(graph, weight='weight')
    network_indicators = pd.DataFrame({
        'nodes':
        nodes,
        'degree_centrality': [degree_centrality[node] for node in nodes],
        'betweenness_centrality':
        [betweenness_centrality[node] for node in nodes]
    })

    network_indicators['local_reaching_centrality'] = [
        nx.local_reaching_centrality(graph, node, weight='weight')
        for node in nodes
    ]
    constraint = nx.constraint(graph, weight='weight')
    network_indicators['constraint'] = [constraint[node] for node in nodes]
    effective_size = nx.effective_size(graph, weight='weight')
    network_indicators['effective_size'] = [
        effective_size[node] for node in nodes
    ]
    triangles = nx.triangles(graph)
    network_indicators['triangles'] = [triangles[node] for node in nodes]
    clustering = nx.clustering(graph, weight='weight')
    network_indicators['clustering'] = [clustering[node] for node in nodes]

    weight_dict = {
        item[0]: item[1]
        for item in nx.degree(graph, weight='weight')
    }
    degree_dict = {item[0]: item[1] for item in nx.degree(graph)}
    average_weight_dict = {
        weight_key:
        (weight_dict[weight_key] /
         degree_dict[weight_key] if degree_dict[weight_key] != 0 else 0)
        for weight_key in weight_dict.keys()
    }
    network_indicators['tie_strength'] = [
        average_weight_dict[node] for node in nodes
    ]
    network_indicators['number_of_node'] = nx.number_of_nodes(graph)
    network_indicators['density'] = nx.density(graph)
    cliques = nx.graph_clique_number(graph)
    if cliques >= 3:
        network_indicators['cliques'] = cliques
    else:
        network_indicators['cliques'] = 0
    network_indicators['efficiency'] = nx.global_efficiency(graph)
    network_indicators['isolates'] = nx.number_of_isolates(graph)

    network_indicators = network_indicators[[
        'nodes', 'degree_centrality', 'betweenness_centrality',
        'local_reaching_centrality', 'constraint', 'effective_size',
        'triangles', 'clustering', 'tie_strength', 'number_of_node', 'density',
        'cliques', 'efficiency', 'isolates'
    ]]
    return network_indicators
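A small, self-contained driver for the function above (the toy graph, node names, and weights are illustrative, not from the source project):

import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([('a', 'b', 1.0), ('b', 'c', 2.0), ('a', 'c', 1.5), ('c', 'd', 0.5)])
indicators = calculate_networks_indicators(G)
# the triangle {a, b, c} gives a clique number of 3, so 'cliques' is reported as 3
print(indicators[['nodes', 'degree_centrality', 'tie_strength', 'cliques']])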
Example #14
def colour_forcing_sets_disjoint(patterns, colour, fraction, numColors,
                                 outputfile):
    # input:
    #       list of patterns
    #       colour
    #       fraction: will return maximal cliques of size at least fraction*cliquenumber
    #                 e.g. if fraction=0 returns all maximal cliques
    #                 if fraction=1 returns only maximum cliques
    #       numColors = number of colors in ramsey problem
    #       outputfile = write all output there
    # output:
    #       list of maximal sets of patterns such that any two force the colour
    #       list is of the form [l_1,l_2,...] where l_i is a list of indices
    #       of array "patterns" any two of which force the colour

    # Generate graph of forcing pattern pairs
    npatterns = len(patterns)
    edgelist = []
    for i in range(npatterns):
        for j in range(i, npatterns):
            if isforcing(patterns[i], patterns[j], colour, numColors):
                edgelist.append((i, j))

    G = nx.Graph(edgelist)
    initialcliquenumber = nx.graph_clique_number(G)

    disjointcliqueslist = []

    while nx.number_of_edges(G) > 0:
        cliques = nx.find_cliques(G)
        cliquenumber = nx.graph_clique_number(G)
        if cliquenumber < fraction * initialcliquenumber:
            return disjointcliqueslist
        else:
            while True:
                currentclique = cliques.next()
                if len(currentclique) == cliquenumber:
                    break
            disjointcliqueslist.append(currentclique)
            vertices = G.nodes()
            G = G.subgraph([x for x in vertices if x not in currentclique])
    print 'There will be ' + str(
        len(disjointcliqueslist)) + ' 4.12 constraints'
    outputfile.write('There will be ' + str(len(disjointcliqueslist)) +
                     ' 4.12 constraints\n')
    return disjointcliqueslist
Example #15
def independence_number(graph):
    """
    Compute the independence number of 'graph'.
    """
    if graph.number_of_nodes() == 0:
        return 0
    else:
        return graph_clique_number(complement(graph))
Example #16
def independence_number(graph):
    """
    Compute the independence number of 'graph'.
    """
    if graph.number_of_nodes() == 0:
        return 0
    else:
        return graph_clique_number(complement(graph))
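Both copies rely on the identity that the independence number of G equals the clique number of the complement of G: an independent set in G is a clique in the complement. A quick check on a path graph:

import networkx as nx

G = nx.path_graph(4)  # 0-1-2-3; the largest independent set is {0, 2} or {1, 3}
print(nx.graph_clique_number(nx.complement(G)))  # 2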
Example #17
File: helper.py Project: wenq89/Chainoba
def calculateclique(network) -> int:
    '''
    Return the clique number (size of the largest clique) for G.
    '''
    try:
        n = nx.graph_clique_number(network)
    except Exception:
        n = 0
    return n
Example #18
def strong_stable_set(G):
    '''
    checks if G contains a strong stable set
    Parameters:
        G: the graph to check (networkx)
    Returns:
        a subgraph which forms a strong stable set (networkx)
    '''
    clique = graph_clique_number(G)
    result = None
    for stable in stable_set(G):
        g = G.copy()
        for node in stable:
            g.remove_node(node)
        if clique != graph_clique_number(g):
            result = G.subgraph(stable)
            break
    return result
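stable_set is a generator from the source project and is not shown here; below is a self-contained sketch of just the membership test inside the loop, with a hand-picked stable set:

import networkx as nx

G = nx.cycle_graph(5)  # C5: every edge is a maximum clique, omega = 2
S = [0, 2]             # a stable set, but it misses the edge (3, 4)
H = G.copy()
H.remove_nodes_from(S)
# clique number unchanged => some maximum clique survived => S is not strong
print(nx.graph_clique_number(G), nx.graph_clique_number(H))  # 2 2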
Example #19
def calc_and_compare(name, extension):

    path_name = PATH.format(name, extension)

    result = len(main(path_name, workers_num))
    
    nx_result = NX.graph_clique_number(create_graph_from_file(path_name))
    
    return result == nx_result
Example #20
def query1():
	cursor.execute('SELECT P1.MID,P2.MID FROM 12CS30032_M_Cast as P1 join 12CS30032_M_Cast as P2 using (PID) where P1.MID!=P2.MID')
	result=cursor.fetchall()

	G0.add_edges_from(result)

	res=nx.graph_clique_number(G0)

	print res
	return
Example #21
 def print_statistics(self):
     message("-------------------------------------------")
     message("-- Graph Statistics -----------------------")
     message()
     message("   isolates:", nx.number_of_isolates(self.graph))
     message("    density:", nx.density(self.graph))
     message("    bridges:", len(list(nx.bridges(self.graph))))
     message("    cliques:", nx.graph_clique_number(self.graph))
     message(" conn-comps:", nx.number_connected_components(self.graph))
     message()
     message("-------------------------------------------")
Example #22
def coloring_aux(G, options, logger=None):
    """Returns a coloring_aux of G and number of steps

    Parameters:
        G: the networkx graph
        options: a dictionary of options for forward checking,
                 ordering(mcv & lcv) and approx to chromatic number
        logger: the logger to use
    Returns
        (color, steps): a colored networkx graph and # of steps
    """
    if logger is None:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s %(message)s')
        logger = logging.getLogger(__name__)
    if 'approx' in options.keys():
        chromatic = options['approx'](G)
    else:
        # default is to use the clique number as a lower bound
        chromatic = nx.graph_clique_number(G)
    if 'forward_check' in options.keys():
        forward = options['forward_check']
    else:
        # default is no forward checking
        forward = always_forward
    if 'next_node' in options.keys():
        next_node = options['next_node']
    else:
        # default is to get first available node
        next_node = get_first_node
    if 'get_colors' in options.keys():
        get_colors = options['get_colors']
    else:
        # default is a function that gets
        # all available colors not shared by neighbors
        get_colors = available_colors
    color = backtrack(copy_graph(G),
                      chromatic,
                      forward,
                      next_node,
                      get_colors,
                      logger)
    while color is None:
        chromatic += 1
        color = backtrack(copy_graph(G),
                          chromatic,
                          forward,
                          next_node,
                          get_colors,
                          logger)
    global COUNT
    steps = COUNT
    COUNT = 0
    return (color, steps)
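The default starting point works because the clique number is a lower bound on the chromatic number. A standalone sketch of that sandwich, using NetworkX's built-in greedy colorer in place of the project's backtrack helper:

import networkx as nx

G = nx.petersen_graph()
lower = nx.graph_clique_number(G)  # omega(G) <= chi(G)
coloring = nx.greedy_color(G, strategy='largest_first')
upper = len(set(coloring.values()))  # chi(G) <= colors used by greedy
print(lower, upper)  # 2 3 here; the Petersen graph's chromatic number is 3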
Example #23
def main():
    # get list of connected movies from table of cast
    # 2 movies are connected if they have at least one common cast member
    data = get_tables()
    # generate empty graph
    graph = networkx.Graph()
    # add edges to graph
    for row in data:
        graph.add_edge(row[0], row[1])
    # print the size of the maximum clique (exact, so potentially slow:
    # finding a maximum clique is an NP-hard problem)
    print "Maximum clique number : " + str(networkx.graph_clique_number(graph))
Example #24
def spatial_major(in_folder, cliqueThresh):
    for file in os.listdir(in_folder):
        if file != '.DS_Store':  #weird MAC thing
            path = in_folder + file
            G = getJsonNet(path)
            G = nx.Graph(G)  #can't do cliques on directed nets
            if G.order() > 0:
                n = nx.graph_clique_number(G)
                if n > cliqueThresh:
                    print file
                    for c in nx.k_clique_communities(G, n):
                        print list(c)
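nx.k_clique_communities (clique percolation) moved to networkx.algorithms.community in NetworkX 2.x; a minimal standalone run of the same call, on an illustrative graph:

import networkx as nx
from networkx.algorithms.community import k_clique_communities

G = nx.complete_graph(5)
G.add_edges_from([(4, 5), (5, 6), (6, 4)])  # a triangle hanging off node 4
print([sorted(c) for c in k_clique_communities(G, 3)])
# e.g. [[0, 1, 2, 3, 4], [4, 5, 6]]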
Example #25
def graphAnalysis(graph, top_number, save_file_path):
    """
        Do the essential analysis to the final combined graph
    """
    with io.open(save_file_path, 'w') as save_file:

        # centrality
        # degree centrality
        deg_central = nx.degree_centrality(graph)
        deg_central_sort = sorted(deg_central.items(), key = lambda x: x[1], reverse = True)
        top_deg_central_sort = deg_central_sort[:top_number]
        save_file.write('top %d degree centrality items,' % top_number)
        save_file.write(','.join('%s %s' % x for x in top_deg_central_sort))

        # clustering

        # number of triangles: triangles() is not defined for directed graphs
        triangle_num = nx.triangles(graph)
        triangle_num_sort = sorted(triangle_num.items(), key = lambda x: x[1], reverse = True)
        top_triangle_num_sort = triangle_num_sort[:top_number]
        save_file.write('\ntop %d number of triangles including a node as one vertex,' % top_number)
        save_file.write(','.join('%s %s' % x for x in top_triangle_num_sort))

        # clustering coefficient of node in the graph
        cluster_coefficient = nx.clustering(graph)
        cluster_coefficient_sort = sorted(cluster_coefficient.items(), key = lambda x: x[1], reverse = True)
        top_cluster_coefficient_sort = cluster_coefficient_sort[:top_number]
        save_file.write('\ntop %d clustering coefficient items,' % top_number)
        save_file.write(','.join('%s %s' % x for x in top_cluster_coefficient_sort))

        # transitivity of the graph
        triangle_transitivity = nx.transitivity(graph)
        save_file.write('\ntransitivity of the graph,%f' % triangle_transitivity)

        # average clustering coefficient of the graph
        avg_cluster = nx.average_clustering(graph)
        save_file.write('\naverage clustering coefficient of the graph,%f' % avg_cluster)

        # clique
        # size of the largest clique in the graph
        size_largest_clique = nx.graph_clique_number(graph)
        save_file.write('\nsize of the largest clique in the graph,%d' % size_largest_clique)
        
        # all the cliques in the graph
        
        all_clique = nx.find_cliques(graph) # a generator
        list_all_clique = list(all_clique)
        list_all_clique_sort = sorted(list_all_clique, key = lambda x: len(x), reverse = True)
        list_all_clique_sort = [' '.join(clique) for clique in list_all_clique_sort]
        # print list_all_clique_sort
        save_file.write('\ncliques,')
        save_file.write(','.join(x for x in list_all_clique_sort))
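A tiny driver for the three clique calls used above, on a graph small enough to verify by hand:

import networkx as nx

G = nx.Graph([('a', 'b'), ('b', 'c'), ('a', 'c'), ('c', 'd')])
print(nx.graph_clique_number(G))      # 3: the triangle {a, b, c}
print(nx.graph_number_of_cliques(G))  # 2 maximal cliques: {a, b, c} and {c, d}
print(sorted(nx.find_cliques(G), key=len, reverse=True))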
Example #26
def details(graph):
    cliques_number = nx.graph_clique_number(graph)
    print("Number of cliques in the graph %d:" % cliques_number)
    clique = approximation.max_clique(graph)
    print("Maximum clique of the graph is: %.2d" % len(clique))
    print(clique)
    shortest_path = nx.shortest_path(graph)
    print("Shortest path: ", shortest_path)
    try:
        diameter = nx.diameter(graph)
        print("Diameter of the graph: %d" % diameter)
    except nx.NetworkXError:
        print("Graph is not connected!")
Example #27
def make_clique_number_dataset():

    min_clique = 1
    max_clique = 40
    number_cliques = max_clique - min_clique + 1
    graphs = [[] for _ in range(number_cliques)]

    max_nodes_per_graph = 60
    number_graphs_per_clique = 50
    total_number_graphs = number_cliques * number_graphs_per_clique

    found_all_graphs = False
    no_graphs_found = 0

    t = tqdm(desc="Number of graphs found", total=total_number_graphs)
    while not found_all_graphs:
        n = np.random.randint(5, max_nodes_per_graph)
        m = np.random.randint(0, int(n * (n - 1) * 0.5))
        G = nx.gnm_random_graph(n, m)

        clique_number = nx.graph_clique_number(G)

        if clique_number <= max_clique and len(
                graphs[clique_number -
                       min_clique]) < number_graphs_per_clique:
            graphs[clique_number - min_clique].append(G)
            no_graphs_found += 1
            t.update(1)

            if no_graphs_found == total_number_graphs:
                found_all_graphs = True

    out_dir = "datasets/"

    # Make output directory if necessary and export dataset
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    outfile = os.path.join(out_dir, 'clique_number_dataset')
    out_graphs = []
    for i, graphlist in enumerate(graphs):
        for graph in graphlist:
            # assign label to each graph
            graph.graph['label'] = i + min_clique
            out_graphs.append(graph)
    export_dataset(outfile,
                   out_graphs,
                   has_g_labels=True,
                   has_n_labels=False,
                   has_n_attributes=False)
Example #28
def compute_singlevalued_measures(ntwk,
                                  weighted=True,
                                  calculate_cliques=False):
    """
    Returns a single value per network
    """
    iflogger.info("Computing single valued measures:")
    measures = {}
    iflogger.info("...Computing degree assortativity (pearson number) ...")
    try:
        measures["degree_pearsonr"] = nx.degree_pearsonr(ntwk)
    except AttributeError:  # For NetworkX 1.6
        measures[
            "degree_pearsonr"] = nx.degree_pearson_correlation_coefficient(
                ntwk)
    iflogger.info("...Computing degree assortativity...")
    try:
        measures["degree_assortativity"] = nx.degree_assortativity(ntwk)
    except AttributeError:
        measures["degree_assortativity"] = nx.degree_assortativity_coefficient(
            ntwk)
    iflogger.info("...Computing transitivity...")
    measures["transitivity"] = nx.transitivity(ntwk)
    iflogger.info("...Computing number of connected_components...")
    measures["number_connected_components"] = nx.number_connected_components(
        ntwk)
    iflogger.info("...Computing graph density...")
    measures["graph_density"] = nx.density(ntwk)
    iflogger.info("...Recording number of edges...")
    measures["number_of_edges"] = nx.number_of_edges(ntwk)
    iflogger.info("...Recording number of nodes...")
    measures["number_of_nodes"] = nx.number_of_nodes(ntwk)
    iflogger.info("...Computing average clustering...")
    measures["average_clustering"] = nx.average_clustering(ntwk)
    if nx.is_connected(ntwk):
        iflogger.info("...Calculating average shortest path length...")
        measures[
            "average_shortest_path_length"] = nx.average_shortest_path_length(
                ntwk, weighted)
    else:
        iflogger.info("...Calculating average shortest path length...")
        measures[
            "average_shortest_path_length"] = nx.average_shortest_path_length(
                nx.connected_component_subgraphs(ntwk)[0], weighted)
    if calculate_cliques:
        iflogger.info("...Computing graph clique number...")
        measures["graph_clique_number"] = nx.graph_clique_number(
            ntwk)  # out of memory error
    return measures
Example #29
File: nx.py Project: chrisfilo/nipype
def compute_singlevalued_measures(ntwk, weighted=True,
                                  calculate_cliques=False):
    """
    Returns a single value per network
    """
    iflogger.info('Computing single valued measures:')
    measures = {}
    iflogger.info('...Computing degree assortativity (pearson number) ...')
    try:
        measures['degree_pearsonr'] = nx.degree_pearsonr(ntwk)
    except AttributeError:  # For NetworkX 1.6
        measures[
            'degree_pearsonr'] = nx.degree_pearson_correlation_coefficient(
                ntwk)
    iflogger.info('...Computing degree assortativity...')
    try:
        measures['degree_assortativity'] = nx.degree_assortativity(ntwk)
    except AttributeError:
        measures['degree_assortativity'] = nx.degree_assortativity_coefficient(
            ntwk)
    iflogger.info('...Computing transitivity...')
    measures['transitivity'] = nx.transitivity(ntwk)
    iflogger.info('...Computing number of connected_components...')
    measures['number_connected_components'] = nx.number_connected_components(
        ntwk)
    iflogger.info('...Computing graph density...')
    measures['graph_density'] = nx.density(ntwk)
    iflogger.info('...Recording number of edges...')
    measures['number_of_edges'] = nx.number_of_edges(ntwk)
    iflogger.info('...Recording number of nodes...')
    measures['number_of_nodes'] = nx.number_of_nodes(ntwk)
    iflogger.info('...Computing average clustering...')
    measures['average_clustering'] = nx.average_clustering(ntwk)
    if nx.is_connected(ntwk):
        iflogger.info('...Calculating average shortest path length...')
        measures[
            'average_shortest_path_length'] = nx.average_shortest_path_length(
                ntwk, weighted)
    else:
        iflogger.info('...Calculating average shortest path length...')
        measures[
            'average_shortest_path_length'] = nx.average_shortest_path_length(
                nx.connected_component_subgraphs(ntwk)[0], weighted)
    if calculate_cliques:
        iflogger.info('...Computing graph clique number...')
        measures['graph_clique_number'] = nx.graph_clique_number(
            ntwk)  # out of memory error
    return measures
Example #30
def obtenerValores(dirigido, noDirigido):
    # local variables
    datos = []
    m = 0
    c = 0
    dm = 0
    com = 0
    # 1; order - both
    #print("order")
    datos.append(str(dirigido.order()))
    # 2; size - directed
    #print("size")
    datos.append(str(dirigido.size()))
    # 3; density - directed
    #print("density")
    datos.append(str(nx.density(dirigido)))
    # 4; average degree - directed
    #print("average degree")
    datos.append(str((dirigido.size()) / (dirigido.order())))
    # 5; diameter - undirected
    #print("diameter")
    datos.append(str(nx.diameter(noDirigido)))
    # 6; radius - undirected
    #print("radius")
    datos.append(str(nx.radius(noDirigido)))
    # 7; size of the largest clique - undirected
    #print("largest clique")
    datos.append(str(nx.graph_clique_number(noDirigido)))
    # 8; number of maximal cliques - undirected
    #print("maximal cliques")
    datos.append(str(nx.graph_number_of_cliques(noDirigido)))
    # 9; global reaching centrality - directed
    #print("reachability")
    datos.append(str(nx.global_reaching_centrality(dirigido)))
    # 10; clustering coefficient - directed
    #print("clustering")
    datos.append(str(nx.average_clustering(dirigido)))
    # 11; transitivity - directed
    #print("transitivity")
    datos.append(str(nx.transitivity(dirigido)))
    # 12; 13; 14; MODC data: modularity, minimum dependency, total communities - undirected
    #print("MODC")
    (m, dm, com) = MODC(noDirigido, True)
    datos.append(str(m))
    datos.append(str(dm))
    datos.append(str(com))
    # end of function
    return (datos)
Example #31
 def _graph(self, graph):
     """Generate graph-based attributes."""
     graph_attr = pd.DataFrame()
     graph_attr['number_of_nodes'] = [nx.number_of_nodes(graph)]
     graph_attr['number_of_edges'] = [nx.number_of_edges(graph)]
     graph_attr['number_of_selfloops'] = [nx.number_of_selfloops(graph)]
     graph_attr['graph_number_of_cliques'] = [
         nx.graph_number_of_cliques(graph)
     ]
     graph_attr['graph_clique_number'] = [nx.graph_clique_number(graph)]
     graph_attr['density'] = [nx.density(graph)]
     graph_attr['transitivity'] = [nx.transitivity(graph)]
     graph_attr['average_clustering'] = [nx.average_clustering(graph)]
     graph_attr['radius'] = [nx.radius(graph)]
     graph_attr['is_tree'] = [1 if nx.is_tree(graph) else 0]
     graph_attr['wiener_index'] = [nx.wiener_index(graph)]
     return graph_attr
Example #32
def setup(g, num_players, num_seeds):
    #first compute the best partition
    partition = community.best_partition(g)
    induced_graph = community.induced_graph(partition, g)

    # Play around with picking the "best" community
    # node boundary?
    #print nx.current_flow_closeness_centrality(induced_graph) # not better
    # print nx.katz_centrality(induced_graph) # doesn't converge
    #print nx.eigenvector_centrality(induced_graph) # not as good
    #print nx.communicability_centrality(induced_graph) # not as good
    #{0: 8.451771641899612, 1: 9.041654401534407, 2: 9.321830560246685, 3: 8.79634625159723, 4: 7.512000387517644, 5: 9.319261339431147, 6: 8.635502364748598, 7: 9.182167514276696, 8: 8.812816793986622, 9: 5.955242238035001, 10: 7.224124906314186, 11: 8.598864555204745, 12: 1.3780813983087927, 13: 8.574141188778002, 14: 1.4894068385674029}
    #{0: 0.03170498456257798, 1: 0.03351885293616147, 2: 0.982004394865475, 3: 0.009750044520081363, 4: 0.012642119637055598, 5: 0.08211419419246402, 6: 0.013202397926046897, 7: 0.15814666928657686, 8: 0.026268239793024895, 9: 0.0005523351650465954, 10: 0.0009839216844465231, 11: 0.019821817113884598, 12: 4.399697547690089e-05, 13: 0.016495461620553098, 14: 0.00022120782918811697}
    #{0: 1670.2226290285078, 1: 3648.298186716118, 2: 4153.05229512053, 3: 3214.282455755265, 4: 561.0349179323383, 5: 4068.320908838754, 6: 2977.2760610270666, 7: 3474.488922208751, 8: 3493.8811964338947, 9: 1521.5720949300896, 10: 2520.2823105797784, 11: 1385.0884502097147, 12: 281.6674672972596, 13: 2306.8136315883607, 14: 358.98207498678886}

    # viewer.draw_graph(induced_graph)
    # try:
    #     plt.show()
    # except:
    #     plt.hide()

    # Choose the community with the most number of outgoing edges
    #weights = nx.communicability_centrality(induced_graph) #weight='weight'
    weights = nx.degree(induced_graph, weight='weight')
    #print weights

    best_com = max(weights, key=weights.__getitem__)

    com = defaultdict(list)
    for node, c in partition.iteritems():
        com[c].append(node)

    selected_comm = g.subgraph(com[best_com])

    # get one node from every clique
    #print selected_comm.number_of_nodes()
    max_size_clique = nx.graph_clique_number(selected_comm)
    print max_size_clique
    lst = []
    for cl in nx.find_cliques(selected_comm):
        if len(cl) >= max_size_clique / 2:
            lst.append(r.choice(cl))
            #return cl
            #print len(cl), cl
    return lst
Example #33
    def clique(self):
        import config
        graphs = config.graph
        file = open('clique.txt', 'w')
        clique_string = [
            "This txt file will show you the finding of cliques in your network.\n\n"
            +
            "Description : In complex network, a clique is a maximal subset of the vertices or nodes in an undirected network such that every member\n"
            +
            "of the set is connected by an edge or link to every other node." +
            "The meaning of 'maximal' here means there is no other vertex or node in the network that can be added to the subset while keeping or preserving\n"
            +
            "the property that every vertex or node is connected to every other.\n"
        ]
        file.write(clique_string[0])

        max_clique = list(nx.make_max_clique_graph(graphs))
        max_clique_str = str(max_clique)
        max_clique_string = [
            "Maximal Cliques:\n-The maximal cliques and treats these cliques as nodes.\n -These nodes in a [] are connected if they have common members in the original graph.\n"
            + "-" + max_clique_str + '\n'
        ]
        file.write(max_clique_string[0])

        all_maximal_cliques = str(list(nx.find_cliques(graphs)))
        all_maximal_cliques_string = [
            "Cliques:\n-The possible cliques in the network.\n" + "-" +
            all_maximal_cliques + '\n'
        ]
        file.write(all_maximal_cliques_string[0])

        number_of_maximum_clique = str(nx.graph_number_of_cliques(graphs))
        number_of_node_in_largest_clique = str(nx.graph_clique_number(graphs))
        clique_number_string = [
            "Basic statistic of cliques in network:\n-The (largest) number of cliques in the network:"
            + number_of_maximum_clique + "\n" +
            "-The number of nodes in the largest clique in the network:" +
            number_of_node_in_largest_clique
        ]
        file.write(clique_number_string[0])

        file.close()  # this must add or only display a empty txt
        import os
        os.system("notepad.exe clique.txt")
Example #34
def test_graph(G):

    start = time.time()
    res = main(G)
    end = time.time()

    print("Result: {}, size: {}".format(res, len(res)))

    delta_own = end - start

    start = time.time()
    nx_clique = NX.graph_clique_number(G)
    end = time.time()

    print("Size NX: {}".format(nx_clique))

    delta_nx = end - start

    print("Time - Own: {}, NX: {}".format(delta_own, delta_nx))
Example #35
def colour_forcing_sets(patterns, colour, fraction, numColors, outputfile):
    # input:
    #       list of patterns
    #       colour
    #       fraction: will return maximal cliques of size at least fraction*cliquenumber
    #                 e.g. if fraction=0 returns all maximal cliques
    #                 if fraction=1 returns only maximum cliques
    #       numColors = number of colors in ramsey problem
    #       outputfile = write all output there
    # output:
    #       list of maximal sets of patterns such that any two force the colour
    #       list is of the form [l_1,l_2,...] where l_i is a list of indices
    #       of array "patterns" any two of which force the colour

    # WARNING: Might have high-ish space complexity!

    # Generate graph of forcing pattern pairs
    npatterns = len(patterns)
    edgelist = []
    for i in range(npatterns):
        for j in range(i, npatterns):
            if isforcing(patterns[i], patterns[j], colour, numColors):
                edgelist.append((i, j))

    G = nx.Graph()
    G.add_edges_from(edgelist)

    # draw graph G
    # nx.draw_networkx(G)

    cliqueslist = list(nx.find_cliques(G))
    cliquenumber = nx.graph_clique_number(G, cliqueslist)

    trimmedcliqueslist = filter(lambda x: len(x) >= cliquenumber * fraction,
                                cliqueslist)

    # find maximal cliques
    print 'There will be ' + str(len(
        trimmedcliqueslist)) + ' 4.12 constraints for colour ' + str(colour)
    outputfile.write('There will be ' + str(len(trimmedcliqueslist)) +
                     ' 4.12 constraints for colour ' + str(colour) + '\n')
    return trimmedcliqueslist
Example #36
File: file.py Project: fras2560/research
 def __init__(self, directory, G=None, logger=None, file=None, base=None):
     """
     G: a networkx graph (networkx)
     directory: the filepath to the directory (filepath)
     logger: the logger to log information (logging)
     file: the file of the graph to load
     """
     self.directory = directory
     if G is None and file is None:
         raise Exception("File not initialized properly")
     if G is not None:
         self.G = G
     else:
         self.G = self.load(file)
     self.clique = nx.graph_clique_number(self.G)
     if logger is None:
         logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(message)s")
         self.logger = logging.getLogger(__name__)
     else:
         self.logger = logger
     self.base = base
Example #37
 def _graph(self):
     """Generate graph-based attributes."""
     self.graph_attr['number_of_nodes'] = [nx.number_of_nodes(self.graph)]
     self.graph_attr['number_of_edges'] = [nx.number_of_edges(self.graph)]
     self.graph_attr['number_of_selfloops'] = [
         nx.number_of_selfloops(self.graph)
     ]
     self.graph_attr['graph_number_of_cliques'] = [
         nx.graph_number_of_cliques(self.graph)
     ]
     self.graph_attr['graph_clique_number'] = [
         nx.graph_clique_number(self.graph)
     ]
     self.graph_attr['density'] = [nx.density(self.graph)]
     self.graph_attr['transitivity'] = [nx.transitivity(self.graph)]
     self.graph_attr['average_clustering'] = [
         nx.average_clustering(self.graph)
     ]
     self.graph_attr['radius'] = [nx.radius(self.graph)]
     self.graph_attr['is_tree'] = [1 if nx.is_tree(self.graph) else 0]
     self.graph_attr['wiener_index'] = [nx.wiener_index(self.graph)]
     return self.graph_attr
Example #38
File: nx.py Project: ofenlab/nipype
def compute_singlevalued_measures(ntwk,
                                  weighted=True,
                                  calculate_cliques=False):
    """
    Returns a single value per network
    """
    iflogger.info('Computing single valued measures:')
    measures = {}
    iflogger.info('...Computing degree assortativity (pearson number) ...')
    try:
        measures['degree_pearsonr'] = nx.degree_pearsonr(ntwk)
    except AttributeError:  # For NetworkX 1.6
        measures[
            'degree_pearsonr'] = nx.degree_pearson_correlation_coefficient(
                ntwk)
    iflogger.info('...Computing degree assortativity...')
    try:
        measures['degree_assortativity'] = nx.degree_assortativity(ntwk)
    except AttributeError:
        measures['degree_assortativity'] = nx.degree_assortativity_coefficient(
            ntwk)
    iflogger.info('...Computing transitivity...')
    measures['transitivity'] = nx.transitivity(ntwk)
    iflogger.info('...Computing number of connected_components...')
    measures['number_connected_components'] = nx.number_connected_components(
        ntwk)
    iflogger.info('...Computing average clustering...')
    measures['average_clustering'] = nx.average_clustering(ntwk)
    if nx.is_connected(ntwk):
        iflogger.info('...Calculating average shortest path length...')
        measures[
            'average_shortest_path_length'] = nx.average_shortest_path_length(
                ntwk, weighted)
    if calculate_cliques:
        iflogger.info('...Computing graph clique number...')
        measures['graph_clique_number'] = nx.graph_clique_number(
            ntwk)  #out of memory error
    return measures
Example #39
File: theory.py Project: sboosali/PGM
def treewidth(graph):
    """
    treewidth = min max-elimination-clique
    """

    print 'treewidth( %s )' % graph
    
    tw = +inf
    G = deepcopy(graph)
    nodes = deepcopy(graph.node.keys())

    _G = deepcopy(G)
    _tw = 0
    for elimination_ordering in itertools.permutations(nodes):
        
        for node in elimination_ordering:
            elim(_G, node)
            if _G: _tw = max(_tw, nx.graph_clique_number(_G))

        tw = min(_tw, tw)
        _G = deepcopy(G)
        _tw = 0

    return tw
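As a cross-check on small graphs, newer NetworkX ships heuristic treewidth upper bounds; note they report the standard width (largest bag size minus one), so they sit one below the max-elimination-clique convention used above:

import networkx as nx
from networkx.algorithms.approximation import treewidth_min_degree

G = nx.cycle_graph(5)
width, decomposition = treewidth_min_degree(G)  # heuristic upper bound on treewidth
print(width)  # 2 for a cycle; the function above would report 3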
Example #40
				con.commit()
				cur.execute("create view view1 as (select distinct PID from vcast250 where MID = '" + movie + "'); ")
				con.commit()
				cur.execute("select MID from vcast250 where PID in (select PID from view1) and MID != '" + movie + "';")
				con.commit()
				rows = cur.fetchall()
				l = []
				for row in rows:
					t = (movie,row[0])
					l.append(t)
				G.add_edges_from(l)
				cur.execute("drop view if exists view1")
				con.commit()
				del l

			print nx.graph_clique_number(G)
			for actor in actor_list:
				cur.execute("drop view if exists view2;")
				con.commit()
				cur.execute("create view view2 as (select distinct MID from vcast250 where PID = '" + actor + "'); ")
				con.commit()
				cur.execute("select distinct PID from vcast250 where MID in (select MID from view2) and PID != '" + actor + "';")
				con.commit()
				rows = cur.fetchall()
				l = []
				for row in rows:
					t = (actor,row[0])
					l.append(t)
				Gact.add_edges_from(l)
				cur.execute("drop view if exists view2;")
				con.commit()
Example #41
def k_max_clique(g, **kwargs):
    return nx.graph_clique_number(g)
Example #42
 def test_clique_number2(self):
     G = nx.Graph()
     G.add_nodes_from([1, 2, 3])
     assert_equal(nx.graph_clique_number(G), 1)
Example #43
def size_of_largest_clique(G):
    return nx.graph_clique_number(G)
Example #44
#         colors_to_select.remove(color)
#         colors_of_edges.append((color))
#         nodes_color_alpha.append(0.4)
#         edges_color_alpha.append(0.6)
#         edge_width_l.append(4.0)
# lvl2=[]
# for i in range(nx.graph_number_of_cliques(G)):
#     lvl2.append(graphs_len[i])


print "Η λίστα των μεγεθών των κλικών είναι:"
# print 'The list of clique sizes is:'
print lvl2
print str(" ")

print "Ο αριθμός κλίκας (το μέγεθος της μεγαλύτερης κλίκας) του G είναι:", nx.graph_clique_number(G)
# print 'The clique number (size of the largest clique) for G is:', nx.graph_clique_number(G)
# print sorted(nx.connected_components(G), key = len, reverse=True)
print str(" ")

print "Το λεξικό των κλικών που περιέχουν κάθε κόμβο είναι:"
# print 'The dictionary of the lists of cliques containing each node:'
print nx.cliques_containing_node(G)
print str(" ")

print "Το λεξικό του πλήθους κλικών που περιέχουν κάθε κόμβο είναι:"
# print 'The dictionary of the numbers of maximal cliques for each node:'
print nx.number_of_cliques(G)
print str(" ")

print "Το λεξικό του μεγέθους των μεγαλύτερων κλικών που περιέχουν κάθε κόμβο είναι:"
Example #45
 def test_clique_number(self):
     G=self.G
     assert_equal(nx.graph_clique_number(G),4)
     assert_equal(nx.graph_clique_number(G,cliques=self.cl),4)
Example #46
def calc_and_compare(G):
    result = len(maxclique(G, workers_num, loaded=True))
    nx_result = NX.graph_clique_number(G)
    return result == nx_result
Example #47
def calc_and_compare(G):
    result = len(main(G, workers_num))
    nx_result = NX.graph_clique_number(G)
    return result == nx_result
Example #48
import networkx as nx
import dautil as dl


fb_file = dl.data.SPANFB().load()
G = nx.read_edgelist(fb_file,
                     create_using=nx.Graph(),
                     nodetype=int)

print('Graph Clique Number',
      nx.graph_clique_number(G.subgraph(list(range(2048)))))
Example #49
 def test_clique_number2(self):
     G = nx.Graph()
     G.add_nodes_from([1, 2, 3])
     assert nx.graph_clique_number(G) == 1
Example #50
for node in graph.nodes():
    color_intensity.append( graph_degree[node] )
    node_sizes.append( len( groups[node] ) )

print '\t**Generating spring layout ...'
graph_layout = nx.spring_layout(graph, iterations=50)
graph_layout2 = nx.spectral_layout(graph)
print '... done!'

plt.figure()
plt.xticks([])
plt.yticks([])

print '\t**Drawing nodes ...'
nx.draw_networkx_nodes(graph, pos=graph_layout2, node_color=color_intensity, with_labels=False, alpha=0.75, node_size= 100, cmap=plt.get_cmap('Blues'))
print '\t**Saving pdf (no edges) ...'
plt.savefig('correlation_network5-no_edges.pdf', bbox_inches='tight')
print '\t**Drawing edges ...'
nx.draw_networkx_edges(graph, pos=graph_layout, with_labels=False, alpha=0.3)
print '\t**Saving final pdf ...'
plt.savefig('correlation_network3.pdf', bbox_inches='tight')
print '... done!'
plt.close()

a=nx.find_cliques(graph)
nx.graph_clique_number(graph)
nx.graph_number_of_cliques(graph)
nx.density(graph)
print nx.info(graph)
nx.average_clustering(graph, weight='yeah')
nx.get_edge_attributes(graph, 'weights')
Example #51
def calculate(network):
    try:
        n = nx.graph_clique_number(network)
    except Exception:
        n = 0
    return n
Example #52
    # Stats
    node_count = len(G.nodes())
    edge_count = len(G.edges())

    if node_count < 3:
        continue

    cliques = tuple(nx.find_cliques(G))

    component_count = nx.number_connected_components(G)
    assortativity_coef = nx.degree_assortativity_coefficient(G)
    clustering_coef_avg = nx.average_clustering(G)
    clustering_coef_stddev = np.std(list(nx.clustering(G).values()))
    number_of_cliques_larger_than_2 = len(tuple(clique for clique in cliques if len(clique) > 2))
    clique_count = len(cliques)
    largest_clique = nx.graph_clique_number(G)

    cur.execute("""INSERT INTO stats_temporal_network
                    (time_bin,
                      node_count,
                      edge_count,
                      component_count,
                      degree_assortativity_coefficient,
                      avg_clustering_coefficient,
                      std_clustering_coefficient,
                      number_of_cliques_larger_than_2,
                      largest_clique)
                       VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)""", (date,
                                                                node_count,
                                                                edge_count,
                                                                component_count,
Example #53
 def test_clique_number3(self):
     G = nx.Graph()
     assert_equal(nx.graph_clique_number(G), 0)
Example #54
        for attrib in source_node[0]:
            if (attrib != '_id'):
                graph.node[vertexname][attrib] = source_node[0][attrib]
    else:
        graph.node[vertexname]['user_name'] = vertexname
    graph.node[vertexname]['name'] = vertexname

# go through the links again, this time adding them to the graph now that all the nodes are in
count = 0
for link in link_collection.find():
    source_name = link['source']
    target_name = link['target']
    graph.add_edge(source_name,target_name)
    
print nx.info(graph)
print nx.graph_clique_number(graph)
print nx.number_connected_components(graph)
sys.stdout.flush()

# output the network metadata
import arrow
now = arrow.utcnow().timestamp
starttime = arrow.get('2014-01-01 00:00:00')
endtime = arrow.get('2014-12-31 11:59:59')
#print now

metadata_record_list = [{'datatype':'TwitterMentions'},
                        {'creationdate': now}, 
                        {'revision': 0.2}, 
                        {'sourcetype':'twitter_2014_gnip'},
                        {'description':'mentions'},
Example #55
	orderedDegreeIndices.reverse()

	G = nx.Graph()
	G.add_nodes_from(prunedNodes)
	G.add_edges_from(prunedEdges)

	print "-------------------------------------"
	print "Find cliques of the graph"
	print "-------------------------------------"
	cliques = list(nx.find_cliques(G))
	print cliques

	print "-------------------------------------"
	print "Compute clique number - size of the largest clique"
	print "-------------------------------------"
	graphCliqueNumber = nx.graph_clique_number(G, cliques)
	print graphCliqueNumber

	print "-------------------------------------"
	print "Compute number of maximal ciiques"
	print "-------------------------------------"
	graphNumberOfCliques = nx.graph_number_of_cliques(G, cliques)
	print graphNumberOfCliques


	print "-------------------------------------"
	print "Compute size of largest maximal clique containing a given node"
	print "-------------------------------------"
	maximalCliqueSizePerNode = nx.node_clique_number(G)
	print maximalCliqueSizePerNode
Example #56
 def test_clique_number(self):
     G = self.G
     assert nx.graph_clique_number(G) == 4
     assert nx.graph_clique_number(G, cliques=self.cl) == 4
Example #57
def graphAnalysis(graph, top_number, edge_distance_flag, save_file_path):
    """
        Do the essential analysis to the final combined graph
    """
    with io.open(save_file_path, 'w') as save_file:
        
        # centrality
        # degree centrality
        deg_central = nx.degree_centrality(graph)
        deg_central_sort = sorted(deg_central.items(), key = lambda x: x[1], reverse = True)
        top_deg_central_sort = []
        for ne_deg in deg_central_sort[:top_number]:
            top_deg_central_sort.append((' '.join(ne_deg[0]), ne_deg[1]))
        save_file.write('top %d degree centrality items,' % top_number)
        save_file.write(','.join('%s %s' % x for x in top_deg_central_sort))
        
        if edge_distance_flag:
            # closeness centrality
            close_central = nx.closeness_centrality(graph, distance = 'freq')
            close_central_sort = sorted(close_central.items(), key = lambda x: x[1], reverse = True)
            top_close_central_sort = []
            for ne_close in close_central_sort[:top_number]:
                top_close_central_sort.append((' '.join(ne_close[0]), ne_close[1]))
            save_file.write('\ntop %d closeness centrality items,' % top_number)
            save_file.write(','.join('%s %s' % x for x in top_close_central_sort))
    
            # betweenness centrality
            between_central = nx.betweenness_centrality(graph, weight = 'freq')
            between_central_sort = sorted(between_central.items(), key = lambda x: x[1], reverse = True)
            top_between_central_sort = []
            for ne_between in between_central_sort[:top_number]:
                top_between_central_sort.append((' '.join(ne_between[0]), ne_between[1]))
            save_file.write('\ntop %d betweenness centrality items,' % top_number)
            save_file.write(','.join('%s %s' % x for x in top_between_central_sort))
        
        # clustering
        
        # number of triangles: triangles() is not defined for directed graphs
        triangle_num = nx.triangles(graph)
        triangle_num_sort = sorted(triangle_num.items(), key = lambda x: x[1], reverse = True)
        top_triangle_num_sort = []
        for ne_triangle in triangle_num_sort[:top_number]:
            top_triangle_num_sort.append((' '.join(ne_triangle[0]), ne_triangle[1]))
        save_file.write('\ntop %d number of triangles including a node as one vertex,' % top_number)
        save_file.write(','.join('%s %s' % x for x in top_triangle_num_sort))
        
        # clustering coefficient of node in the graph
        cluster_coefficient = nx.clustering(graph)
        cluster_coefficient_sort = sorted(cluster_coefficient.items(), key = lambda x: x[1], reverse = True)
        top_cluster_coefficient_sort = []
        for ne_cluster_coefficient in cluster_coefficient_sort[:top_number]:
            top_cluster_coefficient_sort.append((' '.join(ne_cluster_coefficient[0]), ne_cluster_coefficient[1]))
        save_file.write('\ntop %d clustering coefficient items,' % top_number)
        save_file.write(','.join('%s %s' % x for x in top_cluster_coefficient_sort))
        
        # transitivity of the graph
        triangle_transitivity = nx.transitivity(graph)
        save_file.write('\ntransitivity of the graph,%f' % triangle_transitivity)
        
        # average clustering coefficient of the graph
        avg_cluster = nx.average_clustering(graph)
        save_file.write('\naverage clustering coefficient of the graph,%f' % avg_cluster)
        
        # clique
        # size of the largest clique in the graph
        size_largest_clique = nx.graph_clique_number(graph)
        save_file.write('\nsize of the largest clique in the graph,%d' % size_largest_clique)
        
        # all the cliques in the graph
        
        all_clique = nx.find_cliques(graph) # a generator
        list_all_clique = list(all_clique)
        list_all_clique_sort = sorted(list_all_clique, key = lambda x: len(x), reverse = True)
        save_file.write('\ncliques,')
        write_all_clique_sort = []
        for clique in list_all_clique_sort:
            clique_string = ''
            for ne in clique:
                clique_string += str(' '.join(ne)) + '|'
            write_all_clique_sort.append(clique_string)
        save_file.write(','.join(x for x in write_all_clique_sort))
Example #58
 def test_clique_number3(self):
     G = nx.Graph()
     assert nx.graph_clique_number(G) == 0
Example #59
def classify(request, pk):
	#gets object based on id given
	graph_file = get_object_or_404(Document, pk=pk)
	#reads file into networkx graph based on extension
	if graph_file.extension() == ".gml":
		G = nx.read_gml(graph_file.uploadfile)
	else:
		G = nx.read_gexf(graph_file.uploadfile)
	#closes file so we can delete it
	graph_file.uploadfile.close()
	#loads the algorithm and tests the algorithm against the graph
	g_json = json_graph.node_link_data(G)
	#save graph into json file
	with open(os.path.join(settings.MEDIA_ROOT, 'graph.json'), 'w') as graph:
			json.dump(g_json, graph)
	with open(os.path.join(settings.MEDIA_ROOT, 'rf_classifier.pkl'), 'rb') as malgo:
		algo_loaded = pickle.load(malgo, encoding="latin1")
		dataset = np.array([G.number_of_nodes(), G.number_of_edges(), nx.density(G), nx.degree_assortativity_coefficient(G), nx.average_clustering(G), nx.graph_clique_number(G)])
		print (dataset)
		#creates X to test against
		X = dataset.reshape(1, -1)  # scikit-learn expects a 2D array of shape (n_samples, n_features)
		prediction = algo_loaded.predict(X)

		graph_type = check_prediction(prediction)
		graph = GraphPasser(G.number_of_nodes(), G.number_of_edges(), nx.density(G), nx.degree_assortativity_coefficient(G), nx.average_clustering(G), nx.graph_clique_number(G))
	#gives certain variables to the view

	return render(
		request,
		'classification/classify.html',
		{'graph': graph, 'prediction': graph_type}
		)