Example #1
def plot_heatmap(graph, pos, hubs, data, save_as):
    dataframe = pd.DataFrame(data, columns=['value'])
    # scale values for the colour map (assign the result back, since
    # DataFrame.apply does not modify in place)
    dataframe = dataframe.apply(lambda x: ((x - np.mean(x)) /
                                           (np.max(x) - np.min(x))) * 225)
    dataframe = dataframe.reindex(graph.nodes())
    # Providing a continuous color scale with cmap
    node_size = []
    for i in (graph.nodes()):
        if i not in hubs:
            node_size.append(0.6)
        else:
            # enlarge hub size
            node_size.append(5)
    opts = {
        "node_color": dataframe['value'],
        "node_size": node_size,
        "pos": pos,
        "cmap": plt.cm.plasma
        # 'with_labels' dropped: draw_networkx_nodes does not accept it
    }

    nodes = nx.draw_networkx_nodes(graph, **opts)
    nodes.set_norm(mcolors.SymLogNorm(linthresh=0.01, linscale=1))
    edges = nx.draw_networkx_edges(graph, pos, width=0.05)

    plt.colorbar(nodes)
    plt.axis('off')
    plt.savefig(save_as)
    plt.show()
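A quick smoke test for this snippet, assuming the module-level imports it relies on (pandas as pd, numpy as np, networkx as nx, matplotlib.pyplot as plt, matplotlib.colors as mcolors) and a hand-picked hub threshold, might look like:

import networkx as nx
import numpy as np

graph = nx.karate_club_graph()
pos = nx.spring_layout(graph, seed=42)
hubs = [n for n, d in graph.degree() if d > 10]  # treat high-degree nodes as hubs
data = np.random.rand(graph.number_of_nodes())
plot_heatmap(graph, pos, hubs, data, "heatmap.png")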
Example #2
def local_efficiency(graph):
    total = 0
    for node in graph.nodes():
        # subgraph of the 1-hop neighbourhood, without the node itself
        podgraf = graph.subgraph_from_nodes(graph.network_size(node, 1))
        podgraf.del_node(node)
        total += average_efficiency(podgraf)
    return total / len(graph.nodes())
Example #3
def average_efficiency(graph):
    # average inverse shortest-path length over all ordered node pairs
    if len(graph.nodes()) <= 1:
        return 0
    total = 0
    for start in graph.nodes():
        partial = 0
        for end in graph.nodes():
            if start != end:
                partial += 1 / graph.shortest_path(start, end)[0]
        total += partial
    return total / (len(graph.nodes()) * (len(graph.nodes()) - 1))
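Examples #2 and #3 appear to target the graph-theory package (whose Graph class provides shortest_path, del_node, subgraph_from_nodes and network_size) rather than networkx. A minimal sanity check, assuming that package is installed, could be:

from graph import Graph  # pip install graph-theory

g = Graph()
for a, b in [(1, 2), (2, 3), (1, 3)]:
    g.add_edge(a, b, 1, bidirectional=True)

# every pair in a triangle is at distance 1, so the efficiency is 1.0
print(average_efficiency(g))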
Example #4
def run_clustering(output, clustering_method, graph, graph_id, oslom2_dir,
                   infomap_dir, cluster_seed, infomap_calls):
    clusters_per_node = {}
    if graph.number_of_edges() == 0:
        # oslom2 and infomap do not support graphs with 0 edges,
        # so put each node in its own cluster
        cluster_index = 1
        for node in graph.nodes():
            clusters_per_node[node] = [cluster_index]
            cluster_index += 1
    elif clustering_method == 'oslom2':
        oslom_edge_file = file_io.write_oslom_edge_file(
            output, "oslom_edge_file_{}".format(graph_id), graph)
        cluster.run_oslom2(output, oslom_edge_file, oslom2_dir, cluster_seed,
                           infomap_calls)
        output_tp_file = os.path.join(
            oslom_edge_file + "_oslo_files",
            "tp")  # or tp1 or tp2 (to be exposed as parameter)
        clusters_per_node = file_io.read_oslom2_tp_file(output_tp_file)
    elif clustering_method == 'infomap':
        pajek_file = file_io.write_pajek_file(output,
                                              "pajek_file_{}".format(graph_id),
                                              graph)
        cluster.run_infomap(output, pajek_file, infomap_dir, cluster_seed)
        output_tree_file = os.path.splitext(pajek_file)[0] + '.tree'
        level = 1  # lowest hierarchy level
        clusters_per_node = file_io.read_infomap_tree_file(
            output_tree_file, level)  # get cluster(s) from Infomap .tree file
    return clusters_per_node
Example #5
def init_graph(graph,
               particle_density=0,
               max_node_particles=1,
               particle_f=particle.random_target,
               gossip_initiators=0,
               initial_data_f=None):
    """Initialize each node's attributes based on the given parameters."""
    for node, node_data in graph.nodes(data=True):
        # Incoming queue of particles.
        node_data["particles"] = []

        # The maximum capacity of particles at this node.
        node_data["max_particles"] = max_node_particles

        # Particles that arrived to a node at full capacity.
        node_data["overflow_particles"] = []

        # Particles that reached their target.
        node_data["old_particles"] = []

        # An empty routing table, tagged at time-step -1 (pre-simulation).
        node_data["routing_tables"] = {-1: {node: (0, node)}}

        # The initial data that this node may have.
        node_data["data"] = initial_data_f() if initial_data_f else None

        # Generate initial particles based on 'particle_density'.
        for _ in range(max_node_particles):
            if random.uniform(0, 1) < particle_density:
                node_data["particles"].append(particle_f(graph, node))
Example #6
def all_particles(graph):
    """Collect all particles after a simulation."""
    return list(
        itertools.chain.from_iterable([
            node_data["old_particles"] + node_data["particles"] +
            node_data["overflow_particles"]
            for _, node_data in graph.nodes(data=True)
        ]))
Example #7
def run_simulation(graph,
                   update_f,
                   order=SimOrder.Random,
                   timesteps=10,
                   additional_update_fs=[],
                   collect=None):
    """Run the update function on each node at each time-step.

   Additional update functions can also be passed, each to be run every N
   time-steps. These additional functions f need to be passed as a list
   additional_update_fs=[(f, N)].

   """

    # Setup data collection and run once before simulation.
    collected_data = {}
    if collect:
        collected_data[-1] = collect(graph)

    # Apply update dynamics and data collection at each time-step.
    for timestep in range(timesteps):
        print("Timestep: {}".format(timestep))

        # Determine the order that particle updates are applied.
        all_nodes = list(graph.nodes(data=True))
        if order == SimOrder.Random:
            random.shuffle(all_nodes)

        # For each node apply the update function.
        for node, node_data in all_nodes:
            update_f(graph, node, timestep)

        # Run any additional update functions if given.
        for (additional_update_f, update_interval) in additional_update_fs:
            if timestep % update_interval == 0:
                for node in graph.nodes():
                    additional_update_f(graph, node, timestep)

        # Run data collection if requested.
        if collect:
            collected_data[timestep] = collect(graph)

    return collected_data
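A short usage sketch, assuming a graph initialized with init_graph from Example #5 and SimOrder imported from the same module; the update function here is a deliberate no-op and collect just counts particles:

def noop_update(graph, node, timestep):
    pass  # particles stay where they are

collected = run_simulation(graph,
                           noop_update,
                           timesteps=3,
                           collect=lambda g: sum(len(d["particles"])
                                                 for _, d in g.nodes(data=True)))
print(collected)  # keys -1, 0, 1, 2 map to the particle count at each step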
Example #9
def construct_random_network(doc, p=0.2):
    """Construct random network for use as baseline.

    Create a random network based on *doc*, with words used for nodes.
    Edges are created between any given pair of nodes (a, b) with probability *p*.

    All edges will have weight = 1.0
    """
    doc = preprocess.preprocess_text(doc)
    words = list(set(doc))  # list of unique words

    # create graph
    graph = nx.DiGraph()
    graph.add_nodes_from(words)

    # add edges
    for word_a in graph.nodes():
        for word_b in graph.nodes():
            if word_a != word_b and rand() < p:
                _update_edge_weight(graph, word_a, word_b)

    return graph
Example #10
def testGenerate2Nodes(self):
    G = nx.Graph()
    G.add_node(0)
    index = 0
    expected = [{'nodes': [0, 1], 'edges': []},
                {'nodes': [0, 1], 'edges': [(0, 1)]}]
    self.gen = Generator(G, 1, [])
    for graph in self.gen.iterate():
        self.assertEqual(graph.nodes(), expected[index]['nodes'])
        self.assertEqual(graph.edges(), expected[index]['edges'])
        index += 1
    self.assertEqual(index, 2)
Example #11
def precompute_dist_data(edge_index, num_nodes, approximate=0):
    '''
    Here dist is 1/real_dist; higher actually means closer, 0 means disconnected.
    Assumes node labels are integers in [0, num_nodes), since they index dists_array.
    :return:
    '''
    graph = nx.Graph()
    edge_list = edge_index.transpose(1, 0).tolist()
    graph.add_edges_from(edge_list)

    n = num_nodes
    dists_array = np.zeros((n, n))
    dists_dict = all_pairs_shortest_path_length_parallel(
        graph, cutoff=approximate if approximate > 0 else None)
    for node_i in graph.nodes():
        shortest_dist = dists_dict[node_i]
        for node_j in graph.nodes():
            dist = shortest_dist.get(node_j, -1)
            if dist != -1:
                dists_array[node_i, node_j] = 1 / (dist + 1)
    return dists_array
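all_pairs_shortest_path_length_parallel is not defined in the snippet (in the original codebase it spreads the computation over worker processes). A single-process stand-in with the same contract, built on networkx, could be:

import networkx as nx

def all_pairs_shortest_path_length_parallel(graph, cutoff=None):
    # returns {node: {reachable_node: hop_count}}, matching the parallel version
    return {node: nx.single_source_shortest_path_length(graph, node, cutoff=cutoff)
            for node in graph.nodes()}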
Example #12
def remove_centered(graph, percent, start=None):
    # NOTE: resitev ("solution") aliases graph, so edges are removed in place
    resitev = graph
    total_edges = len(graph.edges())
    if start is None:
        start = random.choice(graph.nodes())
    queue = [start]
    done = []
    while len(resitev.edges()) > (1 - percent) * total_edges and queue:
        node = queue.pop(0)
        for el in graph.nodes(from_node=node):
            if len(resitev.edges()) <= (1 - percent) * total_edges:
                break
            if el not in done:
                queue.append(el)
            try:
                resitev.del_edge(node, el)
                resitev.del_edge(el, node)
            except Exception:
                continue
        done.append(node)
    return resitev
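Like Examples #2 and #3, this relies on the graph-theory package API (nodes(from_node=...), del_edge). A usage sketch on a 4-cycle, assuming that package, removing about half of the directed edges:

from graph import Graph  # pip install graph-theory

g = Graph()
for a, b in [(1, 2), (2, 3), (3, 4), (4, 1)]:
    g.add_edge(a, b, 1, bidirectional=True)

pruned = remove_centered(g, 0.5, start=1)  # mutates g in place (resitev aliases graph)
print(len(pruned.edges()))  # 4 of the original 8 directed edges remain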
Example #13
def testTwoNode(self):
    g = nx.Graph()
    g.add_node(0)
    gen = Generator2(g, 1, [])
    expected = [{'nodes': [0, 1],
                 'edges': []},
                {'nodes': [0, 1],
                 'edges': [(0, 1)]},
                ]
    number = 0
    for graph in gen.iterate():
        self.assertEqual(expected[number]['nodes'], graph.nodes())
        self.assertEqual(expected[number]['edges'], graph.edges())
        number += 1
    self.assertEqual(number, 2)
Example #14
def alpha_node(graph):
    for node in graph.nodes():
        values = []  # renamed from 'set', which shadowed the builtin
        value_list = graph.node[node]
        for value in value_list:
            if isinstance(value, tuple):
                alpha = sols.recursivealpha(value)
                for j in alpha:
                    if isinstance(j, tuple):
                        if j not in values:
                            values.append(j)
                    else:
                        for prop in alpha:
                            values.append(prop)
            elif isinstance(value, str):
                values.append(value)
        graph.node[node] = remove_duplicates(values)
Example #15
def global_sim(graph, percent):
    pairs = set()
    all_pairs = set()
    for first in graph.nodes():
        for second in graph.nodes():
            if first != second:
                all_pairs.add((first, second))
    lay = False
    if percent > 0.5:
        percent = 1 - percent
        lay = True
    while len(pairs) < min(
            len(graph.nodes()) * (len(graph.nodes()) - 1) * percent,
            len(graph.nodes()) * (len(graph.nodes()) - 1) * (1 - percent)):
        first = random.choice(graph.nodes())
        second = random.choice(graph.nodes())
        if first != second:
            pairs.add((first, second))
    if lay:
        pairs = all_pairs - pairs
    # Note: the sampled 'pairs' above are currently unused here.
    # Might change also:
    return average_sim(graph, percent) / average_sim(
        generate_ideal(len(graph.nodes())), percent)
Example #16
def average_sim(graph, percent):
    pairs = set()
    all_pairs = set()
    for first in graph.nodes():
        for second in graph.nodes():
            if first != second:
                all_pairs.add((first, second))
    lay = False
    if percent > 0.5:
        percent = 1 - percent
        lay = True
    while len(pairs) < min(
            len(graph.nodes()) * (len(graph.nodes()) - 1) * percent,
            len(graph.nodes()) * (len(graph.nodes()) - 1) * (1 - percent)):
        first = random.choice(graph.nodes())
        second = random.choice(graph.nodes())
        if first != second:
            pairs.add((first, second))
    if lay:
        pairs = all_pairs - pairs
    if not pairs:
        return 0  # guard against division by zero on graphs with < 2 nodes
    total = 0
    for pair in pairs:
        total += 1 / graph.shortest_path(pair[0], pair[1])[0]
    return total / len(pairs)
Example #17
def textrank_keyword(text):
    txt = u' '.join(su.text for su in text)
    # Gets a dict of word -> lemma
    tokens = textcleaner.clean_text_by_word(text, 'english')
    split_text = list(textcleaner.tokenize_by_word(txt))

    # Creates the graph and adds the edges
    graph = commons.build_graph(keywords._get_words_for_graph(tokens))
    keywords._set_graph_edges(graph, tokens, split_text)
    del split_text # It's no longer used
    commons.remove_unreachable_nodes(graph)

    # Ranks the tokens using the PageRank algorithm. Returns dict of lemma -> score
    pagerank_scores = keywords._pagerank_word(graph)
    extracted_lemmas = keywords._extract_tokens(graph.nodes(), pagerank_scores, 0.2, None)
    lemmas_to_word = keywords._lemmas_to_words(tokens)
    keyWords = keywords._get_keywords_with_score(extracted_lemmas, lemmas_to_word)
    # text.split() to keep numbers and punctuation marks, so separated concepts are not combined
    combined_keywords = keywords._get_combined_keywords(keyWords, txt.split())
    kw_scores = keywords._format_results(keyWords, combined_keywords, False, True)

    results = [Counter({'TEXTRANK_KEYWORD_SCORE': keyword_mean_score(su.basic, kw_scores)})
               for su in text]
    return results
Example #18
def add_excluded_nodes_to_assignments(graph, assignments):
    for node in graph.nodes():
        if node not in assignments:
            assignments[node] = -1
Example #19
def remove_dups_graph(graph):
    for node in graph.nodes():
        value_list = graph.node[node]
        unique_list = remove_duplicates(value_list)
        graph.node[node] = unique_list
Example #20
def main():
    num_graph = 0
    for graph in Graphs:
        formulas_in = graph_formulas[num_graph]
        status = 1
        index = 1
        alpha_node(graph)
        while status == 1:
            for node in range(index, len(graph.nodes()) + 1):
                start_length = len(graph.nodes())

                # run the expansion rules over this node (twice, as written)
                alpha_node_solve(graph, node)
                beta_node_solve(graph, node, formulas_in)
                delta_node_solve(graph, node, formulas_in)
                symmetric_gamma_node(graph, node, formulas_in)
                delta_node_solve(graph, node, formulas_in)
                alpha_node_solve(graph, node)
                beta_node_solve(graph, node, formulas_in)
                delta_node_solve(graph, node, formulas_in)
                symmetric_gamma_node(graph, node, formulas_in)

                end_length = len(graph.nodes())
                if start_length < end_length:
                    index = index + 1
                elif index < len(graph.nodes()):
                    index = index + 1
                else:
                    status = 0
        num_graph += 1

    # find inconsistencies in the models
    index_inconsistent = []
    for i in range(0, len(Graphs)):
        graph = Graphs[i]
        for node in graph.nodes():
            consistent_list = graph.node[node]
            if sols.inconsistent(consistent_list):
                index_inconsistent.append(i)
    index_inconsistent = list(set(index_inconsistent))
    # remove inconsistent graphs (models); note that 'is not []' would always
    # be True, so test for a non-empty list instead
    if index_inconsistent:
        for num in reversed(index_inconsistent):
            del Graphs[num]

    # display and save pictures of all remaining graphs in the list

    if not Graphs:
        print("There are no models for the input formula:", syntax.formula_to_string(psi))
        print("So the negation of it: ~(", syntax.formula_to_string(psi), ") is valid.")

    else:
        for i in range(0, len(Graphs)):
            graph = Graphs[i]
            custom_labels = {}
            node_colours = ['y']
            for node in graph.nodes():
                custom_labels[node] = graph.node[node]
                node_colours.append('c')

            nx.draw(Graphs[i], nx.spring_layout(Graphs[i]), node_size=1500,
                    with_labels=True, labels=custom_labels, node_color=node_colours)
            # show with custom labels
            fig_name = "graph" + str(i) + ".png"

            plt.savefig(fig_name)
            plt.show()

        print "Satisfiable models have been displayed."
        if len(Graphs) == 1:
            print "You have ",len(Graphs), " valid model."
        else:
            print "You have ",len(Graphs), " valid models."
        print "Your provided formula is: ", (syntax.formula_to_string(psi))
        print "Pictures of the graphs have been saves as: graph0.png, graph1.png etc."