Example #1
import random
from collections import Counter

import networkx as nx
from networkx.generators.community import LFR_benchmark_graph


def load_synthetic(mu, n=1000, tau1=3, tau2=1.5, node_drop_percent=0.2,
                   label_name='label'):
    # Generate an LFR benchmark graph with planted communities.
    G = LFR_benchmark_graph(n,
                            tau1,
                            tau2,
                            mu,
                            average_degree=5,
                            min_community=30,
                            seed=10)
    # Use one member of each node's 'community' set as its raw community id.
    for v in G.nodes:
        G.nodes[v]['value'] = list(G.nodes[v]['community'])[0]
    # Remap the raw community ids onto consecutive integers 0..k-1.
    true_coms = list(nx.get_node_attributes(G, 'value').values())
    com_keys = list(Counter(true_coms).keys())
    for i in range(len(true_coms)):
        G.nodes[i]['value'] = com_keys.index(true_coms[i])

    # Remove self-loop edges.
    G.remove_edges_from(list(nx.selfloop_edges(G)))

    # Convert the graph to a simple undirected graph.
    G = nx.Graph(G)

    # Withhold labels for a random sample of nodes; the remaining nodes keep a
    # visible label under `label_name` (a module-level name in the original
    # snippet, exposed here as a parameter).
    ListOfNodes = list(G.nodes())
    sample = int(len(ListOfNodes) * node_drop_percent)
    RandomSample = random.sample(ListOfNodes, sample)
    for v in G.nodes():
        if v not in RandomSample:
            G.nodes[v][label_name] = G.nodes[v]['value']
    return (G, RandomSample)
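A minimal driver sketch for the loader above; this is hypothetical illustration code, not part of the original project, and assumes the default label key 'label':

# Hypothetical usage (illustration only): build one benchmark graph with 20%
# of node labels withheld, then report the split.
G, held_out = load_synthetic(mu=0.3)
labelled = [v for v in G.nodes if 'label' in G.nodes[v]]
print(len(labelled), 'labelled nodes;', len(held_out), 'held out for prediction')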
Example #2
from nose.tools import assert_equal, assert_true

from networkx.algorithms.community import is_partition
from networkx.generators.community import LFR_benchmark_graph


def test_generator():
    n = 250
    tau1 = 3
    tau2 = 1.5
    mu = 0.1
    G = LFR_benchmark_graph(n, tau1, tau2, mu, average_degree=5,
                            min_community=20, seed=10)
    assert_equal(len(G), 250)
    C = {frozenset(G.nodes[v]['community']) for v in G}
    assert_true(is_partition(G.nodes(), C))
Example #3
# Fragment of a larger parameter-sweep script: rhos, us, num_nodes, num_runs,
# tau1, tau2, avg_degree, max_degree, thsh, f, sweeping_results and
# simulation_ltm are defined earlier in the original file.
import pickle
import random
from sys import argv

from networkx.generators.community import LFR_benchmark_graph

# SLURM_PROCID ranges from 0 to 50.
SLURM_PROCID = int(argv[1])

rho = rhos[SLURM_PROCID]
num_seeds = int(rho * num_nodes)

for u in us:
    for i in range(num_runs):
        G = LFR_benchmark_graph(num_nodes,
                                tau1,
                                tau2,
                                u,
                                average_degree=avg_degree,
                                max_degree=max_degree,
                                seed=10)
        seeds = random.sample(list(G.nodes()), num_seeds)
        activation_time, step = simulation_ltm(G, thsh, seeds, frac=f)
        activation_per_step = [0] * step
        for node in activation_time:
            atime = activation_time[node]
            activation_per_step[atime] += 1
        sweeping_results[u].append(
            (len(activation_time), step, activation_per_step))

Data_Root = '/auto/rcf-proj/ef/hp_273/Diffusion/LFR/per_step_data/'

out_path = Data_Root + 'phase_simu_nodes_%sk_z_%s_thsh_%s_f_%s_%s.pickle' % (
    num_nodes // 1000, avg_degree, thsh, f, SLURM_PROCID)
with open(out_path, 'wb') as file:
    pickle.dump(dict(sweeping_results), file)
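The snippet above depends on a simulation_ltm helper that is not included. The sketch below is a plausible stand-in rather than the original implementation: a synchronous linear threshold model in which a node activates once the fraction of its already-active neighbours reaches thsh. The frac keyword of the original call is accepted but ignored here, since its meaning is not visible in the snippet; the return values match how the result is unpacked above (a dict mapping each activated node to its activation step, plus the number of steps taken).

# Hypothetical stand-in for simulation_ltm (the original is not shown in the
# snippet): synchronous linear threshold dynamics with a uniform threshold.
def simulation_ltm(G, thsh, seeds, frac=None):
    activation_time = {v: 0 for v in seeds}  # seed nodes are active at step 0
    active = set(seeds)
    step = 0
    changed = True
    while changed:
        changed = False
        step += 1
        newly_active = []
        for v in G.nodes():
            if v in active:
                continue
            neighbors = list(G.neighbors(v))
            if not neighbors:
                continue
            # A node activates once the fraction of its active neighbours
            # reaches the threshold.
            frac_active = sum(1 for u in neighbors if u in active) / len(neighbors)
            if frac_active >= thsh:
                newly_active.append(v)
        for v in newly_active:
            active.add(v)
            activation_time[v] = step
            changed = True
    return activation_time, step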
Example #4
    # Fragment of a larger evaluation loop: mu_list, n, tau1, tau2, average_degree,
    # minimum_community, drop_factor, nmi_list, ars_list, name, method and the
    # helper functions called below are defined elsewhere in the original file.
    for mu in mu_list:

        print(f'\nNode Prediction when mu is {mu} and {drop_factor:.0%} of nodes are dropped')

        G = LFR_benchmark_graph(n, tau1, tau2, mu, average_degree, min_community=minimum_community)
        #print('First Node:',G.nodes[0])
        communities = {frozenset(G.nodes[v]['community']) for v in G}
        #print('Communities:\n', communities)
        #print('Number of Ground Truth Communities:', len(communities))
        labels_truth = get_labels_from_community(communities, n)
        #print('Ground Truth Labels:\n', labels_truth)
        G = apply_community_value_to_graph(G, labels_truth)
        #print('First Node Modified:',G.nodes[0])

        for node in random.sample(list(G.nodes()), int(n * drop_factor)):
            del G.nodes[node]['value']
        
        nmi, ars = calculate_node_classification_accuracy(G, 'value', labels_truth, method)

        nmi_list.append(nmi)
        ars_list.append(ars)
    
    print_scatter_plot([mu_list, nmi_list, ars_list], 'Mu', 'Average NMI', 'Average ARS', 'LFR Benchmark Graph Node Classification')

    with open(name+'-'+method+'.csv', mode='w') as file:
        file_writer = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)

        file_writer.writerow(['Mu', 'NMI Score', 'ARS Score'])

        for i in range(len(mu_list)):
            file_writer.writerow([mu_list[i], nmi_list[i], ars_list[i]])
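The loop above calls two helpers that are not part of the snippet: get_labels_from_community and apply_community_value_to_graph. The sketches below are reconstructions inferred from how their results are consumed, not the original code: the first maps each node to the index of the community set containing it, the second writes those labels into the 'value' node attribute that the drop step and the classifier read.

# Hypothetical reconstructions of helpers used above (the originals are not
# shown in the snippet); inferred from how their results are consumed.
def get_labels_from_community(communities, n):
    # Map each node 0..n-1 to the index of the community set containing it.
    labels = [0] * n
    for label, community in enumerate(communities):
        for node in community:
            labels[node] = label
    return labels


def apply_community_value_to_graph(G, labels):
    # Store each node's ground-truth community label in the 'value' attribute.
    for node, label in enumerate(labels):
        G.nodes[node]['value'] = label
    return G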