Code example #1
def get_robustness_zero(dataset):
    """Average the pairwise similarity metrics between the MAX_GEN baseline
    samples (ad_p_remove = "0", i.e. no adversarial removal) for each crawler.

    Relies on the module-level directory, crawler_list, MAX_GEN and
    robustness_zero, plus _mylib.read_file and calculateRobustness.
    """
    for crawler_type in crawler_list:
        robustness_zero[crawler_type] = dict()
        for i in range(0, MAX_GEN):
            robustness_zero[crawler_type][i] = dict()
            # Baseline samples live under the "0" (no-removal) directory.
            ad_p_remove = "0"
            s1_fname = directory + str(ad_p_remove) + '/' + \
                       dataset + '/' + crawler_type + '/' + \
                       dataset + '_' + str(i + 1) + '_1'
            sample_graph_1 = _mylib.read_file(s1_fname)

            node_sim_list = []
            node_cov_sim_list = []
            partition_sim_list = []
            degree_sim_list = []

            # Compare sample i against every other sample j from the same crawler.
            for j in range(0, MAX_GEN):
                if i == j:
                    continue

                s2_fname = directory + str(ad_p_remove) + '/' + \
                           dataset + '/' + crawler_type + '/' + \
                           dataset + '_' + str(j + 1) + '_1'
                sample_graph_2 = _mylib.read_file(s2_fname)

                sample_robustness = calculateRobustness(
                    sample_graph_1, sample_graph_2, s1_fname, s2_fname)

                node_sim_list.append(sample_robustness['node_sim'])
                node_cov_sim_list.append(
                    sample_robustness['node_coverage_sim'])
                partition_sim_list.append(sample_robustness['partition_sim'])
                degree_sim_list.append(sample_robustness['degree_sim'])

            # Average each similarity metric over the MAX_GEN - 1 pairwise comparisons.
            robustness_zero[crawler_type][i]['node_sim'] = 1. * sum(
                node_sim_list) / len(node_sim_list)
            robustness_zero[crawler_type][i]['node_cov_sim'] = 1. * sum(
                node_cov_sim_list) / len(node_cov_sim_list)
            robustness_zero[crawler_type][i]['partition_sim'] = 1. * sum(
                partition_sim_list) / len(partition_sim_list)
            robustness_zero[crawler_type][i]['degree_sim'] = 1. * sum(
                degree_sim_list) / len(degree_sim_list)

    return robustness_zero
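
A minimal driver for get_robustness_zero is sketched below. The global values mirror those appearing elsewhere in these excerpts, the dataset name is only the one from the commented-out line in code example #2, and _mylib plus calculateRobustness are assumed to be defined or imported in the same script.

# Hypothetical driver, not part of the original script.
directory = './data-adv/rand_edge/'
crawler_list = ['med', 'mod', 'rw', 'bfs', 'rand']
MAX_GEN = 10
robustness_zero = dict()

scores = get_robustness_zero('socfb-Amherst41')
for crawler_type, runs in scores.items():
    for i, metrics in runs.items():
        print(crawler_type, i, metrics['node_sim'], metrics['degree_sim'])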
Code example #2
    dataset = args.dataset
    directory = './data-adv/rand_edge/'
    # dataset = 'socfb-Amherst41'
    log_file = './log/' + dataset

    MAX_GEN = 10           # number of sample graphs per crawler
    ad_p_remove = "0"      # "0" selects the baseline (no-removal) samples
    crawler_list = ['med', 'mod', 'rw', 'bfs', 'rand']   # sampling strategies
    # crawler_list = ['mod']

    for crawler_type in crawler_list:
        for i in range(0, MAX_GEN):
            fname = directory + str(ad_p_remove) + '/' + \
                    dataset + '/' + crawler_type + '/' + \
                    dataset + '_' + str(i+1) + '_1'
            G = _mylib.read_file(fname)
            node_count = G.number_of_nodes()

            # Cache the community partition, node set, and degree histogram of
            # sample i before G and fname are reused in the inner loop below.
            com_G = get_community(G, fname)
            node_G = G.nodes()
            deg_hist_G = nx.degree_histogram(G)

            for j in range(0, MAX_GEN):
                if i == j:
                    continue

                fname = directory + str(ad_p_remove) + '/' + \
                        dataset + '/' + crawler_type + '/' + \
                        dataset + '_' + str(j + 1) + '_1'

                G = _mylib.read_file(fname)
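
The excerpt is cut off before the two samples are actually compared. Purely as an illustration of the kind of metric involved (an assumption, not the script's calculateRobustness logic), a node-set Jaccard similarity between two sample graphs could look like this:

# Sketch only: node-set Jaccard similarity between two sample graphs.
def node_jaccard(g1, g2):
    n1, n2 = set(g1.nodes()), set(g2.nodes())
    return 1. * len(n1 & n2) / len(n1 | n2)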
Code example #3
    elif remove_type == 'edge_incomp':
        directory = './data-adv/edge_incomp/'
        nodes_original = set()

    log_file = './log/' + dataset + '_' + remove_type

    MAX_GEN = 10
    # crawler_list = ['med','mod','rw', 'bfs', 'rand']
    crawler_list = ['mod', 'rw', 'bfs', 'rand']

    for crawler_type in crawler_list:
        for i in range(0, MAX_GEN):
            # Baseline sample crawled from the unperturbed graph (removal fraction "0").
            s1_fname = directory + '0' + '/' + \
                       dataset + '/' + crawler_type + '/' + \
                       dataset + '_' + str(i + 1) + '_1'
            sample_graph_1 = _mylib.read_file(s1_fname)

            # for ad_p_remove in ["0.01", "0.05", "0.1", "0.2", "0.3"]:
            for ad_p_remove in ["0.1", "0.2", "0.3", "0.4", "0.5"]:
                # Sample crawled after removing the fraction ad_p_remove of the graph.
                s2_fname = directory + ad_p_remove + '/' + \
                           dataset + '/' + crawler_type + '/' + \
                           dataset + '_' + str(i + 1) + '_1'
                sample_graph_2 = _mylib.read_file(s2_fname)
                # Expected number of removed nodes; zero for the edge_incomp
                # case, where nodes_original is an empty set.
                deleted_nodes_count = int(
                    float(ad_p_remove) * len(nodes_original))
                # print(' Delete nodes count: {}/{} \t {}'.format(deleted_nodes_count, len(nodes_original), ad_p_remove))

                partition_sim = calculate_community_sim(
                    sample_graph_1, sample_graph_2, s1_fname, s2_fname,
                    deleted_nodes_count)
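
calculate_community_sim itself is not shown in the excerpt. As a hedged sketch of one way such a partition similarity could be computed (not necessarily the author's method), the Louvain partitions of the two samples can be compared with normalized mutual information over their shared nodes:

# Sketch, not the script's calculate_community_sim. Requires the
# python-louvain ("community") and scikit-learn packages.
import community as community_louvain
from sklearn.metrics import normalized_mutual_info_score

def partition_nmi(g1, g2):
    p1 = community_louvain.best_partition(g1)
    p2 = community_louvain.best_partition(g2)
    shared = sorted(set(p1) & set(p2))
    return normalized_mutual_info_score([p1[n] for n in shared],
                                        [p2[n] for n in shared])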
Code example #4
    parser.add_argument('fname', help='Edgelist file', type=str)
    parser.add_argument('-remove',
                        help='remove node or edge',
                        type=str,
                        default='edge')

    args = parser.parse_args()
    fname = args.fname
    remove_type = args.remove

    fname = fname.replace('\\', '/')
    # Assumes a relative path such as './data/<name>.txt':
    # split('.')[1] gives '/data/<name>' and split('/')[-1] gives '<name>'.
    network_name = fname.split('.')[1].split('/')[-1]

    log_file = './log/eigen_' + remove_type + '.txt'

    G = _mylib.read_file(fname, isDirected=False)
    e = compute_eigenvals(G, network_name)
    max_e = e[-1]

    print(max_e)

    # for ad_p_remove in [0.01, 0.05, 0.1, 0.2, 0.3]:
    #     e_list = list()
    #     for i in range(0, 10):
    #         if remove_type == 'edge':
    #             directory = './data-adv/rand_edge/' + str(ad_p_remove) + '/'
    #         elif remove_type == 'node':
    #             directory = './data-adv/rand_node/' + str(ad_p_remove) + '/'
    #         elif remove_type == 'target':
    #             directory = './data-adv/target/' + str(ad_p_remove) + '/'
    #
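
compute_eigenvals is not shown here; the only constraint visible in the excerpt is that e[-1] is used as the largest eigenvalue, so the values are presumably returned in ascending order. A minimal sketch under that assumption (ignoring whatever the network_name argument might be used for, e.g. caching) follows; it is not the script's implementation.

import networkx as nx
import numpy as np

# Sketch: eigenvalues of the adjacency matrix in ascending order,
# so that the last entry is the spectral radius.
def compute_eigenvals_sketch(G, network_name):
    A = nx.to_numpy_array(G)       # dense symmetric adjacency matrix
    return np.linalg.eigvalsh(A)   # real eigenvalues, ascending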
Code example #5
        directory = './data-adv/rand_node/'
        # Pickled files must be opened in binary mode.
        nodes_original = pickle.load(open(directory + dataset + '_nodes.pickle', 'rb'))
    elif remove_type == 'target':
        directory = './data-adv/target/'
        nodes_original = pickle.load(open(directory + dataset + '_nodes.pickle', 'rb'))
    elif remove_type == 'edge_incomp':
        directory = './data-adv/edge_incomp/'
        nodes_original = set()

    log_file = './log/' + dataset + '_' + remove_type

    MAX_GEN = 10
    # crawler_list = ['med','mod','rw', 'bfs', 'rand']
    crawler_list = ['mod', 'rw', 'bfs', 'rand']

    # Statistics of the original graph: average degree, largest eigenvalue,
    # and average clustering coefficient.
    G_original = _mylib.read_file(fname)
    deg = dict(G_original.degree())
    avg_deg = np.average(list(deg.values()))   # dict views need list() for numpy
    e = adv_norm.compute_eigenvals(G_original, dataset)
    max_e = e[-1]
    cc = nx.average_clustering(G_original)

    for crawler_type in crawler_list:
        for i in range(0, MAX_GEN):
            s1_fname = directory + '0' + '/' + \
                       dataset + '/' + crawler_type + '/' + \
                       dataset + '_' + str(i + 1) + '_1'
            sample_graph_1 = _mylib.read_file(s1_fname)
            deg = dict(sample_graph_1.degree())
            avg_deg_0 = np.average(list(deg.values()))
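
The excerpt ends while the baseline sample is being summarised. A hypothetical helper that mirrors the statistics computed for G_original above (average degree and clustering coefficient) might look like this; it is not taken from the source script.

import networkx as nx
import numpy as np

# Hypothetical helper: summarise a graph with the same statistics
# computed for G_original above.
def summarize_graph(G):
    deg = dict(G.degree())
    return {'avg_deg': np.average(list(deg.values())),
            'clustering': nx.average_clustering(G)}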