Пример #1
0
from plot_utility import SVM_plot
from Data_load import read_UCI_data, read_2D

# Load the full UCI training set (dataset id 0).
Train_data, Train_label = read_UCI_data(0)

C = 3.0
gamma = 0.4

# Centralized baseline: a single RBF-kernel SVM trained on all the data.
Global_model = svm.SVC(C=C, kernel='rbf', gamma=gamma, tol=1e-6)
Global_model.fit(Train_data, Train_label)
Global_loss = Ce.training_loss(Global_model)

# Split the training set across the edge nodes.
Edge_node_n = 10
Edge_data, Edge_label, Global_index = data_partition(
    Train_data, Train_label, Edge_node_n)

Edge_loss = np.zeros(Edge_node_n)
Edge_upper_loss = np.zeros(Edge_node_n)

for node in range(Edge_node_n):
    # Local model with the global C: its training loss is the per-node value.
    reference_model = Ed.local_train(
        Edge_data[node], Edge_label[node], C, gamma, 'rbf')
    Edge_loss[node] = Ce.training_loss(reference_model)

    # Local model with C scaled by the node count; averaging these losses
    # over the nodes gives the upper bound printed below.
    scaled_model = Ed.local_train(
        Edge_data[node], Edge_label[node], C * Edge_node_n, gamma, 'rbf')
    Edge_upper_loss[node] = Ce.training_loss(scaled_model) / Edge_node_n

print('Upper bound:')
print(np.sum(Edge_upper_loss))
Пример #2
0
def main(argv):
    """Compare three ways of collecting support vectors for a distributed SVM.

    Command line (see the -h output):
        -n  number of edge nodes
        -C  SVM regularization parameter
        -g  RBF kernel gamma
        -p  partition scheme: 0 data_partition, 1 kmeans_partition,
            2 k_means_random_partition
        -d  dataset id: 0 skin_nonskin, 1 phishing, 2 a8a, 3 ijcnn, 4 covtype
        -t  number of parts for the k-means random partition
        -e  'True'/'False': whether the random partition is balanced

    Writes a CSV with the number of data points each method ends up using:
    the centrally trained model ('optimal'), the iterative upload scheme
    ('proposed'), and uploading every local support vector
    ('local support vector').
    """
    # Defaults, overridable from the command line.
    Edge_node_n = 10
    C = 3.0
    gamma = 0.1
    dataset = 3
    data_part = 0
    n_part = 5
    equal_flag = True

    try:
        opts, args = getopt.getopt(argv, 'hn:C:g:p:d:t:e:', [
            'help', 'Edge_node_n=', 'C=', 'gamma=', 'data_part=', 'dataset=',
            'n_part=', 'equal_flag='
        ])
    except getopt.GetoptError:
        print(
            'Bound_DSVM.py -n <Edge_node_n> -C <C> -g <gamma> -p <data_part> -d <dataset> -t <n_part> -e <equal_flag>'
        )
        sys.exit(2)

    for opt, arg in opts:
        if (opt == '-h'):
            print(
                'Bound_DSVM.py -n <Edge_node_n> -C <C> -g <gamma> -p <data_part> -d <dataset> -t <n_part> -e <equal_flag>'
            )
            print(
                '-p 0: data_partition; 1: kmeans_partition; 2: kmeans_random')
            print(
                '-d 0: skin_nonskin; 1: phishing; 2: a8a; 3: ijcnn; 4: covtype'
            )
            print('-e True: equal; False: not equal')
            sys.exit()
        elif (opt == '-n'):
            Edge_node_n = int(arg)
        elif (opt == '-C'):
            C = float(arg)
        elif (opt == '-g'):
            gamma = float(arg)
        elif (opt == '-p'):
            data_part = int(arg)
        elif (opt == '-d'):
            dataset = int(arg)
        elif (opt == '-t'):
            n_part = int(arg)
        elif (opt == '-e'):
            # BUG FIX: the raw option string used to be stored, so
            # '-e False' produced the truthy string 'False' and the flag
            # could never be switched off.  Parse it into a real boolean.
            equal_flag = (arg == 'True')

    method_name = ['optimal', 'proposed', 'local support vector']
    n_data_point = [int(0)] * 3

    # Centralized reference model trained on the whole training set.
    Train_data, Train_label, Test_data, Test_label = read_UCI_data(dataset)
    print("global size is %s" % (np.size(Train_label)))

    Global_model = svm.SVC(C=C, kernel='rbf', gamma=gamma)
    Global_model.fit(Train_data, Train_label)
    # 'optimal' cost: number of support vectors of the central model.
    n_data_point[0] = Global_model.n_support_[0] + Global_model.n_support_[1]

    # Distribute the training data over the edge nodes.
    Edge_data = []
    Edge_label = []
    if (data_part == 0):
        Edge_data, Edge_label, Global_index = data_partition(
            Train_data, Train_label, Edge_node_n)
    elif (data_part == 1):
        Edge_data, Edge_label, Global_index = kmeans_partition(
            Train_data, Train_label, Edge_node_n)
    elif (data_part == 2):
        Edge_data, Edge_label, Global_index = k_means_random_partition(
            Train_data, Train_label, Edge_node_n, n_part, equal_flag)
    else:
        raise ValueError

    # Untouched copies for the 'upload all local SVs' baseline, whose
    # iteration loop mutates the per-node data via Ed.data_mix.
    Edge_data_all = copy.deepcopy(Edge_data)
    Edge_label_all = copy.deepcopy(Edge_label)

    # Per-node signed margins and sorted support vectors, split by class.
    Distance_plus = []
    Distance_minus = []
    support_plus_ = []
    support_minus_ = []
    support_vectors_plus = []
    support_vectors_minus = []

    for i in range(Edge_node_n):
        local_model = Ed.local_train(Edge_data[i], Edge_label[i], C, gamma,
                                     'rbf')
        support_, support_vectors_, n_support_ = Ed.local_support(local_model)

        # label * decision value for each support vector; rows from index
        # n_support_[0] on belong to the positive class.
        D_plus = np.multiply(
            Edge_label[i][support_[n_support_[0]:]],
            local_model.decision_function(support_vectors_[n_support_[0]:, :]))
        D_minus = np.multiply(
            Edge_label[i][support_[:n_support_[0]]],
            local_model.decision_function(support_vectors_[:n_support_[0], :]))

        S_plus_, SV_plus, _ \
            = Ed.upload_sort(support_[n_support_[0]:], support_vectors_[n_support_[0]:, :], D_plus)

        S_minus_, SV_minus, _ \
            = Ed.upload_sort(support_[:n_support_[0]], support_vectors_[:n_support_[0], :], D_minus)

        Distance_plus.append(D_plus)
        Distance_minus.append(D_minus)
        support_plus_.append(S_plus_)
        support_minus_.append(S_minus_)
        support_vectors_plus.append(SV_plus)
        support_vectors_minus.append(SV_minus)

    # --- First data upload: each node sends the support vectors of a ---
    # --- local model trained with C scaled by the node count.        ---
    Upload_support_vector_ = []
    Upload_label = []

    for i in range(Edge_node_n):
        upper_model = Ed.local_train(Edge_data[i], Edge_label[i],
                                     C * Edge_node_n, gamma, 'rbf')

        Upload_edge_support_vector_ = upper_model.support_vectors_
        Upload_edge_label = Edge_label[i][upper_model.support_]

        Upload_support_vector_.append(Upload_edge_support_vector_)
        Upload_label.append(Upload_edge_label)

    # Concatenate every node's upload into one central training set.
    Collect_support_vector_ = np.concatenate(
        (Upload_support_vector_[0], Upload_support_vector_[1]))
    Collect_label = np.concatenate((Upload_label[0], Upload_label[1])).reshape(
        (-1))
    if (Edge_node_n > 2):
        for j in range(2, Edge_node_n):
            Collect_support_vector_ = np.concatenate(
                (Collect_support_vector_, Upload_support_vector_[j]))
            Collect_label = np.concatenate(
                (Collect_label, Upload_label[j].reshape(-1)))

    test = svm.SVC(C=C, kernel='rbf', gamma=gamma)
    test.fit(Collect_support_vector_, Collect_label)

    # Iterate the upload scheme until the central SV set stabilizes.
    ite = 0
    old_support_vectors_ = test.support_vectors_
    new_support_vectors_, Upload_support_vector_, Upload_label, Collect_support_vector_, Collect_label \
        = training_iteration(Edge_node_n,
                             Edge_data,
                             Edge_label,
                             Upload_support_vector_,
                             Upload_label,
                             Collect_support_vector_,
                             Collect_label,
                             C, gamma)
    ite += 1
    while (not Ed.SV_compare(old_support_vectors_, new_support_vectors_)):
        old_support_vectors_ = new_support_vectors_
        new_support_vectors_, Upload_support_vector_, Upload_label, Collect_support_vector_, Collect_label \
            = training_iteration(Edge_node_n,
                             Edge_data,
                             Edge_label,
                             Upload_support_vector_,
                             Upload_label,
                             Collect_support_vector_,
                             Collect_label,
                             C, gamma)
        ite += 1

    print(SV_diff_count(Global_model.support_vectors_, new_support_vectors_))
    # 'proposed' cost: size of the final collected training set.
    n_data_point[1] = int(np.size(Collect_support_vector_, axis=0))

    # --- Baseline: upload every local support vector. ---
    Edge_num = np.zeros(Edge_node_n).tolist()
    for i in range(Edge_node_n):
        Edge_num[i] = np.size(Edge_label_all[i])

    Distance_plus = []
    Distance_minus = []
    support_plus_ = []
    support_minus_ = []
    support_vectors_plus = []
    support_vectors_minus = []

    for i in range(Edge_node_n):
        local_model = Ed.local_train(Edge_data_all[i], Edge_label_all[i], C,
                                     gamma, 'rbf')
        support_, support_vectors_, n_support_ = Ed.local_support(local_model)

        support_vectors_plus.append(support_vectors_[n_support_[0]:, :])
        support_vectors_minus.append(support_vectors_[:n_support_[0], :])

    # Stack the per-node support vectors and build +/-1 labels for them.
    local_support_vector = np.concatenate(
        (support_vectors_plus[0], support_vectors_minus[0]), axis=0)
    local_label_plus = np.ones(np.size(support_vectors_plus[0], axis=0))
    local_label_minus = np.ones(np.size(support_vectors_minus[0],
                                        axis=0)) * (-1)
    local_label = np.concatenate((local_label_plus, local_label_minus), axis=0)
    for i in range(1, Edge_node_n):
        local_support_vector = np.concatenate(
            (local_support_vector, support_vectors_plus[i],
             support_vectors_minus[i]),
            axis=0)
        local_label_plus = np.ones(np.size(support_vectors_plus[i], axis=0))
        local_label_minus = np.ones(np.size(support_vectors_minus[i],
                                            axis=0)) * (-1)
        local_label = np.concatenate(
            (local_label, local_label_plus, local_label_minus), axis=0)

    old_support_vectors_ = []

    central_model, global_support_vector, global_label = \
        Ce.central_training(local_support_vector, local_label, C, gamma, 'rbf')

    new_support_vectors_ = central_model.support_vectors_

    ite_count = 1

    # Support vectors uploaded so far; used to skip duplicates below.
    remain_SV = np.copy(local_support_vector)

    # Repeat until the central model's support vectors stop changing:
    # broadcast the central SVs back to the nodes, retrain locally, and
    # upload only support vectors not previously seen.
    while (not Ed.SV_compare(old_support_vectors_, new_support_vectors_)):
        ite_count += 1

        new_support_vector_plus = []
        new_support_vector_minus = []

        for i in range(Edge_node_n):
            Edge_data_all[i], Edge_label_all[i] = \
                Ed.data_mix(Edge_data_all[i], Edge_label_all[i], global_support_vector, global_label)

            local_model = Ed.local_train(Edge_data_all[i], Edge_label_all[i],
                                         C, gamma, 'rbf')
            support_, support_vectors_, n_support_ = Ed.local_support(
                local_model)

            # Collect only support vectors that were not uploaded before.
            for j in support_vectors_[n_support_[0]:, :]:
                if (not Ed.array_compare(j, remain_SV)):
                    new_support_vector_plus.append(j)

            for j in support_vectors_[:n_support_[0], :]:
                if (not Ed.array_compare(j, remain_SV)):
                    new_support_vector_minus.append(j)

        new_support_vector_plus = np.array(new_support_vector_plus)
        new_support_vector_minus = np.array(new_support_vector_minus)

        # Append whichever classes actually produced new support vectors.
        if (np.size(new_support_vector_plus, axis=0) != 0
                and np.size(new_support_vector_minus, axis=0) != 0):
            local_support_vector = \
                np.concatenate((local_support_vector, new_support_vector_plus, new_support_vector_minus), axis=0)
            local_label_plus = np.ones(np.size(new_support_vector_plus,
                                               axis=0))
            local_label_minus = np.ones(
                np.size(new_support_vector_minus, axis=0)) * (-1)
            local_label = np.concatenate(
                (local_label, local_label_plus, local_label_minus), axis=0)
        elif (np.size(new_support_vector_plus, axis=0) == 0
              and np.size(new_support_vector_minus, axis=0) != 0):
            local_support_vector = \
                np.concatenate((local_support_vector, new_support_vector_minus), axis=0)
            local_label_minus = np.ones(
                np.size(new_support_vector_minus, axis=0)) * (-1)
            local_label = np.concatenate((local_label, local_label_minus),
                                         axis=0)
        elif (np.size(new_support_vector_plus, axis=0) != 0
              and np.size(new_support_vector_minus, axis=0) == 0):
            local_support_vector = \
                np.concatenate((local_support_vector, new_support_vector_plus), axis=0)
            local_label_plus = np.ones(np.size(new_support_vector_plus,
                                               axis=0))
            local_label = np.concatenate((local_label, local_label_plus),
                                         axis=0)

        remain_SV = np.copy(local_support_vector)

        old_support_vectors_ = central_model.support_vectors_

        central_model, global_support_vector, global_label = \
            Ce.central_training(local_support_vector, local_label, C, gamma, 'rbf')

        new_support_vectors_ = central_model.support_vectors_

    print(SV_diff_count(Global_model.support_vectors_, new_support_vectors_))
    # Baseline cost: every support vector ever uploaded.
    n_data_point[2] = int(np.size(local_support_vector, axis=0))

    dataframe = pd.DataFrame({
        'method_name': method_name,
        'n_data_point': n_data_point
    })

    # Dataset id -> CSV file-name prefix (same strings as before).
    dataset_names = {
        0: 'skin_nonskin',
        1: 'phishing',
        2: 'a8a',
        3: 'ijcnn1',
        4: 'covtype',
    }
    if dataset not in dataset_names:
        raise ValueError
    file_name = dataset_names[dataset] + '_' + str(C) + '_' + str(
        gamma) + '_' + str(Edge_node_n) + '_nodes.csv'

    dataframe.to_csv(file_name, index=False, sep=',')
def main(argv):
    """Grid-search C/gamma for a distributed core-set (MEB) SVM.

    For each (C, gamma) pair, every edge node computes the core set of a
    minimum enclosing ball on its share of the data; the union of those
    core sets trains a single SVC.  The upload size, support-vector count
    and test accuracy of each pair are written to a CSV under results/.

    Command line (see the -h output):
        -n  number of edge nodes
        -p  partition scheme id (parsed but currently unused here)
        -d  dataset id: 0 skin_nonskin, 1 phishing, 2 NB15, 3 ijcnn, 4 covtype
        -t  number of parts (parsed but currently unused here)
        -e  'True'/'False' equal-partition flag (parsed but currently unused)
    """
    # Defaults, overridable from the command line.
    edge_node_n = 5
    C = 7.0
    gamma = 0.5
    dataset = 1
    data_part = 0
    n_part = 5
    equal_flag = True

    try:
        opts, args = getopt.getopt(argv, 'hn:p:d:t:e:', [
            'help', 'Edge_node_n=', 'data_part=', 'dataset=', 'n_part=',
            'equal_flag='
        ])
    except getopt.GetoptError:
        print(
            'Distributed_core_set.py -n <Edge_node_n> -p <data_part> -d <dataset> -t <n_part> -e <equal_flag>'
        )
        sys.exit(2)

    for opt, arg in opts:
        if (opt == '-h'):
            print(
                'Distributed_core_set.py -n <Edge_node_n> -p <data_part> -d <dataset> -t <n_part> -e <equal_flag>'
            )
            print(
                '-p 0: data_partition; 1: kmeans_partition; 2: kmeans_random')
            print(
                '-d 0: skin_nonskin; 1: phishing; 2: NB15; 3: ijcnn; 4: covtype'
            )
            print('-e True: equal; False: not equal')
            sys.exit()
        elif (opt == '-n'):
            edge_node_n = int(arg)
        elif (opt == '-p'):
            data_part = int(arg)
        elif (opt == '-d'):
            dataset = int(arg)
        elif (opt == '-t'):
            n_part = int(arg)
        elif (opt == '-e'):
            # BUG FIX: the raw option string used to be stored, so
            # '-e False' produced the truthy string 'False'.
            equal_flag = (arg == 'True')

    # Hyper-parameter grid per dataset.  BUG FIX: datasets without a grid
    # (2, 4) previously fell through and crashed later with a confusing
    # NameError on C_set; fail fast with a clear error instead.
    if (dataset == 0):
        C_set = np.arange(1.0, 18.0, 2.0)
        gamma_set = np.array([0.1, 0.3, 0.5, 1.0, 2.0, 3.0])
    elif (dataset == 1):
        C_set = np.arange(1.0, 11.0, 1.0)
        gamma_set = np.array([0.01, 0.03, 0.1, 0.3, 0.5, 1.0])
    elif (dataset == 3):
        C_set = np.arange(1.0, 16.0, 2.0)
        gamma_set = np.array([0.1, 0.3, 0.5, 1.0, 2.0])
    else:
        raise ValueError('no hyper-parameter grid for dataset %d' % dataset)

    # MEB approximation tolerance.
    epsilon = 0.001

    # Load the training data and distribute it over the edge nodes.
    train_data, train_label, test_data, test_label = read_UCI_data(dataset)
    edge_data, edge_label, global_index = data_partition(
        train_data, train_label, edge_node_n)

    # One row per (C, gamma) pair in the output table.
    C_list = []
    gamma_list = []
    upload_n_list = []
    SV_n_list = []
    accuracy_list = []

    for C in C_set:
        for gamma in gamma_set:

            C_list.append(C)
            gamma_list.append(gamma)

            # MEB core set at each edge node; record the total upload size.
            upload_n = 0
            global_set = []
            for node in range(edge_node_n):
                train_size = np.size(edge_label[node])
                R, dist_core, core_set = MEB(edge_data[node],
                                             edge_label[node],
                                             train_size,
                                             epsilon,
                                             C,
                                             'rbf',
                                             gamma=gamma)
                upload_n = upload_n + np.size(core_set)
                # Map local core-set indices back to global index space.
                global_set.append(global_index[node][core_set])
            upload_n_list.append(upload_n)

            # Union of all per-node core sets (global indices).
            core_set = global_set[0]
            for node in range(1, edge_node_n):
                core_set = np.append(core_set, global_set[node])

            core_train = train_data[core_set]
            core_label = train_label[core_set]

            # Central model trained on the core points only.
            core_model = SVC(C=C, kernel='rbf', gamma=gamma)
            core_model.fit(core_train, core_label)
            core_SV_size = core_model.n_support_[0] + core_model.n_support_[1]

            SV_n_list.append(core_SV_size)
            accuracy_list.append(core_model.score(test_data, test_label))

    dataframe = pd.DataFrame({
        'C': C_list,
        'gamma': gamma_list,
        '# of upload': upload_n_list,
        '# of SV': SV_n_list,
        'test_accuracy': accuracy_list
    })

    # Dataset id -> CSV file-name prefix (same strings as before).
    dataset_names = {
        0: 'skin_nonskin',
        1: 'phishing',
        2: 'NB15',
        3: 'ijcnn1',
        4: 'covtype',
    }
    file_name = 'results/' + dataset_names[dataset] + '_' + str(
        edge_node_n) + '_nodes.csv'

    dataframe.to_csv(file_name, index=False, sep=',')
Пример #4
0
# Fit the centralized model on the full training set.
# NOTE(review): Global_model, C, gamma, Train_data and Train_label are
# defined before this excerpt begins.
Global_model.fit(Train_data, Train_label)
# print (np.size(Global_model.dual_coef_[0]))
# SVM_plot(Train_data[:, 0], Train_data[:, 1], Train_label, Global_model)

# Retrain a second SVC using only the global model's support vectors.
Only_SV_model = svm.SVC(C = C, kernel = 'rbf', gamma=gamma)
Only_SV_model.fit(Global_model.support_vectors_, Train_label[Global_model.support_])

# Per-class support-vector counts of both models, for comparison.
print(Global_model.n_support_)
print(Only_SV_model.n_support_)

# # SVM_plot(Train_data[:, 0], Train_data[:, 1], Train_label, Global_model)
# # SVM_plot(Global_model.support_vectors_[:, 0], Global_model.support_vectors_[:, 1], Train_label[Global_model.support_], Only_SV_model)

# Split the training data across the edge nodes.
Edge_node_n = 10
Edge_data, Edge_label, Global_index = data_partition(Train_data, Train_label, Edge_node_n)
# Edge_data, Edge_label, Global_index = kmeans_partition(Train_data, Train_label, Edge_node_n)

# Report the per-node partition sizes.
for i in range(Edge_node_n):
    print ("Edge_data[%i].size is %s and Edge_label[%i].size is %d" %(i, np.size ( Edge_data[i], axis=0 ), i,np.size(Edge_label[i]) ))


# Per-node containers for margins and class-split support vectors.
Distance_plus = []
Distance_minus = []
support_plus_ = []
support_minus_ = []
support_vectors_plus = []
support_vectors_minus = []

# NOTE(review): this loop appears truncated in this excerpt; only the
# first statement of its body is visible.
for i in range(Edge_node_n):
    local_model = Ed.local_train(Edge_data[i], Edge_label[i], C, gamma, 'rbf')