Example #1
File: fedavg.py  Project: liaojie-box/Files
def main(args):
    #####-Choose Variable-#####
    set_variable = args.set_num_Chosenusers
    set_variable0 = copy.deepcopy(args.set_epochs)
    set_variable1 = copy.deepcopy(args.set_degree_noniid)

    if not os.path.exists('./experiresult'):
        os.mkdir('./experiresult')

    # load dataset and split users
    dict_users, dict_users_train, dict_users_test = {}, {}, {}
    dataset_train, dataset_test = [], []
    if args.dataset == 'mnist':
        dataset_train = datasets.MNIST('./dataset/mnist/',
                                       train=True,
                                       download=True,
                                       transform=transforms.Compose([
                                           transforms.ToTensor(),
                                           transforms.Normalize((0.1307, ),
                                                                (0.3081, ))
                                       ]))
        dataset_test = datasets.MNIST('./dataset/mnist/',
                                      train=False,
                                      download=True,
                                      transform=transforms.Compose([
                                          transforms.ToTensor(),
                                          transforms.Normalize((0.1307, ),
                                                               (0.3081, ))
                                      ]))
        # sample users
        if args.iid:
            dict_users = mnist_iid(args, dataset_train, args.num_users,
                                   args.num_items_train)
            # dict_users_test = mnist_iid(dataset_test, args.num_users, args.num_items_test)
            dict_sever = mnist_iid(args, dataset_test, args.num_users,
                                   args.num_items_test)
        else:
            dict_users = mnist_noniid(args, dataset_train, args.num_users,
                                      args.num_items_train)
            dict_sever = mnist_noniid(args, dataset_test, args.num_users,
                                      args.num_items_test)

    elif args.dataset == 'cifar':
        dict_users_train, dict_sever = {}, {}
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        dataset_train = datasets.CIFAR10('./dataset/cifar/',
                                         train=True,
                                         transform=transform,
                                         target_transform=None,
                                         download=True)
        dataset_test = copy.deepcopy(dataset_train)
        if args.iid:
            dict_users = cifar_iid(dataset_train, args.num_users,
                                   args.num_items_train)
            num_train = int(0.6 * args.num_items_train)
            for idx in range(args.num_users):
                dict_users_train[idx] = set(list(dict_users[idx])[:num_train])
                dict_sever[idx] = set(list(dict_users[idx])[num_train:])
        else:
            dict_users = cifar_noniid(args, dataset_train, args.num_users,
                                      args.num_items_train)
            num_train = int(0.6 * args.num_items_train)
            for idx in range(args.num_users):
                dict_users_train[idx] = set(list(dict_users[idx])[:num_train])
                dict_sever[idx] = set(list(dict_users[idx])[num_train:])


    img_size = dataset_train[0][0].shape

    for v in range(len(set_variable)):
        final_train_loss = [[0 for i in range(len(set_variable1))]
                            for j in range(len(set_variable0))]
        final_train_accuracy = [[0 for i in range(len(set_variable1))]
                                for j in range(len(set_variable0))]
        final_test_loss = [[0 for i in range(len(set_variable1))]
                           for j in range(len(set_variable0))]
        final_test_accuracy = [[0 for i in range(len(set_variable1))]
                               for j in range(len(set_variable0))]
        final_com_cons = [[0 for i in range(len(set_variable1))]
                          for j in range(len(set_variable0))]
        args.num_Chosenusers = copy.deepcopy(set_variable[v])
        for s in range(len(set_variable0)):
            for j in range(len(set_variable1)):
                args.epochs = copy.deepcopy(set_variable0[s])
                args.degree_noniid = copy.deepcopy(set_variable1[j])
                print(args)
                loss_test, loss_train = [], []
                acc_test, acc_train = [], []
                for m in range(args.num_experiments):
                    # build model
                    net_glob = None
                    if args.model == 'cnn' and args.dataset == 'mnist':
                        if args.gpu != -1:
                            torch.cuda.set_device(args.gpu)
                            # net_glob = CNNMnist(args=args).cuda()
                            net_glob = CNN_test(args=args).cuda()
                        else:
                            net_glob = CNNMnist(args=args)
                    elif args.model == 'mlp' and args.dataset == 'mnist':
                        len_in = 1
                        for x in img_size:
                            len_in *= x
                        if args.gpu != -1:
                            torch.cuda.set_device(args.gpu)
                            net_glob = MLP1(dim_in=len_in,
                                            dim_hidden=256,
                                            dim_out=args.num_classes).cuda()
                        else:
                            net_glob = MLP1(dim_in=len_in,
                                            dim_hidden=256,
                                            dim_out=args.num_classes)
                    elif args.model == 'cnn' and args.dataset == 'cifar':
                        if args.gpu != -1:
                            net_glob = CNNCifar(args).cuda()
                        else:
                            net_glob = CNNCifar(args)
                    else:
                        exit('Error: unrecognized model')
                    print("Nerual Net:", net_glob)

                    net_glob.train()  # train() only sets training mode; it does not change the weights
                    # copy weights
                    w_glob = net_glob.state_dict()
                    w_size = 0
                    w_size_all = 0
                    for k in w_glob.keys():
                        nelements = w_glob[k].numel()  # count every element, regardless of tensor rank
                        w_size += nelements * 4  # 4 bytes per float32 parameter
                        w_size_all += nelements
                        # print("Size ", k, ": ", nelements * 4)
                    print("Weight Size:", w_size, " bytes")
                    print("Weight & Grad Size:", w_size * 2, " bytes")
                    print("Each user Training size:",
                          784 * 8 / 8 * args.local_bs, " bytes")
                    print("Total Training size:", 784 * 8 / 8 * 60000,
                          " bytes")
                    # training
                    loss_avg_list, acc_avg_list, list_loss, loss_avg, com_cons = [], [], [], [], []
                    ###  FedAvg Algorithm  ###
                    for iter in range(args.epochs):
                        print('\n', '*' * 20, f'Epoch: {iter}', '*' * 20)
                        if args.num_Chosenusers < args.num_users:
                            chosenUsers = random.sample(
                                range(args.num_users), args.num_Chosenusers)
                            chosenUsers.sort()
                        else:
                            chosenUsers = range(args.num_users)
                        print("\nChosen users:", chosenUsers)
                        w_locals, w_locals_1ep, loss_locals, acc_locals = [], [], [], []

                        values_global = []
                        for i in w_glob.keys():
                            values_global += list(
                                w_glob[i].view(-1).cpu().numpy())

                        for idx in range(len(chosenUsers)):
                            local = LocalUpdate(
                                args=args,
                                dataset=dataset_train,
                                idxs=dict_users[chosenUsers[idx]],
                                tb=summary)
                            w_1st_ep, w, loss, acc = local.update_weights(
                                net=copy.deepcopy(net_glob))
                            w_locals.append(copy.deepcopy(w))
                            ### get 1st ep local weights ###
                            w_locals_1ep.append(copy.deepcopy(w_1st_ep))
                            loss_locals.append(copy.deepcopy(loss))
                            # print("User ", chosenUsers[idx], " Acc:", acc, " Loss:", loss)
                            acc_locals.append(copy.deepcopy(acc))

                            # histogram for all clients
                            values_local = []
                            for i in w_glob.keys():
                                values_local += list(
                                    w[i].view(-1).cpu().numpy())
                            values_increment = [
                                values_local[i] - values_global[i]
                                for i in range(len(values_local))
                            ]
                            value_sequence = sorted(
                                values_increment,
                                reverse=True)  # descending sequence of weight increments
                            hist, bin_edges = np.histogram(value_sequence,
                                                           bins=100)
                            # valueCount = collections.Counter(hist)
                            # val, cnt = zip(*valueCount.items())
                            #print(hist, bin_edges)
                            # fig, ax = plt.subplots()
                            plt.close()
                            # plt.bar(range(len(hist)), hist, width=0.80, color='b')
                            # plt.close()
                            # plt.hist(value_sequence,bin_edges,color='b',alpha=0.8, rwidth=0.8)
                            plt.hist(value_sequence,
                                     bin_edges,
                                     color='steelblue',
                                     edgecolor='black',
                                     alpha=0.8)
                            os.makedirs('./histogra', exist_ok=True)  # make sure the output directory exists
                            plt.savefig(
                                './histogra/histogra-{}-client-{}-iter-{}.pdf'.
                                format(args.model, idx, iter))
                            plt.show()

                        # malicious_users = [0, 3]
                        # w_locals = noise_add(args, w_locals, 0.001, malicious_users)
                        ### update global weights ###
                        # w_locals = users_sampling(args, w_locals, chosenUsers)
                        w_glob = average_weights(w_locals)

                        # val_min_dist, val_mah_dist = [], []
                        # for i in range(len(w_locals)):
                        #     val_min_dist.append(minkowski_distance(w_locals[i],w_glob,1))
                        #     val_mah_dist.append(mahala_distance(w_locals[i],w_glob,w_locals,5))
                        # print('Minkowski distance:', val_mah_dist)
                        # print('Mahala distance:', val_min_dist)

                        # copy weight to net_glob
                        net_glob.load_state_dict(w_glob)
                        # global test
                        list_acc, list_loss = [], []
                        net_glob.eval()
                        for c in range(args.num_users):
                            net_local = LocalUpdate(args=args,
                                                    dataset=dataset_test,
                                                    idxs=dict_sever[c],
                                                    tb=summary)
                            acc, loss = net_local.test(net=net_glob)
                            # acc, loss = net_local.test_gen(net=net_glob, idxs=dict_users[c], dataset=dataset_test)
                            list_acc.append(acc)
                            list_loss.append(loss)
                        # print("\nEpoch: {}, Global test loss {}, Global test acc: {:.2f}%".\
                        #      format(iter, sum(list_loss) / len(list_loss),100. * sum(list_acc) / len(list_acc)))
                        # print loss
                        loss_avg = sum(loss_locals) / len(loss_locals)
                        acc_avg = sum(acc_locals) / len(acc_locals)
                        loss_avg_list.append(loss_avg)
                        acc_avg_list.append(acc_avg)
                        print("\nTrain loss: {}, Train acc: {}".\
                              format(loss_avg_list[-1], acc_avg_list[-1]))
                        print("\nTest loss: {}, Test acc: {}".\
                              format(sum(list_loss) / len(list_loss), sum(list_acc) / len(list_acc)))

                        # if (iter+1)%20==0:
                        #     torch.save(net_glob.state_dict(),'./Train_model/glob_model_{}epochs.pth'.format(iter))

                    loss_train.append(loss_avg)
                    acc_train.append(acc_avg)
                    loss_test.append(sum(list_loss) / len(list_loss))
                    acc_test.append(sum(list_acc) / len(list_acc))
                    com_cons.append(iter + 1)
                # plot loss curve
                final_train_loss[s][j] = copy.deepcopy(
                    sum(loss_train) / len(loss_train))
                final_train_accuracy[s][j] = copy.deepcopy(
                    sum(acc_train) / len(acc_train))
                final_test_loss[s][j] = copy.deepcopy(
                    sum(loss_test) / len(loss_test))
                final_test_accuracy[s][j] = copy.deepcopy(
                    sum(acc_test) / len(acc_test))
                final_com_cons[s][j] = copy.deepcopy(
                    sum(com_cons) / len(com_cons))

            print('\nFinal train loss:', final_train_loss)
            print('\nFinal train accuracy:', final_train_accuracy)
            print('\nFinal test loss:', final_test_loss)
            print('\nFinal test accuracy:', final_test_accuracy)
        timeslot = int(time.time())
        data_test_loss = pd.DataFrame(index=set_variable0,
                                      columns=set_variable1,
                                      data=final_train_loss)
        data_test_loss.to_csv(
            './experiresult/' +
            'train_loss_{}_{}.csv'.format(set_variable[v], timeslot))
        data_test_loss = pd.DataFrame(index=set_variable0,
                                      columns=set_variable1,
                                      data=final_test_loss)
        data_test_loss.to_csv(
            './experiresult/' +
            'test_loss_{}_{}.csv'.format(set_variable[v], timeslot))
        data_test_acc = pd.DataFrame(index=set_variable0,
                                     columns=set_variable1,
                                     data=final_train_accuracy)
        data_test_acc.to_csv(
            './experiresult/' +
            'train_acc_{}_{}.csv'.format(set_variable[v], timeslot))
        data_test_acc = pd.DataFrame(index=set_variable0,
                                     columns=set_variable1,
                                     data=final_test_accuracy)
        data_test_acc.to_csv(
            './experiresult/' +
            'test_acc_{}_{}.csv'.format(set_variable[v], timeslot))
        data_test_acc = pd.DataFrame(index=set_variable0,
                                     columns=set_variable1,
                                     data=final_com_cons)
        data_test_acc.to_csv('./experiresult/' +
                             'aggregation_consuming_{}_{}.csv'.format(
                                 set_variable[v], timeslot))

        plt.close()
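
The aggregation helper average_weights called in Example #1 (and again in Examples #2 and #4) is not shown in this listing. A minimal sketch of the usual FedAvg aggregation, assuming every client returns a state_dict for the same model architecture, might look like this:

import copy
import torch

def average_weights(w_locals):
    # Element-wise mean of a list of client state_dicts (plain FedAvg).
    w_avg = copy.deepcopy(w_locals[0])
    for k in w_avg.keys():
        for w in w_locals[1:]:
            w_avg[k] += w[k]
        w_avg[k] = torch.div(w_avg[k], len(w_locals))
    return w_avg
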
Example #2
    net_best = None
    val_acc_list, net_list = [], []
    for iter in tqdm(range(args.epochs)):
        w_locals, loss_locals = [], []
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)
        for idx in idxs_users:
            local = LocalUpdate(args=args,
                                dataset=dataset_train,
                                idxs=dict_users[idx],
                                tb=summary)
            w, loss = local.update_weights(net=copy.deepcopy(net_glob))
            w_locals.append(copy.deepcopy(w))
            loss_locals.append(copy.deepcopy(loss))
        # update global weights
        w_glob = average_weights(w_locals)

        # copy weight to net_glob
        net_glob.load_state_dict(w_glob)

        # print loss
        loss_avg = sum(loss_locals) / len(loss_locals)
        if (iter + 1) % 10 == 0:
            print('\nTrain loss:', loss_avg)
        loss_train.append(loss_avg)

    # plot loss curve
    plt.figure()
    plt.plot(range(len(loss_train)), loss_train)
    plt.ylabel('train_loss')
    plt.savefig('../save/fed_{}_{}_{}_C{}_iid{}.png'.format(
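
Example #2 relies on a LocalUpdate class whose update_weights method returns the locally trained state_dict and the average training loss; its definition is not part of the listing. A minimal sketch of local SGD over one client's data split, assuming the usual args.local_ep and args.local_bs options plus a learning-rate option here called args.lr, could look like:

import torch
from torch import nn
from torch.utils.data import DataLoader, Subset

class LocalUpdate:
    # Hypothetical minimal client-side trainer; the real class also receives a summary writer via `tb`.
    def __init__(self, args, dataset, idxs, tb=None):
        self.args = args
        self.loader = DataLoader(Subset(dataset, list(idxs)),
                                 batch_size=args.local_bs, shuffle=True)
        self.criterion = nn.CrossEntropyLoss()

    def update_weights(self, net):
        net.train()
        optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr)
        epoch_loss = []
        for _ in range(self.args.local_ep):
            batch_loss = []
            for images, labels in self.loader:
                optimizer.zero_grad()
                loss = self.criterion(net(images), labels)
                loss.backward()
                optimizer.step()
                batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss) / len(batch_loss))
        return net.state_dict(), sum(epoch_loss) / len(epoch_loss)
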
Example #3
def main(args):
    #####-Choose Variable-#####
    set_variable = args.set_num_Chosenusers
    set_variable0 = copy.deepcopy(args.set_epochs)
    set_variable1 = copy.deepcopy(args.set_privacy_budget)

    if not os.path.exists('./exper_result'):
        os.mkdir('./exper_result')

    # load dataset and split users
    dataset_train, dataset_test = [], []
    dataset_train = datasets.MNIST('./dataset/mnist/',
                                   train=True,
                                   download=True,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.1307, ),
                                                            (0.3081, ))
                                   ]))
    dataset_test = datasets.MNIST('./dataset/mnist/',
                                  train=False,
                                  download=True,
                                  transform=transforms.Compose([
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.1307, ),
                                                           (0.3081, ))
                                  ]))

    # sample users
    if args.iid:
        dict_users = mnist_iid(dataset_train, args.num_users,
                               args.num_items_train)
        # dict_users_test = mnist_iid(dataset_test, args.num_users, args.num_items_test)
        dict_server = mnist_iid(dataset_test, args.num_users,
                                args.num_items_test)
    else:
        dict_users = mnist_noniid(dataset_train, args.num_users)
        dict_server = mnist_noniid(dataset_test, args.num_users)

    img_size = dataset_train[0][0].shape

    for v in range(len(set_variable)):
        final_train_loss = [[0 for i in range(len(set_variable1))]
                            for j in range(len(set_variable0))]
        final_train_accuracy = [[0 for i in range(len(set_variable1))]
                                for j in range(len(set_variable0))]
        final_test_loss = [[0 for i in range(len(set_variable1))]
                           for j in range(len(set_variable0))]
        final_test_accuracy = [[0 for i in range(len(set_variable1))]
                               for j in range(len(set_variable0))]
        final_com_cons = [[0 for i in range(len(set_variable1))]
                          for j in range(len(set_variable0))]
        args.num_Chosenusers = copy.deepcopy(set_variable[v])
        for s in range(len(set_variable0)):
            for j in range(len(set_variable1)):
                args.epochs = copy.deepcopy(set_variable0[s])
                args.privacy_budget = copy.deepcopy(set_variable1[j])
                print("dataset:", args.dataset, " num_users:", args.num_users, " num_chosen_users:", args.num_Chosenusers, " Privacy budget:", args.privacy_budget,\
                      " epochs:", args.epochs, "local_ep:", args.local_ep, "local train size", args.num_items_train, "batch size:", args.local_bs)
                loss_test, loss_train = [], []
                acc_test, acc_train = [], []
                for m in range(args.num_experiments):
                    # build model
                    net_glob = None
                    if args.model == 'cnn' and args.dataset == 'mnist':
                        if args.gpu != -1:
                            torch.cuda.set_device(args.gpu)
                            # net_glob = CNNMnist(args=args).cuda()
                            net_glob = CNN_test(args=args).cuda()
                        else:
                            net_glob = CNNMnist(args=args)
                    elif args.model == 'mlp':
                        len_in = 1
                        for x in img_size:
                            len_in *= x
                        if args.gpu != -1:
                            torch.cuda.set_device(args.gpu)
                            net_glob = MLP1(dim_in=len_in, dim_hidden=256,\
                                            dim_out=args.num_classes).cuda()
                        else:
                            net_glob = MLP1(dim_in=len_in, dim_hidden=256,\
                                            dim_out=args.num_classes)
                    else:
                        exit('Error: unrecognized model')
                    print("Nerual Net:", net_glob)

                    net_glob.train()  # train() only sets training mode; it does not change the weights
                    # copy weights
                    w_glob = net_glob.state_dict()
                    w_size = 0
                    w_size_all = 0
                    for k in w_glob.keys():
                        nelements = w_glob[k].numel()  # count every element, regardless of tensor rank
                        w_size += nelements * 4  # 4 bytes per float32 parameter
                        w_size_all += nelements
                        # print("Size ", k, ": ", nelements * 4)
                    print("Weight Size:", w_size, " bytes")
                    print("Weight & Grad Size:", w_size * 2, " bytes")
                    print("Each user Training size:",
                          784 * 8 / 8 * args.local_bs, " bytes")
                    print("Total Training size:", 784 * 8 / 8 * 60000,
                          " bytes")
                    # training
                    threshold_epochs = copy.deepcopy(args.epochs)
                    threshold_epochs_list, noise_list = [], []
                    loss_avg_list, acc_avg_list, list_loss, loss_avg = [], [], [], []
                    eps_tot_list, eps_tot = [], 0
                    com_cons = []
                    ###  FedAvg Algorithm  ###
                    ### Compute noise scale ###
                    noise_scale = copy.deepcopy(Privacy_account(args,\
                                            threshold_epochs, noise_list, 0))
                    for iter in range(args.epochs):
                        print('\n', '*' * 20, f'Epoch: {iter}', '*' * 20)
                        start_time = time.time()
                        if args.num_Chosenusers < args.num_users:
                            chosenUsers = random.sample(range(1,args.num_users)\
                                                        ,args.num_Chosenusers)
                            chosenUsers.sort()
                        else:
                            chosenUsers = range(args.num_users)
                        print("\nChosen users:", chosenUsers)

                        if iter >= 1 and args.para_est:
                            w_locals_before = copy.deepcopy(w_locals_org)

                        w_locals, w_locals_1ep, loss_locals, acc_locals = [], [], [], []
                        for idx in range(len(chosenUsers)):
                            local = LocalUpdate(args=args, dataset=dataset_train,\
                                    idxs=dict_users[chosenUsers[idx]], tb=summary)
                            w_1st_ep, w, loss, acc = local.update_weights(\
                                                    net=copy.deepcopy(net_glob))
                            w_locals.append(copy.deepcopy(w))
                            ### get 1st ep local weights ###
                            w_locals_1ep.append(copy.deepcopy(w_1st_ep))
                            loss_locals.append(copy.deepcopy(loss))
                            # print("User ", chosenUsers[idx], " Acc:", acc, " Loss:", loss)
                            acc_locals.append(copy.deepcopy(acc))

                        w_locals_org = copy.deepcopy(w_locals)

                        #  estimate some paramters of the loss function
                        if iter >= 2 and args.para_est:
                            Lipz_s,Lipz_c,delta,_,_,_,_,_=para_estimate(args,\
                                        list_loss,loss_locals,w_locals_before,\
                                            w_locals_org,w_glob_before,w_glob)
                            print('Lipschitz smooth, lipschitz continuous, gradient divergence:',\
                                  sum(Lipz_s)/len(Lipz_s),sum(Lipz_c)/len(Lipz_c),sum(delta)/len(delta))

                        ### Clipping ###
                        for idx in range(len(chosenUsers)):
                            w_locals[idx] = copy.deepcopy(
                                clipping(args, w_locals[idx]))
                            # print(get_2_norm(w_locals[idx], w_glob))

                        ### perturb 'w_local' ###
                        w_locals = noise_add(args, noise_scale, w_locals)

                        ### update global weights ###
                        ### w_locals = users_sampling(args, w_locals, chosenUsers) ###
                        w_glob_before = copy.deepcopy(w_glob)
                        w_glob = average_weights(w_locals)

                        # copy weight to net_glob
                        net_glob.load_state_dict(w_glob)
                        # global test
                        list_acc, list_loss = [], []
                        net_glob.eval()
                        for c in range(args.num_users):
                            net_local = LocalUpdate(args=args,dataset=dataset_test,\
                                                    idxs=dict_server[c], tb=summary)
                            acc, loss = net_local.test(net=net_glob)
                            # acc, loss = net_local.test_gen(net=net_glob,\
                            # idxs=dict_users[c], dataset=dataset_test)
                            list_acc.append(acc)
                            list_loss.append(loss)
                        # print("\nEpoch:{},Global test loss:{}, Global test acc:{:.2f}%".\
                        #      format(iter, sum(list_loss) / len(list_loss),\
                        #      100. * sum(list_acc) / len(list_acc)))

                        # print loss
                        loss_avg = sum(loss_locals) / len(loss_locals)
                        acc_avg = sum(acc_locals) / len(acc_locals)
                        loss_avg_list.append(loss_avg)
                        acc_avg_list.append(acc_avg)
                        print("\nTrain loss: {}, Train acc: {}".\
                              format(loss_avg_list[-1], acc_avg_list[-1]))
                        print("\nTest loss: {}, Test acc: {}".\
                              format(sum(list_loss) / len(list_loss),\
                                     sum(list_acc) / len(list_acc)))

                        noise_list.append(noise_scale)
                        threshold_epochs_list.append(threshold_epochs)
                        print('\nNoise Scale:', noise_list)
                        print('\nThreshold epochs:', threshold_epochs_list)
                        ### optimal method ###
                        if args.dp_mechanism == 'CRD' and iter >= 1:
                            threshold_epochs = Adjust_T(args, loss_avg_list,\
                                                threshold_epochs_list, iter)
                            noise_scale = copy.deepcopy(Privacy_account(args,\
                                        threshold_epochs, noise_list, iter))

                        # print run time of each experiment
                        end_time = time.time()
                        print('Run time: %f second' % (end_time - start_time))

                        if iter >= threshold_epochs:
                            break
                    loss_train.append(loss_avg)
                    acc_train.append(acc_avg)
                    loss_test.append(sum(list_loss) / len(list_loss))
                    acc_test.append(sum(list_acc) / len(list_acc))
                    com_cons.append(iter + 1)

                # record results
                final_train_loss[s][j] = copy.deepcopy(
                    sum(loss_train) / len(loss_train))
                final_train_accuracy[s][j] = copy.deepcopy(
                    sum(acc_train) / len(acc_train))
                final_test_loss[s][j] = copy.deepcopy(
                    sum(loss_test) / len(loss_test))
                final_test_accuracy[s][j] = copy.deepcopy(
                    sum(acc_test) / len(acc_test))
                final_com_cons[s][j] = copy.deepcopy(
                    sum(com_cons) / len(com_cons))

            print('\nFinal train loss:', final_train_loss)
            print('\nFinal train acc:', final_train_accuracy)
            print('\nFinal test loss:', final_test_loss)
            print('\nFinal test acc:', final_test_accuracy)

        timeslot = int(time.time())
        data_test_loss = pd.DataFrame(index = set_variable0, columns =\
                                      set_variable1, data = final_train_loss)
        data_test_loss.to_csv('./exper_result/'+'train_loss_{}_{}_{}.csv'.\
                              format(set_variable[v],args.dp_mechanism,timeslot))
        data_test_loss = pd.DataFrame(index = set_variable0, columns =\
                                      set_variable1, data = final_test_loss)
        data_test_loss.to_csv('./exper_result/'+'test_loss_{}_{}_{}.csv'.\
                              format(set_variable[v],args.dp_mechanism,timeslot))
        data_test_acc = pd.DataFrame(index = set_variable0, columns =\
                                     set_variable1, data = final_train_accuracy)
        data_test_acc.to_csv('./exper_result/'+'train_acc_{}_{}_{}.csv'.\
                             format(set_variable[v],args.dp_mechanism,timeslot))
        data_test_acc = pd.DataFrame(index = set_variable0, columns =\
                                     set_variable1, data = final_test_accuracy)
        data_test_acc.to_csv('./exper_result/'+'test_acc_{}_{}_{}.csv'.\
                             format(set_variable[v],args.dp_mechanism,timeslot))
        data_test_acc = pd.DataFrame(index = set_variable0, columns =\
                                     set_variable1, data = final_com_cons)
        data_test_acc.to_csv('./exper_result/'+'aggregation_consuming_{}_{}_{}.csv'.\
                             format(set_variable[v],args.dp_mechanism,timeslot))
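
Example #3 clips each local model and perturbs it with Gaussian noise before aggregation; the clipping and noise_add helpers (and the Privacy_account routine that derives noise_scale from the privacy budget) are not included in the listing. Under the usual DP-FedAvg recipe of clipping to a global L2 bound and then adding zero-mean Gaussian noise, a rough sketch could be the following, where the threshold option args.clipthr is an assumed name:

import copy
import torch

def clipping(args, w):
    # Rescale the whole state_dict so its global L2 norm stays below args.clipthr (hypothetical option name).
    sq_norm = sum(torch.sum(w[k].float() ** 2).item() for k in w.keys())
    scale = min(1.0, args.clipthr / (sq_norm ** 0.5 + 1e-12))
    w_clipped = copy.deepcopy(w)
    for k in w_clipped.keys():
        w_clipped[k] = w_clipped[k].float() * scale
    return w_clipped

def noise_add(args, noise_scale, w_locals):
    # Add i.i.d. zero-mean Gaussian noise with standard deviation noise_scale to every client model.
    w_noised = copy.deepcopy(w_locals)
    for w in w_noised:
        for k in w.keys():
            w[k] = w[k].float() + noise_scale * torch.randn_like(w[k].float())
    return w_noised
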
Example #4
        ])
    pass

# load the two checkpoints and pull out their model state_dicts
model_dict0 = torch.load(str(MODEL_FOLDER / 'checkpoint_epoch_0.pth'))
params0 = model_dict0['model_state']
model_dict1 = torch.load(str(MODEL_FOLDER / 'checkpoint_epoch_1.pth'))
params1 = model_dict1['model_state']

# append parameters into a list
w_locals = []
w_locals.append(params0)
w_locals.append(params1)

# compute perfect global model
params_avg = average_weights(w_locals)

# drop the in-memory copies and delete the original checkpoint files
del params0
del params1
(MODEL_FOLDER / 'checkpoint_epoch_0.pth').unlink()
(MODEL_FOLDER / 'checkpoint_epoch_1.pth').unlink()

# save global model at each vehicle
torch.save(
    {
        'model_state': params_avg,
        'optimizer_state': model_dict0['optimizer_state'],
        'accumulated_iter': model_dict0['accumulated_iter']
    }, str(model713_file))
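
Example #4 assumes MODEL_FOLDER, model713_file, and average_weights are defined earlier in the surrounding script. As a rough sanity check (a sketch, not part of the original code), the merged checkpoint can be reloaded and inspected before it is copied to each vehicle:

import torch

# Reload the merged checkpoint; the keys mirror those written by torch.save above.
checkpoint = torch.load(str(model713_file), map_location='cpu')
print(sorted(checkpoint.keys()))  # expected: ['accumulated_iter', 'model_state', 'optimizer_state']
# The averaged weights can then be restored into a network with
# net.load_state_dict(checkpoint['model_state']), using whatever model class the project builds.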