Code Example #1
File: main.py Project: Carudy/sofl
def show_acc(net, data_train, data_test, args):
    # net_test = copy.deepcopy(net)
    acc_train, _ = test_img(net, data_train, args)
    acc_test, _ = test_img(net, data_test, args)
    print("Training accuracy: {:.2f}".format(acc_train))
    print("Testing accuracy: {:.2f}".format(acc_test))
    return acc_train.item()
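Note: every example on this page unpacks test_img(net, dataset, args) into an (accuracy, loss) pair, with accuracy in percent and returned as a tensor (hence the .item() above). The helper itself is never shown here; the following is a minimal sketch of what it typically looks like in these FedAvg codebases, assuming a batch-size option args.bs and a GPU flag args.gpu (both names are assumptions, not confirmed by the snippets):

import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader

def test_img(net_g, datatest, args):
    # Sketch only: evaluate net_g on datatest, return (accuracy %, mean loss).
    net_g.eval()
    test_loss, correct = 0.0, 0
    data_loader = DataLoader(datatest, batch_size=args.bs)
    with torch.no_grad():
        for data, target in data_loader:
            if args.gpu != -1:
                data, target = data.to(args.device), target.to(args.device)
            log_probs = net_g(data)
            test_loss += F.cross_entropy(log_probs, target, reduction='sum').item()
            correct += log_probs.argmax(dim=1).eq(target).sum()  # tensor count
    test_loss /= len(data_loader.dataset)
    accuracy = 100.0 * correct / len(data_loader.dataset)  # tensor, so callers use .item()
    return accuracy, test_loss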
Code Example #2
def test(net_glob, dp, args, is_self_balanced, imbalanced_way):
    net_glob.eval()
    acc_train, loss_train = test_img(net_glob, dp, args, is_self_balanced,
                                     imbalanced_way)
    dp.type = 'test'
    acc_test, loss_test = test_img(net_glob, dp, args, is_self_balanced,
                                   imbalanced_way)
    print("Training accuracy: {:.2f}".format(acc_train))
    print("Testing accuracy: {:.2f}".format(acc_test))
Code Example #3
def evalua(net_glob, paras, dataset_test, args):
    net_glob.load_state_dict(paras)
    net_glob.eval()
    acc_test, loss_test = test_img(net_glob, dataset_test, args)
    # print("Testing accuracy: {:.2f}".format(acc_test))
    return acc_test, loss_test


# if __name__ == '__main__':
# net_glob, args, dataset_train, dataset_test, dict_users = modelBuild()
# print(args.device)
Code Example #4
        # copy weight to net_glob
        net_glob.load_state_dict(w_glob)

        # print loss
        loss_avg = sum(loss_locals) / len(loss_locals)
        print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
        loss_train.append(loss_avg)

        # adjust the number of test cases in the call below as needed
        dice_score_glob, _ = evaluate_dice_score(net_glob, '/data/OASISchallenge/FS/', '/data/OASISchallenge/',
                                                 '/data/OASISchallenge/testing_15.txt', args.log_folder)
        
        writer.add_scalar('Dice_Score_Global',  dice_score_glob, iter)
        writer.add_scalar('Train Loss', loss_avg, iter)
        test_img(net_glob, test_loader, writer, args, iter)

        if dice_score_best < dice_score_glob:
            dice_score_best = dice_score_glob
            torch.save({
                'epoch': iter,
                'model_state_dict': net_glob.state_dict(),
                'loss': loss_avg,
                'best_dice': dice_score_best}, model_path)

    # testing
    print("Evaluation")

    net_glob.eval()

    dice_score_glob, _ = evaluate_dice_score(net_glob, '/data/OASISchallenge/FS/', '/data/OASISchallenge/',
                                             '/data/OASISchallenge/testing_15.txt', args.log_folder)
Code Example #5
        # update global weights
        w_glob = FedAvg(w_locals)

        # copy weight to net_glob
        net_glob.load_state_dict(w_glob)

        # print loss
        loss_avg = sum(loss_locals) / len(loss_locals)
        if (epoch + 1) % 50 == 0:
            print('Round {:3d}, Average loss {:.3f}'.format(
                epoch + 1, loss_avg))
        loss_train.append(loss_avg)

        # print acc
        if (epoch + 1) % 100 == 0:
            acc_glob, loss_glob = test_img(net_glob, test_set, args)
            print('Epoch: {:3d} global accuracy: {:.3f}, global loss:{:.3f}'.
                  format(epoch + 1, acc_glob, loss_glob))

    # plot loss curve
    plt.figure()
    plt.plot(range(len(loss_train)), loss_train)
    plt.ylabel('Train loss')
    plt.xlabel('Global epoch')
    plt.savefig('./save/fed_{}_C{}_{}.png'.format(args.meta_epochs, args.frac,
                                                  args.dataset))

    # save global model
    torch.save(net_glob.state_dict(), './save/model.pt')
    print('Global model saved...')
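FedAvg(w_locals), called in this and most of the following examples, reduces the list of client state_dicts to their element-wise average. A minimal sketch of such an aggregator, assuming all state_dicts share the same keys (this is the textbook unweighted average, not necessarily this project's exact implementation):

import copy
import torch

def FedAvg(w):
    # Sketch only: element-wise average of a list of model state_dicts.
    w_avg = copy.deepcopy(w[0])
    for k in w_avg.keys():
        for i in range(1, len(w)):
            w_avg[k] += w[i][k]
        w_avg[k] = torch.div(w_avg[k], len(w))
    return w_avg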
Code Example #6
                    w_locals.append(copy.deepcopy(weight))

                loss_locals.append(copy.deepcopy(loss))

            # update global weights
            w_glob = FedAvg(w_locals)

            # copy weight to net_glob
            global_net.load_state_dict(w_glob)

            # print loss
            loss_avg = sum(loss_locals) / len(loss_locals)

            loss_avg_client.append(loss_avg)

            acc_test, loss_test = test_img(global_net, dataset_test, args)

            acc_global_model.append(acc_test)

            last_loss_avg = loss_avg
            last_acc_global = acc_test

            print('Round {:3d}, Average loss {:.3f}, Global acc: {:.3f}, valid {:3d}'
                  .format(round, loss_avg, acc_test, len(user_idx_this_round)))
        else:
            # no valid updates this round: repeat the previous round's stats
            print('Round {:3d}, Average loss {:.3f}, Global acc: {:.3f}, 0 valid!'
                  .format(round, last_loss_avg, last_acc_global))
            loss_avg_client.append(last_loss_avg)
            acc_global_model.append(last_acc_global)
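The LocalUpdate(args=..., dataset=..., idxs=...) trainer whose .train(net) returns a (state_dict, loss) pair appears in nearly every example above and below. A minimal sketch of such a client-side trainer, assuming plain SGD and the hyperparameter names args.lr, args.local_ep, and args.local_bs (all assumptions; torch.utils.data.Subset stands in for the repositories' DatasetSplit wrapper):

import torch
from torch import nn
from torch.utils.data import DataLoader, Subset

class LocalUpdate:
    # Sketch only: one client's local training pass in FedAvg.
    def __init__(self, args, dataset, idxs):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.ldr_train = DataLoader(Subset(dataset, list(idxs)),
                                    batch_size=args.local_bs, shuffle=True)

    def train(self, net):
        net.train()
        optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr)
        epoch_loss = []
        for _ in range(self.args.local_ep):
            batch_loss = []
            for images, labels in self.ldr_train:
                images = images.to(self.args.device)
                labels = labels.to(self.args.device)
                net.zero_grad()
                loss = self.loss_func(net(images), labels)
                loss.backward()
                optimizer.step()
                batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss) / len(batch_loss))
        # matches the call sites above: w, loss = local.train(net=...)
        return net.state_dict(), sum(epoch_loss) / len(epoch_loss)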
Code Example #7
File: client.py Project: alitk/FL-Neural-OS
    print(type(dataset_train))
    data_weight = len(dataset_train) / args.num_users / 100

    if args.random_idx:
        idx = random.randint(0, args.num_users - 1)
    else:
        idx = args.idx

    local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
    w, loss = local.train(net=copy.deepcopy(net_local))
    print(loss)
    net_local.load_state_dict(w)

    # evaluate on the trained portion of the training set to compute accuracy
    acc_train, loss_train = test_img(
        net_local, DatasetSplit(dataset_train, dict_users[idx]), args)

    #acc_train, loss_train = test_img(net_local, dataset_train, args)
    acc_test, loss_test = test_img(net_local, dataset_test, args)
    print("Training accuracy: {:.2f}".format(acc_train))
    print("Testing accuracy: {:.2f}".format(acc_test))

    #w_locals.append(copy.deepcopy(w))
    #loss_locals.append(copy.deepcopy(loss))
    #w['epoch']=0

    # NOTE: both branches of the original `if args.new / else` built the same dict
    checkpoint = {'data_weight': data_weight, 'state_dict': w}
Code Example #8
def main():
    # parse args
    args = args_parser()
    args.device = torch.device('cuda:{}'.format(
        args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')

    # load dataset and split users
    if args.dataset == 'mnist':
        trans_mnist = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])
        dataset_train = datasets.MNIST('../data/mnist/',
                                       train=True,
                                       download=True,
                                       transform=trans_mnist)
        dataset_test = datasets.MNIST('../data/mnist/',
                                      train=False,
                                      download=True,
                                      transform=trans_mnist)
        print("type of test dataset", type(dataset_test))
        # sample users
        if args.iid:
            dict_users = mnist_iid(dataset_train, args.num_users)
        else:
            dict_users, dict_labels_counter = mnist_noniid(
                dataset_train, args.num_users)
            dict_users_2, dict_labels_counter_2 = dict_users, dict_labels_counter
            #dict_users, dict_labels_counter = mnist_noniid_unequal(dataset_train, args.num_users)
    elif args.dataset == 'cifar':
        trans_cifar = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        dataset_train = datasets.CIFAR10('../data/cifar',
                                         train=True,
                                         download=True,
                                         transform=trans_cifar)
        dataset_test = datasets.CIFAR10('../data/cifar',
                                        train=False,
                                        download=True,
                                        transform=trans_cifar)
        if args.iid:
            dict_users = cifar_iid(dataset_train, args.num_users)
        else:
            exit('Error: only consider IID setting in CIFAR10')
    else:
        exit('Error: unrecognized dataset')
    img_size = dataset_train[0][0].shape

    # build model
    if args.model == 'cnn' and args.dataset == 'cifar':
        net_glob = CNNCifar(args=args).to(args.device)
        net_glob_2 = CNNCifar(args=args).to(args.device)
    elif args.model == 'cnn' and args.dataset == 'mnist':
        net_glob = CNNMnist(args=args).to(args.device)
        net_glob_2 = CNNMnist(args=args).to(args.device)
    elif args.model == 'mlp':
        len_in = 1
        for x in img_size:
            len_in *= x
        net_glob = MLP(dim_in=len_in, dim_hidden=200,
                       dim_out=args.num_classes).to(args.device)
    else:
        exit('Error: unrecognized model')

    #print(net_glob)

    #net_glob.train()

    acc_test, loss_test = test_img(net_glob, dataset_test, args)
    print("val test finished")
    print("{:.2f}".format(acc_test))
    # use independent copies for per-client evaluation so the global models are not overwritten
    temp = copy.deepcopy(net_glob)

    #net_glob_2 = net_glob
    temp_2 = copy.deepcopy(net_glob_2)

    # copy weights
    w_glob = net_glob.state_dict()

    # training
    loss_train = []
    cv_loss, cv_acc = [], []
    val_loss_pre, counter = 0, 0
    net_best = None
    best_loss = None
    val_acc_list, net_list = [], []

    Loss_local_each_global_total = []

    test_ds, valid_ds = torch.utils.data.random_split(dataset_test,
                                                      (9500, 500))
    loss_workers_total = np.zeros(shape=(args.num_users, args.epochs))
    label_workers = {
        i: np.array([], dtype='int64')
        for i in range(args.num_users)
    }

    workers_percent = []
    workers_count = 0
    acc_test_global, loss_test_global = test_img(net_glob, valid_ds, args)
    selected_users_index = []

    for idx in range(args.num_users):
        # print("train started")
        local = LocalUpdate(args=args,
                            dataset=dataset_train,
                            idxs=dict_users[idx])
        w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))
        # print(w)
        # print("train completed")

        # temp = FedAvg(w)
        temp.load_state_dict(w)
        temp.eval()
        acc_test_local, loss_test_local = test_img(temp, valid_ds, args)
        loss_workers_total[idx, 0] = acc_test_local  # pre-selection pass: `iter` not defined yet, record as round 0

        if workers_count >= (args.num_users / 2):
            break
        elif acc_test_local >= (0.7 * acc_test_global):
            selected_users_index.append(idx)

    for iter in range(args.epochs):
        print("round started")
        Loss_local_each_global = []
        loss_workers = np.zeros((args.num_users, args.epochs))
        w_locals, loss_locals = [], []
        m = max(int(args.frac * args.num_users), 1)
        #idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        #if iter % 5 == 0:
        # Minoo
        x = net_glob
        x.eval()
        acc_test_global, loss_test_global = test_img(x, valid_ds, args)

        Loss_local_each_global_total.append(acc_test_global)

        for idx in selected_users_index:
            #print("train started")
            local = LocalUpdate(args=args,
                                dataset=dataset_train,
                                idxs=dict_users[idx])
            w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))
            #print(w)
            #print("train completed")

            #temp = FedAvg(w)
            temp.load_state_dict(w)
            temp.eval()
            acc_test_local, loss_test_local = test_img(temp, valid_ds, args)
            loss_workers_total[idx, iter] = acc_test_local

            if workers_count >= (args.num_users / 2):
                break
            elif acc_test_local >= (0.7 * acc_test_global):
                w_locals.append(copy.deepcopy(w))
                loss_locals.append(copy.deepcopy(loss))
                print("Update Received")
                workers_count += 1

        # update global weights
        w_glob = FedAvg(w_locals)

        # copy weight to net_glob
        net_glob.load_state_dict(w_glob)

        print("round completed")
        loss_avg = sum(loss_locals) / len(loss_locals)
        print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
        loss_train.append(loss_avg)
        workers_percent.append(workers_count)

    # plot loss curve
    plt.figure()
    plt.plot(range(len(workers_percent)), workers_percent)
    plt.ylabel('Participating workers')
    plt.savefig(
        './save/Newfed_WorkersPercent_0916_{}_{}_{}_C{}_iid{}.png'.format(
            args.dataset, args.model, args.epochs, args.frac, args.iid))
    # print(loss_workers_total)

    # plot loss curve
    # plt.figure()
    # plt.plot(range(len(loss_train)), loss_train)
    # plt.ylabel('train_loss')
    # plt.savefig('./save/Newfed_0916_{}_{}_{}_C{}_iid{}.png'.format(args.dataset, args.model, args.epochs, args.frac, args.iid))
    #

    plt.figure()
    for i in range(args.num_users):
        plot = plt.plot(range(len(loss_workers_total[i, :])),
                        loss_workers_total[i, :],
                        label="Worker {}".format(i))
    plot5 = plt.plot(range(len(Loss_local_each_global_total)),
                     Loss_local_each_global_total,
                     color='#000000',
                     label="Global")
    plt.legend(loc='best')
    plt.ylabel('Small Test Set Accuracy of workers')
    plt.xlabel('Number of Rounds')
    plt.savefig(
        './save/NewFed_2workers_Acc_0916_{}_{}_{}_C{}_iid{}.png'.format(
            args.dataset, args.model, args.epochs, args.frac, args.iid))

    # plt.figure()
    # bins = np.linspace(0, 9, 3)
    # a = dict_labels_counter[:, 0].ravel()
    # print(type(a))
    # b = dict_labels_counter[:, 1].ravel()
    # x_labels = ['0', '1', '2', '3','4','5','6','7','8','9']
    # # Set plot parameters
    # fig, ax = plt.subplots()
    # width = 0.1  # width of bar
    # x = np.arange(10)
    # ax.bar(x, dict_labels_counter[:, 0], width, color='#000080', label='Worker 1')
    # ax.bar(x + width, dict_labels_counter[:, 1], width, color='#73C2FB', label='Worker 2')
    # ax.bar(x + 2*width, dict_labels_counter[:, 2], width, color='#ff0000', label='Worker 3')
    # ax.bar(x + 3*width, dict_labels_counter[:, 3], width, color='#32CD32', label='Worker 4')
    # ax.set_ylabel('Number of Labels')
    # ax.set_xticks(x + width + width / 2)
    # ax.set_xticklabels(x_labels)
    # ax.set_xlabel('Labels')
    # ax.legend()
    # plt.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.3)
    # fig.tight_layout()
    # plt.savefig(
    #     './save/Newfed_2workersLabels_0916_{}_{}_{}_C{}_iid{}.png'.format(args.dataset, args.model, args.epochs, args.frac,
    #                                                                args.iid))

    # testing
    print("testing started")
    net_glob.eval()
    print("train test started")
    acc_train_final, loss_train_final = test_img(net_glob, dataset_train, args)
    print("train test finished")
    acc_test_final, loss_test_final = test_img(net_glob, dataset_test, args)
    print("val test finished")
    #print("Training accuracy: {:.2f}".format(acc_train))
    #print("Testing accuracy: {:.2f}".format(acc_test))
    print("{:.2f}".format(acc_test_final))
    #print("{:.2f".format(Loss_local_each_worker))

    # training
    w_glob_2 = net_glob_2.state_dict()

    loss_train_2 = []
    cv_loss_2, cv_acc_2 = [], []
    val_loss_pre_2, counter_2 = 0, 0
    net_best_2 = None
    best_loss_2 = None
    val_acc_list_2, net_list_2 = [], []

    Loss_local_each_global_total_2 = []

    loss_workers_total_2 = np.zeros(shape=(args.num_users, args.epochs))
    label_workers_2 = {
        i: np.array([], dtype='int64')
        for i in range(args.num_users)
    }

    for iter in range(args.epochs):
        print("round started")
        Loss_local_each_global_2 = []
        loss_workers_2 = np.zeros((args.num_users, args.epochs))
        w_locals_2, loss_locals_2 = [], []
        m_2 = max(int(args.frac * args.num_users), 1)
        idxs_users_2 = np.random.choice(range(args.num_users),
                                        m_2,
                                        replace=False)

        # Minoo
        x_2 = net_glob_2
        x_2.eval()
        acc_test_global_2, loss_test_global_2 = test_img(x_2, valid_ds, args)
        Loss_local_each_global_total_2.append(acc_test_global_2)

        for idx in idxs_users_2:
            #print("train started")
            local_2 = LocalUpdate(args=args,
                                  dataset=dataset_train,
                                  idxs=dict_users_2[idx])
            w_2, loss_2 = local_2.train(
                net=copy.deepcopy(net_glob_2).to(args.device))
            #print(w)
            #print("train completed")
            w_locals_2.append(copy.deepcopy(w_2))
            loss_locals_2.append(copy.deepcopy(loss_2))
            #temp = FedAvg(w)
            temp_2.load_state_dict(w_2)
            temp_2.eval()
            acc_test_local_2, loss_test_local_2 = test_img(
                temp_2, valid_ds, args)
            loss_workers_total_2[idx, iter] = acc_test_local_2

        # update global weights
        w_glob_2 = FedAvg(w_locals_2)

        # copy weight to net_glob
        net_glob_2.load_state_dict(w_glob_2)

        print("round completed")
        loss_avg_2 = sum(loss_locals_2) / len(loss_locals_2)
        print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_2))
        loss_train_2.append(loss_avg_2)
        print("round completed")

    # plot loss curve
    plt.figure()
    plt.plot(range(len(loss_train_2)),
             loss_train_2,
             color='#000000',
             label="Main FL")
    plt.plot(range(len(loss_train)),
             loss_train,
             color='#ff0000',
             label="Centralized Algorithm")
    plt.ylabel('train_loss')
    plt.savefig('./save/main_fed_0916_{}_{}_{}_C{}_iid{}.png'.format(
        args.dataset, args.model, args.epochs, args.frac, args.iid))
    # print(loss_workers_total)

    plt.figure()
    for i in range(args.num_users):
        plot = plt.plot(range(len(loss_workers_total_2[i, :])),
                        loss_workers_total_2[i, :],
                        label="Worker {}".format(i))
    plot5 = plt.plot(range(len(Loss_local_each_global_total_2)),
                     Loss_local_each_global_total_2,
                     color='#000000',
                     label="Global")
    plt.legend(loc='best')
    plt.ylabel('Small Test Set Accuracy of workers')
    plt.xlabel('Number of Rounds')
    plt.savefig('./save/mainfed_Acc_0916_{}_{}_{}_C{}_iid{}.png'.format(
        args.dataset, args.model, args.epochs, args.frac, args.iid))

    # plt.figure()
    # bins = np.linspace(0, 9, 3)
    # a = dict_labels_counter_2[:, 0].ravel()
    # print(type(a))
    # b = dict_labels_counter_2[:, 1].ravel()
    # x_labels = ['0', '1', '2', '3','4','5','6','7','8','9']
    # # Set plot parameters
    # fig, ax = plt.subplots()
    # width = 0.1  # width of bar
    # x = np.arange(10)
    # ax.bar(x, dict_labels_counter_2[:, 0], width, color='#000080', label='Worker 1')
    # ax.bar(x + width, dict_labels_counter_2[:, 1], width, color='#73C2FB', label='Worker 2')
    # ax.bar(x + 2*width, dict_labels_counter_2[:, 2], width, color='#ff0000', label='Worker 3')
    # ax.bar(x + 3*width, dict_labels_counter_2[:, 3], width, color='#32CD32', label='Worker 4')
    # ax.set_ylabel('Number of Labels')
    # ax.set_xticks(x + width + width / 2)
    # ax.set_xticklabels(x_labels)
    # ax.set_xlabel('Labels')
    # ax.legend()
    # plt.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.3)
    # fig.tight_layout()
    # plt.savefig(
    #     './save/main_fed_2workersLabels_0916_{}_{}_{}_C{}_iid{}.png'.format(args.dataset, args.model, args.epochs, args.frac,
    #                                                                args.iid))

    # testing
    print("testing started")
    net_glob.eval()
    print("train test started")
    acc_train_final, loss_train_final = test_img(net_glob, dataset_train, args)
    print("train test finished")
    acc_test_final, loss_test_final = test_img(net_glob, dataset_test, args)
    print("val test finished")
    #print("Training accuracy: {:.2f}".format(acc_train))
    #print("Testing accuracy: {:.2f}".format(acc_test))
    print("{:.2f}".format(acc_test_final))
    #print("{:.2f".format(Loss_local_each_worker))

    return loss_test_final, loss_train_final
Code Example #9
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        for idx in idxs_users:
            local = LocalUpdate_fedavg(args=args,
                                       dataset=dataset_train,
                                       idxs=dict_users[idx],
                                       idxs_labeled=dict_users_labeled[idx],
                                       pseudo_label=pseudo_label)
            w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))
            w_locals.append(copy.deepcopy(w))
            loss_locals.append(copy.deepcopy(loss))

        w_glob = FedAvg(w_locals)
        net_glob.load_state_dict(w_glob)
        net_glob.eval()
        acc_valid, loss_valid = test_img(net_glob, dataset_valid, args)
        if loss_valid <= best_loss_valid:
            best_loss_valid = loss_valid
            w_best = copy.deepcopy(w_glob)

        loss_avg = sum(loss_locals) / len(loss_locals)
        print('Round {:3d}, Average loss {:.3f}, acc_valid {:.2f}%'.format(
            iter, loss_avg, acc_valid))
        loss_train.append(loss_avg)

    print("\n Begin test")

    net_glob.load_state_dict(w_best)
    net_glob.eval()

    users_labeled = set()
Code Example #10
def main():
    # parse args
    args = args_parser()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    dataPath = args.datasetPath

    # random seed
    np.random.seed(args.seed)
    cudnn.benchmark = False
    cudnn.deterministic = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)

    # load dataset and split users
    if args.dataset == 'cifar10':
        _CIFAR_TRAIN_TRANSFORMS = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ]
        dataset_train = datasets.CIFAR10(
            dataPath,
            train=True,
            download=True,
            transform=transforms.Compose(_CIFAR_TRAIN_TRANSFORMS))

        _CIFAR_TEST_TRANSFORMS = [
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ]
        dataset_test = datasets.CIFAR10(
            dataPath,
            train=False,
            transform=transforms.Compose(_CIFAR_TEST_TRANSFORMS))

        if args.iid == 0:  # IID
            dict_users = cifar_iid(dataset_train, args.num_users)
        elif args.iid == 2:  # non-IID
            dict_users = cifar_noniid_2(dataset_train, args.num_users)
        else:
            exit('Error: unrecognized class')

    elif args.dataset == 'emnist':
        _MNIST_TRAIN_TRANSFORMS = _MNIST_TEST_TRANSFORMS = [
            transforms.ToTensor(),
            transforms.ToPILImage(),
            transforms.Pad(2),
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ]
        dataset_train = datasets.EMNIST(
            dataPath,
            train=True,
            download=True,
            transform=transforms.Compose(_MNIST_TRAIN_TRANSFORMS),
            split='letters')
        dataset_test = datasets.EMNIST(
            dataPath,
            train=False,
            download=True,
            transform=transforms.Compose(_MNIST_TEST_TRANSFORMS),
            split='letters')

        dict_users = femnist_star(dataset_train, args.num_users)

    elif args.dataset == 'cifar100':
        _CIFAR_TRAIN_TRANSFORMS = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ]
        dataset_train = datasets.CIFAR100(
            dataPath,
            train=True,
            download=True,
            transform=transforms.Compose(_CIFAR_TRAIN_TRANSFORMS))

        _CIFAR_TEST_TRANSFORMS = [
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ]
        dataset_test = datasets.CIFAR100(
            dataPath,
            train=False,
            transform=transforms.Compose(_CIFAR_TEST_TRANSFORMS))
        if args.iid == 0:  # IID
            dict_users = cifar_100_iid(dataset_train, args.num_users)
        elif args.iid == 2:  # non-IID
            dict_users = cifar_100_noniid(dataset_train, args.num_users)
    else:
        exit('Error: unrecognized dataset')

    # build model
    if args.dataset == 'cifar10':
        if args.model == "CNNStd5":
            net_glob = CNNCifarStd5().cuda()
        else:
            exit('Error: unrecognized model')
    elif args.dataset == 'emnist':
        if args.model == "CNNStd5":
            net_glob = CNNEmnistStd5().cuda()
        else:
            exit('Error: unrecognized model')
    elif args.dataset == 'cifar100':
        if args.model == "CNNStd5":
            net_glob = CNNCifar100Std5().cuda()
        else:
            exit('Error: unrecognized model')
    else:
        exit('Error: unrecognized model')

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in net_glob.parameters()])))

    net_glob.train()

    learning_rate = args.lr
    test_acc = []
    avg_loss = []

    # Train
    for iter in range(args.epochs):

        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)
        w_locals, loss_locals = [], []
        for i, idx in enumerate(idxs_users):
            print('user: {:d}'.format(idx))
            local = LocalUpdate(args=args,
                                dataset=dataset_train,
                                idxs=dict_users[idx])
            w, loss = local.train(model=copy.deepcopy(net_glob).cuda(),
                                  lr=learning_rate)

            w_locals.append(copy.deepcopy(w))
            loss_locals.append(copy.deepcopy(loss))

        # update global weights
        w_glob = FedAvg(w_locals)

        # copy weight to net_glob
        net_glob.load_state_dict(w_glob)

        # print loss
        loss_avg = sum(loss_locals) / len(loss_locals)
        print('Round {:3d}, Average loss {:.6f}'.format(iter, loss_avg))

        acc_test, _ = test_img(net_glob.cuda(), dataset_test, args)
        print("test accuracy: {:.4f}".format(acc_test))
        test_acc.append(acc_test)

        avg_loss.append(loss_avg)

        learning_rate = adjust_learning_rate(learning_rate, args.lr_drop)

    filename = './accuracy-' + str(args.dataset) + '-iid' + str(args.iid) + '-' + str(args.epochs) + '-seed' \
               + str(args.seed) + '-' + str(args.loss_type) + '-beta' + str(args.beta) + '-mu' + str(args.mu)
    save_result(test_acc, avg_loss, filename)
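The round loop ends with learning_rate = adjust_learning_rate(learning_rate, args.lr_drop). The helper is not shown; a plausible reading, assuming lr_drop is a multiplicative per-round decay factor:

def adjust_learning_rate(lr, lr_drop):
    # Sketch only: decay the learning rate by a fixed factor each round.
    return lr * lr_drop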
Code Example #11
                    map_location=torch.device(
                        'cuda:{}'.format(args.gpu) if torch.cuda.is_available(
                        ) and args.gpu != -1 else 'cpu'))
                w_locals.append(copy.deepcopy(malN_glob))
                print('Training of malicious node ' + str(allDeviceName[idx]) +
                      ' in iteration ' + str(iter) + ' has done!')
        # update global weights
        w_glob = FedAvg(w_locals)
        loss_avg = sum(loss_locals) / len(loss_locals)
        loss_train_list.append(loss_avg)

        # copy weight to net_glob
        net_glob.load_state_dict(w_glob)
        net_glob.eval()

        acc_test, loss_test = test_img(net_glob, dataset_test, args)
        # acc_train, _ = test_img(net_glob, dataset_train, args)

        loss_test_list.append(loss_test)
        acc_test_list.append((acc_test.cpu().numpy().tolist() / 100))

        accDfTest = pd.DataFrame({'baseline': acc_test_list})
        accDfTest.to_csv(
            "D:\\ChainsFLexps\\ggFL\\GoogleFL-iid{}-{}-{}localEpochs-{}users-{}Rounds_ACC_{}.csv"
            .format(args.iid, args.model, args.local_ep,
                    str(int(float(args.frac) * 100)), args.epochs, dateNow),
            index=False,
            sep=',')

        lossDfTest = pd.DataFrame({'baseline': loss_test_list})
        lossDfTest.to_csv(
Code Example #12
                w.to(torch.device('cpu')).state_dict())

        # update global weights
        w_glob = FedAvg(nets_users)

        # copy weight to net_glob
        net_glob.load_state_dict(w_glob)

        # print loss
        loss_avg = sum(loss_locals) / len(loss_locals)
        print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
        loss_train.append(loss_avg)

        # print accuracy
        print("Epoch: ", iter)
        atr, ltr = test_img(net_glob.to(args.device), dataset_train, args)
        ate, lte = test_img(net_glob.to(args.device), dataset_test, args)

        if net_best < ate:
            print('Saving...')
            state = {'net': net_glob.state_dict(), 'acc': ate, 'iter': iter}
            if not os.path.isdir('ckpt'):
                os.mkdir('ckpt')
            torch.save(state, './ckpt/vgg_full.pth')
            net_best = ate

        acc_train.append(atr)
        loss_tr.append(ltr)
        acc_test.append(ate)
        loss_test.append(lte)
Code Example #13
    np.savetxt(filename, [])
    filename = 'result/MLP/' + "Loss_FedAvg_unbalance_MLP.csv"
    np.savetxt(filename, [])
    filename = 'result/MLP/' + "Loss_FedAvg_Optimize_unbalance_MLP.csv"
    np.savetxt(filename, [])
    filename = 'result/MLP/' + "Loss_FedAvg_balance_MLP.csv"
    np.savetxt(filename, [])
    filename = 'result/MLP/' + "Loss_FedAvg_Optimize_balance_MLP.csv"
    np.savetxt(filename, [])

    for iter in range(args.epochs):  # num of iterations

        # CL setting
        # testing
        net_glob_cl_iid.eval()
        acc_test_cl, loss_test_cl = test_img(net_glob_cl_iid, dataset_test,
                                             args)
        print("Testing accuracy: {:.2f}".format(acc_test_cl))
        acc_train_cl_his_iid.append(acc_test_cl)

        filename = 'result/MLP/' + "Accuracy_FedAvg_iid_MLP.csv"
        with open(filename, "a") as myfile:
            myfile.write(str(acc_test_cl) + ',')

        glob_cl = CLUpdate(args=args,
                           dataset=dataset_train,
                           idxs=dict_users_iid)
        w_cl, loss_cl = glob_cl.cltrain(
            net=copy.deepcopy(net_glob_cl_iid).to(args.device))
        net_glob_cl_iid.load_state_dict(w_cl)

        # Loss
Code Example #14
    def inference(self, net, dataset_test, idxs):
        dataset = DatasetSplit(dataset_test, idxs)
        self.args.verbose = False
        acc_test, loss_test = test_img(net, dataset, self.args)
        return acc_test, loss_test
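DatasetSplit(dataset_test, idxs), also used in Example #7, restricts a full dataset to one client's index set. A minimal sketch matching how these FedAvg codebases commonly define it:

from torch.utils.data import Dataset

class DatasetSplit(Dataset):
    # Sketch only: a view of `dataset` restricted to the index set `idxs`.
    def __init__(self, dataset, idxs):
        self.dataset = dataset
        self.idxs = list(idxs)

    def __len__(self):
        return len(self.idxs)

    def __getitem__(self, item):
        image, label = self.dataset[self.idxs[item]]
        return image, label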
Code Example #15
def main():
    # parse args
    args = args_parser()
    args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')

    # load dataset and split users
    if args.dataset == 'mnist':
        trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
        dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True, transform=trans_mnist)
        dataset_test = datasets.MNIST('../data/mnist/', train=False, download=True, transform=trans_mnist)
        # sample users
        if args.iid:
            dict_users = mnist_iid(dataset_train, args.num_users)
        else:
            dict_users, dict_labels_counter = mnist_noniid(dataset_train, args.num_users)
            dict_users_mainFL, dict_labels_counter_mainFL = dict_users, dict_labels_counter
    elif args.dataset == 'cifar':
        trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        dataset_train = datasets.CIFAR10('../data/cifar', train=True, download=True, transform=trans_cifar)
        dataset_test = datasets.CIFAR10('../data/cifar', train=False, download=True, transform=trans_cifar)
        if args.iid:
            dict_users = cifar_iid(dataset_train, args.num_users)
        else:
            dict_users, dict_labels_counter = cifar_noniid(dataset_train, args.num_users)
            dict_users_mainFL, dict_labels_counter_mainFL = dict_users, dict_labels_counter
    elif args.dataset == 'fmnist':
        trans_fmnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
        dataset_train = datasets.FashionMNIST('../data/fmnist', train=True, download=True, transform=trans_fmnist)
        dataset_test = datasets.FashionMNIST('../data/fmnist', train=False, download=True, transform=trans_fmnist)
        if args.iid:
            dict_users = mnist_iid(dataset_train, args.num_users)
        else:
            dict_users, dict_labels_counter = mnist_noniid(dataset_train, args.num_users)
            dict_users_mainFL, dict_labels_counter_mainFL = dict_users, dict_labels_counter
    else:
        exit('Error: unrecognized dataset')


    img_size = dataset_train[0][0].shape

    acc_full_distributed = []
    acc_full_main = []
    loss_full_ditributed = []
    loss_full_main = []

    SD_acc_full_distributed = []
    SD_acc_full_main = []
    SD_loss_full_ditributed = []
    SD_loss_full_main = []

    workers_percent_full_distributed = []
    workers_percent_full_main = []
    variable_start = 0.1
    variable_end = 1.0
    while_counter = 0.1
    counter_array = []
    Accuracy_Fraction = []
    Workers_Fraction = []

    accuracy_fraction_each_round_newFL = 0
    workers_fraction_each_round_newFL = 0
    accuracy_fraction_each_round_mainFL = 0
    workers_fraction_each_round_mainFL = 0

    data_main = {}
    data_DCFL = {}
    data_Global_main = {"C": [], "Round":[], "Average Loss Train": [], "Average Loss Test": [], "Accuracy Test": [],
                        "Workers Number": [], "Large Test Loss":[], "Large Test Accuracy":[]}
    data_Global_DCFL = {"C": [], "Round":[], "Average Loss Train": [], "Average Loss Test": [], "Accuracy Test": [],
                        "Workers Number": [], "Large Test Loss":[], "Large Test Accuracy":[]}
    Final_LargeDataSetTest_DCFL = {"C":[], "Test Accuracy":[], "Test Loss":[], "Train Loss":[], "Train Accuracy":[],
                                   "Total Rounds":[]}
    Final_LargeDataSetTest_MainFL = {"C":[], "Test Accuracy": [], "Test Loss": [], "Train Loss": [], "Train Accuracy":[]}



    # build model
    args.frac = variable_start

    test_ds, valid_ds_before = torch.utils.data.random_split(dataset_test, (9500, 500))
    valid_ds = create_shared_dataset(valid_ds_before, 200)

    #while variable_start <= variable_end:
    for c_counter in range(1, 11, 3):
        if args.model == 'cnn' and args.dataset == 'cifar':
            net_glob = CNNCifar(args=args).to(args.device)
            net_glob_mainFL = copy.deepcopy(net_glob)
        elif args.model == 'cnn' and args.dataset == 'mnist':
            net_glob = CNNMnist(args=args).to(args.device)
            net_glob_mainFL = copy.deepcopy(net_glob)
        elif args.model == 'cnn' and args.dataset == 'fmnist':
            net_glob = CNNFashion_Mnist(args=args).to(args.device)
            net_glob_mainFL = copy.deepcopy(net_glob)
        elif args.model == 'mlp':
            len_in = 1
            for x in img_size:
                len_in *= x
            net_glob = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
        else:
            exit('Error: unrecognized model')

        counter_array.append((c_counter/10))
        args.frac = (c_counter/10)

        ######saving index of workers
        dict_workers_index = defaultdict(list)


        #############Main FL

        w_glob_mainFL = net_glob_mainFL.state_dict()

        loss_train_mainFL = []
        # cv_loss_2, cv_acc_2 = [], []
        # val_loss_pre_2, counter_2 = 0, 0
        # net_best_2 = None
        # best_loss_2 = None
        # val_acc_list_2, net_list_2 = [], []

        Loss_local_each_global_total_mainFL = []
        Accuracy_local_each_global_total_mainFL = []

        loss_workers_total_mainFL = np.zeros(shape=(args.num_users, args.epochs))
        label_workers_mainFL = {i: np.array([], dtype='int64') for i in range(args.num_users)}

        validation_test_mainFed = []
        acc_test, loss_test = test_img(net_glob_mainFL, dataset_test, args)
        workers_participation_main_fd = np.zeros((args.num_users, args.epochs))
        workers_percent_main = []

        # for iter in range(args.epochs):
        net_glob_mainFL.eval()
        acc_test_final_mainFL, loss_test_final_mainFL = test_img(net_glob_mainFL, dataset_test, args)
        while_counter_mainFL = loss_test_final_mainFL
        iter_mainFL = 0

        workers_mainFL = []
        for i in range(args.num_users):
            workers_mainFL.append(i)

        temp_netglob_mainFL = copy.deepcopy(net_glob_mainFL)  # independent copy for per-client evaluation

        while iter_mainFL < (args.epochs/2):

            data_main['round_{}'.format(iter_mainFL)] = []
            # data_Global_main['round_{}'.format(iter)] = []
            # print("round started")
            Loss_local_each_global_mainFL = []
            loss_workers_mainFL = np.zeros((args.num_users, args.epochs))
            w_locals_mainFL, loss_locals_mainFL = [], []
            m_mainFL = max(int(args.frac * args.num_users), 1)
            idxs_users_mainFL = np.random.choice(range(args.num_users), m_mainFL, replace=False)
            list_of_random_workers = random.sample(workers_mainFL, m_mainFL)
            for i in range(len(list_of_random_workers)):
                dict_workers_index[iter_mainFL].append(list_of_random_workers[i])

            x_mainFL = net_glob_mainFL
            x_mainFL.eval()
            acc_test_global_mainFL, loss_test_global_mainFL = test_img(x_mainFL, valid_ds, args)
            Loss_local_each_global_total_mainFL.append(loss_test_global_mainFL)
            Accuracy_local_each_global_total_mainFL.append(acc_test_global_mainFL)
            SD_acc_full_main.append(acc_test_global_mainFL)
            SD_loss_full_main.append(loss_test_global_mainFL)

            workers_count_mainFL = 0
            temp_accuracy = np.zeros(1)
            temp_loss_test = np.zeros(1)
            temp_loss_train = np.zeros(1)
            for idx in list_of_random_workers:
                # print("train started")
                local_mainFL = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users_mainFL[idx])
                w_mainFL, loss_mainFL = local_mainFL.train(net=copy.deepcopy(net_glob_mainFL).to(args.device))
                # print(w)
                # print("train completed")
                w_locals_mainFL.append(copy.deepcopy(w_mainFL))
                loss_locals_mainFL.append(copy.deepcopy(loss_mainFL))
                # temp = FedAvg(w)
                temp_netglob_mainFL.load_state_dict(w_mainFL)
                temp_netglob_mainFL.eval()
                print(pnorm_2(temp_netglob_mainFL, 2))
                acc_test_local_mainFL, loss_test_local_mainFL = test_img(temp_netglob_mainFL, valid_ds, args)
                temp_accuracy[0] = acc_test_local_mainFL
                temp_loss_test[0] = loss_test_local_mainFL
                temp_loss_train[0] = loss_mainFL
                loss_workers_total_mainFL[idx, iter_mainFL] = acc_test_local_mainFL
                workers_participation_main_fd[idx][iter_mainFL] = 1
                workers_count_mainFL += 1
                data_main['round_{}'.format(iter_mainFL)].append({
                    'C': args.frac,
                    'User ID': idx,
                    # 'Local Update': copy.deepcopy(w_mainFL),
                    'Loss Train': temp_loss_train[0],
                    'Loss Test': temp_loss_test[0],
                    'Accuracy': temp_accuracy[0]
                })

            # update global weights
            w_glob_mainFL = FedAvg(w_locals_mainFL)

            # copy weight to net_glob
            net_glob_mainFL.load_state_dict(w_glob_mainFL)

            # print("round completed")
            loss_avg_mainFL = sum(loss_locals_mainFL) / len(loss_locals_mainFL)
            # print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_mainFL))
            loss_train_mainFL.append(loss_avg_mainFL)
            # print("round completed")

            acc_test_round_mainfed, loss_test_round_mainfed = test_img(net_glob_mainFL, dataset_test, args)
            validation_test_mainFed.append(acc_test_round_mainfed)
            workers_percent_main.append(workers_count_mainFL / args.num_users)

            # plot workers percent of participating
            print(iter_mainFL, " round main fl finished")

            acc_test_final_mainFL, loss_test_final_mainFL = test_img(net_glob_mainFL, dataset_test, args)
            while_counter_mainFL = loss_test_final_mainFL

            data_Global_main["Round"].append(iter_mainFL)
            data_Global_main["C"].append(args.frac)
            data_Global_main["Average Loss Train"].append(float(loss_avg_mainFL))
            data_Global_main["Average Loss Test"].append(float(loss_test_global_mainFL))
            data_Global_main["Accuracy Test"].append(float(acc_test_global_mainFL))
            data_Global_main["Workers Number"].append(float(workers_count_mainFL))
            data_Global_main["Large Test Loss"].append(float(loss_test_final_mainFL))
            data_Global_main["Large Test Accuracy"].append(float(acc_test_final_mainFL))

            iter_mainFL = iter_mainFL + 1

        workers_percent_final_mainFL = np.zeros(args.num_users)
        workers_name_mainFL = np.empty(args.num_users)
        for i in range(len(workers_participation_main_fd[:, 1])):
            workers_percent_final_mainFL[i] = sum(workers_participation_main_fd[i, :]) / args.epochs
            workers_name_mainFL[i] = i

        net_glob_mainFL.eval()
        # print("train test started")
        acc_train_final_main, loss_train_final_main = test_img(net_glob_mainFL, dataset_train, args)
        # print("train test finished")
        acc_test_final_main, loss_test_final_main = test_img(net_glob_mainFL, dataset_test, args)

        Final_LargeDataSetTest_MainFL["C"].append(args.frac)
        Final_LargeDataSetTest_MainFL["Test Loss"].append(float(loss_test_final_main))
        Final_LargeDataSetTest_MainFL["Test Accuracy"].append(float(acc_test_final_main))
        Final_LargeDataSetTest_MainFL["Train Loss"].append(float(loss_train_final_main))
        Final_LargeDataSetTest_MainFL["Train Accuracy"].append(float(acc_train_final_main))






        # copy weights
        w_glob = net_glob.state_dict()

        temp_after = copy.deepcopy(net_glob)
        temp_before = copy.deepcopy(net_glob)

        # training
        loss_train = []
        # cv_loss, cv_acc = [], []
        # val_loss_pre, counter = 0, 0
        # net_best = None
        # best_loss = None
        # val_acc_list, net_list = [], []

        Loss_local_each_global_total = []


        # valid_ds = create_shared_dataset(dataset_test, 500)
        loss_workers_total = np.zeros(shape=(args.num_users, args.epochs))
        label_workers = {i: np.array([], dtype='int64') for i in range(args.num_users)}

        workers_percent_dist = []
        validation_test_newFed = []
        workers_participation = np.zeros((args.num_users, args.epochs))
        workers = []
        for i in range(args.num_users):
            workers.append(i)

        counter_threshold_decrease = np.zeros(args.epochs)
        Global_Accuracy_Tracker = np.zeros(args.epochs)
        Global_Loss_Tracker = np.zeros(args.epochs)
        threshold = 0.5
        alpha = 0.5  # decrease parameter
        beta = 0.1  # delta accuracy controller
        gamma = 0.5  # threshold decrease parameter


        Goal_Loss = float(loss_test_final_main)

        #for iter in range(args.epochs):

        net_glob.eval()
        acc_test_final, loss_test_final = test_img(net_glob, dataset_test, args)
        while_counter = float(loss_test_final)
        iter = 0

        total_rounds_dcfl = 0

        while (while_counter + 0.01) > Goal_Loss and iter <= args.epochs:

            data_DCFL['round_{}'.format(iter)] = []
            Loss_local_each_global = []
            loss_workers = np.zeros((args.num_users, args.epochs))
            w_locals, loss_locals = [], []
            m = max(int(args.frac * args.num_users), 1)
            idxs_users = np.random.choice(range(args.num_users), m, replace=False)
            counter_threshold = 0
            print(iter, " in dist FL started")
            #if iter % 5 == 0:

            x = copy.deepcopy(net_glob)
            x.eval()
            acc_test_global, loss_test_global = test_img(x, valid_ds, args)
            Loss_local_each_global_total.append(acc_test_global)
            Global_Accuracy_Tracker[iter] = acc_test_global
            Global_Loss_Tracker[iter] = loss_test_global
            if iter > 0 and (Global_Loss_Tracker[iter-1] - Global_Loss_Tracker[iter] <= beta):
                threshold = threshold - gamma
                if threshold == 0.0:
                    threshold = 1.0
                print("threshold decreased to", threshold)
            workers_count = 0

            SD_acc_full_distributed.append(acc_test_global)
            SD_loss_full_ditributed.append(loss_test_global)


            temp_w_locals = []
            temp_workers_loss = np.empty(args.num_users)
            temp_workers_accuracy = np.empty(args.num_users)
            temp_workers_loss_test = np.empty(args.num_users)
            temp_workers_loss_differenc = np.empty(args.num_users)
            temp_workers_accuracy_differenc = np.empty(args.num_users)
            flag = np.zeros(args.num_users)

            list_of_random_workers_newfl = []
            if iter < (args.epochs/2):
                for key, value in dict_workers_index.items():
                    # print(value)
                    if key == iter:
                        list_of_random_workers_newfl = dict_workers_index[key]
            else:
                list_of_random_workers_newfl = random.sample(workers, m)


            for idx in list_of_random_workers_newfl:
                #print("train started")

                # before starting train
                temp_before = copy.deepcopy(net_glob)
                # temp_before.load_state_dict(w)
                temp_before.eval()
                acc_test_local_before, loss_test_local_before = test_img(temp_before, valid_ds, args)

                local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
                w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))
                #print(w)
                #print("train completed")

                #print("type of idx is ", type(temp_w_locals))
                temp_w_locals.append(copy.deepcopy(w))
                temp_workers_loss[idx] = copy.deepcopy(loss)

                temp_after = copy.deepcopy(net_glob)

                temp_after.load_state_dict(w)
                temp_after.eval()
                acc_test_local_after, loss_test_local_after = test_img(temp_after, valid_ds, args)
                loss_workers_total[idx, iter] = loss_test_local_after
                temp_workers_accuracy[idx] = acc_test_local_after
                temp_workers_loss_test[idx] = loss_test_local_after
                temp_workers_loss_differenc[idx] = loss_test_local_before - loss_test_local_after
                temp_workers_accuracy_differenc[idx] = acc_test_local_after - acc_test_local_before

            print("train finished")
            while len(w_locals) < 1:
                #print("recieving started")
                index = 0
                for idx in list_of_random_workers_newfl:
                    #print("acc is ", temp_workers_accuracy[idx])
                    # print(temp_workers_loss_differenc)
                    if workers_count >= m:
                        break
                    elif temp_workers_loss_differenc[idx] >= (threshold) \
                            and temp_workers_loss_differenc[idx] > 0 \
                            and flag[idx]==0:
                        print("Update Received")
                        w_locals.append(copy.deepcopy(temp_w_locals[index]))
                        #print(temp_w_locals[index])
                        loss_locals.append(temp_workers_loss[idx])
                        flag[idx] = 1
                        workers_count += 1
                        workers_participation[idx][iter] = 1

                        data_DCFL['round_{}'.format(iter)].append({
                            'C': args.frac,
                            'User ID': idx,
                            'Loss Train': loss_workers_total[idx, iter],
                            'Loss Test': temp_workers_loss[idx],
                            'Accuracy': temp_workers_accuracy[idx]
                        })
                    index += 1
                if len(w_locals) < 1:
                    threshold = threshold / 2
                    if threshold == -np.inf:
                        threshold = 1
                    print("threshold halved to ", threshold)

            # update global weights
            w_glob = FedAvg(w_locals)

            # copy weight to net_glob
            net_glob.load_state_dict(w_glob)

            #print("round completed")
            loss_avg = sum(loss_locals) / len(loss_locals)
            loss_train.append(loss_avg)
            workers_percent_dist.append(workers_count/args.num_users)


            counter_threshold_decrease[iter] = counter_threshold
            print(iter, " round dist fl finished")


            acc_test_final, loss_test_final = test_img(net_glob, dataset_test, args)
            while_counter = loss_test_final


            data_Global_DCFL["Round"].append(iter)
            data_Global_DCFL["C"].append(args.frac)
            data_Global_DCFL["Average Loss Train"].append(loss_avg)
            data_Global_DCFL["Accuracy Test"].append(Global_Accuracy_Tracker[iter])
            data_Global_DCFL["Average Loss Test"].append(Global_Loss_Tracker[iter])
            data_Global_DCFL["Workers Number"].append(workers_count)
            data_Global_DCFL["Large Test Loss"].append(float(loss_test_final))
            data_Global_DCFL["Large Test Accuracy"].append(float(acc_test_final))

            total_rounds_dcfl = iter

            iter = iter + 1


        # compute each worker's participation fraction across rounds
        workers_percent_final = np.zeros(args.num_users)
        workers_name = np.empty(args.num_users)
        #print(workers_participation)
        for i in range(len(workers_participation[:, 1])):
            workers_percent_final[i] = sum(workers_participation[i, :])/args.epochs
            workers_name[i] = i

        workers_fraction_each_round_newFL = sum(workers_percent_final)/len(workers_percent_final)


        # testing
        #print("testing started")
        net_glob.eval()
        #print("train test started")
        acc_train_final, loss_train_final = test_img(net_glob, dataset_train, args)
        #print("train test finished")
        acc_test_final, loss_test_final = test_img(net_glob, dataset_test, args)

        acc_full_distributed.append(acc_test_final)
        loss_full_ditributed.append(loss_test_final)

        Final_LargeDataSetTest_DCFL["C"].append(args.frac)
        Final_LargeDataSetTest_DCFL["Test Loss"].append(float(loss_test_final))
        Final_LargeDataSetTest_DCFL["Test Accuracy"].append(float(acc_test_final))
        Final_LargeDataSetTest_DCFL["Train Loss"].append(float(loss_train_final))
        Final_LargeDataSetTest_DCFL["Train Accuracy"].append(float(acc_train_final))
        Final_LargeDataSetTest_DCFL["Total Rounds"].append(int(total_rounds_dcfl))

        variable_start = variable_start + while_counter

        print("C is ", c_counter/10)

    with open('CIFAR_100users_data_main_1229-2020.json', 'w') as outfile:
        json.dump(data_main, outfile)

    with open('CIFAR_100users_data_DCFL_1229-2020.json', 'w') as outfile:
        json.dump(data_DCFL, outfile)

    with open('CIFAR_100users_data_DCFL_Global_1229-2020.json', 'w') as outfile:
        json.dump(data_Global_DCFL, outfile)

    with open('CIFAR_100users_data_main_Global_1229-2020.json', 'w') as outfile:
        json.dump(data_Global_main, outfile)

    with open('Final-CIFAR_100users_data_main_Global_1229-2020.json', 'w') as outfile:
        json.dump(Final_LargeDataSetTest_MainFL, outfile)

    with open('Final-CIFAR_100users_data_DCFL_Global_1229-2020.json', 'w') as outfile:
        json.dump(Final_LargeDataSetTest_DCFL, outfile)


    return 1
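create_shared_dataset(valid_ds_before, 200) above shrinks the 500-sample validation split to a small shared evaluation set. Its definition is not shown; one plausible sketch is a fixed-size random subset (whether the real helper also balances classes is unknown):

import numpy as np
from torch.utils.data import Subset

def create_shared_dataset(dataset, size):
    # Sketch only: draw a fixed-size random subset as a small shared test set.
    idxs = np.random.choice(len(dataset), size, replace=False)
    return Subset(dataset, idxs.tolist())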
Code Example #16
File: Muhammed.py Project: khamfroush-lab/Fed-MEC
def muhammed(network: torch.nn.Module, user_data_indices: Dict[int, Any],
             labels_counter: Dict[int, Any], args: Any, cost: Any,
             train_data: DataLoader, test_data: DataLoader,
             shared_data: DataLoader, **kwargs) -> Tuple:
    global_data = defaultdict(list)
    global_eval_data = defaultdict(list)
    workers_index = defaultdict(list)
    client_participation_counter = defaultdict(int)

    # r1=1 and r2=4 are the best values based on Muhammed's results.
    r1 = kwargs.get("r1", 1)
    r2 = kwargs.get("r2", 4)

    comm_cost_total = []

    n_k = np.zeros(shape=(args.num_users))
    for i in range(len(user_data_indices)):
        n_k[i] = len(user_data_indices[i])

    pre_net_glob = copy.deepcopy(network)

    def compute_alpha_star(n_clients, r1, r2) -> int:
        fact = math.factorial
        coeff = math.exp(-(fact(r2) / fact(r1 - 1))**(1 / (r2 - r1 + 1)))
        return n_clients * coeff

    for comm_round in range(args.epochs):
        selected = set()
        comm_cost = []
        randomized_clients = list(user_data_indices.keys())
        random.shuffle(randomized_clients)

        network.eval()
        with torch.no_grad():
            sds_test_acc, sds_test_loss = test_img(network, shared_data, args)

        # _______________________________________________ #
        # =================== STAGE 1 =================== #
        alpha_star = compute_alpha_star(len(randomized_clients), r1, r2)
        acc_best = 0
        for m in range(int(alpha_star)):
            # Train client `m` using a copy of the global model and then test its
            # accuracy on the test data set. This is to find the "optimal" test threshold
            # value for client selection.
            trainer = LocalUpdate(args, train_data, user_data_indices[m])
            local_model, local_loss = trainer.train(
                net=copy.deepcopy(network).to(args.device))
            local_network = copy.deepcopy(network)
            local_network.load_state_dict(local_model)
            local_network.eval()

            acc_client, loss_client = test_img(local_network,
                                               datatest=test_data,
                                               args=args)
            comm_cost.append(cost[m])
            if acc_client > acc_best:
                acc_best = acc_client
            # selected[clients[m]] = False

        # _______________________________________________ #
        # =================== STAGE 2 =================== #
        set_best = set()
        num_best = 0
        R = max(int(args.frac * args.num_users), 1)
        for m in range(int(alpha_star), len(randomized_clients)):
            if num_best == R:
                continue  # "Rejects" the client m.
            elif (len(randomized_clients) - m) <= (R - num_best):
                c = randomized_clients[m]
                selected.add(c)
                set_best.add(c)
                num_best += 1
            else:
                # client data m
                # acc_client, loss_client = test_img_user(network, datatest=train_data, idxs=user_data_indices[m],
                #                                         args=args)
                acc_client, loss_client = test_img(network,
                                                   datatest=test_data,
                                                   args=args)
                comm_cost.append(cost[m])
                if acc_client > acc_best:
                    c = randomized_clients[m]
                    selected.add(c)
                    set_best.add(c)
                    num_best += 1

        # _______________________________________________ #
        # =================== STAGE 3 =================== #
        # NOTE: K = 1 so that exactly one aggregation happens per
        # communication round in our setup.
        K = 1
        for _ in range(K):
            local_models, local_losses = [], []
            for client in selected:
                trainer = LocalUpdate(args, train_data,
                                      user_data_indices[client])
                local_model, local_loss = trainer.train(
                    net=copy.deepcopy(network).to(args.device))
                local_models.append(local_model)
                local_losses.append(local_loss)
                comm_cost.append(cost[client])
                client_participation_counter[client] += 1

            # Pad non-participants with the previous global weights so the
            # n_k-weighted average runs over all users.
            for _ in range(args.num_users - len(local_models)):
                local_models.append(pre_net_glob.state_dict())
            new_weights = fed_avg(local_models, n_k)
            # new_weights = FedAvg(local_models)
            network.load_state_dict(new_weights)
            pre_net_glob = copy.deepcopy(network)

        # _______________________________________________ #
        # ================= DATA SAVING ================= #
        network.eval()
        with torch.no_grad():
            test_acc, test_loss = test_img(network, test_data, args)

        global_data["Round"].append(comm_round)
        global_data["C"].append(args.frac)
        global_data["Average Loss Train"].append(np.mean(local_losses))
        global_data["SDS Loss"].append(float(sds_test_loss))
        global_data["SDS Accuracy"].append(float(sds_test_acc))
        global_data["Workers Number"].append(int(len(selected)))
        global_data["Large Test Loss"].append(float(test_loss))
        global_data["Large Test Accuracy"].append(float(test_acc))
        global_data["Communication Cost"].append(sum(comm_cost))

        comm_cost_total.append(sum(comm_cost))

    # Calculate each worker's participation percentage.
    for client in client_participation_counter:
        client_participation_counter[client] /= args.epochs

    network.eval()
    with torch.no_grad():
        final_train_acc, final_train_loss = test_img(network, train_data, args)
        final_test_acc, final_test_loss = test_img(network, test_data, args)

        global_eval_data["C"].append(args.frac)
        global_eval_data["Test Loss"].append(float(final_test_loss))
        global_eval_data["Test Accuracy"].append(float(final_test_acc))
        global_eval_data["Train Loss"].append(float(final_train_loss))
        global_eval_data["Train Accuracy"].append(float(final_train_acc))
        global_eval_data["Communication Cost"].append(sum(comm_cost_total))
        global_eval_data["Total Rounds"].append(args.epochs)

    return global_eval_data, global_data
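
The helper fed_avg(local_models, n_k) is called throughout these examples but its body never appears in the listing. Below is a minimal sketch of the sample-count-weighted FedAvg it appears to implement; the name, signature, and the convention that n_k[i] holds client i's sample count are inferred from the call sites, not taken from the repository.

import copy

def fed_avg_sketch(w_locals, n_k):
    # Weighted average of client state_dicts; weight i is client i's share
    # of the total sample count. Assumes w_locals[i] aligns with n_k[i]
    # (the call sites pad w_locals up to args.num_users for this reason).
    total = float(sum(n_k))
    w_avg = copy.deepcopy(w_locals[0])
    for key in w_avg:
        w_avg[key] = sum(w_locals[i][key] * (n_k[i] / total)
                         for i in range(len(w_locals)))
    return w_avg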
Code example #17
0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(args.device), target.to(args.device)
        optimizer.zero_grad()
        output = net_glob(data)
        loss = F.cross_entropy(output[-1], target)
        loss.backward()
        optimizer.step()
        batch_loss.append(loss.item())

    loss_avg = sum(batch_loss) / len(batch_loss)
    print('\nTrain loss:', loss_avg)
    list_loss.append(loss_avg)

    # testing
    net_glob.eval()
    acc_test, _ = test_img(net_glob, dataset_test, args)
    local_acc_test, _, _ = test_img_local(net_glob,
                                          dataset_test,
                                          test_dict_users, args)
    global_acc_tests.append(acc_test)
    local_acc_tests.append(sum(local_acc_test) / len(local_acc_test))
    print('Round {:3d}, Average global accuracy {:.2f}'.format(epoch, acc_test))
    print('Round {:3d}, Average local accuracy {:.2f}'.format(epoch, sum(local_acc_test) / len(local_acc_test)))
    net_glob.train()

# plot loss curve
plt.figure()
plt.plot(range(len(list_loss)), list_loss)
plt.ylabel('train_loss')
plt.savefig('./save/seed{}_naive_loss_{}_{}_{}_K{}C{}_noniid{}unb{}_E{}B{}.png'.format(
    args.manual_seed, args.dataset, args.model, args.epochs, args.num_users,
    args.frac, args.class_per_device, args.unbalance, 1, 100))
pickle.dump(list_loss,
            open('./save/list_loss.pkl', 'wb'))  # NOTE: output path assumed
Code example #18
0
    w_locals, loss_locals = [], []
    w0_locals, loss0_locals = [], []
    weight_div_list = []
    para_cl = []
    para_fl = []
    beta_locals, mu_locals, sigma_locals = [], [], []
    x_stat_locals, pxm_locals = [], []
    data_locals = [[] for i in range(args.epochs)]
    w_fl_iter, w_cl_iter = [], []
    deltaloss_fl_iter, deltaloss_cl_iter = [], []
    beta_max_his, mu_max_his, sigma_max_his = [], [], []
    acc_train_cl_his, acc_train_fl_his = [], []

    net_glob_cl.eval()
    acc_train_cl, loss_train_clxx = test_img(net_glob_cl, dataset_train, args)
    acc_test_cl, loss_test_clxx = test_img(net_glob_cl, dataset_test, args)
    acc_train_cl_his.append(acc_test_cl)
    acc_train_fl_his.append(acc_test_cl)
    print("Training accuracy: {:.2f}".format(acc_train_cl))
    print("Testing accuracy: {:.2f}".format(acc_test_cl))

    dict_users_iid = []
    for iter in range(args.num_users):
        dict_users_iid.extend(dict_users_iid_temp[iter])

    beta = -float("inf")
    lamb = float("inf")
    Ld = []

    for iter in range(args.epochs):  # num of iterations
Code example #19
0
                                test_dataset=Vehicle_test(testsets[idx]),
                                idxs=dict_users[idx])
            #            local = LocalUpdate( train_dataset=Vehicle_train(unbalanced_datasets[idx]),test_dataset=Vehicle_test(testsets[idx]), idxs=dict_users[idx])
            w, loss, res = local.train(net=copy.deepcopy(net_glob))
            lc_res = res
            tmp_res.append(sum(lc_res - lctmp_res) + 1)
            w_locals.append(copy.deepcopy(w))
            loss_locals.append(copy.deepcopy(loss))
        # update global weights
        tmp = np.array(tmp_res, dtype=np.float16)
        basis = tmp[0]
        tmp = tmp / basis
        w_glob = FedDyUp(w_locals, tmp, basis)
        net_glob.load_state_dict(w_glob)
        net_glob.eval()
        acc_test, loss_test = test_img(net_glob, Vehicle_test())
        loss_avg = sum(loss_locals) / len(loss_locals)
        print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
        loss_train.append(loss_avg)
    np.save('loss', loss_train)

    # testing
    #    net_glob=torch.load('model_fed_new.pkl')
    net_glob.eval()
    acc_train, loss_train = test_img(net_glob, Vehicle_train())
    acc_test, loss_test = test_img(net_glob, Vehicle_test())
    print("Training accuracy: {:.4f}".format(acc_train))
    print("Training loss: {}".format(loss_train))
    print("Testing accuracy: {:.4f}".format(acc_test))
    print("Testing loss:{}".format(loss_test))
Code example #20
0
def ICC_FL(net_glob, dict_workers_index, dict_users_data, dict_labels_counter_mainFL, args, cost,
           dataset_train, dataset_test, valid_ds, loss_test_final_main, optimal_delay):


    data_Global_DCFL = {"C": [], "Round": [], "Average Loss Train": [], "SDS Loss": [], "SDS Accuracy": [],
                        "Workers Number": [], "Large Test Loss": [], "Large Test Accuracy": [], "Communication Cost": []}
    Final_LargeDataSetTest_DCFL = {"C": [], "Test Accuracy": [], "Test Loss": [], "Train Loss": [],
                                   "Train Accuracy": [],
                                   "Total Rounds": [], "Communication Cost": []}
    # copy weights
    w_glob = net_glob.state_dict()

    temp = copy.deepcopy(net_glob)

    # training
    loss_train = []
    Loss_local_each_global_total = []

    selected_clients_costs_total = []
    loss_workers_total = np.zeros(shape=(args.num_users, 10 * args.epochs))

    workers_percent_dist = []
    workers_participation = np.zeros((args.num_users, 10 * args.epochs))
    workers = []
    for i in range(args.num_users):
        workers.append(i)

    n_k = np.zeros(shape=(args.num_users))
    for i in range(len(dict_users_data)):
        n_k[i] = len(dict_users_data[i])

    counter_threshold_decrease = np.zeros(10 * args.epochs)
    Global_Accuracy_Tracker = np.zeros(10 * args.epochs)
    Global_Loss_Tracker = np.zeros(10 * args.epochs)
    threshold = 1.0
    beta = 0.1  # delta-accuracy controller
    gamma = 0.05  # threshold decrease step

    Goal_Loss = float(loss_test_final_main)

    net_glob.eval()
    acc_test_final, loss_test_final = test_img(net_glob, dataset_test, args)
    while_counter = float(loss_test_final)
    iter = 0

    total_rounds_dcfl = 0
    pre_net_glob = copy.deepcopy(net_glob)

    while abs(while_counter - Goal_Loss) >= 0.05:
        selected_clients_costs_round = []
        w_locals, loss_locals = [], []
        m = max(int(args.frac * args.num_users), 1)
        counter_threshold = 0

        x = net_glob
        x.eval()
        acc_test_global, loss_test_global = test_img(x, valid_ds, args)
        Loss_local_each_global_total.append(acc_test_global)
        Global_Accuracy_Tracker[iter] = acc_test_global
        Global_Loss_Tracker[iter] = loss_test_global
        if iter > 0 & (Global_Loss_Tracker[iter-1] - Global_Loss_Tracker[iter] <= beta):
            threshold = threshold - gamma
            if threshold == 0.0:
                threshold = 1.0
        workers_count = 0


        temp_w_locals = []
        temp_workers_loss = np.zeros(args.num_users)
        temp_workers_accuracy = np.zeros(args.num_users)
        temp_workers_loss_test = np.zeros(args.num_users)
        temp_workers_loss_difference = np.zeros(args.num_users)
        flag = np.zeros(args.num_users)

        list_of_random_workers_newfl = []
        if iter < (args.epochs):
            for key, value in dict_workers_index.items():
                if key == iter:
                    list_of_random_workers_newfl = dict_workers_index[key]
        else:
            list_of_random_workers_newfl = random.sample(workers, m)


        for idx in list_of_random_workers_newfl:
            initial_global_model = copy.deepcopy(net_glob).to(args.device)
            initial_global_model.eval()
            acc_test_local_initial, loss_test_local_initial = test_img(initial_global_model, valid_ds, args)


            local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users_data[idx])
            w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))

            temp_w_locals.append(copy.deepcopy(w))
            temp_workers_loss[idx] = copy.deepcopy(loss)

            temp.load_state_dict(w)
            temp.eval()

            acc_test_local_after, loss_test_local_after = test_img(temp, valid_ds, args)
            loss_workers_total[idx, iter] = loss_test_local_after
            temp_workers_accuracy[idx] = acc_test_local_after
            temp_workers_loss_test[idx] = loss_test_local_after
            temp_workers_loss_difference[idx] = abs(loss_test_local_after - loss_test_local_initial)

        while len(w_locals) < 1:
            index = 0
            for idx in list_of_random_workers_newfl:
                if workers_count >= m:
                    break
                elif temp_workers_loss_test[idx] <= threshold and flag[idx]==0 and cost[idx] <= optimal_delay:
                    w_locals.append(copy.deepcopy(temp_w_locals[index]))
                    loss_locals.append(temp_workers_loss[idx])
                    flag[idx] = 1
                    workers_count += 1
                    workers_participation[idx][iter] = 1
                    selected_clients_costs_round.append(cost[idx])
                index += 1
            if len(w_locals) < 1:
                # No client met the threshold: relax it and sweep again.
                threshold = threshold * 2

        # update global weights
        w_glob = FedAvg(w_locals)

        # for n in range(args.num_users - len(w_locals)):
        #     w_locals.append(pre_net_glob.state_dict())
        # w_glob = fed_avg(w_locals, n_k)

        # copy weight to net_glob
        net_glob.load_state_dict(w_glob)

        loss_avg = sum(loss_locals) / len(loss_locals)
        loss_train.append(loss_avg)
        workers_percent_dist.append(workers_count/args.num_users)


        counter_threshold_decrease[iter] = counter_threshold
        print(iter, " round dist fl finished")


        acc_test_final, loss_test_final = test_img(net_glob, dataset_test, args)
        while_counter = loss_test_final


        data_Global_DCFL["Round"].append(iter)
        data_Global_DCFL["C"].append(args.frac)
        data_Global_DCFL["Average Loss Train"].append(loss_avg)
        data_Global_DCFL["SDS Accuracy"].append(Global_Accuracy_Tracker[iter])
        data_Global_DCFL["SDS Loss"].append(Global_Loss_Tracker[iter])
        data_Global_DCFL["Workers Number"].append(workers_count)
        data_Global_DCFL["Large Test Loss"].append(float(loss_test_final))
        data_Global_DCFL["Large Test Accuracy"].append(float(acc_test_final))
        data_Global_DCFL["Communication Cost"].append(sum(selected_clients_costs_round))

        selected_clients_costs_total.append(sum(selected_clients_costs_round))

        iter += 1
        total_rounds_dcfl = iter

        pre_net_glob = copy.deepcopy(net_glob)

    # Percentage of rounds in which each worker participated.
    workers_percent_final = np.zeros(args.num_users)
    workers_name = np.zeros(args.num_users)
    for i in range(len(workers_participation[:, 1])):
        workers_percent_final[i] = sum(workers_participation[i, :]) / (iter - 1)
        workers_name[i] = i

    # selected_clients_costs_total.append(sum(selected_clients_costs_round))

    # testing
    net_glob.eval()
    acc_train_final, loss_train_final = test_img(net_glob, dataset_train, args)
    acc_test_final, loss_test_final = test_img(net_glob, dataset_test, args)


    Final_LargeDataSetTest_DCFL["C"].append(args.frac)
    Final_LargeDataSetTest_DCFL["Test Loss"].append(float(loss_test_final))
    Final_LargeDataSetTest_DCFL["Test Accuracy"].append(float(acc_test_final))
    Final_LargeDataSetTest_DCFL["Train Loss"].append(float(loss_train_final))
    Final_LargeDataSetTest_DCFL["Train Accuracy"].append(float(acc_train_final))
    Final_LargeDataSetTest_DCFL["Total Rounds"].append(int(total_rounds_dcfl))
    Final_LargeDataSetTest_DCFL["Communication Cost"].append(sum(selected_clients_costs_total))


    return Final_LargeDataSetTest_DCFL, data_Global_DCFL
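
The core of ICC_FL is the adaptive loss threshold: a client is accepted when its post-training validation loss and its delay both fall under the current bounds, and the threshold doubles whenever a round would otherwise select nobody. A stripped-down sketch of that rule (illustrative names, not the repository's API):

def select_by_threshold(losses, costs, threshold, optimal_delay, m):
    # losses[i], costs[i]: candidate i's validation loss and delay.
    chosen = []
    while not chosen:
        for i, (l, c) in enumerate(zip(losses, costs)):
            if len(chosen) >= m:
                break
            if i not in chosen and l <= threshold and c <= optimal_delay:
                chosen.append(i)
        if not chosen:
            # Relax until someone qualifies. (Like the listing, this loops
            # forever if no client ever meets the delay bound.)
            threshold *= 2
    return chosen, threshold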
Code example #21
0
            loss_locals.append(copy.deepcopy(loss))
            loss_consistent_locals.append(copy.deepcopy(loss_consistent))

        glob_comu.append(sum(epoch_comu)/len(epoch_comu))

        diff_w_old = get_median(diff_w_old_dic, iter, args)

        w_glob = FedAvg(w_locals)
        w_ema_glob = FedAvg(w_ema_locals)

        net_glob.load_state_dict(w_glob)
        net_ema_glob.load_state_dict(w_ema_glob)

        net_glob.eval()
        net_ema_glob.eval()
        acc_valid, loss_valid = test_img(net_glob, dataset_valid, args)
        acc_ema_valid, loss_ema_valid = test_img(net_ema_glob, dataset_valid, args)
        if loss_valid <= best_loss_valid:
            best_loss_valid = loss_valid
            w_best = copy.deepcopy(w_glob)
        if loss_ema_valid <= best_ema_loss_valid:
            best_ema_loss_valid = loss_ema_valid
            w_ema_best = copy.deepcopy(w_ema_glob)

        loss_avg = sum(loss_locals) / len(loss_locals)
        loss_consistent_avg = sum(loss_consistent_locals) / len(loss_consistent_locals)
        print('Round {:3d}, loss {:.3f}, acc_valid {:.2f}%, acc_ema_valid {:.2f}%'
            .format(iter, loss_avg, acc_valid, acc_ema_valid))
        loss_train.append(loss_avg)
        
    net_glob.load_state_dict(w_best)
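
This fragment keeps two global models per round, one averaged from the plain client weights and one from their exponential-moving-average (EMA) counterparts, and checkpoints whichever achieves the lowest validation loss. The local EMA update itself lies outside the fragment; a common form, shown here only as an assumed sketch, is:

import torch

@torch.no_grad()
def ema_update(ema_model, model, decay=0.999):
    # Standard parameter EMA; the fragment's w_ema_locals are assumed to
    # come from an update of this shape on each client.
    for p_ema, p in zip(ema_model.parameters(), model.parameters()):
        p_ema.mul_(decay).add_(p, alpha=1.0 - decay)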
Code example #22
0
File: MainFl.py Project: khamfroush-lab/Fed-MEC
def mainFl(
    net_glob_mainFL: Any,
    dict_users_mainFL: Dict[int, Any],
    dict_labels_counter_mainFL,
    args,
    cost,
    dataset_train,
    dataset_test,
    small_shared_dataset
):
    """

    Args:
        net_glob_mainFL (torch.nn.Module): the global model.
        dict_users_mainFL (Dict[int, Any]): per-user data indices;
            dict_users_mainFL[idx_user] gives the indices of that user's samples.
        dict_labels_counter_mainFL: per-user label counts; currently unused.
        args: all run arguments (see utils/options.py for details).
        cost: per-user cost of sending a locally updated model to the server;
            currently unused.
        dataset_train (torch dataset): the full training set; dict_users_mainFL
            only stores indices into it.
        dataset_test (torch dataset): the full test set.
        small_shared_dataset (torch dataset): a small shared dataset used here
            only for tracking and for comparison with our algorithm, not for
            decision making.

    Returns:
        float(loss_test_final_main): final test loss on the main test dataset.
        dict_workers_index: indices of the workers selected in each round,
            reused by the other algorithms.
        Final_LargeDataSetTest_MainFL: macroscopic records, one entry per full
            FL run (one per C).
        data_Global_main: microscopic records, one entry per round.

    """

    data_Global_main = {"C": [], "Round": [], "Average Loss Train": [], "SDS Loss": [], "SDS Accuracy": [],
                        "Workers Number": [], "Large Test Loss": [], "Large Test Accuracy": [], "Communication Cost": []}
    Final_LargeDataSetTest_MainFL = {"C": [], "Test Accuracy": [], "Test Loss": [], "Train Loss": [],
                                     "Train Accuracy": [], "Total Rounds": [], "Communication Cost": []}

    # saving index of workers
    dict_workers_index = defaultdict(list)

    n_k = np.zeros(shape=(args.num_users))
    for i in range(len(dict_users_mainFL)):
        n_k[i] = len(dict_users_mainFL[i])
    # print(n_k)

    # Main FL

    # average of the clients' training losses, one entry per round
    loss_train_mainFL = []
    # global model's loss/accuracy on the small shared dataset, per round
    Loss_local_each_global_total_mainFL = []
    Accuracy_local_each_global_total_mainFL = []
    # each worker's loss on the small shared dataset in each round
    loss_workers_total_mainFL = np.zeros(shape=(args.num_users, args.epochs))
    label_workers_mainFL = {i: np.array(
        [], dtype='int64') for i in range(args.num_users)}

    # test accuracy of the global model after each round
    validation_test_mainFed = []
    acc_test, loss_test = test_img(net_glob_mainFL, dataset_test, args)
    workers_participation_main_fd = np.zeros((args.num_users, args.epochs))
    workers_percent_main = []

    net_glob_mainFL.eval()
    acc_test_final_mainFL, loss_test_final_mainFL = test_img(
        net_glob_mainFL, dataset_test, args)
    print("main fl initial loss is ", loss_test_final_mainFL)

    # while counter initialization
    iter_mainFL = 0

    # assign index to each worker in workers_mainFL arr
    workers_mainFL = []
    for i in range(args.num_users):
        workers_mainFL.append(i)

    temp_netglob_mainFL = copy.deepcopy(net_glob_mainFL)

    selected_clients_costs_total = []
    total_rounds_mainFL = 0

    pre_net_glob = copy.deepcopy(net_glob_mainFL)

    while iter_mainFL < (args.epochs):
        # print(f"iter {iter_mainFL} is started")
        selected_clients_costs_round = []
        w_locals_mainFL, loss_locals_mainFL = [], []
        m_mainFL = max(int(args.frac * args.num_users), 1)

        # selecting some clients randomly and save the index of them for use in other algorithms
        list_of_random_workers = random.sample(workers_mainFL, m_mainFL)
        # print("list of random workers is ", list_of_random_workers)
        for i in range(len(list_of_random_workers)):
            dict_workers_index[iter_mainFL].append(list_of_random_workers[i])

        # calculating and saving initial loss of global model over small shared dataset for just record
        x_mainFL = copy.deepcopy(net_glob_mainFL)
        x_mainFL.eval()
        acc_test_global_mainFL, loss_test_global_mainFL = test_img(
            x_mainFL, small_shared_dataset, args)
        Loss_local_each_global_total_mainFL.append(loss_test_global_mainFL)
        Accuracy_local_each_global_total_mainFL.append(acc_test_global_mainFL)
        # print("loss global is ", loss_test_global_mainFL)
        # print("accuracy global is ", acc_test_global_mainFL)
        workers_count_mainFL = 0
        for idx in list_of_random_workers:
            # start training each selected client
            # print("idx is ", idx)
            local_mainFL = LocalUpdate(
                args=args, dataset=dataset_train, idxs=dict_users_mainFL[idx])
            w_mainFL, loss_mainFL = local_mainFL.train(
                net=copy.deepcopy(net_glob_mainFL).to(args.device))

            # copy its updated weights
            w_locals_mainFL.append(copy.deepcopy(w_mainFL))
            # copy the training loss of that client
            loss_locals_mainFL.append(loss_mainFL)

            temp_netglob_mainFL.load_state_dict(w_mainFL)
            # test the locally updated model over small shared dataset and save its loss and accuracy for record
            temp_netglob_mainFL.eval()
            acc_test_local_mainFL, loss_test_local_mainFL = test_img(
                temp_netglob_mainFL, small_shared_dataset, args)
            # print("client loss is ", loss_test_local_mainFL)
            # print("accuracy of client is ", acc_test_local_mainFL)
            # loss_workers_total_mainFL[idx, iter_mainFL] = acc_test_local_mainFL
            # saving how many times each client is participating for just record
            workers_participation_main_fd[idx][iter_mainFL] = 1
            # saving total number of clients participated in that round (equal to C*N)
            workers_count_mainFL += 1
            selected_clients_costs_round.append(cost[idx])

        # Add others clients weights who did not participate
        # for i in range(args.num_users - len(list_of_random_workers)):
        #     w_locals_mainFL.append(pre_weights.state_dict())

        # update global weights
        # w_glob_mainFL = FedAvg(w_locals_mainFL)


        for n in range(args.num_users - m_mainFL):
            w_locals_mainFL.append(pre_net_glob.state_dict())
        # NOTE: Updated weights (@author Nathaniel).
        w_glob_mainFL = fed_avg(w_locals_mainFL, n_k)

        # copy weight to net_glob
        net_glob_mainFL.load_state_dict(w_glob_mainFL)
        # print("after ", net_glob_mainFL)

        # calculating average training loss
        # print(loss_locals_mainFL)
        loss_avg_mainFL = sum(loss_locals_mainFL) / len(loss_locals_mainFL)
        loss_train_mainFL.append(loss_avg_mainFL)
        # print(loss_avg_mainFL)

        # calculating test loss and accuracy over main large test dataset
        acc_test_round_mainfed, loss_test_round_mainfed = test_img(
            net_glob_mainFL, dataset_test, args)
        validation_test_mainFed.append(acc_test_round_mainfed)
        workers_percent_main.append(workers_count_mainFL / args.num_users)
        # calculating accuracy and loss over the main test dataset
        acc_test_final_mainFL, loss_test_final_mainFL = test_img(
            net_glob_mainFL, dataset_test, args)

        data_Global_main["Round"].append(iter_mainFL)
        data_Global_main["C"].append(args.frac)
        data_Global_main["Average Loss Train"].append(float(loss_avg_mainFL))
        data_Global_main["SDS Loss"].append(float(loss_test_global_mainFL))
        data_Global_main["SDS Accuracy"].append(float(acc_test_global_mainFL))
        data_Global_main["Workers Number"].append(float(workers_count_mainFL))
        data_Global_main["Large Test Loss"].append(
            float(loss_test_final_mainFL))
        data_Global_main["Large Test Accuracy"].append(
            float(acc_test_final_mainFL))
        data_Global_main["Communication Cost"].append(
            sum(selected_clients_costs_round))

        # TODO: This doesn't make sense?
        selected_clients_costs_total.append(sum(selected_clients_costs_round))

        iter_mainFL += 1
        # total_rounds_mainFL = iter_mainFL
        pre_net_glob = copy.deepcopy(net_glob_mainFL)

        # print(f"iter {iter_mainFL} is finished")

    # calculate each worker's participation percentage
    workers_percent_final_mainFL = np.zeros(args.num_users)
    workers_name_mainFL = np.empty(args.num_users)
    for i in range(len(workers_participation_main_fd[:, 1])):
        workers_percent_final_mainFL[i] = sum(
            workers_participation_main_fd[i, :]) / args.epochs
        workers_name_mainFL[i] = i

    net_glob_mainFL.eval()
    # print("train test started")
    acc_train_final_main, loss_train_final_main = test_img(
        net_glob_mainFL, dataset_train, args)
    # print("train test finished")
    acc_test_final_main, loss_test_final_main = test_img(
        net_glob_mainFL, dataset_test, args)

    Final_LargeDataSetTest_MainFL["C"].append(args.frac)
    Final_LargeDataSetTest_MainFL["Test Loss"].append(
        float(loss_test_final_main))
    Final_LargeDataSetTest_MainFL["Test Accuracy"].append(
        float(acc_test_final_main))
    Final_LargeDataSetTest_MainFL["Train Loss"].append(
        float(loss_train_final_main))
    Final_LargeDataSetTest_MainFL["Train Accuracy"].append(
        float(acc_train_final_main))
    Final_LargeDataSetTest_MainFL["Communication Cost"].append(
        sum(selected_clients_costs_total))
    Final_LargeDataSetTest_MainFL["Total Rounds"].append(args.epochs)

    return float(loss_test_final_main), dict_workers_index, Final_LargeDataSetTest_MainFL, data_Global_main
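
mainFl is the plain-FedAvg baseline whose outputs seed the other algorithms in this listing: the final test loss becomes their stopping target, and the per-round worker indices let them replay the same client draws. A sketch of how the pieces plug together, assuming the objects from the earlier examples (including valid_ds and optimal_delay) are already built:

loss_goal, workers_index, main_eval, main_rounds = mainFl(
    net_glob_mainFL, dict_users_mainFL, dict_labels_counter_mainFL,
    args, cost, dataset_train, dataset_test, small_shared_dataset)

# Replay the same client schedule and stop once the baseline loss is matched.
icc_eval, icc_rounds = ICC_FL(
    copy.deepcopy(net_glob_mainFL), workers_index, dict_users_mainFL,
    dict_labels_counter_mainFL, args, cost, dataset_train, dataset_test,
    valid_ds, loss_goal, optimal_delay)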
    #print("COUNT DATA",str(count_array))
    


    print("NO ATTACK DATA=",loss_train)
    print("1 ATTACK DATA=",loss_train_1)
    print("5 ATTACK DATA=",loss_train_5)
    print("7 ATTACK DATA=",loss_train_7)
    print("10 ATTACK DATA=",loss_train_10)


    # testing
    net_glob.eval()
    #print("Agent_Found_Count",agent_found_count)
    acc_train, loss_train = test_img(net_glob, dataset_train, args)
    acc_test, loss_test = test_img(net_glob, dataset_test, args)
    print("Training accuracy (NO ATTACK): {:.2f}".format(acc_train))
    print("Testing accuracy (NO ATTACK): {:.2f}".format(acc_test))

    net_glob1.eval()
    acc_train1, loss_train_1 = test_img(net_glob1, dataset_train, args)
    acc_test1, loss_test_1 = test_img(net_glob1, dataset_test, args)
    print("Training accuracy (CONSTANT ATTACK 1): {:.2f}".format(acc_train1))
    print("Testing accuracy (CONSTANT ATTACK 1): {:.2f}".format(acc_test1))


    net_glob5.eval()
    acc_train5, loss_train_5 = test_img(net_glob5, dataset_train, args)
    acc_test5, loss_test_5 = test_img(net_glob5, dataset_test, args)
    print("Training accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_train5))
Code example #24
0
            comp_time.append(end_time - start_time)
        loss_avg = sum(loss_clusters) / len(loss_clusters)
        # Aggregate the models produced by each cluster
        start_time = time.time()
        w_glob = Semi_FedAvg(w_clusters)
        end_time = time.time()
        fed_time = end_time - start_time
        delta_time = delta_time + (max(comp_time) + fed_time)
        net_glob.load_state_dict(w_glob)
        # Save the model from each round
        # torch.save(net_glob.state_dict(),'./semi-fed/logmodel/1/model_'+'%d'%iter+'.pkl')

        # test
        net_glob.eval()
        # acc_train, loss_train = test_img(net_glob.to(args.device), dataset_train, args)
        acc_test, loss_test = test_img(net_glob.to(args.device), dataset_test,
                                       args)
        if acc_test >= acc_threshold:
            duration.append(delta_time)
            duration.append(iter + 1)
            print("duration: {}s".format(duration[0]))
            text_save(
                './log/sfl/Time_{}_iid{}.csv'.format(args.dataset, args.iid),
                duration)
            break
        if not should_stop:
            if acc_test > acc_best:
                stop_cnt = 0
                acc_best = acc_test
                net_best = copy.deepcopy(net_glob)
            else:
                stop_cnt += 1
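
In this semi-synchronous loop, the wall-clock time of a round is modeled as the slowest cluster's local computation plus the server-side aggregation, which is exactly what delta_time accumulates via max(comp_time) + fed_time. A compact illustration of that accounting (numbers assumed, for intuition):

# Per-cluster local training times for one round, in seconds (assumed):
comp_time = [3.2, 4.7, 2.9]
fed_time = 0.4                           # server aggregation time
round_time = max(comp_time) + fed_time   # 4.7 + 0.4 = 5.1 s
# The round only finishes when the slowest cluster reports in.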
Code example #25
0
            ("EPOCH", "LAYER_NAME", "MEAN", "STD", "MIN", "MAX", "LAYER_NAME",
             "MEAN", "STD", "MIN", "MAX", "LAYER_NAME", "MEAN", "STD", "MIN",
             "MAX", "LAYER_NAME", "MEAN", "STD", "MIN", "MAX"))

        for items in descriptive_stats.keys():
            # Emit one row per epoch: the epoch key followed by each layer's
            # (name, mean, std, min, max) statistics.
            row = [items]
            for layer in ("conv1", "conv2", "fc1", "fc2"):
                stats = descriptive_stats[items][layer]
                row.extend((layer, stats[1], stats[2], stats[3], stats[4]))
            my_writer.writerow(row)

    # testing
    net_glob.eval()
    #print("Agent_Found_Count",agent_found_count)
    acc_train, loss_train = test_img(net_glob, dataset_train, args)
    acc_test, loss_test = test_img(net_glob, dataset_test, args)
    print("Training accuracy (NO ATTACK): {:.2f}".format(acc_train))
    print("Testing accuracy (NO ATTACK): {:.2f}".format(acc_test))
Code example #26
0
def Proposed_G1(net_glob, dict_workers_index, dict_users_data,
                dict_labels_counter_mainFL, args, cost, dataset_train,
                dataset_test, valid_ds, loss_test_final_main,
                optimal_clients_number, optimal_delay):

    data_Global_DCFL = {
        "C": [],
        "Round": [],
        "Average Loss Train": [],
        "SDS Loss": [],
        "SDS Accuracy": [],
        "Workers Number": [],
        "Large Test Loss": [],
        "Large Test Accuracy": [],
        "Communication Cost": []
    }
    Final_LargeDataSetTest_DCFL = {
        "C": [],
        "Test Accuracy": [],
        "Test Loss": [],
        "Train Loss": [],
        "Train Accuracy": [],
        "Total Rounds": [],
        "Communication Cost": []
    }
    # copy weights
    # w_glob = net_glob.state_dict()

    temp = copy.deepcopy(net_glob)

    # training
    loss_train = []
    Loss_local_each_global_total = []
    selected_clients_costs_total = []

    loss_workers_total = np.zeros(shape=(args.num_users, 100 * args.epochs))

    workers_percent_dist = []
    workers_participation = np.zeros((args.num_users, 100 * args.epochs))
    workers = []
    for i in range(args.num_users):
        workers.append(i)

    n_k = np.zeros(shape=(args.num_users))
    for i in range(len(dict_users_data)):
        n_k[i] = len(dict_users_data[i])

    Global_Accuracy_Tracker = np.zeros(100 * args.epochs)
    Global_Loss_Tracker = np.zeros(100 * args.epochs)

    Goal_Loss = float(loss_test_final_main)

    net_glob.eval()
    acc_test_final, loss_test_final = test_img(net_glob, dataset_test, args)
    while_counter = float(loss_test_final)
    iter = 0

    total_rounds_dcfl = 0
    pre_net_glob = copy.deepcopy(net_glob)

    while abs(while_counter - Goal_Loss) >= 0.05:
        # print("G1 Loss is ", while_counter)
        selected_clients_costs_round = []
        w_locals, loss_locals = [], []
        m = max(int(args.frac * args.num_users), 1)

        x = net_glob
        x.eval()
        acc_test_global, loss_test_global = test_img(x, valid_ds, args)
        Loss_local_each_global_total.append(acc_test_global)
        Global_Accuracy_Tracker[iter] = acc_test_global
        Global_Loss_Tracker[iter] = loss_test_global
        workers_count = 0

        temp_w_locals = []
        temp_workers_loss = np.zeros(args.num_users)
        temp_workers_accuracy = np.zeros(args.num_users)
        temp_workers_loss_test = np.zeros(args.num_users)
        temp_workers_loss_difference = np.zeros((args.num_users, 2))
        flag = np.zeros(args.num_users)

        list_of_random_workers_newfl = []
        if iter < (args.epochs):
            for key, value in dict_workers_index.items():
                if key == iter:
                    list_of_random_workers_newfl = dict_workers_index[key]
        else:
            list_of_random_workers_newfl = random.sample(workers, m)

        initial_global_model = copy.deepcopy(net_glob).to(args.device)
        initial_global_model.eval()

        for idx in list_of_random_workers_newfl:
            local = LocalUpdate(args=args,
                                dataset=dataset_train,
                                idxs=dict_users_data[idx])
            w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))

            temp_w_locals.append(copy.deepcopy(w))
            temp_workers_loss[idx] = copy.deepcopy(loss)

            temp.load_state_dict(w)
            temp.eval()

            acc_test_local_after, loss_test_local_after = test_img(
                temp, valid_ds, args)
            temp_workers_accuracy[idx] = acc_test_local_after
            temp_workers_loss_test[idx] = loss_test_local_after
            temp_workers_loss_difference[idx, 0] = int(idx)
            temp_workers_loss_difference[idx, 1] = (loss_test_local_after)

        global_loss_diff = Global_Loss_Tracker[iter]
        if global_loss_diff >= 0:
            # Test loss is non-negative, so this gate always passes; it is
            # kept to preserve the selection structure.
            for i in range(len(temp_w_locals)):
                if cost[int(temp_workers_loss_difference[i, 0])] <= optimal_delay and\
                        temp_workers_loss_difference[i, 1] >= global_loss_diff:
                    w_locals.append(copy.deepcopy(temp_w_locals[i]))
                    loss_locals.append(temp_workers_loss[int(
                        temp_workers_loss_difference[i, 0])])
                    flag[int(temp_workers_loss_difference[i, 0])] = 1
                    workers_count += 1
                    workers_participation[int(
                        temp_workers_loss_difference[i, 0])][iter] = 1
                    selected_clients_costs_round.append(cost[int(
                        temp_workers_loss_difference[i, 0])])
        if len(w_locals) < 1:
            for i in range(len(temp_w_locals)):
                w_locals.append(copy.deepcopy(temp_w_locals[i]))
                loss_locals.append(temp_workers_loss[int(
                    temp_workers_loss_difference[i, 0])])
                flag[int(temp_workers_loss_difference[i, 0])] = 1
                workers_count += 1
                workers_participation[int(
                    temp_workers_loss_difference[i, 0])][iter] = 1
                selected_clients_costs_round.append(cost[int(
                    temp_workers_loss_difference[i, 0])])

        # update global weights
        # w_glob = FedAvg(w_locals)

        for n in range(args.num_users - len(w_locals)):
            w_locals.append(pre_net_glob.state_dict())
        w_glob = fed_avg(w_locals, n_k)

        # copy weight to net_glob
        net_glob.load_state_dict(w_glob)

        #print("round completed")
        if len(loss_locals) > 0:
            loss_avg = sum(loss_locals) / len(loss_locals)
        else:
            loss_avg = None
        loss_train.append(loss_avg)
        workers_percent_dist.append(workers_count / args.num_users)
        print(iter, " round G1 fl finished")

        acc_test_final, loss_test_final = test_img(net_glob, dataset_test,
                                                   args)
        while_counter = loss_test_final

        data_Global_DCFL["Round"].append(iter)
        data_Global_DCFL["C"].append(args.frac)
        data_Global_DCFL["Average Loss Train"].append(loss_avg)
        data_Global_DCFL["SDS Accuracy"].append(Global_Accuracy_Tracker[iter])
        data_Global_DCFL["SDS Loss"].append(Global_Loss_Tracker[iter])
        data_Global_DCFL["Workers Number"].append(workers_count)
        data_Global_DCFL["Large Test Loss"].append(float(loss_test_final))
        data_Global_DCFL["Large Test Accuracy"].append(float(acc_test_final))
        data_Global_DCFL["Communication Cost"].append(
            sum(selected_clients_costs_round))

        selected_clients_costs_total.append(sum(selected_clients_costs_round))

        iter += 1
        total_rounds_dcfl = iter
        pre_net_glob = copy.deepcopy(net_glob)

    # Percentage of rounds in which each worker participated.
    workers_percent_final = np.zeros(args.num_users)
    workers_name = np.zeros(args.num_users)
    for i in range(len(workers_participation[:, 1])):
        workers_percent_final[i] = sum(
            workers_participation[i, :]) / (iter - 1)
        workers_name[i] = i

    # testing
    net_glob.eval()
    acc_train_final, loss_train_final = test_img(net_glob, dataset_train, args)
    acc_test_final, loss_test_final = test_img(net_glob, dataset_test, args)

    Final_LargeDataSetTest_DCFL["C"].append(args.frac)
    Final_LargeDataSetTest_DCFL["Test Loss"].append(float(loss_test_final))
    Final_LargeDataSetTest_DCFL["Test Accuracy"].append(float(acc_test_final))
    Final_LargeDataSetTest_DCFL["Train Loss"].append(float(loss_train_final))
    Final_LargeDataSetTest_DCFL["Train Accuracy"].append(
        float(acc_train_final))
    Final_LargeDataSetTest_DCFL["Total Rounds"].append(int(total_rounds_dcfl))
    Final_LargeDataSetTest_DCFL["Communication Cost"].append(
        sum(selected_clients_costs_total))

    return Final_LargeDataSetTest_DCFL, data_Global_DCFL
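
Proposed_G1's acceptance rule keeps a trained client when its delay is within optimal_delay and its post-training validation loss is at least the current global validation loss, falling back to accepting everyone if no client qualifies. A condensed sketch of that rule (illustrative names, not the repository's API):

def g1_accept(local_val_losses, delays, global_val_loss, optimal_delay):
    # local_val_losses[i], delays[i]: client i's post-training validation
    # loss and communication delay.
    kept = [i for i, (l, d) in enumerate(zip(local_val_losses, delays))
            if d <= optimal_delay and l >= global_val_loss]
    # Fallback mirrors the listing: never aggregate an empty update set.
    return kept or list(range(len(local_val_losses)))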
Code example #27
0
    #plt.ylabel('train_loss')
    #plt.savefig('log/fed_{}_{}_{}_C{}_iid{}.png'.format(args.dataset, args.model, args.epochs, args.frac, args.iid))
    #print("COUNT DATA",str(count_array))
    print("NO ATTACK DATA=", loss_train)
    print("1 ATTACK DATA=", loss_train_1)
    print("5 ATTACK DATA=", loss_train_5)
    print("10 ATTACK DATA=", loss_train_10)
    print("15 ATTACK DATA=", loss_train_15)
    print("20 ATTACK DATA=", loss_train_20)
    print("25 ATTACK DATA=", loss_train_25)
    print("30 ATTACK DATA=", loss_train_30)

    # testing
    net_glob.eval()
    #print("Agent_Found_Count",agent_found_count)
    acc_train, loss_train = test_img(net_glob, dataset_train, args)
    acc_test, loss_test = test_img(net_glob, dataset_test, args)
    print("Training accuracy (NO ATTACK): {:.2f}".format(acc_train))
    print("Testing accuracy (NO ATTACK): {:.2f}".format(acc_test))

    net_glob1.eval()
    acc_train1, loss_train_1 = test_img(net_glob1, dataset_train, args)
    acc_test1, loss_test_1 = test_img(net_glob1, dataset_test, args)
    print("Training accuracy (CONSTANT ATTACK 1): {:.2f}".format(acc_train1))
    print("Testing accuracy (CONSTANT ATTACK 1): {:.2f}".format(acc_test1))

    net_glob5.eval()
    acc_train5, loss_train_5 = test_img(net_glob5, dataset_train, args)
    acc_test5, loss_test_5 = test_img(net_glob5, dataset_test, args)
    print("Training accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_train5))
    print("Testing accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_test5))
Code example #28
0
def pi_pow_d(network: torch.nn.Module, user_data_indices: Dict[int, Any],
             labels_counter: Dict[int, Any], args: Any, cost: Any,
             train_data: DataLoader, test_data: DataLoader,
             shared_data: DataLoader, d, **kwargs) -> Tuple:
    assert int(args.frac * args.num_users) <= d <= args.num_users

    global_data = defaultdict(list)
    global_eval_data = defaultdict(list)
    workers_index = defaultdict(list)
    # client_participation_counter = defaultdict(list)
    comm_cost_total = []
    n_k = np.zeros(shape=(args.num_users))
    for i in range(len(user_data_indices)):
        n_k[i] = len(user_data_indices[i])

    for comm_round in range(args.epochs):
        network.eval()
        with torch.no_grad():
            sds_test_acc, sds_test_loss = test_img(network, shared_data, args)

        ##
        # PART 1: Sample the candidate client set.
        ##
        comm_cost = []
        num_selected = max(1, int(d))
        selected = random.sample(list(user_data_indices.keys()), num_selected)

        ##
        # PART 2: Estimate local losses.
        ##
        local_losses = {}
        with torch.no_grad():
            for client in selected:
                _, local_test_loss = test_img_user(
                    copy.deepcopy(network).to(args.device), train_data,
                    user_data_indices[client], args)
                local_losses[client] = local_test_loss
                # I think this part should be removed
                # comm_cost.append(cost[m])

        ##
        # PART 3: Select highest loss clients.
        ##
        m = max(1, int(args.frac * args.num_users))
        top_m_clients = nlargest(m, local_losses, key=local_losses.get)
        local_models, local_losses = [], []
        for client in top_m_clients:
            trainer = LocalUpdate(args, train_data, user_data_indices[client])
            local_model, local_loss = trainer.train(
                net=copy.deepcopy(network).to(args.device))
            local_models.append(local_model)
            local_losses.append(local_loss)
            comm_cost.append(cost[client])
            # client_participation_counter[client] += 1

        for _ in range(args.num_users - len(local_models)):
            local_models.append(copy.deepcopy(network.state_dict()))
        new_weights = fed_avg(local_models, n_k)
        network.load_state_dict(new_weights)

        ##
        # PART 4: Data saving.
        ##
        network.eval()
        with torch.no_grad():
            test_acc, test_loss = test_img(network, test_data, args)

        global_data["Round"].append(comm_round)
        global_data["C"].append(args.frac)
        global_data["Average Loss Train"].append(np.mean(local_losses))
        global_data["SDS Loss"].append(float(sds_test_loss))
        global_data["SDS Accuracy"].append(float(sds_test_acc))
        global_data["Workers Number"].append(int(len(selected)))
        global_data["Large Test Loss"].append(float(test_loss))
        global_data["Large Test Accuracy"].append(float(test_acc))
        global_data["Communication Cost"].append(sum(comm_cost))

        comm_cost_total.append(sum(comm_cost))

    # # Calculate the percentage of each workers' participation.
    # for client in client_participation_counter:
    #     client_participation_counter[client] /= args.epochs

    network.eval()
    with torch.no_grad():
        final_train_acc, final_train_loss = test_img(network, train_data, args)
        final_test_acc, final_test_loss = test_img(network, test_data, args)

        global_eval_data["C"].append(args.frac)
        global_eval_data["Test Loss"].append(float(final_test_loss))
        global_eval_data["Test Accuracy"].append(float(final_test_acc))
        global_eval_data["Train Loss"].append(float(final_train_loss))
        global_eval_data["Train Accuracy"].append(float(final_train_acc))
        global_eval_data["Communication Cost"].append(sum(comm_cost_total))
        global_eval_data["Total Rounds"].append(args.epochs)

    return global_eval_data, global_data
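
A minimal driver for pi_pow_d, assuming the surrounding objects (net_glob, dict_users, labels_counter, args, cost, and the three loaders) are built as in the earlier examples. d interpolates between plain random selection (d = m) and greedy loss-based selection (d = num_users), and must satisfy the assert at the top of the function:

# Probe d candidates, then train the m = C*N of them with the highest
# estimated local loss (power-of-choice client selection).
m = max(1, int(args.frac * args.num_users))
d = min(args.num_users, 2 * m)  # assumed choice; any m <= d <= num_users works
eval_data, round_data = pi_pow_d(net_glob, dict_users, labels_counter,
                                 args, cost, train_loader, test_loader,
                                 shared_loader, d)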