Example #1
def average_weights(w, rm_dict, rv_dict, *args):
    """
    Returns the average of the weights.
    """
    
    if args:
        num_data_per_client = args[0] 
        rm, rv = average_bn_statistics_unbal(rm_dict, rv_dict, num_data_per_client) 
    else: 
        rm, rv = average_bn_statistics_bal(rm_dict, rv_dict)
    w_avg = copy.deepcopy(w[0])
    for key in w_avg.keys():
        for i in range(1, len(w)):
            w_avg[key] += w[i][key]
        w_avg[key] = torch.div(w_avg[key], len(w))


    args = args_parser()

    w_avg["layer1.1.running_mean"] = rm[0]
    w_avg["layer2.1.running_mean"] = rm[1]

    if args.norm == "BN":   
        w_avg["layer1.1.running_var"] = rv[0]
        w_avg["layer2.1.running_var"] = rv[1]
    elif args.norm == "BRN":
        w_avg["layer1.1.running_std"] = torch.sqrt(rv[0])
        w_avg["layer2.1.running_std"] = torch.sqrt(rv[1])

    '''
    w_avg["BNorm1.running_mean"] = rm[0]
    w_avg["BNorm2.running_mean"] = rm[1]

    if args.norm == "BN":   
        w_avg["BNorm1.running_var"] = rv[0]
        w_avg["BNorm2.running_var"] = rv[1]
    elif args.norm == "BRN":
        w_avg["BNorm1.running_std"] = torch.sqrt(rv[0])
        w_avg["BNorm2.running_std"] = torch.sqrt(rv[1])
    

    w_avg["normalize1.running_mean"] = rm[0]
    w_avg["normalize2.running_mean"] = rm[1]
    w_avg["normalize3.running_mean"] = rm[2]
    w_avg["normalize4.running_mean"] = rm[3]

    if args.norm == "BN":   
        w_avg["normalize1.running_var"] = rv[0]
        w_avg["normalize2.running_var"] = rv[1]
        w_avg["normalize3.running_var"] = rv[2]
        w_avg["normalize4.running_var"] = rv[3]
    elif args.norm == "BRN":
        w_avg["normalize1.running_std"] = rv[0]
        w_avg["normalize2.running_std"] = rv[1]
        w_avg["normalize3.running_std"] = rv[2]
        w_avg["normalize4.running_std"] = rv[3]

    '''

    return w_avg
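# A minimal, self-contained sketch (not from the original project) of the plain
# FedAvg averaging loop used above: an element-wise mean over a list of client
# state_dicts. The batch-norm statistics handling of average_weights is omitted here.
import copy
import torch

def fedavg_mean(state_dicts):
    """Return the element-wise average of a list of state_dicts."""
    avg = copy.deepcopy(state_dicts[0])
    for key in avg.keys():
        for sd in state_dicts[1:]:
            avg[key] += sd[key]
        avg[key] = torch.div(avg[key], len(state_dicts))
    return avg

# toy check with two fake "clients"
clients = [{"w": torch.ones(2, 2)}, {"w": torch.zeros(2, 2)}]
print(fedavg_mean(clients)["w"])  # every entry equals 0.5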
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        self._check_input_dim(x)
        if x.dim() > 2:
            x = x.transpose(1, -1)
        if self.training:
            dims = [i for i in range(x.dim() - 1)]
            n = x.numel() / x.size(1)
            batch_mean = x.mean(dims)
            batch_std = x.std(dims, unbiased=False) + self.eps
            r = (batch_std.detach() /
                 self.running_std.view_as(batch_std)).clamp_(
                     1 / self.rmax, self.rmax)
            d = (
                (batch_mean.detach() - self.running_mean.view_as(batch_mean)) /
                self.running_std.view_as(batch_std)).clamp_(
                    -self.dmax, self.dmax)
            x = (x - batch_mean) / batch_std * r + d

            self.num_batches_tracked += 1

            args = args_parser()
            if args.ma == "ema-brn":

                self.running_mean += self.momentum * (batch_mean.detach() -
                                                      self.running_mean)
                self.running_std += self.momentum * (batch_std.detach() -
                                                     self.running_std)

            elif args.ma == "cma":
                exponential_average_factor = 1.0 / float(
                    self.num_batches_tracked)

                self.running_mean = exponential_average_factor * batch_mean.detach() \
                                    + (1 - exponential_average_factor) * self.running_mean
                # update running_var with unbiased var
                self.running_std = exponential_average_factor * batch_std.detach() * n / (n - 1) \
                                + (1 - exponential_average_factor) * self.running_std

            elif args.ma == "ema":
                exponential_average_factor = self.momentum

                self.running_mean = exponential_average_factor * batch_mean.detach() \
                                    + (1 - exponential_average_factor) * self.running_mean
                # update running_var with unbiased var
                self.running_std = exponential_average_factor * batch_std.detach() * n / (n - 1) \
                                + (1 - exponential_average_factor) * self.running_std
            elif args.ma == 'sma':
                self.running_mean = 1 / 2 * (batch_mean.detach() +
                                             self.running_mean)
                self.running_std = 1 / 2 * (batch_std.detach() +
                                            self.running_std) + torch.pow(
                                                self.running_mean, 2)

        else:
            x = (x - self.running_mean) / self.running_std
        if self.affine:
            x = self.weight * x + self.bias
        if x.dim() > 2:
            x = x.transpose(1, -1)
        return x
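# Side note (an illustration, not part of the original module): the r / d terms in
# the forward pass above follow Batch Renormalization (Ioffe, 2017),
#   r = clip(batch_std / running_std, 1/rmax, rmax)
#   d = clip((batch_mean - running_mean) / running_std, -dmax, dmax),
# so that the training-time normalization matches the inference-time
# (x - running_mean) / running_std in expectation.
import torch

def renorm_correction(batch_mean, batch_std, running_mean, running_std,
                      rmax=3.0, dmax=5.0):
    r = (batch_std / running_std).clamp(1.0 / rmax, rmax)
    d = ((batch_mean - running_mean) / running_std).clamp(-dmax, dmax)
    return r, d

r, d = renorm_correction(torch.tensor([0.1]), torch.tensor([1.2]),
                         torch.tensor([0.0]), torch.tensor([1.0]))
print(r, d)  # tensor([1.2000]) tensor([0.1000])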
def main():
    # parse args
    args = args_parser()
    args.device = torch.device('cuda:{}'.format(args.gpu))

    #save loss and acc
    loss_path = './runs/'+args.save_path
    acc_path = './test_acc/'+args.save_path
    
    def gen_path(directory):
Example #4
def main(chafen_model, V1, V2):
    args = args_parser()
    device = "cuda"
    c = 0
    r = 0.5
    chafen = CNNCifar(args=args)
    V1.to(device)
    V2.to(device)
    chafen.to(device)

    V1_weight = V1.state_dict()
    V2_weight = V2.state_dict()
    chafen_model_weight = chafen_model.state_dict()
    chafen_weight = chafen.state_dict()

    chafen_weight[
        'conv1.weight'] = V2_weight['conv1.weight'] - V1_weight['conv1.weight']
    chafen_weight[
        'conv1.bias'] = V2_weight['conv1.bias'] - V1_weight['conv1.bias']
    chafen_weight[
        'conv2.weight'] = V2_weight['conv2.weight'] - V1_weight['conv2.weight']
    chafen_weight[
        'conv2.bias'] = V2_weight['conv2.bias'] - V1_weight['conv2.bias']

    chafen_weight[
        'fc1.weight'] = V2_weight['fc1.weight'] - V1_weight['fc1.weight']
    chafen_weight['fc1.bias'] = V2_weight['fc1.bias'] - V1_weight['fc1.bias']
    chafen_weight[
        'fc2.weight'] = V2_weight['fc2.weight'] - V1_weight['fc2.weight']
    chafen_weight['fc2.bias'] = V2_weight['fc2.bias'] - V1_weight['fc2.bias']
    chafen_weight[
        'fc3.weight'] = V2_weight['fc3.weight'] - V1_weight['fc3.weight']
    chafen_weight['fc3.bias'] = V2_weight['fc3.bias'] - V1_weight['fc3.bias']

    chafen.load_state_dict(chafen_weight)

    sign_sum = 0
    sign_size = 0

    for k in chafen_weight.keys():
        cur_sign = torch.sign(chafen_weight[k])
        old_sign = torch.sign(chafen_model_weight[k])
        sign = cur_sign * old_sign
        sign[sign < 0] = 0
        sign_sum += torch.sum(sign)
        sign_size += sign.numel()

    e = sign_sum / (sign_size + 0.000001)
    print(e)

    if e < r:
        V2.load_state_dict(V1_weight)
        c += 1
    return V2.state_dict(), c
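# A small, self-contained sketch (an assumption about intent, not the project's API):
# the loop above measures the fraction of parameters whose current delta (V2 - V1)
# keeps the same sign as the previous round's delta; when that fraction e drops below
# the threshold r, the new weights are rolled back to V1.
import torch

def sign_agreement(delta_new, delta_old):
    """Fraction of entries in two delta state_dicts that agree in sign."""
    agree, total = 0.0, 0
    for k in delta_new.keys():
        same = (torch.sign(delta_new[k]) * torch.sign(delta_old[k])) > 0
        agree += same.sum().item()
        total += same.numel()
    return agree / (total + 1e-6)

d_new = {"w": torch.tensor([1.0, -2.0, 3.0])}
d_old = {"w": torch.tensor([0.5, 2.0, 1.0])}
print(sign_agreement(d_new, d_old))  # 2 of 3 entries agree -> ~0.67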
    def forward(self, input):
        args = args_parser()

        self._check_input_dim(input)

        exponential_average_factor = 0.0

        if self.training and self.track_running_stats:
            if self.num_batches_tracked is not None:
                # print("================== TRACKED ===============", self.num_batches_tracked)
                self.num_batches_tracked += 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(
                        self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum

        # calculate running estimates
        if self.training:
            mean = input.mean([0, 2, 3])
            # use biased var in train
            var = input.var([0, 2, 3], unbiased=False)
            n = input.numel() / input.size(1)
            with torch.no_grad():
                if args.ma == 'sma':
                    self.running_mean = 1 / 2 * (mean + self.running_mean)
                    self.running_var = 1 / 2 * (var +
                                                self.running_var) + torch.pow(
                                                    self.running_mean, 2)
                else:
                    self.running_mean = exponential_average_factor * mean \
                                        + (1 - exponential_average_factor) * self.running_mean
                    # update running_var with unbiased var
                    self.running_var = exponential_average_factor * var * n / (n - 1) \
                                      + (1 - exponential_average_factor) * self.running_var

            mean = (1 - exponential_average_factor) * mean \
                                    +  exponential_average_factor * self.running_mean
            var = (1 - exponential_average_factor) * var \
                                   + exponential_average_factor * self.running_var
        else:
            mean = self.running_mean
            var = self.running_var

        input = (input - mean[None, :, None, None]) / (
            torch.sqrt(var[None, :, None, None] + self.eps))
        if self.affine:
            input = input * self.weight[None, :, None,
                                        None] + self.bias[None, :, None, None]

        # the old one need to be checked

        return input
def main():
    '''Load arguments'''
    args = args_parser()
    with open(args.conf, "r") as f:
        conf_dict = eval(f.read())
    '''Create save directories'''
    sub_path = os.path.join(args.dataset, 'iid' if args.iid else 'no_iid')
    save_dir_path = os.path.abspath(
        os.path.join(conf_dict["result_save_dir"], sub_path))
    model_saved_dir = os.path.abspath(
        os.path.join(conf_dict["model_saved_dir"], sub_path))

    os.makedirs(save_dir_path, exist_ok=True)
    os.makedirs(model_saved_dir, exist_ok=True)
    '''gpu'''
    # if args.gpu:
    #     torch.cuda.set_device(int(args.gpu))
    device = 'cuda' if args.gpu is not None else 'cpu'
    '''Load data'''
    pre_train_data, FeMD_data = getdataset(args, conf_dict)
    public_dataset, private_data, total_private_data, private_test_data = FeMD_data

    # Step 1: pre-training
    config_info = args, conf_dict, model_saved_dir, save_dir_path, device
    parties = pretrain(pre_train_data, config_info)
    '''Federated distillation learning'''
    fedmd = FedMD(parties,
                  public_dataset=public_dataset,
                  private_data=private_data,
                  total_private_data=total_private_data,
                  private_test_data=private_test_data,
                  FedMD_params=conf_dict['FedMD_params'],
                  model_init_params=conf_dict['model_init_params'],
                  calculate_theoretical_upper_bounds_params=conf_dict[
                      'calculate_theoretical_upper_bounds_params'],
                  device=device)
    '''Compute the theoretical upper bounds and initialize each client model'''
    initialization_result = fedmd.init_result
    pooled_train_result = fedmd.pooled_train_result
    '''Collaborative federated distillation training'''
    collaboration_performance = fedmd.collaborative_training()

    # Save the federated distillation results
    with open(os.path.join(save_dir_path, 'init_result.pkl'), 'wb') as f:
        pickle.dump(initialization_result, f, protocol=pickle.HIGHEST_PROTOCOL)
    with open(os.path.join(save_dir_path, 'pooled_train_result.pkl'),
              'wb') as f:
        pickle.dump(pooled_train_result, f, protocol=pickle.HIGHEST_PROTOCOL)
    with open(os.path.join(save_dir_path, 'col_performance.pkl'), 'wb') as f:
        pickle.dump(collaboration_performance,
                    f,
                    protocol=pickle.HIGHEST_PROTOCOL)
    def __init__(self,
                 num_features,
                 eps=1e-5,
                 affine=True,
                 track_running_stats=True):

        args = args_parser()
        if args.ma == 'cma':
            momentum = 0.1
        else:
            momentum = None

        super(MyBatchNorm2d, self).__init__(num_features, eps, momentum,
                                            affine, track_running_stats)
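# Reference aside (not from the original project): in torch.nn.BatchNorm2d, passing
# momentum=None switches the running statistics to a cumulative moving average with
# factor 1 / num_batches_tracked, the same rule the custom forward above applies when
# self.momentum is None.
import torch
import torch.nn as nn

bn_ema = nn.BatchNorm2d(8, momentum=0.1)   # exponential moving average of stats
bn_cma = nn.BatchNorm2d(8, momentum=None)  # cumulative moving average of stats
x = torch.randn(4, 8, 16, 16)
bn_ema(x)
bn_cma(x)
print(bn_ema.running_mean[:3], bn_cma.running_mean[:3])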
Example #8
def main(V1,V2,testloader):
    args = args_parser()
    device="cuda"
    r=0.27

    chafen=CNNCifar(args=args)
    V1.to(device)
    V2.to(device)
    chafen.to(device)

    V1_weight=V1.state_dict()
    V2_weight=V2.state_dict()

    chafen_weight=chafen.state_dict()

    i=0
  
    for k in chafen_weight.keys():
        chafen_weight[k]=V2_weight[k]-V1_weight[k]
        a=chafen_weight[k].cpu().numpy()
        shape=a.shape
        a=a.flatten()
        a=my_mask(a,r)
        a=a.reshape(shape)
        chafen_weight[k]=torch.from_numpy(a)
        chafen_weight[k].to(device)
        i+=1
    chafen.load_state_dict(chafen_weight)

    my_sum1=0
    for k in chafen_weight.keys():
        a=chafen_weight[k].cpu().numpy()
        a=a.flatten()
        b=a.tolist()
        my_sum1+=b.count(0.0)
    print(my_sum1)


    

    for k in chafen_weight.keys():
        V2_weight[k]=chafen_weight[k].to(device)+V1_weight[k].to(device)
    V2.load_state_dict(V2_weight)


    return V2.state_dict(),my_sum1
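# Hedged sketch (my_mask is referenced above but not shown in this excerpt): a
# plausible implementation zeroes the fraction r of entries with the smallest
# magnitude, which is consistent with the zero-counting that follows it.
import numpy as np

def my_mask_sketch(a, r):
    """Zero the r-fraction of smallest-magnitude entries of a 1-D array."""
    k = int(len(a) * r)
    if k == 0:
        return a
    thresh = np.sort(np.abs(a))[k - 1]
    out = a.copy()
    out[np.abs(out) <= thresh] = 0.0
    return out

print(my_mask_sketch(np.array([0.1, -0.5, 0.02, 0.9]), 0.5))  # [0. -0.5 0. 0.9]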
Example #9
import torch
from tensorboardX import SummaryWriter

from options import args_parser
from update import LocalUpdate, test_inference
from models import MLP, CNNMnist, CNNFashion_Mnist, CNNCifar
from utils import get_dataset, average_weights, exp_details

if __name__ == '__main__':
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()  # read all hyperparameters from options.py
    exp_details(args)  # display all hyperparameter details

    # select CPU/GPU
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
    device = 'cuda' if args.gpu is not None else 'cpu'

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
Example #10
def main():
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')
    args = args_parser()
    args = adatok.arguments(args)
    exp_details(args)
    if args.gpu:
        torch.cuda.set_device(args.gpu)
    device = 'cuda' if args.gpu else 'cpu'

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    if adatok.data.image_initialization == True:
        adatok.data.image_initialization = False
        return

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
            global_model = MLP(dim_in=len_in,
                               dim_hidden=64,
                               dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    #print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # Training
    train_loss, train_accuracy = [], []
    val_acc_list, net_list = [], []
    cv_loss, cv_acc = [], []
    print_every = 2
    val_loss_pre, counter = 0, 0

    for epoch in tqdm(range(args.epochs)):
        local_weights, local_losses = [], []
        #print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        for idx in idxs_users:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            w, loss = local_model.update_weights(
                model=copy.deepcopy(global_model), global_round=epoch)
            local_weights.append(copy.deepcopy(w))
            local_losses.append(copy.deepcopy(loss))

        # update global weights
        global_weights = average_weights(local_weights)

        # update global weights
        global_model.load_state_dict(global_weights)

        loss_avg = sum(local_losses) / len(local_losses)
        train_loss.append(loss_avg)

        # Calculate avg training accuracy over all users at every epoch
        list_acc, list_loss = [], []
        global_model.eval()
        for c in range(args.num_users):
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            acc, loss = local_model.inference(model=global_model)
            list_acc.append(acc)
            list_loss.append(loss)
        train_accuracy.append(sum(list_acc) / len(list_acc))

        # print global training loss after every 'i' rounds
        '''if (epoch+1) % print_every == 0:
            print(f' \nAvg Training Stats after {epoch+1} global rounds:')
            print(f'Training Loss : {np.mean(np.array(train_loss))}')
            print('Train Accuracy: {:.2f}% \n'.format(100*train_accuracy[-1]))'''

        # Test inference after completion of training
        for i in adatok.data.test_groups_in_binary:
            adatok.data.actual_test_group_in_binary = i
            test_acc, test_loss = test_inference(args, global_model,
                                                 test_dataset)
            print("Resoults")
            print(epoch)
            print(adatok.data.actual_train_group_in_binary)
            print(adatok.data.actual_test_group_in_binary)
            print(test_acc)
            print(test_loss)
    '''
Example #11
def poisoned_pixel_CDP(norm_bound, noise_scale, nb_attackers, seed=1):
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
            global_model = MLP(dim_in=len_in,
                               dim_hidden=64,
                               dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # load poisoned model
    backdoor_model = copy.deepcopy(global_model)
    backdoor_model.load_state_dict(torch.load('../save/poison_model.pth'))

    # testing accuracy for global model
    testing_accuracy = [0.1]
    backdoor_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_del_w, local_norms = [], []
        print(f'\n | Global Training Round : {epoch + 1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Adversary updates
        print("Evil")
        for idx in idxs_users[0:nb_attackers]:

            # backdoor model
            w = copy.deepcopy(backdoor_model)

            # compute change in parameters and norm
            zeta = 0
            for del_w, w_old in zip(w.parameters(), global_model.parameters()):
                del_w.data = del_w.data - copy.deepcopy(w_old.data)
                zeta += del_w.norm(2).item()**2
            zeta = zeta**(1. / 2)
            del_w = w.state_dict()

            print("EVIL")
            print(zeta)

            # add to global round
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))

        # Non-adversarial updates
        for idx in idxs_users[nb_attackers:]:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            del_w, zeta = local_model.update_weights(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))
            print("good")
            #print(zeta)

        # norm bound (e.g. median of norms)
        clip_factor = norm_bound  #min(norm_bound, np.median(local_norms))
        print(clip_factor)

        # clip updates
        for i in range(len(idxs_users)):
            for param in local_del_w[i].values():
                print(max(1, local_norms[i] / clip_factor))
                param /= max(1, local_norms[i] / clip_factor)

        # average local model updates
        average_del_w = average_weights(local_del_w)

        # Update model and add noise
        # w_{t+1} = w_{t} + avg(del_w1 + del_w2 + ... + del_wc) + Noise
        for param, param_del_w in zip(global_weights.values(),
                                      average_del_w.values()):
            param += param_del_w
            param += torch.randn(
                param.size()) * noise_scale * norm_bound / len(idxs_users)
        global_model.load_state_dict(global_weights)

        # test accuracy
        test_acc, test_loss, backdoor = test_backdoor_pixel(
            args, global_model, test_dataset)
        testing_accuracy.append(test_acc)
        backdoor_accuracy.append(backdoor)

        print("Testing & Backdoor accuracies")
        print(testing_accuracy)
        print(backdoor_accuracy)

    # save test accuracy
    np.savetxt(
        '../save/PixelAttack/TestAcc/iid_GDP_{}_{}_clip{}_scale{}_attackers{}_seed{}.txt'
        .format(args.dataset, args.model, norm_bound, noise_scale,
                nb_attackers, seed), testing_accuracy)

    np.savetxt(
        '../save/PixelAttack/BackdoorAcc/iid_GDP_{}_{}_clip{}_scale{}_attackers{}_seed{}.txt'
        .format(args.dataset, args.model, norm_bound, noise_scale,
                nb_attackers, seed), backdoor_accuracy)
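# A condensed, self-contained sketch (an assumption, not the project's API) of the
# clip-then-noise aggregation performed in the loop above: each client update is
# scaled down so its norm is at most norm_bound, the clipped updates are averaged,
# and Gaussian noise with std noise_scale * norm_bound / num_clients is added.
import torch

def clip_and_noise(updates, norms, norm_bound, noise_scale):
    clipped = [{k: v / max(1.0, n / norm_bound) for k, v in u.items()}
               for u, n in zip(updates, norms)]
    avg = {k: sum(c[k] for c in clipped) / len(clipped) for k in clipped[0]}
    return {k: v + torch.randn_like(v) * noise_scale * norm_bound / len(updates)
            for k, v in avg.items()}

u1 = {"w": torch.tensor([3.0, 4.0])}   # norm 5
u2 = {"w": torch.tensor([0.3, 0.4])}   # norm 0.5
print(clip_and_noise([u1, u2], [5.0, 0.5], norm_bound=1.0, noise_scale=0.0))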
Example #12
def poisoned_pixel_LDP(norm_bound, noise_scale, nb_attackers, seed=1):
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
            global_model = MLP(dim_in=len_in,
                               dim_hidden=64,
                               dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # testing accuracy for global model
    testing_accuracy = [0.1]
    backdoor_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_w, local_norms = [], []
        print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Poisonous updates
        for idx in idxs_users[0:nb_attackers]:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            w, _ = local_model.pixel_ldp(model=copy.deepcopy(global_model),
                                         norm_bound=norm_bound,
                                         noise_scale=noise_scale)
            local_w.append(copy.deepcopy(w))

        # Regular updates
        for idx in idxs_users[nb_attackers:]:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            w, _ = local_model.dp_sgd(model=copy.deepcopy(global_model),
                                      norm_bound=norm_bound,
                                      noise_scale=noise_scale)
            local_w.append(copy.deepcopy(w))

        # update global weights
        global_weights = average_weights(local_w)
        global_model.load_state_dict(global_weights)

        # test accuracy
        test_acc, test_loss, backdoor = test_backdoor_pixel(
            args, global_model, test_dataset)
        testing_accuracy.append(test_acc)
        backdoor_accuracy.append(backdoor)

        print("Testing & Backdoor accuracies")
        print(testing_accuracy)
        print(backdoor_accuracy)

    # save test accuracy
    np.savetxt(
        '../save/PixelAttack/TestAcc/LDP_iid_{}_{}_clip{}_scale{}_attackers{}_seed{}.txt'
        .format(args.dataset, args.model, norm_bound, noise_scale,
                nb_attackers, seed), testing_accuracy)

    np.savetxt(
        '../save/PixelAttack/BackdoorAcc/LDP_iid_{}_{}_clip{}_scale{}_attackers{}_seed{}.txt'
        .format(args.dataset, args.model, norm_bound, noise_scale,
                nb_attackers, seed), backdoor_accuracy)
def poisoned_1to7_NoDefense(seed=1):
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
            global_model = MLP(dim_in=len_in,
                               dim_hidden=64,
                               dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # testing accuracy for global model
    testing_accuracy = [0.1]
    backdoor_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_del_w, local_norms = [], []
        print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Adversary updates
        print("Evil norms:")
        for idx in idxs_users[0:args.nb_attackers]:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)

            del_w, zeta = local_model.poisoned_1to7(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))
            print(zeta)

        # Non-adversarial updates
        print("Good norms:")
        for idx in idxs_users[args.nb_attackers:]:
            print(idx)
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)

            del_w, zeta = local_model.update_weights(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))
            print(zeta)

        # average local updates
        average_del_w = average_weights(local_del_w)

        # Update global model: w_{t+1} = w_{t} + average_del_w
        for param, param_del_w in zip(global_weights.values(),
                                      average_del_w.values()):
            param += param_del_w
        global_model.load_state_dict(global_weights)

        # test accuracy, backdoor accuracy
        test_acc, test_loss, back_acc = test_inference1to7(
            args, global_model, test_dataset)
        testing_accuracy.append(test_acc)
        backdoor_accuracy.append(back_acc)

        print("Test & Backdoor accuracy")
        print(testing_accuracy)
        print(backdoor_accuracy)

    # save accuracy
    np.savetxt(
        '../save/1to7Attack/TestAcc/NoDefense_{}_{}_seed{}.txt'.format(
            args.dataset, args.model, seed), testing_accuracy)

    np.savetxt(
        '../save/1to7Attack/BackAcc/NoDefense_{}_{}_seed{}.txt'.format(
            args.dataset, args.model, seed), backdoor_accuracy)
def poisoned_random_CDP(seed=1):
    # Central DP to protect against attackers

    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
            global_model = MLP(dim_in=len_in,
                               dim_hidden=64,
                               dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # testing accuracy for global model
    testing_accuracy = [0.1]
    backdoor_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_del_w, local_norms = [], []
        print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Adversaries' update
        for idx in idxs_users[0:args.nb_attackers]:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            del_w, zeta = local_model.poisoned_1to7(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))

        # Non-adversary updates
        for idx in idxs_users[args.nb_attackers:]:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            del_w, zeta = local_model.update_weights(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))

        # norm bound (e.g. median of norms)
        median_norms = args.norm_bound  #np.median(local_norms)  #args.norm_bound
        print(median_norms)

        # clip weight updates
        for i in range(len(idxs_users)):
            for param in local_del_w[i].values():
                param /= max(1, local_norms[i] / median_norms)

        # average the clipped weight updates
        average_del_w = average_weights(local_del_w)

        # Update global model using clipped weight updates, and add noise
        # w_{t+1} = w_{t} + avg(del_w1 + del_w2 + ... + del_wc) + Noise
        for param, param_del_w in zip(global_weights.values(),
                                      average_del_w.values()):
            param += param_del_w
            param += torch.randn(
                param.size()) * args.noise_scale * median_norms / (
                    len(idxs_users)**0.5)
        global_model.load_state_dict(global_weights)

        # test accuracy
        test_acc, test_loss = test_inference(args, global_model, test_dataset)
        testing_accuracy.append(test_acc)

        print("Test accuracy")
        print(testing_accuracy)

    # save test accuracy
    np.savetxt(
        '../save/1to7Attack/GDP_{}_{}_seed{}_clip{}_scale{}.txt'.format(
            args.dataset, args.model, seed, args.norm_bound, args.noise_scale),
        testing_accuracy)
def main():
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    if args.gpu:
        torch.cuda.set_device(0)
    device = 'cuda' if args.gpu else 'cpu'

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    args.num_users = len(user_groups)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
            global_model = MLP(dim_in=len_in,
                               dim_hidden=64,
                               dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()

    # copy weights
    global_weights = global_model.state_dict()

    # Training
    train_loss, train_accuracy = [], []
    val_acc_list, net_list = [], []
    cv_loss, cv_acc = [], []
    print_every = 2
    val_loss_pre, counter = 0, 0

    # Read which participants currently take part in the training (0 means included, 1 means not)
    users = []
    fp = open('users.txt', "r")
    x = fp.readline().split(' ')
    for i in x:
        if i != '':
            users.append(int(i))
    fp.close()

    #for epoch in tqdm(range(args.epochs)):
    for epoch in range(args.epochs):
        local_weights, local_losses = [], []
        #print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        for idx in idxs_users:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            w, loss = local_model.update_weights(
                model=copy.deepcopy(global_model), global_round=epoch)
            local_weights.append(copy.deepcopy(w))
            local_losses.append(copy.deepcopy(loss))

        global_weights = average_weights(local_weights)

        # update global weights
        global_model.load_state_dict(global_weights)

        loss_avg = sum(local_losses) / len(local_losses)
        train_loss.append(loss_avg)

        # Calculate avg training accuracy over all users at every epoch
        list_acc, list_loss = [], []
        global_model.eval()
        for c in range(args.num_users):
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            acc, loss = local_model.inference(model=global_model)
            list_acc.append(acc)
            list_loss.append(loss)
        train_accuracy.append(sum(list_acc) / len(list_acc))

        # print global training loss after every 'i' rounds
        '''if (epoch+1) % print_every == 0:
            print(f' \nAvg Training Stats after {epoch+1} global rounds:')
            print(f'Training Loss : {np.mean(np.array(train_loss))}')
            print('Train Accuracy: {:.2f}% \n'.format(100*train_accuracy[-1]))'''

    # Test inference after completion of training

    # Read which labels were assigned to each participant.
    ftrain = open('traindataset.txt')
    testlabels = []
    line = ftrain.readline()
    while line != "":
        sor = line.split(' ')
        array = []
        for i in sor:
            array.append(int(i))
        testlabels.append(array)
        line = ftrain.readline()
    ftrain.close()

    print("USERS LABELS")
    print(testlabels)

    # Run the test for every possible coalition
    for j in range((2**args.num_users) - 1):
        binary = numberToBinary(j, len(users))

        test_acc, test_loss = test_inference(args, global_model, test_dataset,
                                             testlabels, binary, len(binary))

        # Print the test results
        print("PARTICIPANTS")
        print(users)
        print("TEST NUMBER")
        print(j)
        print("TEST BINARY")
        print(binary)
        print("TEST LABELS")
        print(testlabels)
        print("Test Accuracy")
        print("{:.2f}%".format(100 * test_acc))
        print()

    # Saving the objects train_loss and train_accuracy:
    '''file_name = '../save/objects/{}_{}_{}_C[{}]_iid[{}]_E[{}]_B[{}].pkl'.\
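# Hedged sketch (numberToBinary is used in the coalition loop above but not shown in
# this excerpt): a plausible version returns the length-n binary expansion of j as a
# list of 0/1 flags marking which participants belong to the coalition.
def number_to_binary_sketch(j, n):
    return [int(bit) for bit in format(j, '0{}b'.format(n))]

print(number_to_binary_sketch(5, 4))  # [0, 1, 0, 1]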
Example #16
def main(r, flag, x, global_models, chafen_global_model, V1, V2, trainloader,
         chafen_local_weights, testloader):
    args = args_parser()
    device = "cuda"
    c1 = 0
    c2 = 0
    my_sum1 = 0
    acc1, acc2 = 0, 0
    r1 = 0.7
    chafen = CNNCifar(args=args)
    chafen_local_model = CNNCifar(args=args)
    V1.to(device)
    V2.to(device)
    chafen.to(device)
    chafen_local_model.to(device)
    chafen_local_model.load_state_dict(chafen_local_weights)

    V1_weight = V1.state_dict()
    V2_weight = V2.state_dict()
    chafen_global_model_weight = chafen_global_model.state_dict()
    chafen_weight = chafen.state_dict()
    new_model = CNNCifar1(args=args)
    new_model.to(device)
    new_weight = new_model.state_dict()

    for k in chafen_weight.keys():
        chafen_weight[k] = V2_weight[k] - V1_weight[k]
    chafen.load_state_dict(chafen_weight)

    e1 = relevant1(chafen, chafen_global_model)
    e2 = redundancy1(chafen, chafen_local_model)
    print('before compression', e1)
    print('before compression', e2)

    if x < 10:
        if e1.item() < r:
            print("相关性不够")
            V2.load_state_dict(V1_weight)
            c1 += 1
            flag = 1
            return V2.state_dict(
            ), chafen_local_weights, c1, c2, my_sum1, acc1, acc2, flag
        else:
            print("相关性够")
            flag = 0
            V2.load_state_dict(V2_weight)
            return V2.state_dict(), chafen.state_dict(
            ), c1, c2, my_sum1, acc1, acc2, flag

    else:
        my_sum = 0
        my_size = 0
        for i in range(1, 10):
            for k in global_models[0].keys():
                a = global_models[x - i][k] - global_models[x - i - 1][k]
                my_sum += torch.sum(a * a)
                my_size += a.numel()
        e = my_sum / (my_size + 0.00000001)
        print(e.item())
        if e1.item() < r:
            print("相关性不够")
            V2.load_state_dict(V1_weight)
            c1 += 1
            flag = 1
            return V2.state_dict(
            ), chafen_local_weights, c1, c2, my_sum1, acc1, acc2, flag
        elif e2.item() < e.item():
            print("差异性不够")
            chafen_weight = chafen_local_weights
            for k in V2_weight.keys():
                V2_weight[k] = V1_weight[k] + chafen_weight[k]
            V2.load_state_dict(V2_weight)
            c2 += 1
            flag = 0
            return V2.state_dict(
            ), chafen_local_weights, c1, c2, my_sum1, acc1, acc2, flag
        else:
            print("都满足,压缩")
            acc1 = inference(V2, testloader)
            print('accuracy after update')
            print(acc1)

            chafen_weight['conv1.weight'] = V2_weight[
                'conv1.weight'] - V1_weight['conv1.weight']
            c1w = chafen_weight['conv1.weight'].view(450)
            c1w = c1w.cpu().numpy()
            c1w = np.abs(c1w)
            c1w = np.sort(c1w)
            a = c1w[int(len(c1w) * r1)]
            chafen_weight['conv1.weight'] = V2_weight[
                'conv1.weight'] - V1_weight['conv1.weight']
            c1w = chafen_weight['conv1.weight'].view(450)
            c1w = c1w.cpu().numpy()
            c1w = cut(c1w, a)
            chafen_weight['conv1.weight'] = torch.from_numpy(c1w)
            chafen_weight['conv1.weight'] = chafen_weight['conv1.weight'].view(
                6, 3, 5, 5)

            chafen_weight['conv1.bias'] = V2_weight['conv1.bias'] - V1_weight[
                'conv1.bias']
            c1b = chafen_weight['conv1.bias'].cpu().numpy()
            c1b = np.abs(c1b)
            c1b = np.sort(c1b)
            a = c1b[int(len(c1b) * r1) - 1]
            c1b = chafen_weight['conv1.bias'].cpu().numpy()
            c1b = cut(c1b, a)
            chafen_weight['conv1.bias'] = torch.from_numpy(c1b)

            chafen_weight['conv2.weight'] = V2_weight[
                'conv2.weight'] - V1_weight['conv2.weight']
            c2w = chafen_weight['conv2.weight'].view(2400)
            c2w = c2w.cpu().numpy()
            c2w = np.abs(c2w)
            c2w = np.sort(c2w)
            a = c2w[int(len(c2w) * r1)]
            chafen_weight['conv2.weight'] = V2_weight[
                'conv2.weight'] - V1_weight['conv2.weight']
            c2w = chafen_weight['conv2.weight'].view(2400)
            c2w = c2w.cpu().numpy()
            c2w = cut(c2w, a)
            chafen_weight['conv2.weight'] = torch.from_numpy(c2w)
            chafen_weight['conv2.weight'] = chafen_weight['conv2.weight'].view(
                16, 6, 5, 5)

            chafen_weight['conv2.bias'] = V2_weight['conv2.bias'] - V1_weight[
                'conv2.bias']
            c2b = chafen_weight['conv2.bias'].cpu().numpy()
            c2b = np.abs(c2b)
            c2b = np.sort(c2b)
            a = c2b[int(len(c2b) * r1) - 1]
            c2b = chafen_weight['conv2.bias'].cpu().numpy()
            c2b = cut(c2b, a)
            chafen_weight['conv2.bias'] = torch.from_numpy(c2b)

            chafen_weight['fc1.weight'] = V2_weight['fc1.weight'] - V1_weight[
                'fc1.weight']
            fc1w = chafen_weight['fc1.weight'].view(48000)
            fc1w = fc1w.cpu().numpy()
            fc1w = np.abs(fc1w)
            fc1w = np.sort(fc1w)
            a = fc1w[int(len(fc1w) * r1)]
            chafen_weight['fc1.weight'] = V2_weight['fc1.weight'] - V1_weight[
                'fc1.weight']
            fc1w = chafen_weight['fc1.weight'].view(48000)
            fc1w = fc1w.cpu().numpy()
            fc1w = cut(fc1w, a)
            chafen_weight['fc1.weight'] = torch.from_numpy(fc1w)
            chafen_weight['fc1.weight'] = chafen_weight['fc1.weight'].view(
                120, 400)

            chafen_weight[
                'fc1.bias'] = V2_weight['fc1.bias'] - V1_weight['fc1.bias']
            fc1b = chafen_weight['fc1.bias'].cpu().numpy()
            fc1b = np.abs(fc1b)
            fc1b = np.sort(fc1b)
            a = fc1b[int(len(fc1b) * r1) - 1]
            fc1b = chafen_weight['fc1.bias'].cpu().numpy()
            fc1b = cut(fc1b, a)
            chafen_weight['fc1.bias'] = torch.from_numpy(fc1b)

            chafen_weight['fc2.weight'] = V2_weight['fc2.weight'] - V1_weight[
                'fc2.weight']
            fc2w = chafen_weight['fc2.weight'].view(10080)
            fc2w = fc2w.cpu().numpy()
            fc2w = np.abs(fc2w)
            fc2w = np.sort(fc2w)
            a = fc2w[int(len(fc2w) * r1)]
            chafen_weight['fc2.weight'] = V2_weight['fc2.weight'] - V1_weight[
                'fc2.weight']
            fc2w = chafen_weight['fc2.weight'].view(10080)
            fc2w = fc2w.cpu().numpy()
            fc2w = cut(fc2w, a)
            chafen_weight['fc2.weight'] = torch.from_numpy(fc2w)
            chafen_weight['fc2.weight'] = chafen_weight['fc2.weight'].view(
                84, 120)

            chafen_weight[
                'fc2.bias'] = V2_weight['fc2.bias'] - V1_weight['fc2.bias']
            fc2b = chafen_weight['fc2.bias'].cpu().numpy()
            fc2b = np.abs(fc2b)
            fc2b = np.sort(fc2b)
            a = fc2b[int(len(fc2b) * r1) - 1]
            fc2b = chafen_weight['fc2.bias'].cpu().numpy()
            fc2b = cut(fc2b, a)
            chafen_weight['fc2.bias'] = torch.from_numpy(fc2b)

            chafen_weight['fc3.weight'] = V2_weight['fc3.weight'] - V1_weight[
                'fc3.weight']
            fc3w = chafen_weight['fc3.weight'].view(840)
            fc3w = fc3w.cpu().numpy()
            fc3w = np.abs(fc3w)
            fc3w = np.sort(fc3w)
            a = fc3w[int(len(fc3w) * r1)]
            chafen_weight['fc3.weight'] = V2_weight['fc3.weight'] - V1_weight[
                'fc3.weight']
            fc3w = chafen_weight['fc3.weight'].view(840)
            fc3w = fc3w.cpu().numpy()
            fc3w = cut(fc3w, a)
            chafen_weight['fc3.weight'] = torch.from_numpy(fc3w)
            chafen_weight['fc3.weight'] = chafen_weight['fc3.weight'].view(
                10, 84)

            chafen_weight[
                'fc3.bias'] = V2_weight['fc3.bias'] - V1_weight['fc3.bias']
            fc3b = chafen_weight['fc3.bias'].cpu().numpy()
            fc3b = np.abs(fc3b)
            fc3b = np.sort(fc3b)
            a = fc3b[int(len(fc3b) * r1) - 1]
            fc3b = chafen_weight['fc3.bias'].cpu().numpy()
            fc3b = cut(fc3b, a)
            chafen_weight['fc3.bias'] = torch.from_numpy(fc3b)

            chafen.load_state_dict(chafen_weight)
            new_e1 = relevant1(chafen, chafen_global_model)
            new_e2 = redundancy1(chafen, chafen_local_model)
            print('after pruning', new_e1)
            print('after pruning', new_e2)

            new_weight['conv11.weight'] = V1_weight['conv1.weight']
            new_weight['conv12.weight'] = chafen_weight['conv1.weight']
            new_weight['conv11.bias'] = V1_weight['conv1.bias']
            new_weight['conv12.bias'] = chafen_weight['conv1.bias']

            new_weight['conv21.weight'] = V1_weight['conv2.weight']
            new_weight['conv22.weight'] = chafen_weight['conv2.weight']
            new_weight['conv21.bias'] = V1_weight['conv2.bias']
            new_weight['conv22.bias'] = chafen_weight['conv2.bias']

            new_weight['fc11.weight'] = V1_weight['fc1.weight']
            new_weight['fc12.weight'] = chafen_weight['fc1.weight']
            new_weight['fc11.bias'] = V1_weight['fc1.bias']
            new_weight['fc12.bias'] = chafen_weight['fc1.bias']

            new_weight['fc21.weight'] = V1_weight['fc2.weight']
            new_weight['fc22.weight'] = chafen_weight['fc2.weight']
            new_weight['fc21.bias'] = V1_weight['fc2.bias']
            new_weight['fc22.bias'] = chafen_weight['fc2.bias']

            new_weight['fc31.weight'] = V1_weight['fc3.weight']
            new_weight['fc32.weight'] = chafen_weight['fc3.weight']
            new_weight['fc31.bias'] = V1_weight['fc3.bias']
            new_weight['fc32.bias'] = chafen_weight['fc3.bias']

            new_model.load_state_dict(new_weight)

            m = [
                'conv11.weight', 'conv21.weight', 'fc11.weight', 'fc21.weight',
                'fc31.weight', 'conv11.bias', 'conv21.bias', 'fc11.bias',
                'fc21.bias', 'fc31.bias'
            ]
            for name1, param1 in new_model.named_parameters():
                if name1 in m:
                    param1.requires_grad = False
                else:
                    param1.requires_grad = True

            for name, param in V2.named_parameters():
                param.requires_grad = False
            for name, param in V1.named_parameters():
                param.requires_grad = False
            for name, param in chafen_global_model.named_parameters():
                param.requires_grad = False
            for name, param in chafen_local_model.named_parameters():
                param.requires_grad = False

            optimizer = torch.optim.Adam(new_model.parameters(),
                                         lr=0.0001,
                                         weight_decay=1e-4)
            criterion1 = torch.nn.NLLLoss().to(device)
            criterion2 = torch.nn.KLDivLoss().to(device)
            epoch_loss = []
            EPS = 1e-8
            locked_masks = {
                n: torch.abs(w) < EPS
                for n, w in new_model.named_parameters()
                if n.endswith('weight')
            }
            chafen_global_model_weight.requires_grad = False
            chafen_local_weights.requires_grad = False

            for epoch in tqdm(range(50)):
                batch_loss = []
                batch_loss1 = []
                batch_loss2 = []
                batch_loss3 = []
                batch_loss4 = []
                for batch_idx, (images, labels) in enumerate(trainloader):

                    my_e1, my_size = 0.0, 0.0
                    images, labels = images.to(device), labels.to(device)
                    optimizer.zero_grad()
                    outputs = new_model(images)
                    loss1 = criterion1(outputs, labels)
                    x = new_model.f(images)
                    y = V2.f(images)
                    z = new_model.f1(images)
                    loss2 = 0.8 * mmd_rbf(x, y) + 0.2 * mmd_rbf(x, z)
                    loss = 0.7 * loss2 + 0.3 * loss1
                    loss.backward()
                    for n, w in new_model.named_parameters():
                        if w.grad is not None and n in locked_masks:
                            w.grad[locked_masks[n]] = 0
                    optimizer.step()
                    if batch_idx % 50 == 0:
                        print(
                            'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
                            format(epoch + 1, batch_idx * len(images),
                                   len(trainloader.dataset),
                                   100. * batch_idx / len(trainloader),
                                   loss.item()))
                    batch_loss.append(loss.item())
                    batch_loss1.append(loss1.item())
                    batch_loss2.append(loss2.item())

            loss_avg = sum(batch_loss) / len(batch_loss)
            loss_avg1 = sum(batch_loss1) / len(batch_loss1)
            loss_avg2 = sum(batch_loss2) / len(batch_loss2)

            print('\nTrain loss:', loss_avg)
            print('\nTrain loss1:', loss_avg1)
            print('\nTrain loss2:', loss_avg2)

            new_weight = new_model.state_dict()

            chafen_weight['conv1.weight'] = new_weight['conv12.weight']
            chafen_weight['conv1.bias'] = new_weight['conv12.bias']
            chafen_weight['conv2.weight'] = new_weight['conv22.weight']
            chafen_weight['conv2.bias'] = new_weight['conv22.bias']

            chafen_weight['fc1.weight'] = new_weight['fc12.weight']
            chafen_weight['fc1.bias'] = new_weight['fc12.bias']
            chafen_weight['fc2.weight'] = new_weight['fc22.weight']
            chafen_weight['fc2.bias'] = new_weight['fc22.bias']
            chafen_weight['fc3.weight'] = new_weight['fc32.weight']
            chafen_weight['fc3.bias'] = new_weight['fc32.bias']
            chafen.load_state_dict(chafen_weight)

            e1 = relevant1(chafen, chafen_global_model)
            e2 = redundancy1(chafen, chafen_local_model)
            print("retrain后 ", e1)
            print("retrain后 ", e2)

            # count the zero entries

            for k in chafen_weight.keys():
                a = chafen_weight[k].cpu().numpy()
                a = a.flatten()
                b = a.tolist()
                my_sum1 += b.count(0.0)
            print(my_sum1)

            for k in V2_weight.keys():
                V2_weight[k] = chafen_weight[k] + V1_weight[k]
            V2.load_state_dict(V2_weight)

            acc2 = inference(V2, testloader)
            print('accuracy after recovery')
            print(acc2)

            flag = 0

            for name, param in V2.named_parameters():
                param.requires_grad = True
            for name, param in V1.named_parameters():
                param.requires_grad = True
            for name, param in chafen_global_model.named_parameters():
                param.requires_grad = True
            for name, param in chafen_local_model.named_parameters():
                param.requires_grad = True

            return V2.state_dict(), chafen.state_dict(
            ), c1, c2, my_sum1, acc1, acc2, flag
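# Hedged sketch (cut is used throughout the pruning code above but not shown in this
# excerpt): a plausible version zeroes every entry whose magnitude is at or below the
# threshold a, keeping only the largest-magnitude part of the delta.
import numpy as np

def cut_sketch(arr, a):
    out = arr.copy()
    out[np.abs(out) <= a] = 0.0
    return out

print(cut_sketch(np.array([0.01, -0.2, 0.05, 0.7]), 0.05))  # [0. -0.2 0. 0.7]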
Example #17
def main():
    """
    For testing this part
    --dataset: cifar-10
    --model: cnn_tutorial
    --lr  = 0.001
    --momentum = 0.9
    cpu only!
    check(14th/July/2019)
    :return:
    """
    args = args_parser()
    device = 'cpu'
    # build dataset for testing
    model = initialize_model(args, device)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    parent_dir = dirname(dirname(abspath(__file__)))
    data_path = join(parent_dir, 'data', 'cifar10')
    trainset = torchvision.datasets.CIFAR10(root=data_path,
                                            train=True,
                                            download=True,
                                            transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=128,
                                              shuffle=True,
                                              num_workers=2)

    testset = torchvision.datasets.CIFAR10(root=data_path,
                                           train=False,
                                           download=True,
                                           transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=100,
                                             shuffle=False,
                                             num_workers=2)
    for epoch in tqdm(range(350)):  # loop over the dataset multiple times
        model.step_lr_scheduler(epoch)
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data
            inputs = Variable(inputs).to(device)
            labels = Variable(labels).to(device)
            loss = model.optimize_model(input_batch=inputs, label_batch=labels)

            # print statistics
            running_loss += loss
            if i % 2000 == 1999:  # print every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0

    print('Finished Training')
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            outputs = model.test_model(input_batch=images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('Accuracy of the network on the 10000 test images: %d %%' %
          (100 * correct / total))
Exemple #18
0
def main(loc_ep, weighted, alg):
    # parse args
    args = args_parser()
    algo_list = ['fedavg', 'fedprox', 'fsvgr']
    # define paths
    path_project = os.path.abspath('..')

    summary = SummaryWriter('local')
    args.gpu = 0  # -1 (CPU only) or GPU = 0
    args.lr = 0.002  # 0.001 for cifar dataset
    args.model = 'mlp'  # 'mlp' or 'cnn'
    args.dataset = 'mnist'  # 'cifar' or 'mnist'
    args.num_users = 5
    args.epochs = 30  # numb of global iters
    args.local_ep = loc_ep  # numb of local iters
    args.local_bs = 1201  # Local Batch size (>=1200 = full dataset size of a user for mnist, 2000 for cifar)
    args.algorithm = alg  # 'fedavg', 'fedprox', 'fsvgr'
    args.iid = False
    args.verbose = False
    print("dataset:", args.dataset, " num_users:", args.num_users, " epochs:", args.epochs, "local_ep:", args.local_ep)

    # load dataset and split users
    dict_users = {}
    dataset_train = []
    if args.dataset == 'mnist':
        dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True,
                                       transform=transforms.Compose([
                                           transforms.ToTensor(),
                                           transforms.Normalize((0.1307,), (0.3081,))
                                       ]))
        # sample users
        if args.iid:
            dict_users = mnist_iid(dataset_train, args.num_users)
        else:
            dict_users = mnist_noniid(dataset_train, args.num_users)
    elif args.dataset == 'cifar':
        transform = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        dataset_train = datasets.CIFAR10('../data/cifar', train=True, transform=transform, target_transform=None,
                                         download=True)
        if args.iid:
            dict_users = cifar_iid(dataset_train, args.num_users)
        else:
            dict_users = cifar_noniid(dataset_train, args.num_users)
            # exit('Error: only consider IID setting in CIFAR10')
    else:
        exit('Error: unrecognized dataset')
    img_size = dataset_train[0][0].shape

    # build model
    net_glob = None
    if args.model == 'cnn' and args.dataset == 'cifar':
        if args.gpu != -1:
            torch.cuda.set_device(args.gpu)
            net_glob = CNNCifar(args=args).cuda()
        else:
            net_glob = CNNCifar(args=args)
    elif args.model == 'cnn' and args.dataset == 'mnist':
        if args.gpu != -1:
            torch.cuda.set_device(args.gpu)
            net_glob = CNNMnist(args=args).cuda()
        else:
            net_glob = CNNMnist(args=args)
    elif args.model == 'mlp':
        len_in = 1
        for x in img_size:
            len_in *= x
        if args.gpu != -1:
            torch.cuda.set_device(args.gpu)
            # net_glob = MLP1(dim_in=len_in, dim_hidden=128, dim_out=args.num_classes).cuda()
            net_glob = MLP1(dim_in=len_in, dim_hidden=256, dim_out=args.num_classes).cuda()
        else:
            # net_glob = MLP1(dim_in=len_in, dim_hidden=128, dim_out=args.num_classes)
            net_glob = MLP1(dim_in=len_in, dim_hidden=256, dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')
    print("Nerual Net:", net_glob)

    net_glob.train()  # train() only switches the module to training mode; it does not change the weights
    # copy weights
    w_glob = net_glob.state_dict()

    # w_size = 0
    # for k in w_glob.keys():
    #     size = w_glob[k].size()
    #     if (len(size) == 1):
    #         nelements = size[0]
    #     else:
    #         nelements = size[0] * size[1]
    #     w_size += nelements * 4
    #     # print("Size ", k, ": ",nelements*4)
    # print("Weight Size:", w_size, " bytes")
    # print("Weight & Grad Size:", w_size * 2, " bytes")
    # print("Each user Training size:", 784 * 8 / 8 * args.local_bs, " bytes")
    # print("Total Training size:", 784 * 8 / 8 * 60000, " bytes")
    # # training
    global_grad = []
    user_grads = []
    loss_test = []
    acc_test = []
    cv_loss, cv_acc = [], []
    val_loss_pre, counter = 0, 0
    net_best = None
    val_acc_list, net_list = [], []
    # print(dict_users.keys())

    rs_avg_acc, rs_avg_loss, rs_glob_acc, rs_glob_loss = [], [], [], []

    ###  FedAvg Algorithm  ###
    if args.algorithm == 'fedavg':
        # for iter in tqdm(range(args.epochs)):
        for iter in range(args.epochs):
            w_locals, loss_locals, acc_locals, num_samples_list = [], [], [], []
            for idx in range(args.num_users):
                if(args.local_bs>1200):
                    local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx], tb=summary, bs=200*(4+idx)) #Batch_size bs = full data
                else:
                    local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx], tb=summary, bs=args.local_bs)
                num_samples, w, loss, acc = local.update_weights(net=copy.deepcopy(net_glob))
                num_samples_list.append(num_samples)
                w_locals.append(copy.deepcopy(w))
                loss_locals.append(copy.deepcopy(loss))
                # print("User ", idx, " Acc:", acc, " Loss:", loss)
                acc_locals.append(copy.deepcopy(acc))
            # update global weights
            if(weighted):
                w_glob = weighted_average_weights(w_locals, num_samples_list)
            else:
                w_glob = average_weights(w_locals)


            # copy weight to net_glob
            net_glob.load_state_dict(w_glob)
            # global test
            list_acc, list_loss = [], []
            net_glob.eval()
            # for c in tqdm(range(args.num_users)):
            for c in range(args.num_users):
                if (args.local_bs > 1200):
                    net_local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[c], tb=summary, bs=200*(4+c)) #Batch_size bs = full data
                else:
                    net_local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[c], tb=summary, bs=args.local_bs)
                acc, loss = net_local.test(net=net_glob)
                list_acc.append(acc)
                list_loss.append(loss)
            print("\nEpoch: {}, Global test loss {}, Global test acc: {:.2f}%".format(iter,
                                                                                      sum(list_loss) / len(list_loss),
                                                                                      100. * sum(list_acc) / len(
                                                                                          list_acc)))

            # print loss
            loss_avg = sum(loss_locals) / len(loss_locals)
            acc_avg = sum(acc_locals) / len(acc_locals)
            if iter % 1 == 0:
                print('\nUsers Train Average loss:', loss_avg)
                print('\nUsers Train Average accuracy:', acc_avg)
            # loss_test.append(sum(list_loss) / len(list_loss))
            # acc_test.append(sum(list_acc) / len(list_acc))

            rs_avg_acc.append(acc_avg)
            rs_avg_loss.append(loss_avg)
            rs_glob_acc.append(sum(list_acc) / len(list_acc))
            rs_glob_loss.append(sum(list_loss) / len(list_loss))
            # if (acc_avg >= 0.89):
            #     return iter+1

    ###  FedProx Algorithm  ###
    elif args.algorithm == 'fedprox':
        args.mu = 0.005  ### change mu 0.001
        args.limit = 0.3
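        # mu is the FedProx proximal coefficient: each client minimises its local loss plus
        # (mu / 2) * ||w - w_glob||^2, which keeps local updates close to the global model.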
        # for iter in tqdm(range(args.epochs)):
        for iter in range(args.epochs):
            w_locals, loss_locals, acc_locals, num_samples_list = [], [], [], []
            # m = max(int(args.frac * args.num_users), 1)
            # idxs_users = np.random.choice(range(args.num_users), m, replace=False)
            for idx in range(args.num_users):
                if(args.local_bs>1200):
                    local = LocalFedProxUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx], tb=summary, bs=200*(4+idx)) #Batch_size bs = full data
                else:
                    local = LocalFedProxUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx], tb=summary, bs=args.local_bs)

                num_samples, w, loss, acc = local.update_FedProx_weights(net=copy.deepcopy(net_glob))
                num_samples_list.append(num_samples)
                w_locals.append(copy.deepcopy(w))
                loss_locals.append(copy.deepcopy(loss))
                # print("User ", idx, " Acc:", acc, " Loss:", loss)
                acc_locals.append(copy.deepcopy(acc))
            # update global weights
            if(weighted):
                w_glob = weighted_average_weights(w_locals, num_samples_list)
            else:
                w_glob = average_weights(w_locals)

            # copy weight to net_glob
            net_glob.load_state_dict(w_glob)
            # global test
            list_acc, list_loss = [], []
            net_glob.eval()
            # for c in tqdm(range(args.num_users)):
            for c in range(args.num_users):
                if (args.local_bs > 1200):
                    net_local = LocalFedProxUpdate(args=args, dataset=dataset_train, idxs=dict_users[c], tb=summary,
                                            bs=200 * (4 + c))  # Batch_size bs = full data
                else:
                    net_local = LocalFedProxUpdate(args=args, dataset=dataset_train, idxs=dict_users[c], tb=summary,
                                            bs=args.local_bs)

                acc, loss = net_local.test(net=net_glob)
                list_acc.append(acc)
                list_loss.append(loss)
            print("\nEpoch: {}, Global test loss {}, Global test acc: {:.2f}%".format(iter,
                                                                                      sum(list_loss) / len(list_loss),
                                                                                      100. * sum(list_acc) / len(
                                                                                          list_acc)))

            # print loss
            loss_avg = sum(loss_locals) / len(loss_locals)
            acc_avg = sum(acc_locals) / len(acc_locals)
            if iter % 1 == 0:
                print('\nUsers train average loss:', loss_avg)
                print('\nUsers train average accuracy', acc_avg)
            # loss_test.append(sum(list_loss) / len(list_loss))
            # acc_test.append(sum(list_acc) / len(list_acc))

            rs_avg_acc.append(acc_avg)
            rs_avg_loss.append(loss_avg)
            rs_glob_acc.append(sum(list_acc) / len(list_acc))
            rs_glob_loss.append(sum(list_loss) / len(list_loss))
            # if (acc_avg >= 0.89):
            #     return iter+1

    ###  FSVGR Algorithm  ###
    elif args.algorithm == 'fsvgr':
        args.ag_scalar = 1.  # 0.001 or 0.1
        args.lg_scalar = 1
        args.threshold = 0.001
        # for iter in tqdm(range(args.epochs)):
        for iter in range(args.epochs):

            print("=========Global epoch {}=========".format(iter))
            w_locals, loss_locals, acc_locals = [], [], []
            """
            First communication round: server send w_t to client --> client calculate gradient and send
            to sever --> server calculate average global gradient and send to client
            """
            for idx in range(args.num_users):
                local = LocalFSVGRUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx], tb=summary)
                num_sample, grad_k = local.calculate_global_grad(net=copy.deepcopy(net_glob))
                user_grads.append([num_sample, grad_k])
            global_grad = calculate_avg_grad(user_grads)

            """
            Second communication round: client update w_k_t+1 and send to server --> server update global w_t+1
            """
            for idx in range(args.num_users):
                print("Training user {}".format(idx))
                local = LocalFSVGRUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx], tb=summary)
                num_samples, w_k, loss, acc = local.update_FSVGR_weights(global_grad, idx, copy.deepcopy(net_glob),
                                                                         iter)
                w_locals.append(copy.deepcopy([num_samples, w_k]))
                print("Global_Epoch ", iter, "User ", idx, " Acc:", acc, " Loss:", loss)
                loss_locals.append(copy.deepcopy(loss))
                acc_locals.append(copy.deepcopy(acc))

            # w_t = net_glob.state_dict()
            w_glob = average_FSVRG_weights(w_locals, args.ag_scalar, copy.deepcopy(net_glob), args.gpu)

            # copy weight to net_glob
            net_glob.load_state_dict(w_glob)

            # global test
            list_acc, list_loss = [], []
            net_glob.eval()
            # for c in tqdm(range(args.num_users)):
            for c in range(args.num_users):
                net_local = LocalFSVGRUpdate(args=args, dataset=dataset_train, idxs=dict_users[c], tb=summary)
                acc, loss = net_local.test(net=net_glob)
                list_acc.append(acc)
                list_loss.append(loss)

            print("\nTest Global Weight:", list_acc)
            print("\nEpoch: {}, Global test loss {}, Global test acc: {:.2f}%".format(iter,
                                                                                      sum(list_loss) / len(list_loss),
                                                                                      100. * sum(list_acc) / len(
                                                                                          list_acc)))

            # print loss
            loss_avg = sum(loss_locals) / len(loss_locals)
            acc_avg = sum(acc_locals) / len(acc_locals)
            if iter % 1 == 0:
                print('\nEpoch: {}, Users train average loss: {}'.format(iter, loss_avg))
                print('\nEpoch: {}, Users train average accuracy: {}'.format(iter, acc_avg))
            loss_test.append(sum(list_loss) / len(list_loss))
            acc_test.append(sum(list_acc) / len(list_acc))

            if (acc_avg >= 0.89):
                return
    if weighted:
        alg = alg + '1'
    simple_save_data(loc_ep, alg, rs_avg_acc, rs_avg_loss, rs_glob_acc, rs_glob_loss)
    plot_rs(loc_ep, alg)
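
The FedAvg and FedProx branches above call weighted_average_weights, which is not defined in this snippet. Below is a minimal sketch, under the assumption that each element of w_locals is a model state dict and num_samples_list holds the matching client sample counts (floating-point tensors assumed); the helper actually used by the example may differ:

import copy

def weighted_average_weights(w_locals, num_samples_list):
    # Sample-count-weighted FedAvg aggregation of client state dicts.
    total = float(sum(num_samples_list))
    w_avg = copy.deepcopy(w_locals[0])
    for key in w_avg.keys():
        w_avg[key] = sum(w_locals[i][key] * (num_samples_list[i] / total)
                         for i in range(len(w_locals)))
    return w_avg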
Exemple #19
0
def main():
    args = args_parser()
    HierFAVG(args)
Exemple #20
0
# -*- coding: UTF-8 -*-
import numpy as np
from dataset import get_dataset, get_handler
from model import get_net
from torchvision import transforms
from tqdm import tqdm
import torch
from options import args_parser
from query_strategies import RandomSampling, LeastConfidence, MarginSampling, EntropySampling, \
                                LeastConfidenceDropout, MarginSamplingDropout, EntropySamplingDropout, \
                                KMeansSampling, KCenterGreedy, BALDDropout, CoreSet, \
                                AdversarialBIM, AdversarialDeepFool, ActiveLearningByLearning, Balance

# parse args
arg = args_parser()
# parameters
SEED = 1
total_samples = 12000

NUM_INIT_LB = 300
NUM_QUERY = 300
NUM_ROUND = 50
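# For reference: with these settings the labelled pool grows to
# NUM_INIT_LB + NUM_QUERY * NUM_ROUND = 300 + 300 * 50 = 15,300 labels in total.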

DATA_NAME = 'MNIST'
# DATA_NAME = 'FashionMNIST'
# DATA_NAME = 'SVHN'
# DATA_NAME = 'CIFAR10'

args_pool = {
    'MNIST': {
        'n_epoch':
Exemple #21
0
            p.requires_grad = False

    return global_model


def get_exp_name(args):
    exp_name = 'baseline_{}_{}_c{}_e{}_B[{}]_lr[{}x{}]_{}_{}_weight{}'.\
                format(args.data, args.model, args.num_classes, args.epochs,
                # args.frac, args.iid, args.unequal,args.local_ep,
                args.local_bs, args.lr, args.aux_lr, args.lr_scheduler, args.optimizer, args.weight)

    return exp_name
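
# Illustration only (made-up argument values): with data='cifar', model='cnn', num_classes=10,
# epochs=100, local_bs=64, lr=0.01, aux_lr=0.001, lr_scheduler='step', optimizer='sgd' and
# weight=1.0, get_exp_name returns 'baseline_cifar_cnn_c10_e100_B[64]_lr[0.01x0.001]_step_sgd_weight1.0'.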


if __name__ == '__main__':
    args = args_parser()
    device = 'cuda' if torch.cuda.is_available() and not args.cpu_only else 'cpu'
    torch.manual_seed(args.seed)

    # load datasets
    train_dataset, test_dataset, _ = get_dataset(args)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.local_bs,
                              num_workers=args.num_workers,
                              shuffle=True)
    test_loader = DataLoader(train_dataset,
                             batch_size=1,
                             num_workers=args.num_workers,
                             shuffle=False)
def poisoned_NoDefense(nb_attackers, seed=1):

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)


    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # backdoor model
    dummy_model = copy.deepcopy(global_model)
    dummy_model.load_state_dict(torch.load('../save/all_5_model.pth'))
    # Overall L2 norm of the backdoor model: square root of the sum of squared tensor norms.
    dummy_norm = 0
    for x in dummy_model.state_dict().values():
        dummy_norm += x.norm(2).item() ** 2
    dummy_norm = dummy_norm ** (1. / 2)

    # testing accuracy for global model
    testing_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_del_w = []
        print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Adversary updates
        for idx in idxs_users[0:nb_attackers]:
            print("evil")
            local_model = LocalUpdate(args=args, dataset=train_dataset, idxs=user_groups[idx], logger=logger)
            #del_w, _ = local_model.poisoned_SGA(model=copy.deepcopy(global_model), change=1)

            w = copy.deepcopy(dummy_model)
            # Model-replacement update: scale the difference between the backdoor model and the
            # current global model by m / nb_attackers so the attackers' shift is preserved after
            # averaging over m client updates, then add the old weights back; zeta tracks its L2 norm.
            zeta = 0
            for del_w, w_old in zip(w.parameters(), global_model.parameters()):
                del_w.data -= copy.deepcopy(w_old.data)   # backdoor - global
                del_w.data *= m / nb_attackers            # boost so the average keeps the full shift
                del_w.data += copy.deepcopy(w_old.data)   # re-centre around the old weights
                zeta += del_w.norm(2).item() ** 2
            zeta = zeta ** (1. / 2)
            del_w = copy.deepcopy(w.state_dict())
            local_del_w.append(copy.deepcopy(del_w))


        # Non-adversarial updates
        for idx in idxs_users[nb_attackers:]:
            print("good")
            local_model = LocalUpdate(args=args, dataset=train_dataset, idxs=user_groups[idx], logger=logger)
            del_w, _ = local_model.update_weights(model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))

        # average local updates
        average_del_w = average_weights(local_del_w)

        # Update global model: w_{t+1} = w_{t} + average_del_w
        for param, param_del_w in zip(global_weights.values(), average_del_w.values()):
            param += param_del_w
        global_model.load_state_dict(global_weights)

        # test accuracy
        test_acc, test_loss = test_inference(args, global_model, test_dataset)
        testing_accuracy.append(test_acc)

        print("Test accuracy")
        print(testing_accuracy)

    # save test accuracy
    np.savetxt('../save/RandomAttack/NoDefense_iid_{}_{}_attackers{}_seed{}.txt'.
               format(args.dataset, args.model, nb_attackers, seed), testing_accuracy)
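
# Minimal usage sketch (not part of the original snippet; assumes the imports used above):
# sweep the number of attackers with a fixed seed so the saved accuracy curves are comparable.
if __name__ == '__main__':
    for nb in [0, 1, 2]:
        poisoned_NoDefense(nb, seed=1)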