    for idx in idxs_users:
        local_model = LocalUpdate(args=args, dataset=train_dataset,
                                  idxs=user_groups[idx], logger=logger)
        w, loss = local_model.update_weights(
            model=copy.deepcopy(global_model), global_round=epoch)
        local_weights.append(copy.deepcopy(w))
        local_losses.append(copy.deepcopy(loss))

    # update global weights by averaging the local weights
    global_weights = average_weights(local_weights)
    global_model.load_state_dict(global_weights)

    loss_avg = sum(local_losses) / len(local_losses)
    train_loss.append(loss_avg)

    # Calculate avg training accuracy over all users at every epoch
    list_acc, list_loss = [], []
    global_model.eval()
    for c in range(args.num_users):
        local_model = LocalUpdate(args=args, dataset=train_dataset,
                                  idxs=user_groups[c], logger=logger)  # evaluate on client c's data (was user_groups[idx], a stale index)
        acc, loss = local_model.inference(model=global_model)
        list_acc.append(acc)
        list_loss.append(loss)
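# The loop above relies on an `average_weights` helper that is not shown here.
# A minimal sketch, assuming plain unweighted FedAvg over the returned
# state_dicts (the real helper may weight clients differently):
import copy
import torch

def average_weights(local_weights):
    """Element-wise mean of a list of state_dicts (sketch)."""
    avg = copy.deepcopy(local_weights[0])
    for key in avg.keys():
        for w in local_weights[1:]:
            avg[key] += w[key]
        avg[key] = torch.div(avg[key], len(local_weights))
    return avg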
def poisoned_pixel_LDP(norm_bound, noise_scale, nb_attackers, seed=1):
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)
    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # testing accuracy for global model
    testing_accuracy = [0.1]
    backdoor_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_w, local_norms = [], []
        print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Poisonous updates
        for idx in idxs_users[0:nb_attackers]:
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            w, _ = local_model.pixel_ldp(model=copy.deepcopy(global_model),
                                         norm_bound=norm_bound,
                                         noise_scale=noise_scale)
            local_w.append(copy.deepcopy(w))

        # Regular updates
        for idx in idxs_users[nb_attackers:]:
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            w, _ = local_model.dp_sgd(model=copy.deepcopy(global_model),
                                      norm_bound=norm_bound,
                                      noise_scale=noise_scale)
            local_w.append(copy.deepcopy(w))

        # update global weights
        global_weights = average_weights(local_w)
        global_model.load_state_dict(global_weights)

        # test accuracy
        test_acc, test_loss, backdoor = test_backdoor_pixel(
            args, global_model, test_dataset)
        testing_accuracy.append(test_acc)
        backdoor_accuracy.append(backdoor)

        print("Testing & Backdoor accuracies")
        print(testing_accuracy)
        print(backdoor_accuracy)

    # save test accuracy
    np.savetxt(
        '../save/PixelAttack/TestAcc/LDP_iid_{}_{}_clip{}_scale{}_attackers{}_seed{}.txt'
        .format(args.dataset, args.model, norm_bound, noise_scale,
                nb_attackers, seed), testing_accuracy)
    np.savetxt(
        '../save/PixelAttack/BackdoorAcc/LDP_iid_{}_{}_clip{}_scale{}_attackers{}_seed{}.txt'
        .format(args.dataset, args.model, norm_bound, noise_scale,
                nb_attackers, seed), backdoor_accuracy)
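# The honest clients above call `local_model.dp_sgd(...)`, which is not shown.
# A minimal sketch of that client-side local-DP step, assuming it clips the
# weight delta to `norm_bound` and adds Gaussian noise scaled by `noise_scale`
# (hypothetical; the real LocalUpdate method may differ and also returns a
# second value, omitted here):
import torch

def dp_sgd_sketch(local_net, global_net, norm_bound, noise_scale):
    # ... local SGD on the client's data would happen before this point ...
    # per-update clipping: scale the whole delta so its l2 norm is <= norm_bound
    sq_norm = 0.0
    for p_new, p_old in zip(local_net.parameters(), global_net.parameters()):
        sq_norm += (p_new.data - p_old.data).norm(2).item() ** 2
    clip = max(1.0, sq_norm ** 0.5 / norm_bound)

    for p_new, p_old in zip(local_net.parameters(), global_net.parameters()):
        delta = (p_new.data - p_old.data) / clip
        delta += torch.randn_like(delta) * noise_scale * norm_bound  # local Gaussian noise
        p_new.data = p_old.data + delta
    return local_net.state_dict()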
def poisoned_pixel_CDP(norm_bound, noise_scale, nb_attackers, seed=1):
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)
    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # load poisoned model
    backdoor_model = copy.deepcopy(global_model)
    backdoor_model.load_state_dict(torch.load('../save/poison_model.pth'))

    # testing accuracy for global model
    testing_accuracy = [0.1]
    backdoor_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_del_w, local_norms = [], []
        print(f'\n | Global Training Round : {epoch + 1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Adversary updates
        print("Evil")
        for idx in idxs_users[0:nb_attackers]:
            # backdoor model
            w = copy.deepcopy(backdoor_model)

            # compute change in parameters and its l2 norm
            zeta = 0
            for del_w, w_old in zip(w.parameters(), global_model.parameters()):
                del_w.data = del_w.data - copy.deepcopy(w_old.data)
                zeta += del_w.norm(2).item() ** 2
            zeta = zeta ** (1. / 2)
            del_w = w.state_dict()
            print("EVIL")
            print(zeta)

            # add to global round
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))

        # Non-adversarial updates
        for idx in idxs_users[nb_attackers:]:
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            del_w, zeta = local_model.update_weights(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))
            print("good")
            # print(zeta)

        # norm bound (e.g. median of norms)
        clip_factor = norm_bound  # min(norm_bound, np.median(local_norms))
        print(clip_factor)

        # clip updates
        for i in range(len(idxs_users)):
            for param in local_del_w[i].values():
                print(max(1, local_norms[i] / clip_factor))
                param /= max(1, local_norms[i] / clip_factor)

        # average local model updates
        average_del_w = average_weights(local_del_w)

        # Update model and add noise
        # w_{t+1} = w_{t} + avg(del_w1 + del_w2 + ... + del_wc) + Noise
        for param, param_del_w in zip(global_weights.values(),
                                      average_del_w.values()):
            param += param_del_w
            param += torch.randn(
                param.size()) * noise_scale * norm_bound / len(idxs_users)
        global_model.load_state_dict(global_weights)

        # test accuracy
        test_acc, test_loss, backdoor = test_backdoor_pixel(
            args, global_model, test_dataset)
        testing_accuracy.append(test_acc)
        backdoor_accuracy.append(backdoor)

        print("Testing & Backdoor accuracies")
        print(testing_accuracy)
        print(backdoor_accuracy)

    # save test accuracy
    np.savetxt(
        '../save/PixelAttack/TestAcc/iid_GDP_{}_{}_clip{}_scale{}_attackers{}_seed{}.txt'
        .format(args.dataset, args.model, norm_bound, noise_scale,
                nb_attackers, seed), testing_accuracy)
    np.savetxt(
        '../save/PixelAttack/BackdoorAcc/iid_GDP_{}_{}_clip{}_scale{}_attackers{}_seed{}.txt'
        .format(args.dataset, args.model, norm_bound, noise_scale,
                nb_attackers, seed), backdoor_accuracy)
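# The central-DP server step above (clip each delta, average, add Gaussian
# noise) can be factored into a standalone helper. A sketch under the same
# assumptions as the loop: per-client l2 norms in `local_norms`, deltas as
# state_dicts in `local_del_w`, floating-point parameters only.
import copy
import torch

def clip_and_noise_sketch(global_weights, local_del_w, local_norms,
                          norm_bound, noise_scale):
    n = len(local_del_w)

    # clip each client's update so its l2 norm is at most norm_bound
    for delta, norm in zip(local_del_w, local_norms):
        factor = max(1.0, norm / norm_bound)
        for param in delta.values():
            param /= factor

    # element-wise average of the clipped deltas
    avg_delta = copy.deepcopy(local_del_w[0])
    for key in avg_delta:
        for delta in local_del_w[1:]:
            avg_delta[key] += delta[key]
        avg_delta[key] /= n

    # w_{t+1} = w_t + avg_delta + N(0, (noise_scale * norm_bound / n)^2)
    for param, d in zip(global_weights.values(), avg_delta.values()):
        param += d
        param += torch.randn_like(param) * noise_scale * norm_bound / n
    return global_weights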
def poisoned_1to7_NoDefense(seed=1):
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)
    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # testing accuracy for global model
    testing_accuracy = [0.1]
    backdoor_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_del_w, local_norms = [], []
        print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Adversary updates
        print("Evil norms:")
        for idx in idxs_users[0:args.nb_attackers]:
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            del_w, zeta = local_model.poisoned_1to7(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))
            print(zeta)

        # Non-adversarial updates
        print("Good norms:")
        for idx in idxs_users[args.nb_attackers:]:
            print(idx)
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            del_w, zeta = local_model.update_weights(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))
            print(zeta)

        # average local updates
        average_del_w = average_weights(local_del_w)

        # Update global model: w_{t+1} = w_{t} + average_del_w
        for param, param_del_w in zip(global_weights.values(),
                                      average_del_w.values()):
            param += param_del_w
        global_model.load_state_dict(global_weights)

        # test accuracy, backdoor accuracy
        test_acc, test_loss, back_acc = test_inference1to7(
            args, global_model, test_dataset)
        testing_accuracy.append(test_acc)
        backdoor_accuracy.append(back_acc)

        print("Test & Backdoor accuracy")
        print(testing_accuracy)
        print(backdoor_accuracy)

    # save accuracy
    np.savetxt(
        '../save/1to7Attack/TestAcc/NoDefense_{}_{}_seed{}.txt'.format(
            args.dataset, args.model, seed), testing_accuracy)
    np.savetxt(
        '../save/1to7Attack/BackAcc/NoDefense_{}_{}_seed{}.txt'.format(
            args.dataset, args.model, seed), backdoor_accuracy)
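# The adversarial clients above call `poisoned_1to7`, which is not shown. The
# label-flipping part could look like this minimal, hypothetical dataset
# wrapper: relabel every "1" as "7" and then run ordinary local training on it.
from torch.utils.data import Dataset

class OneToSevenPoison(Dataset):
    """Wrap a dataset and flip label 1 -> 7 (sketch of an assumed helper)."""
    def __init__(self, base, idxs):
        self.base, self.idxs = base, list(idxs)

    def __len__(self):
        return len(self.idxs)

    def __getitem__(self, i):
        image, label = self.base[self.idxs[i]]
        if int(label) == 1:
            label = 7
        return image, label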
    elif args.dataset == 'cifar100' or args.dataset == 'cifar10':
        if args.mode == 'model_heter':
            if i < 10:
                args.stride = [1, 4]
            else:
                args.stride = [2, 2]
        else:
            args.stride = [2, 2]
        resnet = resnet18(args, pretrained=False, num_classes=args.num_classes)
        initial_weight = model_zoo.load_url(model_urls['resnet18'])
        local_model = resnet
        initial_weight_1 = local_model.state_dict()
        for key in initial_weight.keys():
            if key[0:3] == 'fc.' or key[0:5] == 'conv1' or key[0:3] == 'bn1':
                initial_weight[key] = initial_weight_1[key]

        local_model.load_state_dict(initial_weight)
        local_model.to(args.device)
        local_model.train()
        local_model_list.append(local_model)

if args.mode == 'task_heter':
    FedProto_taskheter(args, train_dataset, test_dataset, user_groups,
                       user_groups_lt, local_model_list, classes_list)
else:
    FedProto_modelheter(args, train_dataset, test_dataset, user_groups,
                        user_groups_lt, local_model_list, classes_list)
        local_model = LocalUpdate(
            args=args, dataset=train_dataset, idxs=user_groups[idx],
            logger=logger)  # note: it is train_dataset that is handed to the client here
        w, loss = local_model.update_weights(
            model=copy.deepcopy(global_model), global_round=epoch)
        local_weights.append(copy.deepcopy(w))  # record the client model's parameters and its training loss
        local_losses.append(copy.deepcopy(loss))

    # update global weights
    global_weights = average_weights(local_weights)
    global_model.load_state_dict(global_weights)  # load the averaged weights into the global model

    loss_avg = sum(local_losses) / len(local_losses)
    train_loss.append(loss_avg)

    # Calculate avg training accuracy over all users at every epoch
    list_acc, list_loss = [], []
    global_model.eval()
    for c in range(args.num_users):
        local_model = LocalUpdate(args=args, dataset=train_dataset,
                                  idxs=user_groups[c], logger=logger)  # evaluate on client c's data (was user_groups[idx], a stale index)
        acc, loss = local_model.inference(model=global_model)
        list_acc.append(acc)
        list_loss.append(loss)
def poisoned_random_CDP(seed=1):
    # Central DP to protect against attackers
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)
    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # testing accuracy for global model
    testing_accuracy = [0.1]
    backdoor_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_del_w, local_norms = [], []
        print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Adversaries' updates
        for idx in idxs_users[0:args.nb_attackers]:
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            del_w, zeta = local_model.poisoned_1to7(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))

        # Non-adversary updates
        for idx in idxs_users[args.nb_attackers:]:
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            del_w, zeta = local_model.update_weights(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))

        # norm bound (e.g. median of norms)
        median_norms = args.norm_bound  # np.median(local_norms)
        print(median_norms)

        # clip weight updates
        for i in range(len(idxs_users)):
            for param in local_del_w[i].values():
                param /= max(1, local_norms[i] / median_norms)

        # average the clipped weight updates
        average_del_w = average_weights(local_del_w)

        # Update global model using clipped weight updates, and add noise
        # w_{t+1} = w_{t} + avg(del_w1 + del_w2 + ... + del_wc) + Noise
        for param, param_del_w in zip(global_weights.values(),
                                      average_del_w.values()):
            param += param_del_w
            param += torch.randn(
                param.size()) * args.noise_scale * median_norms / (
                    len(idxs_users) ** 0.5)
        global_model.load_state_dict(global_weights)

        # test accuracy
        test_acc, test_loss = test_inference(args, global_model, test_dataset)
        testing_accuracy.append(test_acc)

        print("Test accuracy")
        print(testing_accuracy)

    # save test accuracy
    np.savetxt(
        '../save/1to7Attack/GDP_{}_{}_seed{}_clip{}_scale{}.txt'.format(
            args.dataset, args.model, seed, args.norm_bound,
            args.noise_scale), testing_accuracy)
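# Throughout these loops, `update_weights(..., change=1)` is assumed to return
# the client's weight delta together with its l2 norm (the `zeta` used for
# clipping). A sketch of that contract (hypothetical; the real method also runs
# the local training that produces `trained_net`):
def delta_and_norm_sketch(trained_net, global_net):
    delta, sq_norm = {}, 0.0
    for (name, p_new), p_old in zip(trained_net.state_dict().items(),
                                    global_net.state_dict().values()):
        delta[name] = p_new - p_old
        sq_norm += delta[name].float().norm(2).item() ** 2
    return delta, sq_norm ** 0.5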
def federated_main(args):
    # prepare logger
    output_dir = "../logs"
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    session_time = strftime("%y%m%d-%H%M%S", localtime())
    output_file = os.path.join(output_dir,
                               "{}-{}.log".format(session_time, args.dataset))
    log_file = logging.FileHandler(output_file)
    log_file.setFormatter(logging.Formatter('%(asctime)s: %(message)s'))
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    logger.addHandler(log_file)

    # prepare client data output directory; the related output is written at the bottom
    if not os.path.exists("../clients_data"):
        os.makedirs("../clients_data")

    # prepare devices
    if args.gpu:
        torch.cuda.set_device("cuda:{}".format(args.gpu))
    device = "cuda" if args.gpu else "cpu"

    # prepare dataset
    if args.dataset == "cifar":
        dataset = CreateCIFAR10(args.num_users)
    elif args.dataset == "mnist":
        dataset = CreateMNIST(args.num_users)
    elif args.dataset == "fmnist":
        dataset = CreateMNIST(args.num_users, "../data/fmnist")
    else:
        raise ValueError("The dataset you input is not supported yet")

    if args.iid:
        user_groups = dataset.create_iid()
    else:
        if args.unequal:
            if args.dataset == "cifar":
                raise NotImplementedError(
                    "Cifar 10 unequal dataset is not implemented")
            else:
                user_groups = dataset.crate_noniid_unequal()
        else:
            user_groups = dataset.create_noniid()

    # initialize the model
    if args.model == "cnn":
        if args.dataset == "mnist":
            global_model = CNNMnist(args=args)
        elif args.dataset == "fmnist":
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == "cifar":
            global_model = CNNCifar(args=args)
        else:
            raise ValueError("The dataset you input is not supported yet")
    elif args.model == "mlp":
        img_size = dataset.train_dataset.data[0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        raise ValueError("The model you input is not supported yet")

    global_model.to(device)
    global_model.train()

    # prepare the parameters of the global model
    global_parameters = global_model.state_dict()

    # Prepare the client output containers
    local_outputs = [
        ClientOutput(client_idx) for client_idx in range(args.num_users)
    ]

    # Malicious behavior: some nodes may perform a model poisoning attack by changing data labels
    if args.malicious:
        malicious_changer = MaliciousChange(args, dataset.train_dataset)
        # the length of source_list and dest_list can influence the final accuracy
        malicious_changer.data_poision(user_groups, [1, 5, 3], [3, 1, 5])
        malicious_client_idxs = malicious_changer.malicious_clients
        for idx in malicious_client_idxs:
            local_outputs[idx].update(malicious=True)

    # Prepare global early stopping
    if args.global_early_stop:
        global_early_stopping = EarlyStopping(
            patience=args.global_early_rounds)

    # Prepare the aggregation method
    if args.aggre == "simple":
        aggre_method = SimpleAggregation(int(args.rate * args.num_users),
                                         int((args.num_users - 1) / 3),
                                         True, args.no_consensus)
    elif args.aggre == "time":
        aggre_method = ThresholdAggregation(int(args.rate * args.num_users),
                                            int((args.num_users - 1) / 3),
                                            True, args.no_consensus)
    elif args.aggre == "top":
        aggre_method = TopAggregation(int(args.rate * args.num_users),
                                      int((args.num_users - 1) / 3),
                                      False, args.no_consensus)
    else:
        raise ValueError(
            "The aggregation method you indicate has not been implemented")

    # prepare other bookkeeping
    old_global_metric = 0.
    server_total_profit = 0.
    epoch_outputs = []
    if args.malicious:
        client_chooser = ChooseClient(args, malicious_client_idxs)
    else:
        client_chooser = ChooseClient(args)

    # Training
    for epoch in tqdm.tqdm(range(args.epochs)):
        logger.info("Epoch {} Started".format(epoch))

        # Choose the clients participating in this epoch
        if args.aggre == "simple":
            # The number of received models influences performance.
            # To keep the comparison fair, FedAvg does no random screening.
            k = max(int(args.rate * args.num_users), 1)
        else:
            k = max(int(args.frac * args.num_users), 1)
        chosen_clients = client_chooser.part_pick(list(range(args.num_users)), k)

        # Client part
        for client_idx in chosen_clients:
            # Client model training
            client_updater = LocalUpdate(args, dataset.train_dataset,
                                         user_groups[client_idx], logger)
            client_model, client_loss = client_updater(
                copy.deepcopy(global_model))

            # Client model evaluation
            evaluator = Evaluator(args, logger)
            client_global_metric, client_global_loss = evaluator(
                client_model, dataset.test_dataset)

            # Update ClientOutput
            local_outputs[client_idx].update(
                parameter=copy.deepcopy(client_model.state_dict()),
                number=len(user_groups[client_idx]),
                epochs=epoch,
                accuracy=client_global_metric,
                local_loss=client_loss,
                global_loss=client_global_loss)
            epoch_outputs.append(local_outputs[client_idx])

        # Update global parameters
        epoch_outputs, global_parameters = aggre_method(epoch_outputs)
        global_model.load_state_dict(global_parameters)

        # Evaluate global model on the global test dataset
        global_evaluator = Evaluator(args, logger)
        global_metric, global_loss = global_evaluator(global_model,
                                                      dataset.test_dataset)

        # Calculate the profit
        epoch_outputs, publisher_profit, pay_count = epoch_incentive(
            args, epoch_outputs, global_metric, old_global_metric)
        server_total_profit += publisher_profit

        # Epoch ends
        logger.info("Epoch {} End".format(epoch))

        # Print epoch information
        logger.info("[client]Clients Information:")
        for single_client in epoch_outputs:
            client_dict = single_client.output()
            client_idx = client_dict["idx"]
            for client_key in client_dict:
                if client_key != "idx":
                    logger.info("[client-{}]{}: {}".format(
                        client_idx, client_key, client_dict[client_key]))
        logger.info("[server]Server information:")
        logger.info("[server]loss: {}".format(global_loss))
        logger.info("[server]accuracy: {}".format(global_metric))
        logger.info("[server]epoch_profit: {}".format(
            publisher_profit / pay_count if pay_count != 0 else 0))
        logger.info("[server]total_profit: {}".format(server_total_profit))

        # Global early stopping mechanism
        if args.global_early_stop and epoch > args.no_stoping_epochs:
            global_early_stopping(global_loss, global_model)
            if global_early_stopping.early_stop:
                break

        # Reset some values
        # Only the best performance so far is meaningful to the task participants
        old_global_metric = max(global_metric, old_global_metric)
        epoch_outputs = []

    pickle.dump(
        local_outputs,
        open("../clients_data/{}-{}.pickle".format(session_time, args.dataset),
             "wb"))
    logger.info("Task finished!")
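# The EarlyStopping helper used above is not shown. A minimal sketch matching
# only the calls made in this loop (construct with a patience, call with
# (loss, model), read .early_stop); the real class may also checkpoint the best
# model, which is omitted here.
class EarlyStopping:
    def __init__(self, patience=10, delta=0.0):
        self.patience, self.delta = patience, delta
        self.best_loss = None
        self.counter = 0
        self.early_stop = False

    def __call__(self, val_loss, model):
        if self.best_loss is None or val_loss < self.best_loss - self.delta:
            self.best_loss = val_loss   # improvement: reset the counter
            self.counter = 0
        else:
            self.counter += 1           # no improvement this round
            if self.counter >= self.patience:
                self.early_stop = True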
            # Ensure the correct weight is being updated
            assert name == bl_name

            # Calculate the update delta
            delta = weight - baseline
            local_delta_update.append((name, delta))

        local_weights.append(local_delta_update)
        # local_weights.append(copy.deepcopy(w))
        local_losses.append(copy.deepcopy(loss))

    # update global weights
    if epoch == 1:
        # initialize the global model by vanilla averaging for the first epoch
        global_weights = average_weights(w_s)
        global_model.load_state_dict(global_weights)
    else:
        global_weights = fedavg(baseline_weights, local_weights,
                                num_samples_list, server_lr=server_lr)
        updated_state_dict = {}
        for name, weight in global_weights:
            updated_state_dict[name] = weight
        global_model.load_state_dict(updated_state_dict, strict=False)

    loss_avg = sum(local_losses) / len(local_losses)
    train_loss.append(loss_avg)
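# The `fedavg` call above aggregates per-client deltas rather than full weights.
# A sketch of what it might compute, assuming `baseline_weights` and each entry
# of `local_weights` are lists of (name, tensor) pairs in the same order, with
# a sample-weighted average and a server learning rate (hypothetical helper):
def fedavg_sketch(baseline_weights, local_weights, num_samples_list, server_lr=1.0):
    total = float(sum(num_samples_list))
    updated = []
    for i, (name, baseline) in enumerate(baseline_weights):
        # weighted mean of the i-th delta across clients
        avg_delta = sum(deltas[i][1] * (n / total)
                        for deltas, n in zip(local_weights, num_samples_list))
        updated.append((name, baseline + server_lr * avg_delta))
    return updated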
def main_test(args):
    start_time = time.time()
    now = datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S')

    # define paths
    logger = SummaryWriter('../logs')

    # comment this out when using easydict
    # args = args_parser()

    # checkpoint save location
    args.save_path = os.path.join(args.save_path, args.exp_folder)
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    save_path_tmp = os.path.join(args.save_path, 'tmp_{}'.format(now))
    if not os.path.exists(save_path_tmp):
        os.makedirs(save_path_tmp)
    SAVE_PATH = os.path.join(args.save_path,
                             '{}_{}_T[{}]_C[{}]_iid[{}]_E[{}]_B[{}]'.format(
                                 args.dataset, args.model, args.epochs,
                                 args.frac, args.iid, args.local_ep,
                                 args.local_bs))

    # fix random seeds
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)

    # torch.cuda.set_device(0)
    device = torch.device("cuda:{}".format(args.gpu)
                          if torch.cuda.is_available() else "cpu")
    cpu_device = torch.device('cpu')

    # create log file
    log_path = os.path.join('../logs', args.exp_folder)
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    loggertxt = get_logger(
        os.path.join(log_path, '{}_{}_{}_{}.log'.format(
            args.model, args.optimizer, args.norm, now)))
    logging.info(args)

    # csv
    csv_save = '../csv/' + now
    csv_path = os.path.join(csv_save, 'accuracy.csv')
    csv_logger_keys = ['train_loss', 'accuracy']
    csvlogger = CSVLogger(csv_path, csv_logger_keys)

    # load dataset and user groups
    train_dataset, test_dataset, client_loader_dict = get_dataset(args)

    # set automatically for cifar-100
    if args.dataset == 'cifar100':
        args.num_classes = 100

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)
        elif args.dataset == 'cifar100':
            global_model = CNNCifar(args=args)
    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    elif args.model == 'cnn_vc':
        global_model = CNNCifar_fedVC(args=args)
    elif args.model == 'cnn_vcbn':
        global_model = CNNCifar_VCBN(args=args)
    elif args.model == 'cnn_vcgn':
        global_model = CNNCifar_VCGN(args=args)
    elif args.model == 'resnet18_ws':
        global_model = resnet18(num_classes=args.num_classes, weight_stand=1)
    elif args.model == 'resnet18':
        global_model = resnet18(num_classes=args.num_classes, weight_stand=0)
    elif args.model == 'resnet32':
        global_model = ResNet32_test(num_classes=args.num_classes)
    elif args.model == 'resnet18_mabn':
        global_model = resnet18_mabn(num_classes=args.num_classes)
    elif args.model == 'vgg':
        global_model = vgg11()
    elif args.model == 'cnn_ws':
        global_model = CNNCifar_WS(args=args)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    loggertxt.info(global_model)
    # per-client model copies, used to keep gn/bn layers local (no communication), as in FedBN
    client_models = [copy.deepcopy(global_model) for idx in range(args.num_users)]

    # copy weights
    global_weights = global_model.state_dict()
    global_model.to(device)
    global_model.train()

    # Training
    train_loss, train_accuracy = [], []
    val_acc_list, net_list = [], []

    # per-client diagnostics ("how does BN help"-style loss and gradient tracking)
    client_loss = [[] for i in range(args.num_users)]
    client_conv_grad = [[] for i in range(args.num_users)]
    client_fc_grad = [[] for i in range(args.num_users)]
    client_total_grad_norm = [[] for i in range(args.num_users)]

    # resume training
    if args.resume:
        checkpoint = torch.load(SAVE_PATH)
        global_model.load_state_dict(checkpoint['global_model'])
        if args.hold_normalize:
            for client_idx in range(args.num_users):
                client_models[client_idx].load_state_dict(
                    checkpoint['model_{}'.format(client_idx)])
        else:
            for client_idx in range(args.num_users):
                client_models[client_idx].load_state_dict(
                    checkpoint['global_model'])
        resume_iter = int(checkpoint['a_iter']) + 1
        print('Resume training from epoch {}'.format(resume_iter))
    else:
        resume_iter = 0

    # learning rate scheduler
    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, gamma=0.1, step_size=500)

    # start training
    for epoch in tqdm(range(args.epochs)):
        local_weights, local_losses = [], []
        if args.verbose:
            print(f'\n | Global Training Round : {epoch + 1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        for idx in idxs_users:
            """
            for key in global_model.state_dict().keys():
                if args.hold_normalize:
                    if 'bn' not in key:
                        client_models[idx].state_dict()[key].data.copy_(global_model.state_dict()[key])
                else:
                    client_models[idx].state_dict()[key].data.copy_(global_model.state_dict()[key])
            """
            torch.cuda.empty_cache()
            local_model = LocalUpdate(args=args, logger=logger,
                                      train_loader=client_loader_dict[idx],
                                      device=device)
            w, loss, batch_loss, conv_grad, fc_grad, total_grad_norm = local_model.update_weights(
                model=copy.deepcopy(global_model), global_round=epoch, idx_user=idx)
            local_weights.append(copy.deepcopy(w))
            # average loss of this client over one local epoch, e.g. 0.35 (the mean of its batch losses)
            local_losses.append(copy.deepcopy(loss))

            # per-round scheduler
            # scheduler.step()

            # for the loss graph: store the per-client loss progression, kept separately for every client
            client_loss[idx].append(batch_loss)
            client_conv_grad[idx].append(conv_grad)
            client_fc_grad[idx].append(fc_grad)
            client_total_grad_norm[idx].append(total_grad_norm)
            # print(total_grad_norm)

            # copy gn, bn
            # client_models[idx].load_state_dict(w)
            del local_model
            del w

        # update global weights
        global_weights = average_weights(local_weights, client_loader_dict, idxs_users)

        # update global weights via a server-side optimizer step
        # opt = OptRepo.name2cls('adam')(global_model.parameters(), lr=0.01, betas=(0.9, 0.99), eps=1e-3)
        opt = OptRepo.name2cls('sgd')(global_model.parameters(), lr=10, momentum=0.9)
        opt.zero_grad()
        opt_state = opt.state_dict()
        global_weights = aggregation(global_weights, global_model)
        global_model.load_state_dict(global_weights)
        opt = OptRepo.name2cls('sgd')(global_model.parameters(), lr=10, momentum=0.9)
        # opt = OptRepo.name2cls('adam')(global_model.parameters(), lr=0.01, betas=(0.9, 0.99), eps=1e-3)
        opt.load_state_dict(opt_state)
        opt.step()

        loss_avg = sum(local_losses) / len(local_losses)
        train_loss.append(loss_avg)

        global_model.eval()
        # for c in range(args.num_users):
        #     local_model = LocalUpdate(args=args, dataset=train_dataset,
        #                               idxs=user_groups[idx], logger=logger)
        #     acc, loss = local_model.inference(model=global_model)
        #     list_acc.append(acc)
        #     list_loss.append(loss)
        # train_accuracy.append(sum(list_acc)/len(list_acc))
        train_accuracy = test_inference(args, global_model, test_dataset, device=device)
        val_acc_list.append(train_accuracy)

        # print global training stats after every round
        # if (epoch+1) % print_every == 0:
        loggertxt.info(f' \nAvg Training Stats after {epoch + 1} global rounds:')
        loggertxt.info(f'Training Loss : {loss_avg}')
        loggertxt.info('Train Accuracy: {:.2f}% \n'.format(100 * train_accuracy))
        csvlogger.write_row([loss_avg, 100 * train_accuracy])

        if (epoch + 1) % 100 == 0:
            tmp_save_path = os.path.join(save_path_tmp, 'tmp_{}.pt'.format(epoch + 1))
            torch.save(global_model.state_dict(), tmp_save_path)

    # Test inference after completion of training
    test_acc = test_inference(args, global_model, test_dataset, device=device)

    print(' Saving checkpoints to {}...'.format(SAVE_PATH))
    if args.hold_normalize:
        client_dict = {}
        for idx, model in enumerate(client_models):
            client_dict['model_{}'.format(idx)] = model.state_dict()
        torch.save(client_dict, SAVE_PATH)
    else:
        torch.save({'global_model': global_model.state_dict()}, SAVE_PATH)

    loggertxt.info(f' \n Results after {args.epochs} global rounds of training:')
    # loggertxt.info("|---- Avg Train Accuracy: {:.2f}%".format(100 * train_accuracy[-1]))
    loggertxt.info("|---- Test Accuracy: {:.2f}%".format(100 * test_acc))

    # Note: does not work well when frac is not 1.
    # batch_loss_list = np.array(client_loss).sum(axis=0) / args.num_users
    # conv_grad_list = np.array(client_conv_grad).sum(axis=0) / args.num_users
    # fc_grad_list = np.array(client_fc_grad).sum(axis=0) / args.num_users
    # total_grad_list = np.array(client_total_grad_norm).sum(axis=0) / args.num_users
    # The intent was a per-client average, but currently only client 0 is returned;
    # a bug is expected when clients have different numbers of batches.
    return train_loss, val_acc_list, client_loss[0], client_conv_grad[0], client_fc_grad[0], client_total_grad_norm[0]
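# The `aggregation` helper used in the server step above is not shown. One
# plausible reading, sketched here purely as an assumption, is a FedOpt-style
# update: turn (current global weights - averaged client weights) into a
# pseudo-gradient stored in .grad, so that the surrounding opt.step() applies
# it. The unusually large server lr suggests the real helper may scale the
# pseudo-gradient differently; this is a sketch, not the repository's code.
def aggregation_sketch(avg_client_weights, global_model):
    model_state = global_model.state_dict()
    for name, param in global_model.named_parameters():
        # pseudo-gradient points from the averaged weights back to the old weights
        param.grad = (model_state[name] - avg_client_weights[name]).detach()
    return model_state  # loading this back is a no-op; the optimizer step moves the model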
def main():
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    if args.gpu:
        torch.cuda.set_device(0)
    device = 'cuda' if args.gpu else 'cpu'

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)
    args.num_users = len(user_groups)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)
    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()

    # copy weights
    global_weights = global_model.state_dict()

    # Training
    train_loss, train_accuracy = [], []
    val_acc_list, net_list = [], []
    cv_loss, cv_acc = [], []
    print_every = 2
    val_loss_pre, counter = 0, 0

    # Read in which participants take part in the training (0 means included, 1 means not included)
    users = []
    fp = open('users.txt', "r")
    x = fp.readline().split(' ')
    for i in x:
        if i != '':
            users.append(int(i))
    fp.close()

    # for epoch in tqdm(range(args.epochs)):
    for epoch in range(args.epochs):
        local_weights, local_losses = [], []
        # print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        for idx in idxs_users:
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            w, loss = local_model.update_weights(
                model=copy.deepcopy(global_model), global_round=epoch)
            local_weights.append(copy.deepcopy(w))
            local_losses.append(copy.deepcopy(loss))

        # update global weights
        global_weights = average_weights(local_weights)
        global_model.load_state_dict(global_weights)

        loss_avg = sum(local_losses) / len(local_losses)
        train_loss.append(loss_avg)

        # Calculate avg training accuracy over all users at every epoch
        list_acc, list_loss = [], []
        global_model.eval()
        for c in range(args.num_users):
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[c], logger=logger)  # was user_groups[idx], a stale index
            acc, loss = local_model.inference(model=global_model)
            list_acc.append(acc)
            list_loss.append(loss)
        train_accuracy.append(sum(list_acc) / len(list_acc))

        # print global training loss after every 'i' rounds
        '''if (epoch+1) % print_every == 0:
            print(f' \nAvg Training Stats after {epoch+1} global rounds:')
            print(f'Training Loss : {np.mean(np.array(train_loss))}')
            print('Train Accuracy: {:.2f}% \n'.format(100*train_accuracy[-1]))'''

    # Test inference after completion of training
    # Read in which labels were assigned to which participant.
    ftrain = open('traindataset.txt')
    testlabels = []
    line = ftrain.readline()
    while line != "":
        sor = line.split(' ')
        array = []
        for i in sor:
            array.append(int(i))
        testlabels.append(array)
        line = ftrain.readline()
    ftrain.close()

    print("USERS LABELS")
    print(testlabels)

    # Run the test inference for every possible coalition of participants
    for j in range((2 ** args.num_users) - 1):
        binary = numberToBinary(j, len(users))
        test_acc, test_loss = test_inference(args, global_model, test_dataset,
                                             testlabels, binary, len(binary))

        # Print the test results
        print("PARTICIPANTS")
        print(users)
        print("TEST NUMBER")
        print(j)
        print("TEST BINARY")
        print(binary)
        print("TEST LABELS")
        print(testlabels)
        print("Test Accuracy")
        print("{:.2f}%".format(100 * test_acc))
        print()

    # Saving the objects train_loss and train_accuracy:
    '''file_name = '../save/objects/{}_{}_{}_C[{}]_iid[{}]_E[{}]_B[{}].pkl'.\
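# The coalition loop above encodes each coalition index j as a 0/1 membership
# mask via `numberToBinary`. A minimal sketch consistent with that usage
# (fixed-length list of binary digits, most significant bit first):
def numberToBinary(number, length):
    """Return `number` as a list of 0/1 digits, left-padded to `length` (sketch)."""
    bits = [int(b) for b in bin(number)[2:]]
    return [0] * (length - len(bits)) + bits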
cwd = os.getcwd()
filepath = os.path.join(
    cwd, 'save/objects/{}_{}_C[{}]_iid[{}]_B[{}]_N[{}]_MA[{}]_R[{}].pkl'.format(
        args.dataset, args.model, args.frac, args.iid, args.local_bs,
        args.norm, args.ma, args.run))

# Logging setup
filename = os.path.join(
    cwd, 'save/objects/{}_{}_{}_C[{}]_iid[{}]_E[{}]_B[{}]_N[{}]_MA[{}]_R[{}].log'.format(
        args.dataset, args.model, args.epochs, args.frac, args.iid,
        args.local_ep, args.local_bs, args.norm, args.ma, args.run))
logging.basicConfig(filename=filename, filemode='a', level=logging.INFO)

if args.resume == 1:
    state = load_model(filepath, args)
    global_model.load_state_dict(state['state_dict'])
    prev_epoch = state['epoch']
else:
    prev_epoch = 0

# Set the model to train and send it to device.
global_model.to(device)
global_model.train()
print(global_model)

# copy weights
global_weights = global_model.state_dict()

# Training
train_loss, train_accuracy = [], []
test_loss, test_accuracy = [], []
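# The resume branch above only needs `load_model` to return a dict with
# 'state_dict' and 'epoch' keys. A minimal sketch of such a loader (assumed
# helper; the real one may take more from `args`):
import torch

def load_model_sketch(filepath, args):
    checkpoint = torch.load(filepath, map_location='cpu')
    return {'state_dict': checkpoint['state_dict'], 'epoch': checkpoint['epoch']}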
def main():
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    args = adatok.arguments(args)
    exp_details(args)

    if args.gpu:
        torch.cuda.set_device(args.gpu)
    device = 'cuda' if args.gpu else 'cpu'

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    if adatok.data.image_initialization == True:
        adatok.data.image_initialization = False
        return

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)
    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    # print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # Training
    train_loss, train_accuracy = [], []
    val_acc_list, net_list = [], []
    cv_loss, cv_acc = [], []
    print_every = 2
    val_loss_pre, counter = 0, 0

    for epoch in tqdm(range(args.epochs)):
        local_weights, local_losses = [], []
        # print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        for idx in idxs_users:
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            w, loss = local_model.update_weights(
                model=copy.deepcopy(global_model), global_round=epoch)
            local_weights.append(copy.deepcopy(w))
            local_losses.append(copy.deepcopy(loss))

        # update global weights
        global_weights = average_weights(local_weights)
        global_model.load_state_dict(global_weights)

        loss_avg = sum(local_losses) / len(local_losses)
        train_loss.append(loss_avg)

        # Calculate avg training accuracy over all users at every epoch
        list_acc, list_loss = [], []
        global_model.eval()
        for c in range(args.num_users):
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[c], logger=logger)  # was user_groups[idx], a stale index
            acc, loss = local_model.inference(model=global_model)
            list_acc.append(acc)
            list_loss.append(loss)
        train_accuracy.append(sum(list_acc) / len(list_acc))

        # print global training loss after every 'i' rounds
        '''if (epoch+1) % print_every == 0:
            print(f' \nAvg Training Stats after {epoch+1} global rounds:')
            print(f'Training Loss : {np.mean(np.array(train_loss))}')
            print('Train Accuracy: {:.2f}% \n'.format(100*train_accuracy[-1]))'''

        # Test inference after completion of training
        for i in adatok.data.test_groups_in_binary:
            adatok.data.actual_test_group_in_binary = i
            test_acc, test_loss = test_inference(args, global_model, test_dataset)
            print("Results")
            print(epoch)
            print(adatok.data.actual_train_group_in_binary)
            print(adatok.data.actual_test_group_in_binary)
            print(test_acc)
            print(test_loss)
    '''
def poisoned_NoDefense(nb_attackers, seed=1):
    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)
    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # backdoor model
    dummy_model = copy.deepcopy(global_model)
    dummy_model.load_state_dict(torch.load('../save/all_5_model.pth'))
    dummy_norm = 0
    for x in dummy_model.state_dict().values():
        dummy_norm += x.norm(2).item() ** 2
    dummy_norm = dummy_norm ** (1. / 2)

    # testing accuracy for global model
    testing_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_del_w = []
        print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Adversary updates
        for idx in idxs_users[0:nb_attackers]:
            print("evil")
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            # del_w, _ = local_model.poisoned_SGA(model=copy.deepcopy(global_model), change=1)

            w = copy.deepcopy(dummy_model)
            # compute change in parameters and norm, boosted by m / nb_attackers (model replacement)
            zeta = 0
            for del_w, w_old in zip(w.parameters(), global_model.parameters()):
                del_w.data -= copy.deepcopy(w_old.data)
                del_w.data *= m / nb_attackers
                del_w.data += copy.deepcopy(w_old.data)
                zeta += del_w.norm(2).item() ** 2
            zeta = zeta ** (1. / 2)
            del_w = copy.deepcopy(w.state_dict())
            local_del_w.append(copy.deepcopy(del_w))

        # Non-adversarial updates
        for idx in idxs_users[nb_attackers:]:
            print("good")
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            del_w, _ = local_model.update_weights(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))

        # average local updates
        average_del_w = average_weights(local_del_w)

        # Update global model: w_{t+1} = w_{t} + average_del_w
        for param, param_del_w in zip(global_weights.values(),
                                      average_del_w.values()):
            param += param_del_w
        global_model.load_state_dict(global_weights)

        # test accuracy
        test_acc, test_loss = test_inference(args, global_model, test_dataset)
        testing_accuracy.append(test_acc)

        print("Test accuracy")
        print(testing_accuracy)

    # save test accuracy
    np.savetxt(
        '../save/RandomAttack/NoDefense_iid_{}_{}_attackers{}_seed{}.txt'.format(
            args.dataset, args.model, nb_attackers, seed), testing_accuracy)
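# The m / nb_attackers factor above follows the standard model-replacement
# heuristic: if the server averages m deltas and the honest deltas roughly
# cancel, a delta boosted by m/a survives aggregation at full strength. A tiny
# scalar sketch of that identity (illustrating the idea, not this exact code path):
m, a = 10, 1
w_t, w_backdoor = 0.0, 5.0
honest_deltas = [0.0] * (m - a)            # assume honest updates roughly cancel
evil_delta = (m / a) * (w_backdoor - w_t)  # boosted adversarial delta
w_next = w_t + (sum(honest_deltas) + a * evil_delta) / m
print(w_next)  # 5.0 == w_backdoor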