#plt.savefig('log/fed_{}_{}_{}_C{}_iid{}.png'.format(args.dataset, args.model, args.epochs, args.frac, args.iid))
#print("COUNT DATA",str(count_array))

# Dump the recorded training-loss histories for each attack configuration.
print("NO ATTACK DATA=", loss_train)
print("1 ATTACK DATA=", loss_train_1)
print("5 ATTACK DATA=", loss_train_5)
print("10 ATTACK DATA=", loss_train_10)
print("15 ATTACK DATA=", loss_train_15)
print("20 ATTACK DATA=", loss_train_20)
print("25 ATTACK DATA=", loss_train_25)
print("30 ATTACK DATA=", loss_train_30)
print(malicious_dict)
print(malicious_count)

# testing
net_glob.eval()
#print("Agent_Found_Count",agent_found_count)

# Baseline (no attack) evaluation: test_img here returns overall accuracy/loss,
# the ten per-class accuracies, and target/correct frequency tallies.
(acc_train, loss_train,
 acc_train0, acc_train1, acc_train2, acc_train3, acc_train4,
 acc_train5, acc_train6, acc_train7, acc_train8, acc_train9,
 target_train_freq, correct_train_freq) = test_img(net_glob, dataset_train, args)
(acc_test, loss_test,
 acc_test0, acc_test1, acc_test2, acc_test3, acc_test4,
 acc_test5, acc_test6, acc_test7, acc_test8, acc_test9,
 target_test_freq, correct_test_freq) = test_img(net_glob, dataset_test, args)
print("Training accuracy (NO ATTACK): {:.2f}".format(acc_train))
print("Testing accuracy (NO ATTACK): {:.2f}".format(acc_test))
print("Training accuracy-airplane (NO ATTACK): {:.2f}".format(acc_train0))
print("Testing accuracy-airplane (NO ATTACK): {:.2f}".format(acc_test0))
print("\n")

# Evaluation of the model trained with one label-flipping attacker.
net_glob1.eval()
(acc_train_1, loss_train_1,
 acc_train0_1, acc_train1_1, acc_train2_1, acc_train3_1, acc_train4_1,
 acc_train5_1, acc_train6_1, acc_train7_1, acc_train8_1, acc_train9_1,
 target_train_freq_1, correct_train_freq_1) = test_img(net_glob1, dataset_train, args)
(acc_test_1, loss_test_1,
 acc_test0_1, acc_test1_1, acc_test2_1, acc_test3_1, acc_test4_1,
 acc_test5_1, acc_test6_1, acc_test7_1, acc_test8_1, acc_test9_1,
 target_test_freq_1, correct_test_freq_1) = test_img(net_glob1, dataset_test, args)
print("Training accuracy (LABEL FLIPPED 1): {:.2f}".format(acc_train_1))
print("Testing accuracy (LABEL FLIPPED 1): {:.2f}".format(acc_test_1))
print("Training accuracy-airplane (LABEL FLIPPED 1): {:.2f}".format(acc_train0_1))
# Dump the recorded training-loss histories for each attack configuration.
print("NO ATTACK DATA=", loss_train)
print("1 ATTACK DATA=", loss_train_1)
print("5 ATTACK DATA=", loss_train_5)
print("7 ATTACK DATA=", loss_train_7)
print("10 ATTACK DATA=", loss_train_10)

# testing
net_glob.eval()
#print("Agent_Found_Count",agent_found_count)

# Baseline (no attack) model; this test_img variant returns (accuracy, loss) only.
acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
print("Training accuracy (NO ATTACK): {:.2f}".format(acc_train))
print("Testing accuracy (NO ATTACK): {:.2f}".format(acc_test))

# Model trained under a constant attack by 1 client.
net_glob1.eval()
acc_train1, loss_train_1 = test_img(net_glob1, dataset_train, args)
acc_test1, loss_test_1 = test_img(net_glob1, dataset_test, args)
print("Training accuracy (CONSTANT ATTACK 1): {:.2f}".format(acc_train1))
print("Testing accuracy (CONSTANT ATTACK 1): {:.2f}".format(acc_test1))

# Model trained under a constant attack by 5 clients.
net_glob5.eval()
acc_train5, loss_train_5 = test_img(net_glob5, dataset_train, args)
acc_test5, loss_test_5 = test_img(net_glob5, dataset_test, args)
print("Training accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_train5))
print("Testing accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_test5))

# Model trained under a constant attack by 7 clients.
net_glob7.eval()
acc_train7, loss_train_7 = test_img(net_glob7, dataset_train, args)
# Dump the recorded training-loss histories for each attack configuration.
print("5 ATTACK DATA=", loss_train_5)
print("10 ATTACK DATA=", loss_train_10)
print("15 ATTACK DATA=", loss_train_15)
print("20 ATTACK DATA=", loss_train_20)
print("25 ATTACK DATA=", loss_train_25)
print("30 ATTACK DATA=", loss_train_30)

# testing
net_glob.eval()
#print("Agent_Found_Count",agent_found_count)

# Baseline (no attack) model; this test_img variant returns (accuracy, loss) only.
acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
print("Training accuracy (NO ATTACK): {:.2f}".format(acc_train))
print("Testing accuracy (NO ATTACK): {:.2f}".format(acc_test))

# Model trained under a constant attack by 1 client.
net_glob1.eval()
acc_train1, loss_train_1 = test_img(net_glob1, dataset_train, args)
acc_test1, loss_test_1 = test_img(net_glob1, dataset_test, args)
print("Training accuracy (CONSTANT ATTACK 1): {:.2f}".format(acc_train1))
print("Testing accuracy (CONSTANT ATTACK 1): {:.2f}".format(acc_test1))

# Model trained under a constant attack by 5 clients.
net_glob5.eval()
acc_train5, loss_train_5 = test_img(net_glob5, dataset_train, args)
acc_test5, loss_test_5 = test_img(net_glob5, dataset_test, args)
print("Training accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_train5))
print("Testing accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_test5))

# Model trained under a constant attack by 10 clients.
net_glob10.eval()
acc_train10, loss_train_10 = test_img(net_glob10, dataset_train, args)
acc_test10, loss_test_10 = test_img(net_glob10, dataset_test, args)
print("Training accuracy (CONSTANT ATTACK 10): {:.2f}".format(acc_train10))
class Server():
    """Federated-learning server.

    Collects per-round client updates (`clients_update_w`) and losses
    (`clients_loss`), aggregates them with FedAvg, and evaluates the
    resulting global model.
    """

    def __init__(self, args, w):
        self.args = args
        self.clients_update_w = []   # per-round client weight deltas (dicts keyed like state_dict)
        self.clients_loss = []       # per-round client training losses
        self.model = CNNMnist(args=args).to(args.device)
        self.model.load_state_dict(w)

    def FedAvg(self):
        """Average the collected client updates and apply them to the global model.

        Returns:
            ('plain'/'DP' mode)  (deepcopy of updated model state_dict, mean client loss)
            ('Paillier' mode)    (dict of averaged encrypted updates, mean client loss)

        Exits the process for any other mode, as in the original design.
        """
        num_clients = len(self.clients_update_w)
        avg_loss = sum(self.clients_loss) / len(self.clients_loss)

        # 1. plain / DP: DP noise (if any) is added on the client side,
        # so the server-side aggregation is identical to plain FedAvg.
        if self.args.mode == 'plain' or self.args.mode == 'DP':
            update_w_avg = copy.deepcopy(self.clients_update_w[0])
            # Read the current weights once, add the averaged update, and
            # write back through load_state_dict. (The original code did
            # `self.model.state_dict()[k] += ...`, which mutates a transient
            # dict and only works because PyTorch's detached state-dict
            # tensors happen to share storage with the parameters.)
            new_state = self.model.state_dict()
            for k in update_w_avg.keys():
                for i in range(1, num_clients):
                    update_w_avg[k] += self.clients_update_w[i][k]
                update_w_avg[k] = torch.div(update_w_avg[k], num_clients)
                new_state[k] = new_state[k] + update_w_avg[k]
            self.model.load_state_dict(new_state)
            return copy.deepcopy(self.model.state_dict()), avg_loss

        # 2. Paillier: updates are lists of encrypted numbers, so the sum
        # and division must be done element by element.
        elif self.args.mode == 'Paillier':
            update_w_avg = copy.deepcopy(self.clients_update_w[0])
            for k in update_w_avg.keys():
                for i in range(1, num_clients):
                    for j in range(len(update_w_avg[k])):
                        update_w_avg[k][j] += self.clients_update_w[i][k][j]
                for j in range(len(update_w_avg[k])):
                    update_w_avg[k][j] /= num_clients
            return update_w_avg, avg_loss

        else:
            # Unsupported aggregation mode: abort, matching original behavior.
            exit()

    def test(self, datatest):
        """Evaluate the global model on `datatest`.

        Returns:
            (accuracy in percent, mean cross-entropy loss per sample)
        """
        self.model.eval()
        test_loss = 0
        correct = 0
        data_loader = DataLoader(datatest, batch_size=self.args.bs)
        for idx, (data, target) in enumerate(data_loader):
            if self.args.gpu != -1:
                data, target = data.cuda(), target.cuda()
            log_probs = self.model(data)
            # sum up batch loss (normalized over the whole dataset below)
            test_loss += F.cross_entropy(log_probs, target,
                                         reduction='sum').item()
            # get the index of the max log-probability
            y_pred = log_probs.data.max(1, keepdim=True)[1]
            correct += y_pred.eq(
                target.data.view_as(y_pred)).long().cpu().sum()
        test_loss /= len(data_loader.dataset)
        accuracy = 100.00 * correct / len(data_loader.dataset)
        return accuracy, test_loss