Example #1
    def __init__(self, host, port):
        if model == 'cnn':
            if dataset == 'mnist' or dataset == 'fmnist':
                self.global_model = CNNMnist()
            elif dataset == 'cifar':
                self.global_model = CNNCifar()
        elif model == 'mlp':
            self.global_model = MLP(dim_in=img_size, dim_hidden=64, dim_out=10)
        elif model == 'densenet':
            self.global_model = torch.hub.load('pytorch/vision:v0.5.0',
                                               'densenet121',
                                               pretrained=False)
        elif model == 'vgg19':
            self.global_model = torch.hub.load('pytorch/vision:v0.5.0',
                                               'vgg19',
                                               pretrained=False)
        else:
            exit('Error: unrecognized model')

        self.ready_client_sids = set()

        self.app = Flask(__name__)
        self.socketio = SocketIO(self.app)
        self.host = host
        self.port = port

        self.model_id = str(uuid.uuid4())

        self.current_round = -1  # -1 for not yet started
        self.current_round_client_updates = []
        self.eval_client_updates = []

        self.register_handles()
Example #2
def GetModel(op, client_id, global_round):
    # initial
    global stop_round
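    # op == 0: build the requested architecture, pickle it, base64-encode it
    # and return it to the client. Otherwise client 1 waits until every client
    # has uploaded a local model for this round, averages the weights, saves
    # the new global model, and all other clients then download that file.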
    if op == 0:
        if model == 'cnn':
            if dataset == 'mnist' or dataset == 'fmnist':
                global_model = CNNMnist()
            elif dataset == 'cifar':
                global_model = CNNCifar()
        elif model == 'mlp':
            global_model = MLP(dim_in=img_size, dim_hidden=64, dim_out=10)
        elif model == 'densenet':
            global_model = torch.hub.load('pytorch/vision:v0.5.0',
                                          'densenet121',
                                          pretrained=False)
        elif model == 'vgg19':
            global_model = torch.hub.load('pytorch/vision:v0.5.0',
                                          'vgg19',
                                          pretrained=False)
        else:
            exit('Error: unrecognized model')
        encoding_string = base64.b64encode(pickle.dumps(global_model)).decode()
        clients_arr.remove(client_id)
        return encoding_string
    else:
        f_path_global = 'Models/global_model' + str(global_round) + '.pkl'
        if client_id == 1:
            while 1:
                filenums, files = GetFileNum(global_round)
                if filenums == client_nums:
                    weights = []
                    for f in files:
                        f_path = "Models/LocalModels/" + str(
                            global_round) + '/' + f
                        weights.append(torch.load(f_path))
                    new_global_model, cvg_flag = Average_weight(weights)
                    if cvg_flag and stop_round is None:
                        stop_round = global_round
                    if global_round == total_global_round:
                        print(global_loss_per_epoch, stop_round)
                    print(f_path_global)
                    save(new_global_model, f_path_global)
                    #torch.save(new_global_model, f_path_global)
                    with open(f_path_global, 'rb') as file:
                        encoding_string = base64.b64encode(file.read())
                        clients_arr.remove(client_id)
                        return encoding_string
                time.sleep(2)
        else:
            while 1:
                if os.path.isfile(f_path_global):
                    with open(f_path_global, 'rb') as file:
                        encoding_string = base64.b64encode(file.read())
                        clients_arr.remove(client_id)
                        return encoding_string
                time.sleep(2)
Example #3
def main(chafen_model, V1, V2):
    args = args_parser()
    device = "cuda"
    c = 0
    r = 0.5
    chafen = CNNCifar(args=args)
    V1.to(device)
    V2.to(device)
    chafen.to(device)

    V1_weight = V1.state_dict()
    V2_weight = V2.state_dict()
    chafen_model_weight = chafen_model.state_dict()
    chafen_weight = chafen.state_dict()

    chafen_weight[
        'conv1.weight'] = V2_weight['conv1.weight'] - V1_weight['conv1.weight']
    chafen_weight[
        'conv1.bias'] = V2_weight['conv1.bias'] - V1_weight['conv1.bias']
    chafen_weight[
        'conv2.weight'] = V2_weight['conv2.weight'] - V1_weight['conv2.weight']
    chafen_weight[
        'conv2.bias'] = V2_weight['conv2.bias'] - V1_weight['conv2.bias']

    chafen_weight[
        'fc1.weight'] = V2_weight['fc1.weight'] - V1_weight['fc1.weight']
    chafen_weight['fc1.bias'] = V2_weight['fc1.bias'] - V1_weight['fc1.bias']
    chafen_weight[
        'fc2.weight'] = V2_weight['fc2.weight'] - V1_weight['fc2.weight']
    chafen_weight['fc2.bias'] = V2_weight['fc2.bias'] - V1_weight['fc2.bias']
    chafen_weight[
        'fc3.weight'] = V2_weight['fc3.weight'] - V1_weight['fc3.weight']
    chafen_weight['fc3.bias'] = V2_weight['fc3.bias'] - V1_weight['fc3.bias']

    chafen.load_state_dict(chafen_weight)
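    # Sign-agreement check: for every parameter, compare the sign of the new
    # delta (V2 - V1) with the sign of the previous delta in chafen_model; e is
    # the fraction of entries whose signs agree, and when e < r the update is
    # rolled back by resetting V2 to V1.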

    sign_sum = 0
    sign_size = 0

    for k in chafen_weight.keys():
        cur_sign = torch.sign(chafen_weight[k])
        old_sign = torch.sign(chafen_model_weight[k])
        sign = cur_sign * old_sign
        sign[sign < 0] = 0
        sign_sum += torch.sum(sign)
        sign_size += sign.numel()

    e = sign_sum / (sign_size + 0.000001)
    print(e)

    if e < r:
        V2.load_state_dict(V1_weight)
        c += 1
    return V2.state_dict(), c
Example #4
def main(V1,V2,testloader):
    args = args_parser()
    device="cuda"
    r=0.27

    chafen=CNNCifar(args=args)
    V1.to(device)
    V2.to(device)
    chafen.to(device)

    V1_weight=V1.state_dict()
    V2_weight=V2.state_dict()

    chafen_weight=chafen.state_dict()

    i=0
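    # For each layer: take the delta V2 - V1, flatten it, sparsify it with
    # my_mask(a, r) (assumed to zero out entries according to the ratio r),
    # and keep the masked delta; my_sum1 below counts the zeroed entries.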
  
    for k in chafen_weight.keys():
        chafen_weight[k]=V2_weight[k]-V1_weight[k]
        a=chafen_weight[k].cpu().numpy()
        shape=a.shape
        a=a.flatten()
        a=my_mask(a,r)
        a=a.reshape(shape)
        chafen_weight[k]=torch.from_numpy(a)
        chafen_weight[k].to(device)
        i+=1
    chafen.load_state_dict(chafen_weight)

    my_sum1=0
    for k in chafen_weight.keys():
        a=chafen_weight[k].cpu().numpy()
        a=a.flatten()
        b=a.tolist()
        my_sum1+=b.count(0.0)
    print(my_sum1)


    

    for k in chafen_weight.keys():
        V2_weight[k]=chafen_weight[k].to(device)+V1_weight[k].to(device)
    V2.load_state_dict(V2_weight)


    return V2.state_dict(),my_sum1
def _test():
    from matplotlib import pyplot as plt

    I = 10
    N = 100
    train_batch_size = 2

    tb_fdir = osp.join('runs', 'tensorboard', 'isolation')
    other_fdir = osp.join('runs', 'my-logger')
    checkpoint_fdir = osp.join('runs', 'checkpoint')

    cdir = 'train'
    tb_dir = osp.join(tb_fdir, cdir)
    other_dir = osp.join(other_fdir, cdir)
    checkpoint_dir = osp.join(checkpoint_fdir, cdir)

    model = CNNCifar()

    simulator = Simulator(checkpoint_dir=checkpoint_dir,
                          I=I,
                          N=N,
                          train_batch_size=train_batch_size)

    logger = Logger(tb_dir=tb_dir, other_dir=other_dir)

    # count_train, count_test = 0, 0
    # for worker_ind, worker in enumerate(simulator.workers):
    #     count_train += simulator.workers[worker_ind].num_train
    #     plt.scatter(simulator.workers[worker_ind].num_train, 0)
    # plt.show()
    # print(count_train, count_test)

    simulator.train_(criterion=torch.nn.CrossEntropyLoss(),
                     model=model,
                     logger=logger,
                     learning_rate=1e-1,
                     K=10,
                     num_its=8000,
                     lr_decay=0.9,
                     decay_step_size=1000,
                     print_every=10,
                     checkpoint_interval=5)
def build_model(args, train_dataset):
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        model = MLP(dim_in=len_in,
                    dim_hidden=args.mlpdim,
                    dim_out=args.num_classes)
    else:
        exit('Error- unrecognized model: ' + args.model)

    return model
Example #7
def federated_main(args):
    # prepare logger
    output_dir = "../logs"
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    session_time = strftime("%y%m%d-%H%M%S", localtime())
    output_file = os.path.join(output_dir,
                               "{}-{}.log".format(session_time, args.dataset))
    log_file = logging.FileHandler(output_file)
    log_file.setFormatter(logging.Formatter('%(asctime)s: %(message)s'))
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    logger.addHandler(log_file)

    # prepare client data output directory, related output is at bottom
    if not os.path.exists("../clients_data"):
        os.makedirs("../clients_data")

    # prepare devices
    if args.gpu:
        torch.cuda.set_device("cuda:{}".format(args.gpu))
    device = "cuda" if args.gpu else "cpu"

    # prepare dataset
    if args.dataset == "cifar":
        dataset = CreateCIFAR10(args.num_users)
    elif args.dataset == "mnist":
        dataset = CreateMNIST(args.num_users)
    elif args.dataset == "fmnist":
        dataset = CreateMNIST(args.num_users, "../data/fmnist")
    else:
        raise ValueError("The dataset you input is not supported yet")

    if args.iid:
        user_groups = dataset.create_iid()
    else:
        if args.unequal:
            if args.dataset == "cifar":
                raise NotImplementedError(
                    "Cifar 10 unequal dataset is not implemented")
            else:
                user_groups = dataset.crate_noniid_unequal()
        else:
            user_groups = dataset.create_noniid()

    # initialize the model
    if args.model == "cnn":
        if args.dataset == "mnist":
            global_model = CNNMnist(args=args)
        elif args.dataset == "fmnist":
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == "cifar":
            global_model = CNNCifar(args=args)
        else:
            raise ValueError("The dataset you input is not supported yet")
    elif args.model == "mlp":
        img_size = dataset.train_dataset.data[0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in,
                           dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        raise ValueError("The model you input is not supported yet")

    global_model.to(device)
    global_model.train()

    # prepare the parameters of global model
    global_parameters = global_model.state_dict()

    # Prepare the client container
    local_outputs = [
        ClientOutput(client_idx) for client_idx in range(args.num_users)
    ]

    # malicious behavior: some nodes may perform a model poisoning attack by changing data labels
    if args.malicious:
        malicious_changer = MaliciousChange(args, dataset.train_dataset)
        # the length of source_list and dest_list can influence the final accuracy
        malicious_changer.data_poision(user_groups, [1, 5, 3], [3, 1, 5])
        malicious_client_idxs = malicious_changer.malicious_clients
        for idx in malicious_client_idxs:
            local_outputs[idx].update(malicious=True)

    # Prepare global early stopping
    if args.global_early_stop:
        global_early_stopping = EarlyStopping(
            patience=args.global_early_rounds)

    # Prepare the aggregation method
    if args.aggre == "simple":
        aggre_method = SimpleAggregation(int(args.rate * args.num_users),
                                         int((args.num_users - 1) / 3), True,
                                         args.no_consensus)
    elif args.aggre == "time":
        aggre_method = ThresholdAggregation(int(args.rate * args.num_users),
                                            int((args.num_users - 1) / 3),
                                            True, args.no_consensus)
    elif args.aggre == "top":
        aggre_method = TopAggregation(int(args.rate * args.num_users),
                                      int((args.num_users - 1) / 3), False,
                                      args.no_consensus)
    else:
        raise ValueError(
            "The aggregation method you indicate has not been implemented")

    # prepare other things
    old_global_metric = 0.
    server_total_profit = 0.
    epoch_outputs = []
    if args.malicious:
        client_chooser = ChooseClient(args, malicious_client_idxs)
    else:
        client_chooser = ChooseClient(args)

    # Training
    for epoch in tqdm.tqdm(range(args.epochs)):
        # Prepare some things
        logger.info("Epoch {} Started".format(epoch))

        # Choose clients participates in this epoch
        if args.aggre == "simple":
            # The number of received models will influence the performance
            # In order to perform a fair comparison, FedAvg has no random screening
            k = max(int(args.rate * args.num_users), 1)
        else:
            k = max(int(args.frac * args.num_users), 1)
        chosen_clients = client_chooser.part_pick(list(range(args.num_users)),
                                                  k)

        # Client part
        for client_idx in chosen_clients:
            # Client model training
            client_updater = LocalUpdate(args, dataset.train_dataset,
                                         user_groups[client_idx], logger)
            client_model, client_loss = client_updater(
                copy.deepcopy(global_model))

            # Client model evaluation
            evaluator = Evaluator(args, logger)
            client_global_metric, client_global_loss = evaluator(
                client_model, dataset.test_dataset)

            # Update ClientOutput
            local_outputs[client_idx].update(
                parameter=copy.deepcopy(client_model.state_dict()),
                number=len(user_groups[client_idx]),
                epochs=epoch,
                accuracy=client_global_metric,
                local_loss=client_loss,
                global_loss=client_global_loss)
            epoch_outputs.append(local_outputs[client_idx])

        # Update global parameters
        epoch_outputs, global_parameters = aggre_method(epoch_outputs)
        global_model.load_state_dict(global_parameters)

        # Evaluate global model on global dataset
        global_evaluator = Evaluator(args, logger)
        global_metric, global_loss = global_evaluator(global_model,
                                                      dataset.test_dataset)

        # Calculate the profit
        epoch_outputs, publisher_profit, pay_count = epoch_incentive(
            args, epoch_outputs, global_metric, old_global_metric)
        server_total_profit += publisher_profit

        # Epoch ends
        logger.info("Epoch {} End".format(epoch))

        # Print epoch information
        logger.info("[client]Clients Information:")
        for single_client in epoch_outputs:
            client_dict = single_client.output()
            client_idx = client_dict["idx"]
            for client_key in client_dict:
                if client_key != "idx":
                    logger.info("[client-{}]{}: {}".format(
                        client_idx, client_key, client_dict[client_key]))
        logger.info("[server]Server information:")
        logger.info("[server]loss: {}".format(global_loss))
        logger.info("[server]accuracy: {}".format(global_metric))
        logger.info("[server]epoch_profit: {}".format(
            publisher_profit / pay_count if pay_count != 0 else 0))
        logger.info("[server]total_profit: {}".format(server_total_profit))

        # Global early stop mechanism
        if args.global_early_stop and epoch > args.no_stoping_epochs:
            global_early_stopping(global_loss, global_model)
            if global_early_stopping.early_stop:
                break

        # Reset some values
        # Only the best performance is meaningful to task participant
        old_global_metric = max(global_metric, old_global_metric)
        epoch_outputs = []

    pickle.dump(
        local_outputs,
        open("../clients_data/{}-{}.pickle".format(session_time, args.dataset),
             "wb"))
    logger.info("Task finished!")
Example #8
if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    start_time = time.time()

    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    device = 'cuda'

    train_dataset, test_dataset, user_groups = get_dataset(args)

    global_model = CNNCifar(args=args)
    V2 = CNNCifar(args=args)
    V2.to(device)

    global_model.to(device)
    global_model.train()
    print(global_model)

    global_weights = global_model.state_dict()

    train_loss, test_accuracy = [], []
    val_acc_list, net_list = [], []
    cv_loss, cv_acc = [], []
    print_every = 2
    val_loss_pre, counter = 0, 0
    x = 0
Example #9
def main_test(args):
    start_time = time.time()
    now = datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S')
    # define paths

    logger = SummaryWriter('../logs')

    # comment this out when using easydict
    # args = args_parser()

    # checkpoint save location
    args.save_path = os.path.join(args.save_path, args.exp_folder)
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    save_path_tmp = os.path.join(args.save_path, 'tmp_{}'.format(now))
    if not os.path.exists(save_path_tmp):
        os.makedirs(save_path_tmp)
    SAVE_PATH = os.path.join(args.save_path, '{}_{}_T[{}]_C[{}]_iid[{}]_E[{}]_B[{}]'.
                             format(args.dataset, args.model, args.epochs, args.frac, args.iid,
                                    args.local_ep, args.local_bs))

    # fix the random seeds
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)



#    torch.cuda.set_device(0)
    device = torch.device("cuda:{}".format(args.gpu) if torch.cuda.is_available() else "cpu")
    cpu_device = torch.device('cpu')
    # create the log file
    log_path = os.path.join('../logs', args.exp_folder)
    if not os.path.exists(log_path):
        os.makedirs(log_path)

    loggertxt = get_logger(
        os.path.join(log_path, '{}_{}_{}_{}.log'.format(args.model, args.optimizer, args.norm, now)))
    logging.info(args)
    # csv
    csv_save = '../csv/' + now
    csv_path = os.path.join(csv_save, 'accuracy.csv')
    csv_logger_keys = ['train_loss', 'accuracy']
    csvlogger = CSVLogger(csv_path, csv_logger_keys)

    # load dataset and user groups
    train_dataset, test_dataset, client_loader_dict = get_dataset(args)

    # set automatically for cifar-100
    if args.dataset == 'cifar100':
        args.num_classes = 100
    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)
        elif args.dataset == 'cifar100':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    elif args.model == 'cnn_vc':
        global_model = CNNCifar_fedVC(args=args)
    elif args.model == 'cnn_vcbn':
        global_model = CNNCifar_VCBN(args=args)
    elif args.model == 'cnn_vcgn':
        global_model = CNNCifar_VCGN(args=args)
    elif args.model == 'resnet18_ws':
        global_model = resnet18(num_classes=args.num_classes, weight_stand=1)
    elif args.model == 'resnet18':
        global_model = resnet18(num_classes=args.num_classes, weight_stand=0)
    elif args.model == 'resnet32':
        global_model = ResNet32_test(num_classes=args.num_classes)
    elif args.model == 'resnet18_mabn':
        global_model = resnet18_mabn(num_classes=args.num_classes)
    elif args.model == 'vgg':
        global_model = vgg11()
    elif args.model == 'cnn_ws':
        global_model = CNNCifar_WS(args=args)


    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    loggertxt.info(global_model)
    # to keep gn out of communication, as in FedBN
    client_models = [copy.deepcopy(global_model) for idx in range(args.num_users)]

    # copy weights
    global_weights = global_model.state_dict()

    global_model.to(device)
    global_model.train()

    # Training
    train_loss, train_accuracy = [], []
    val_acc_list, net_list = [], []


    # for checking "How Does BN Help"
    client_loss = [[] for i in range(args.num_users)]
    client_conv_grad = [[] for i in range(args.num_users)]
    client_fc_grad = [[] for i in range(args.num_users)]
    client_total_grad_norm = [[] for i in range(args.num_users)]
    # for tracking the overall loss - "How Does BN Help"

    # resume
    if args.resume:
        checkpoint = torch.load(SAVE_PATH)
        global_model.load_state_dict(checkpoint['global_model'])
        if args.hold_normalize:
            for client_idx in range(args.num_users):
                client_models[client_idx].load_state_dict(checkpoint['model_{}'.format(client_idx)])
        else:
            for client_idx in range(args.num_users):
                client_models[client_idx].load_state_dict(checkpoint['global_model'])
        resume_iter = int(checkpoint['a_iter']) + 1
        print('Resume training from epoch {}'.format(resume_iter))
    else:
        resume_iter = 0


    # learning rate scheduler
    #scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, gamma=0.1,step_size=500)

    # start training
    for epoch in tqdm(range(args.epochs)):
        local_weights, local_losses = [], []
        if args.verbose:
            print(f'\n | Global Training Round : {epoch + 1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)


        for idx in idxs_users:
            """
            for key in global_model.state_dict().keys():
                if args.hold_normalize:
                    if 'bn' not in key:
                        client_models[idx].state_dict()[key].data.copy_(global_model.state_dict()[key])
                else:
                    client_models[idx].state_dict()[key].data.copy_(global_model.state_dict()[key])
            """
            torch.cuda.empty_cache()


            local_model = LocalUpdate(args=args, logger=logger, train_loader=client_loader_dict[idx], device=device)
            w, loss, batch_loss, conv_grad, fc_grad, total_gard_norm = local_model.update_weights(
                model=copy.deepcopy(global_model), global_round=epoch, idx_user=idx)
            local_weights.append(copy.deepcopy(w))
            # average loss of this client over one local epoch, e.g. 0.35 (i.e. the mean of the batch losses)
            local_losses.append(copy.deepcopy(loss))

            # scheduler over the whole round
          #  scheduler.step()
            # for the loss graph -> track each client's loss trajectory -> stored separately per client.
            client_loss[idx].append(batch_loss)
            client_conv_grad[idx].append(conv_grad)
            client_fc_grad[idx].append(fc_grad)
            client_total_grad_norm[idx].append(total_gard_norm)

            # print(total_gard_norm)
            # copy gn, bn
            # client_models[idx].load_state_dict(w)
            del local_model
            del w
        # update global weights
        global_weights = average_weights(local_weights, client_loader_dict, idxs_users)
        # update global weights
#        opt = OptRepo.name2cls('adam')(global_model.parameters(), lr=0.01, betas=(0.9, 0.99), eps=1e-3)
        opt = OptRepo.name2cls('sgd')(global_model.parameters(), lr=10, momentum=0.9)
        opt.zero_grad()
        opt_state = opt.state_dict()
        global_weights = aggregation(global_weights, global_model)
        global_model.load_state_dict(global_weights)
        opt = OptRepo.name2cls('sgd')(global_model.parameters(), lr=10, momentum=0.9)
#        opt = OptRepo.name2cls('adam')(global_model.parameters(), lr=0.01, betas=(0.9, 0.99), eps=1e-3)
        opt.load_state_dict(opt_state)
        opt.step()
        loss_avg = sum(local_losses) / len(local_losses)
        train_loss.append(loss_avg)

        global_model.eval()
        #        for c in range(args.num_users):
        #            local_model = LocalUpdate(args=args, dataset=train_dataset,
        #                                      idxs=user_groups[idx], logger=logger)
        #            acc, loss = local_model.inference(model=global_model)
        #            list_acc.append(acc)
        #            list_loss.append(loss)
        #        train_accuracy.append(sum(list_acc)/len(list_acc))
        train_accuracy = test_inference(args, global_model, test_dataset, device=device)
        val_acc_list.append(train_accuracy)
        # print global training loss after every 'i' rounds
        # if (epoch+1) % print_every == 0:
        loggertxt.info(f' \nAvg Training Stats after {epoch + 1} global rounds:')
        loggertxt.info(f'Training Loss : {loss_avg}')
        loggertxt.info('Train Accuracy: {:.2f}% \n'.format(100 * train_accuracy))
        csvlogger.write_row([loss_avg, 100 * train_accuracy])
        if (epoch + 1) % 100 == 0:
            tmp_save_path = os.path.join(save_path_tmp, 'tmp_{}.pt'.format(epoch+1))
            torch.save(global_model.state_dict(),tmp_save_path)
    # Test inference after completion of training
    test_acc = test_inference(args, global_model, test_dataset, device=device)

    print(' Saving checkpoints to {}...'.format(SAVE_PATH))
    if args.hold_normalize:
        client_dict = {}
        for idx, model in enumerate(client_models):
            client_dict['model_{}'.format(idx)] = model.state_dict()
        torch.save(client_dict, SAVE_PATH)
    else:
        torch.save({'global_model': global_model.state_dict()}, SAVE_PATH)

    loggertxt.info(f' \n Results after {args.epochs} global rounds of training:')
    # loggertxt.info("|---- Avg Train Accuracy: {:.2f}%".format(100*train_accuracy[-1]))
    loggertxt.info("|---- Test Accuracy: {:.2f}%".format(100 * test_acc))


    # does not work well when frac is not 1.
    # batch_loss_list = np.array(client_loss).sum(axis=0) / args.num_users

    # conv_grad_list = np.array(client_conv_grad).sum(axis=0) / args.num_users
    # fc_grad_list = np.array(client_fc_grad).sum(axis=0) / args.num_users
    # total_grad_list = np.array(client_total_grad_norm).sum(axis=0) /args.num_users
    # wanted the average over clients, but currently only client 0 is inspected
    # a bug is expected when clients have different numbers of batches
    return train_loss, val_acc_list, client_loss[0], client_conv_grad[0], client_fc_grad[0], client_total_grad_norm[0]
def poisoned_NoDefense(nb_attackers, seed=1):

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)


    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # backdoor model
    dummy_model = copy.deepcopy(global_model)
    dummy_model.load_state_dict(torch.load('../save/all_5_model.pth'))
    dummy_norm = 0
    for x in dummy_model.state_dict().values():
        dummy_norm += x.norm(2).item() ** 2
    dummy_norm = dummy_norm ** (1. / 2)

    # testing accuracy for global model
    testing_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_del_w = []
        print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Adversary updates
        for idx in idxs_users[0:nb_attackers]:
            print("evil")
            local_model = LocalUpdate(args=args, dataset=train_dataset, idxs=user_groups[idx], logger=logger)
            #del_w, _ = local_model.poisoned_SGA(model=copy.deepcopy(global_model), change=1)

            w = copy.deepcopy(dummy_model)
            # compute change in parameters and norm
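            # model-replacement style scaling: the difference between the
            # backdoor model and the current global model is amplified by
            # m / nb_attackers so the poisoned update survives averaging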
            zeta = 0
            for del_w, w_old in zip(w.parameters(), global_model.parameters()):
                del_w.data -= copy.deepcopy(w_old.data)
                del_w.data *= m / nb_attackers
                del_w.data += copy.deepcopy(w_old.data)
                zeta += del_w.norm(2).item() ** 2
            zeta = zeta ** (1. / 2)
            del_w = copy.deepcopy(w.state_dict())
            local_del_w.append(copy.deepcopy(del_w))


        # Non-adversarial updates
        for idx in idxs_users[nb_attackers:]:
            print("good")
            local_model = LocalUpdate(args=args, dataset=train_dataset, idxs=user_groups[idx], logger=logger)
            del_w, _ = local_model.update_weights(model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))

        # average local updates
        average_del_w = average_weights(local_del_w)

        # Update global model: w_{t+1} = w_{t} + average_del_w
        for param, param_del_w in zip(global_weights.values(), average_del_w.values()):
            param += param_del_w
        global_model.load_state_dict(global_weights)

        # test accuracy
        test_acc, test_loss = test_inference(args, global_model, test_dataset)
        testing_accuracy.append(test_acc)

        print("Test accuracy")
        print(testing_accuracy)

    # save test accuracy
    np.savetxt('../save/RandomAttack/NoDefense_iid_{}_{}_attackers{}_seed{}.txt'.
                 format(args.dataset, args.model, nb_attackers, seed), testing_accuracy)
Example #11
def poisoned_pixel_CDP(norm_bound, noise_scale, nb_attackers, seed=1):
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in,
                           dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # load poisoned model
    backdoor_model = copy.deepcopy(global_model)
    backdoor_model.load_state_dict(torch.load('../save/poison_model.pth'))

    # testing accuracy for global model
    testing_accuracy = [0.1]
    backdoor_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_del_w, local_norms = [], []
        print(f'\n | Global Training Round : {epoch + 1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Adversary updates
        print("Evil")
        for idx in idxs_users[0:nb_attackers]:

            # backdoor model
            w = copy.deepcopy(backdoor_model)

            # compute change in parameters and norm
            zeta = 0
            for del_w, w_old in zip(w.parameters(), global_model.parameters()):
                del_w.data = del_w.data - copy.deepcopy(w_old.data)
                zeta += del_w.norm(2).item()**2
            zeta = zeta**(1. / 2)
            del_w = w.state_dict()

            print("EVIL")
            print(zeta)

            # add to global round
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))

        # Non-adversarial updates
        for idx in idxs_users[nb_attackers:]:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            del_w, zeta = local_model.update_weights(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))
            print("good")
            #print(zeta)

        # norm bound (e.g. median of norms)
        clip_factor = norm_bound  #min(norm_bound, np.median(local_norms))
        print(clip_factor)

        # clip updates
        for i in range(len(idxs_users)):
            for param in local_del_w[i].values():
                print(max(1, local_norms[i] / clip_factor))
                param /= max(1, local_norms[i] / clip_factor)

        # average local model updates
        average_del_w = average_weights(local_del_w)

        # Update model and add noise
        # w_{t+1} = w_{t} + avg(del_w1 + del_w2 + ... + del_wc) + Noise
        for param, param_del_w in zip(global_weights.values(),
                                      average_del_w.values()):
            param += param_del_w
            param += torch.randn(
                param.size()) * noise_scale * norm_bound / len(idxs_users)
        global_model.load_state_dict(global_weights)

        # test accuracy
        test_acc, test_loss, backdoor = test_backdoor_pixel(
            args, global_model, test_dataset)
        testing_accuracy.append(test_acc)
        backdoor_accuracy.append(backdoor)

        print("Testing & Backdoor accuracies")
        print(testing_accuracy)
        print(backdoor_accuracy)

    # save test accuracy
    np.savetxt(
        '../save/PixelAttack/TestAcc/iid_GDP_{}_{}_clip{}_scale{}_attackers{}_seed{}.txt'
        .format(args.dataset, args.model, norm_bound, noise_scale,
                nb_attackers, seed), testing_accuracy)

    np.savetxt(
        '../save/PixelAttack/BackdoorAcc/iid_GDP_{}_{}_clip{}_scale{}_attackers{}_seed{}.txt'
        .format(args.dataset, args.model, norm_bound, noise_scale,
                nb_attackers, seed), backdoor_accuracy)
Example #12
def poisoned_1to7_NoDefense(seed=1):
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in,
                           dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # testing accuracy for global model
    testing_accuracy = [0.1]
    backdoor_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_del_w, local_norms = [], []
        print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Adversary updates
        print("Evil norms:")
        for idx in idxs_users[0:args.nb_attackers]:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)

            del_w, zeta = local_model.poisoned_1to7(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))
            print(zeta)

        # Non-adversarial updates
        print("Good norms:")
        for idx in idxs_users[args.nb_attackers:]:
            print(idx)
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)

            del_w, zeta = local_model.update_weights(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))
            print(zeta)

        # average local updates
        average_del_w = average_weights(local_del_w)

        # Update global model: w_{t+1} = w_{t} + average_del_w
        for param, param_del_w in zip(global_weights.values(),
                                      average_del_w.values()):
            param += param_del_w
        global_model.load_state_dict(global_weights)

        # test accuracy, backdoor accuracy
        test_acc, test_loss, back_acc = test_inference1to7(
            args, global_model, test_dataset)
        testing_accuracy.append(test_acc)
        backdoor_accuracy.append(back_acc)

        print("Test & Backdoor accuracy")
        print(testing_accuracy)
        print(backdoor_accuracy)

    # save accuracy
    np.savetxt(
        '../save/1to7Attack/TestAcc/NoDefense_{}_{}_seed{}.txt'.format(
            args.dataset, args.model, seed), testing_accuracy)

    np.savetxt(
        '../save/1to7Attack/BackAcc/NoDefense_{}_{}_seed{}.txt'.format(
            args.dataset, args.model, seed), backdoor_accuracy)
Example #13
if __name__ == '__main__':
    start_time = time.time()
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    device = 'cuda'

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    global_model = CNNCifar(args=args)

    new_global_model = CNNCifar(args=args)
    new_global_model.to(device)
    new_global_model.train()
    chafen_global_model = CNNCifar(args=args)
    chafen_global_model.to(device)
    chafen_global_model.train()
    chafen_local_model = CNNCifar(args=args)
    chafen_local_model.to(device)
    chafen_local_model.train()
    global_model.to(device)
    global_model.train()
    print(global_model)
    V2 = CNNCifar(args=args)
    V2.to(device)
Example #14
def main():
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')
    args = args_parser()
    args = adatok.arguments(args)
    exp_details(args)
    if args.gpu:
        torch.cuda.set_device(args.gpu)
    device = 'cuda' if args.gpu else 'cpu'

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    if adatok.data.image_initialization == True:
        adatok.data.image_initialization = False
        return

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in,
                           dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    #print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # Training
    train_loss, train_accuracy = [], []
    val_acc_list, net_list = [], []
    cv_loss, cv_acc = [], []
    print_every = 2
    val_loss_pre, counter = 0, 0

    for epoch in tqdm(range(args.epochs)):
        local_weights, local_losses = [], []
        #print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        for idx in idxs_users:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            w, loss = local_model.update_weights(
                model=copy.deepcopy(global_model), global_round=epoch)
            local_weights.append(copy.deepcopy(w))
            local_losses.append(copy.deepcopy(loss))

        # update global weights
        global_weights = average_weights(local_weights)

        # update global weights
        global_model.load_state_dict(global_weights)

        loss_avg = sum(local_losses) / len(local_losses)
        train_loss.append(loss_avg)

        # Calculate avg training accuracy over all users at every epoch
        list_acc, list_loss = [], []
        global_model.eval()
        for c in range(args.num_users):
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            acc, loss = local_model.inference(model=global_model)
            list_acc.append(acc)
            list_loss.append(loss)
        train_accuracy.append(sum(list_acc) / len(list_acc))

        # print global training loss after every 'i' rounds
        '''if (epoch+1) % print_every == 0:
            print(f' \nAvg Training Stats after {epoch+1} global rounds:')
            print(f'Training Loss : {np.mean(np.array(train_loss))}')
            print('Train Accuracy: {:.2f}% \n'.format(100*train_accuracy[-1]))'''

        # Test inference after completion of training
        for i in adatok.data.test_groups_in_binary:
            adatok.data.actual_test_group_in_binary = i
            test_acc, test_loss = test_inference(args, global_model,
                                                 test_dataset)
            print("Resoults")
            print(epoch)
            print(adatok.data.actual_train_group_in_binary)
            print(adatok.data.actual_test_group_in_binary)
            print(test_acc)
            print(test_loss)
Example #15
def main():

    model_path = 'results/%s/%s/%s/seed_%d' % (args.dataset, args.method, args.net_type, args.seed)
    if not os.path.isdir(model_path):
        mkdir_p(model_path)
    # load datasets
    train_dataset, test_dataset, _ = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar10':
            global_model = CNNCifar(args=args)
        elif args.dataset == 'cub200':
            if args.net_type == 'resnet':
                #global_model = models.resnet50(pretrained=True)
                global_model = models.resnet18(pretrained=True)
                global_model.fc = torch.nn.Linear(global_model.fc.in_features, cf.num_classes[args.dataset])
    elif args.model == 'mlp':
        # Multi-layer perceptron
        #img_size is torch.Size([1, 28, 28])
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        # build the MLP once, after the flattened input size is known
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)
    wandb.watch(global_model)

    # Training
    # Set optimizer and criterion
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(global_model.parameters(), lr=args.lr,
                                    momentum=cf.momentum[args.dataset], weight_decay=5e-4)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(global_model.parameters(), lr=args.lr,
                                     weight_decay=1e-4)
#batch_size = 
    trainloader = DataLoader(train_dataset, batch_size=int(args.local_bs * (args.num_users * args.frac)), shuffle=True, num_workers=args.workers,
                             pin_memory=use_cuda, drop_last=True)

    criterion = torch.nn.CrossEntropyLoss().to(device)

    epoch_loss = []
    test_acc_lst = []
    best_acc = 0
    args.lr = cf.lr[args.dataset]

    for epoch in tqdm(range(args.epochs)):
        global_model.train()
        batch_loss = []

        # adjust learning rate per global round
        if epoch != 0:
            adjust_learning_rate([optimizer], args, epoch)

        for batch_idx, (images, labels) in enumerate(trainloader):
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = global_model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            #if batch_idx % 50 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLr: {:.4f}'.format(
                    epoch+1, (batch_idx+1) * len(images), len(trainloader.dataset),
                    100. * (batch_idx+1) / len(trainloader), loss.item(), args.lr))
            batch_loss.append(loss.item())

            wandb.log({'Train Loss': loss.item()})

        loss_avg = sum(batch_loss)/len(batch_loss)
        print('\nTrain loss: \n', loss_avg)
        epoch_loss.append(loss_avg)

        test_acc, test_loss = test_inference(args, global_model, test_dataset)
        test_acc_lst.append(test_acc)


        #save model
        if test_acc > best_acc:
            best_acc = test_acc
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': global_model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            }, dir=model_path, filename='checkpoint.pth.tar')

        print('\nTrain Epoch: {}, Test acc: {:.2f}%, Best Test acc: {:.2f}%'.format(epoch + 1, test_acc, best_acc))

        # log training loss, test accuracy at wandb
        wandb.log({'Test Acc': test_acc,
                   'Best Acc': best_acc})

        # if model achieves target test acc, stop training
        if best_acc >= args.target_acc:
            print('Total Global round: ', epoch+1)
            break

    if not os.path.isdir(os.path.join(model_path, 'save')):
        mkdir_p(os.path.join(model_path, 'save'))
    # Plot loss
    plt.figure()
    plt.plot(range(len(epoch_loss)), epoch_loss)
    plt.xlabel('epochs')
    plt.ylabel('Train loss')
    plt.savefig(os.path.join(model_path, 'save/nn_{}_{}_{}_loss.png'.format(args.dataset, args.model,
                                                 args.epochs)))

    # Plot test acc per epoch
    plt.figure()
    plt.plot(range(len(test_acc_lst)), test_acc_lst)
    plt.xlabel('epochs')
    plt.ylabel('Test accuracy')
    plt.savefig(os.path.join(model_path, 'save/nn_{}_{}_{}_acc.png'.format(args.dataset, args.model,
                                                                            args.epochs)))
    # testing
    #test_acc, test_loss = test_inference(args, global_model, test_dataset)
    print('Test on', len(test_dataset), 'samples')
    print("Best Test Accuracy: {:.2f}%".format(best_acc))
Example #16
def main(r, flag, x, global_models, chafen_global_model, V1, V2, trainloader,
         chafen_local_weights, testloader):
    args = args_parser()
    device = "cuda"
    c1 = 0
    c2 = 0
    my_sum1 = 0
    acc1, acc2 = 0, 0
    r1 = 0.7
    chafen = CNNCifar(args=args)
    chafen_local_model = CNNCifar(args=args)
    V1.to(device)
    V2.to(device)
    chafen.to(device)
    chafen_local_model.to(device)
    chafen_local_model.load_state_dict(chafen_local_weights)

    V1_weight = V1.state_dict()
    V2_weight = V2.state_dict()
    chafen_global_model_weight = chafen_global_model.state_dict()
    chafen_weight = chafen.state_dict()
    new_model = CNNCifar1(args=args)
    new_model.to(device)
    new_weight = new_model.state_dict()

    for k in chafen_weight.keys():
        chafen_weight[k] = V2_weight[k] - V1_weight[k]
    chafen.load_state_dict(chafen_weight)
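    # e1 (relevant1) scores the new delta against the previous global delta and
    # e2 (redundancy1) against the client's previous local delta; both helpers
    # are defined elsewhere, so this reading of their roles is an assumption.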

    e1 = relevant1(chafen, chafen_global_model)
    e2 = redundancy1(chafen, chafen_local_model)
    print('before compression', e1)
    print('before compression', e2)

    if x < 10:
        if e1.item() < r:
            print("相关性不够")
            V2.load_state_dict(V1_weight)
            c1 += 1
            flag = 1
            return V2.state_dict(
            ), chafen_local_weights, c1, c2, my_sum1, acc1, acc2, flag
        else:
            print("相关性够")
            flag = 0
            V2.load_state_dict(V2_weight)
            return V2.state_dict(), chafen.state_dict(
            ), c1, c2, my_sum1, acc1, acc2, flag

    else:
        my_sum = 0
        my_size = 0
        for i in range(1, 10):
            for k in global_models[0].keys():
                a = global_models[x - i][k] - global_models[x - i - 1][k]
                my_sum += torch.sum(a * a)
                my_size += a.numel()
        e = my_sum / (my_size + 0.00000001)
        print(e.item())
        if e1.item() < r:
            print("相关性不够")
            V2.load_state_dict(V1_weight)
            c1 += 1
            flag = 1
            return V2.state_dict(
            ), chafen_local_weights, c1, c2, my_sum1, acc1, acc2, flag
        elif e2.item() < e.item():
            print("差异性不够")
            chafen_weight = chafen_local_weights
            for k in V2_weight.keys():
                V2_weight[k] = V1_weight[k] + chafen_weight[k]
            V2.load_state_dict(V2_weight)
            c2 += 1
            flag = 0
            return V2.state_dict(
            ), chafen_local_weights, c1, c2, my_sum1, acc1, acc2, flag
        else:
            print("都满足,压缩")
            acc1 = inference(V2, testloader)
            print('更新后 准确率')
            print(acc1)
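            # Layer-wise pruning of the delta: for each tensor, recompute
            # V2 - V1, take the magnitude value at the r1 quantile as the
            # threshold, and zero the smaller entries (cut is assumed to zero
            # values below the threshold) before packing V1 plus the pruned
            # delta into the CNNCifar1 weights below.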

            chafen_weight['conv1.weight'] = V2_weight[
                'conv1.weight'] - V1_weight['conv1.weight']
            c1w = chafen_weight['conv1.weight'].view(450)
            c1w = c1w.cpu().numpy()
            c1w = np.abs(c1w)
            c1w = np.sort(c1w)
            a = c1w[int(len(c1w) * r1)]
            chafen_weight['conv1.weight'] = V2_weight[
                'conv1.weight'] - V1_weight['conv1.weight']
            c1w = chafen_weight['conv1.weight'].view(450)
            c1w = c1w.cpu().numpy()
            c1w = cut(c1w, a)
            chafen_weight['conv1.weight'] = torch.from_numpy(c1w)
            chafen_weight['conv1.weight'] = chafen_weight['conv1.weight'].view(
                6, 3, 5, 5)

            chafen_weight['conv1.bias'] = V2_weight['conv1.bias'] - V1_weight[
                'conv1.bias']
            c1b = chafen_weight['conv1.bias'].cpu().numpy()
            c1b = np.abs(c1b)
            c1b = np.sort(c1b)
            a = c1b[int(len(c1b) * r1) - 1]
            c1b = chafen_weight['conv1.bias'].cpu().numpy()
            c1b = cut(c1b, a)
            chafen_weight['conv1.bias'] = torch.from_numpy(c1b)

            chafen_weight['conv2.weight'] = V2_weight[
                'conv2.weight'] - V1_weight['conv2.weight']
            c2w = chafen_weight['conv2.weight'].view(2400)
            c2w = c2w.cpu().numpy()
            c2w = np.abs(c2w)
            c2w = np.sort(c2w)
            a = c2w[int(len(c2w) * r1)]
            chafen_weight['conv2.weight'] = V2_weight[
                'conv2.weight'] - V1_weight['conv2.weight']
            c2w = chafen_weight['conv2.weight'].view(2400)
            c2w = c2w.cpu().numpy()
            c2w = cut(c2w, a)
            chafen_weight['conv2.weight'] = torch.from_numpy(c2w)
            chafen_weight['conv2.weight'] = chafen_weight['conv2.weight'].view(
                16, 6, 5, 5)

            chafen_weight['conv2.bias'] = V2_weight['conv2.bias'] - V1_weight[
                'conv2.bias']
            c2b = chafen_weight['conv2.bias'].cpu().numpy()
            c2b = np.abs(c2b)
            c2b = np.sort(c2b)
            a = c2b[int(len(c2b) * r1) - 1]
            c2b = chafen_weight['conv2.bias'].cpu().numpy()
            c2b = cut(c2b, a)
            chafen_weight['conv2.bias'] = torch.from_numpy(c2b)

            chafen_weight['fc1.weight'] = V2_weight['fc1.weight'] - V1_weight[
                'fc1.weight']
            fc1w = chafen_weight['fc1.weight'].view(48000)
            fc1w = fc1w.cpu().numpy()
            fc1w = np.abs(fc1w)
            fc1w = np.sort(fc1w)
            a = fc1w[int(len(fc1w) * r1)]
            chafen_weight['fc1.weight'] = V2_weight['fc1.weight'] - V1_weight[
                'fc1.weight']
            fc1w = chafen_weight['fc1.weight'].view(48000)
            fc1w = fc1w.cpu().numpy()
            fc1w = cut(fc1w, a)
            chafen_weight['fc1.weight'] = torch.from_numpy(fc1w)
            chafen_weight['fc1.weight'] = chafen_weight['fc1.weight'].view(
                120, 400)

            chafen_weight[
                'fc1.bias'] = V2_weight['fc1.bias'] - V1_weight['fc1.bias']
            fc1b = chafen_weight['fc1.bias'].cpu().numpy()
            fc1b = np.abs(fc1b)
            fc1b = np.sort(fc1b)
            a = fc1b[int(len(fc1b) * r1) - 1]
            fc1b = chafen_weight['fc1.bias'].cpu().numpy()
            fc1b = cut(fc1b, a)
            chafen_weight['fc1.bias'] = torch.from_numpy(fc1b)

            chafen_weight['fc2.weight'] = V2_weight['fc2.weight'] - V1_weight[
                'fc2.weight']
            fc2w = chafen_weight['fc2.weight'].view(10080)
            fc2w = fc2w.cpu().numpy()
            fc2w = np.abs(fc2w)
            fc2w = np.sort(fc2w)
            a = fc2w[int(len(fc2w) * r1)]
            chafen_weight['fc2.weight'] = V2_weight['fc2.weight'] - V1_weight[
                'fc2.weight']
            fc2w = chafen_weight['fc2.weight'].view(10080)
            fc2w = fc2w.cpu().numpy()
            fc2w = cut(fc2w, a)
            chafen_weight['fc2.weight'] = torch.from_numpy(fc2w)
            chafen_weight['fc2.weight'] = chafen_weight['fc2.weight'].view(
                84, 120)

            chafen_weight[
                'fc2.bias'] = V2_weight['fc2.bias'] - V1_weight['fc2.bias']
            fc2b = chafen_weight['fc2.bias'].cpu().numpy()
            fc2b = np.abs(fc2b)
            fc2b = np.sort(fc2b)
            a = fc2b[int(len(fc2b) * r1) - 1]
            fc2b = chafen_weight['fc2.bias'].cpu().numpy()
            fc2b = cut(fc2b, a)
            chafen_weight['fc2.bias'] = torch.from_numpy(fc2b)

            chafen_weight['fc3.weight'] = V2_weight['fc3.weight'] - V1_weight[
                'fc3.weight']
            fc3w = chafen_weight['fc3.weight'].view(840)
            fc3w = fc3w.cpu().numpy()
            fc3w = np.abs(fc3w)
            fc3w = np.sort(fc3w)
            a = fc3w[int(len(fc3w) * r1)]
            chafen_weight['fc3.weight'] = V2_weight['fc3.weight'] - V1_weight[
                'fc3.weight']
            fc3w = chafen_weight['fc3.weight'].view(840)
            fc3w = fc3w.cpu().numpy()
            fc3w = cut(fc3w, a)
            chafen_weight['fc3.weight'] = torch.from_numpy(fc3w)
            chafen_weight['fc3.weight'] = chafen_weight['fc3.weight'].view(
                10, 84)

            chafen_weight[
                'fc3.bias'] = V2_weight['fc3.bias'] - V1_weight['fc3.bias']
            fc3b = chafen_weight['fc3.bias'].cpu().numpy()
            fc3b = np.abs(fc3b)
            fc3b = np.sort(fc3b)
            a = fc3b[int(len(fc3b) * r1) - 1]
            fc3b = chafen_weight['fc3.bias'].cpu().numpy()
            fc3b = cut(fc3b, a)
            chafen_weight['fc3.bias'] = torch.from_numpy(fc3b)

            chafen.load_state_dict(chafen_weight)
            new_e1 = relevant1(chafen, chafen_global_model)
            new_e2 = redundancy1(chafen, chafen_local_model)
            print('After pruning', new_e1)
            print('After pruning', new_e2)
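
            # Assemble the two-branch model: each layer keeps a copy of the base
            # weights from V1 (conv11, fc11, ...) plus a copy of the pruned delta
            # (conv12, fc12, ...); only the delta branch will be retrained.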

            new_weight['conv11.weight'] = V1_weight['conv1.weight']
            new_weight['conv12.weight'] = chafen_weight['conv1.weight']
            new_weight['conv11.bias'] = V1_weight['conv1.bias']
            new_weight['conv12.bias'] = chafen_weight['conv1.bias']

            new_weight['conv21.weight'] = V1_weight['conv2.weight']
            new_weight['conv22.weight'] = chafen_weight['conv2.weight']
            new_weight['conv21.bias'] = V1_weight['conv2.bias']
            new_weight['conv22.bias'] = chafen_weight['conv2.bias']

            new_weight['fc11.weight'] = V1_weight['fc1.weight']
            new_weight['fc12.weight'] = chafen_weight['fc1.weight']
            new_weight['fc11.bias'] = V1_weight['fc1.bias']
            new_weight['fc12.bias'] = chafen_weight['fc1.bias']

            new_weight['fc21.weight'] = V1_weight['fc2.weight']
            new_weight['fc22.weight'] = chafen_weight['fc2.weight']
            new_weight['fc21.bias'] = V1_weight['fc2.bias']
            new_weight['fc22.bias'] = chafen_weight['fc2.bias']

            new_weight['fc31.weight'] = V1_weight['fc3.weight']
            new_weight['fc32.weight'] = chafen_weight['fc3.weight']
            new_weight['fc31.bias'] = V1_weight['fc3.bias']
            new_weight['fc32.bias'] = chafen_weight['fc3.bias']

            new_model.load_state_dict(new_weight)
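
            # Freeze the base branch (the V1 copies) and all reference models;
            # only the delta-branch parameters of new_model remain trainable.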

            m = [
                'conv11.weight', 'conv21.weight', 'fc11.weight', 'fc21.weight',
                'fc31.weight', 'conv11.bias', 'conv21.bias', 'fc11.bias',
                'fc21.bias', 'fc31.bias'
            ]
            for name1, param1 in new_model.named_parameters():
                if name1 in m:
                    param1.requires_grad = False
                else:
                    param1.requires_grad = True

            for name, param in V2.named_parameters():
                param.requires_grad = False
            for name, param in V1.named_parameters():
                param.requires_grad = False
            for name, param in chafen_global_model.named_parameters():
                param.requires_grad = False
            for name, param in chafen_local_model.named_parameters():
                param.requires_grad = False

            optimizer = torch.optim.Adam(new_model.parameters(),
                                         lr=0.0001,
                                         weight_decay=1e-4)
            criterion1 = torch.nn.NLLLoss().to(device)
            criterion2 = torch.nn.KLDivLoss().to(device)
            epoch_loss = []
            EPS = 1e-8
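            # Lock the positions that were pruned to (near) zero: build a boolean
            # mask per weight tensor so their gradients can be zeroed every step,
            # keeping the delta branch sparse during retraining.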
            locked_masks = {
                n: torch.abs(w) < EPS
                for n, w in new_model.named_parameters()
                if n.endswith('weight')
            }
            chafen_global_model_weight.requires_grad = False
            chafen_local_weights.requires_grad = False

            for epoch in tqdm(range(50)):
                batch_loss = []
                batch_loss1 = []
                batch_loss2 = []
                batch_loss3 = []
                batch_loss4 = []
                for batch_idx, (images, labels) in enumerate(trainloader):

                    my_e1, my_size = 0.0, 0.0
                    images, labels = images.to(device), labels.to(device)
                    optimizer.zero_grad()
                    outputs = new_model(images)
                    loss1 = criterion1(outputs, labels)
                    x = new_model.f(images)
                    y = V2.f(images)
                    z = new_model.f1(images)
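                    # Combined objective: 0.3 * NLL on the labels plus 0.7 * an
                    # RBF-kernel MMD feature-alignment term between new_model's
                    # features and those of V2 (weight 0.8) and of new_model.f1
                    # (weight 0.2).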
                    loss2 = 0.8 * mmd_rbf(x, y) + 0.2 * mmd_rbf(x, z)
                    loss = 0.7 * loss2 + 0.3 * loss1
                    loss.backward()
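                    # Zero the gradient at every locked (pruned-to-zero) position
                    # so those entries stay zero through the optimizer step.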
                    for n, w in new_model.named_parameters():
                        if w.grad is not None and n in locked_masks:
                            w.grad[locked_masks[n]] = 0
                    optimizer.step()
                    if batch_idx % 50 == 0:
                        print(
                            'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
                            format(epoch + 1, batch_idx * len(images),
                                   len(trainloader.dataset),
                                   100. * batch_idx / len(trainloader),
                                   loss.item()))
                    batch_loss.append(loss.item())
                    batch_loss1.append(loss1.item())
                    batch_loss2.append(loss2.item())

            loss_avg = sum(batch_loss) / len(batch_loss)
            loss_avg1 = sum(batch_loss1) / len(batch_loss1)
            loss_avg2 = sum(batch_loss2) / len(batch_loss2)

            print('\nTrain loss:', loss_avg)
            print('\nTrain loss1:', loss_avg1)
            print('\nTrain loss2:', loss_avg2)

            new_weight = new_model.state_dict()

            chafen_weight['conv1.weight'] = new_weight['conv12.weight']
            chafen_weight['conv1.bias'] = new_weight['conv12.bias']
            chafen_weight['conv2.weight'] = new_weight['conv22.weight']
            chafen_weight['conv2.bias'] = new_weight['conv22.bias']

            chafen_weight['fc1.weight'] = new_weight['fc12.weight']
            chafen_weight['fc1.bias'] = new_weight['fc12.bias']
            chafen_weight['fc2.weight'] = new_weight['fc22.weight']
            chafen_weight['fc2.bias'] = new_weight['fc22.bias']
            chafen_weight['fc3.weight'] = new_weight['fc32.weight']
            chafen_weight['fc3.bias'] = new_weight['fc32.bias']
            chafen.load_state_dict(chafen_weight)

            e1 = relevant1(chafen, chafen_global_model)
            e2 = redundancy1(chafen, chafen_local_model)
            print("retrain后 ", e1)
            print("retrain后 ", e2)

            # Count the zero entries in the pruned update

            for k in chafen_weight.keys():
                a = chafen_weight[k].cpu().numpy()
                a = a.flatten()
                b = a.tolist()
                my_sum1 += b.count(0.0)
            print(my_sum1)
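
            # Reconstruct the full local model: add the retrained sparse delta
            # back onto the base weights (V2 = V1 + delta) and re-evaluate.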

            for k in V2_weight.keys():
                V2_weight[k] = chafen_weight[k] + V1_weight[k]
            V2.load_state_dict(V2_weight)

            acc2 = inference(V2, testloader)
            print('Accuracy after reconstruction')
            print(acc2)

            flag = 0

            for name, param in V2.named_parameters():
                param.requires_grad = True
            for name, param in V1.named_parameters():
                param.requires_grad = True
            for name, param in chafen_global_model.named_parameters():
                param.requires_grad = True
            for name, param in chafen_local_model.named_parameters():
                param.requires_grad = True

            return V2.state_dict(), chafen.state_dict(), c1, c2, my_sum1, acc1, acc2, flag
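
The per-layer compression above relies on a cut() helper that is not shown in this excerpt. A minimal sketch of what it is assumed to do (zero every entry whose magnitude falls below the threshold, keeping the rest unchanged):

import numpy as np

def cut(arr, threshold):
    # Hypothetical helper (not from the original source): keep entries with
    # |value| >= threshold and zero out the rest, preserving the array's values
    # elsewhere and its dtype/shape.
    out = arr.copy()
    out[np.abs(out) < threshold] = 0.0
    return out
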
Example #17
0
def poisoned_random_CDP(seed=1):
    # Central DP to protect against attackers

    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in,
                           dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # testing accuracy for global model
    testing_accuracy = [0.1]
    backdoor_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_del_w, local_norms = [], []
        print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Adversaries' update
        for idx in idxs_users[0:args.nb_attackers]:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            del_w, zeta = local_model.poisoned_1to7(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))

        # Non-adversary updates
        for idx in idxs_users[args.nb_attackers:]:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            del_w, zeta = local_model.update_weights(
                model=copy.deepcopy(global_model), change=1)
            local_del_w.append(copy.deepcopy(del_w))
            local_norms.append(copy.deepcopy(zeta))

        # norm bound (e.g. median of norms)
        median_norms = args.norm_bound  #np.median(local_norms)  #args.norm_bound
        print(median_norms)

        # clip weight updates
        for i in range(len(idxs_users)):
            for param in local_del_w[i].values():
                param /= max(1, local_norms[i] / median_norms)

        # average the clipped weight updates
        average_del_w = average_weights(local_del_w)

        # Update global model using clipped weight updates, and add noise
        # w_{t+1} = w_{t} + avg(del_w1 + del_w2 + ... + del_wc) + Noise
        for param, param_del_w in zip(global_weights.values(),
                                      average_del_w.values()):
            param += param_del_w
            param += torch.randn(
                param.size(), device=param.device
            ) * args.noise_scale * median_norms / (len(idxs_users)**0.5)
        global_model.load_state_dict(global_weights)

        # test accuracy
        test_acc, test_loss = test_inference(args, global_model, test_dataset)
        testing_accuracy.append(test_acc)

        print("Test accuracy")
        print(testing_accuracy)

    # save test accuracy
    np.savetxt(
        '../save/1to7Attack/GDP_{}_{}_seed{}_clip{}_scale{}.txt'.format(
            args.dataset, args.model, seed, args.norm_bound, args.noise_scale),
        testing_accuracy)

def main():
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    if args.gpu:
        torch.cuda.set_device(0)
    device = 'cuda' if args.gpu else 'cpu'

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    args.num_users = len(user_groups)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in,
                           dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()

    # copy weights
    global_weights = global_model.state_dict()

    # Training
    train_loss, train_accuracy = [], []
    val_acc_list, net_list = [], []
    cv_loss, cv_acc = [], []
    print_every = 2
    val_loss_pre, counter = 0, 0

    # Read which participants are currently taking part in training (0 means the participant is included, 1 means it is not)
    users = []
    fp = open('users.txt', "r")
    x = fp.readline().split(' ')
    for i in x:
        if i != '':
            users.append(int(i))
    fp.close()

    #for epoch in tqdm(range(args.epochs)):
    for epoch in range(args.epochs):
        local_weights, local_losses = [], []
        #print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        for idx in idxs_users:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            w, loss = local_model.update_weights(
                model=copy.deepcopy(global_model), global_round=epoch)
            local_weights.append(copy.deepcopy(w))
            local_losses.append(copy.deepcopy(loss))

        global_weights = average_weights(local_weights)

        # update global weights
        global_model.load_state_dict(global_weights)

        loss_avg = sum(local_losses) / len(local_losses)
        train_loss.append(loss_avg)

        # Calculate avg training accuracy over all users at every epoch
        list_acc, list_loss = [], []
        global_model.eval()
        for c in range(args.num_users):
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            acc, loss = local_model.inference(model=global_model)
            list_acc.append(acc)
            list_loss.append(loss)
        train_accuracy.append(sum(list_acc) / len(list_acc))

        # print global training loss after every 'i' rounds
        '''if (epoch+1) % print_every == 0:
            print(f' \nAvg Training Stats after {epoch+1} global rounds:')
            print(f'Training Loss : {np.mean(np.array(train_loss))}')
            print('Train Accuracy: {:.2f}% \n'.format(100*train_accuracy[-1]))'''

    # Test inference after completion of training

    # Read which labels were assigned to each participant.
    ftrain = open('traindataset.txt')
    testlabels = []
    line = ftrain.readline()
    while line != "":
        sor = line.split(' ')
        array = []
        for i in sor:
            array.append(int(i))
        testlabels.append(array)
        line = ftrain.readline()
    ftrain.close()

    print("USERS LABELS")
    print(testlabels)

    # Run the evaluation for every possible coalition of participants
    for j in range((2**args.num_users) - 1):
        binary = numberToBinary(j, len(users))
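        # binary is assumed to be a 0/1 membership vector for coalition j,
        # produced by the numberToBinary helper defined elsewhere in this repo.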

        test_acc, test_loss = test_inference(args, global_model, test_dataset,
                                             testlabels, binary, len(binary))

        # Print the test results
        print("PARTICIPANTS")
        print(users)
        print("TEST NUMBER")
        print(j)
        print("TEST BINARY")
        print(binary)
        print("TEST LABELS")
        print(testlabels)
        print("Test Accuracy")
        print("{:.2f}%".format(100 * test_acc))
        print()

    # Saving the objects train_loss and train_accuracy:
    '''file_name = '../save/objects/{}_{}_{}_C[{}]_iid[{}]_E[{}]_B[{}].pkl'.\
Example #19
0
def poisoned_pixel_LDP(norm_bound, noise_scale, nb_attackers, seed=1):
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in,
                           dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # testing accuracy for global model
    testing_accuracy = [0.1]
    backdoor_accuracy = [0.1]

    for epoch in tqdm(range(args.epochs)):
        local_w, local_norms = [], []
        print(f'\n | Global Training Round : {epoch+1} |\n')

        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Poisonous updates
        for idx in idxs_users[0:nb_attackers]:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            w, _ = local_model.pixel_ldp(model=copy.deepcopy(global_model),
                                         norm_bound=norm_bound,
                                         noise_scale=noise_scale)
            local_w.append(copy.deepcopy(w))

        # Regular updates
        for idx in idxs_users[nb_attackers:]:
            local_model = LocalUpdate(args=args,
                                      dataset=train_dataset,
                                      idxs=user_groups[idx],
                                      logger=logger)
            w, _ = local_model.dp_sgd(model=copy.deepcopy(global_model),
                                      norm_bound=norm_bound,
                                      noise_scale=noise_scale)
            local_w.append(copy.deepcopy(w))

        # update global weights
        global_weights = average_weights(local_w)
        global_model.load_state_dict(global_weights)

        # test accuracy
        test_acc, test_loss, backdoor = test_backdoor_pixel(
            args, global_model, test_dataset)
        testing_accuracy.append(test_acc)
        backdoor_accuracy.append(backdoor)

        print("Testing & Backdoor accuracies")
        print(testing_accuracy)
        print(backdoor_accuracy)

    # save test accuracy
    np.savetxt(
        '../save/PixelAttack/TestAcc/LDP_iid_{}_{}_clip{}_scale{}_attackers{}_seed{}.txt'
        .format(args.dataset, args.model, norm_bound, noise_scale,
                nb_attackers, seed), testing_accuracy)

    np.savetxt(
        '../save/PixelAttack/BackdoorAcc/LDP_iid_{}_{}_clip{}_scale{}_attackers{}_seed{}.txt'
        .format(args.dataset, args.model, norm_bound, noise_scale,
                nb_attackers, seed), backdoor_accuracy)
    if args.gpu:
        torch.cuda.set_device(args.gpu)
    #device = 'cuda' if args.gpu else 'cpu'
    device = 'cuda'

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
Example #21
0
    if args.gpu_id:
        torch.cuda.set_device(args.gpu_id)
    device = 'cuda' if args.gpu else 'cpu'

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in,
                           dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
Example #22
0
if __name__ == '__main__':

    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    start_time = time.time()

    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    device = 'cuda'

    train_dataset, test_dataset, user_groups = get_dataset(args)

    global_model = CNNCifar(args=args, builder=get_builder())

    global_model.to(device)
    global_model.train()
    print(global_model)

    global_weights = global_model.state_dict()

    # Training
    train_loss, test_accuracy = [], []
    val_acc_list, net_list = [], []
    cv_loss, cv_acc = [], []
    print_every = 2
    val_loss_pre, counter = 0, 0
    x = 0
    my_sum = 0