class Client:
    """A federated-learning client holding a local dataset and a LeNet model.

    ``update`` performs FedProx-style local training: plain SGD steps plus a
    proximal gradient term MU * (w - w_center) that pulls the local weights
    toward the server ("center") weights.
    """

    def __init__(self, name, train_data_dir, test_data_dir):
        """Load the client's train/test image folders and build its model.

        name           -- identifier the server uses to track this client.
        train_data_dir -- directory passed to ``datasets.ImageFolder`` (train).
        test_data_dir  -- directory passed to ``datasets.ImageFolder`` (test).
        """
        self.name = name

        transform = transforms.ToTensor()

        trainset = datasets.ImageFolder(train_data_dir, transform=transform)
        self.trainloader = torch.utils.data.DataLoader(
            trainset,
            batch_size=BATCH_SIZE,
            shuffle=True
        )

        testset = datasets.ImageFolder(test_data_dir, transform=transform)
        self.testloader = torch.utils.data.DataLoader(
            testset,
            batch_size=BATCH_SIZE,
            shuffle=False
        )

        # Number of batches per epoch.  The original materialised the whole
        # loader (``len(list(self.trainloader))``) just to count batches,
        # pulling every image into memory; ``len(loader)`` is the same value.
        self.dataset_len = len(self.trainloader)

        self.net = LeNet().to(device)

        self.criterion = nn.CrossEntropyLoss()

    def update(self, net_dict, center_params_dict):
        """Run LOCAL_EPOCH_NUM local epochs and return the new state dict.

        net_dict           -- server model ``state_dict`` to start from.
        center_params_dict -- per-parameter center weights for the proximal term.
        """
        self.net.load_state_dict(net_dict)

        # Create the optimizer once per round.  The original rebuilt it on
        # every batch, which discarded the momentum buffers each step, so the
        # configured ``momentum=0.9`` never actually had any effect.
        optimizer = optim.SGD(self.net.parameters(), lr=LR, momentum=0.9)

        for _ in range(LOCAL_EPOCH_NUM):
            for inputs, labels in self.trainloader:
                # Keep only the first channel (single-channel input for LeNet
                # — presumably grayscale images; confirm against the model).
                inputs = inputs[:, 0:1]
                inputs, labels = inputs.to(device), labels.to(device)

                outputs = self.net(inputs)
                loss = self.criterion(outputs, labels)

                optimizer.zero_grad()
                loss.backward()

                # FedProx proximal term: nudge each parameter's gradient
                # toward the server's center weights before stepping.
                for param_name, params in self.net.named_parameters():
                    params.grad += MU * (params.data - center_params_dict[param_name])

                optimizer.step()

        return self.net.state_dict()

    # Measure accuracy on the training set at the end of each epoch.
    net.eval()

    train_acc = 0

    for test_step, (data, target) in enumerate(train_dl, 1):

        data, target = data.cuda(), target.cuda()

        outputs = net(data)

        # Count correct predictions in this batch (argmax over class logits).
        train_acc += sum(
            torch.max(outputs, 1)[1].data.cpu().numpy() ==
            target.data.cpu().numpy())

    train_acc /= train_ds_size

    # Switch back to training mode for the next epoch.
    net.train()

    print('epoch:{}, train_acc:{:.3f} %, loss:{:.3f}, time:{:.1f} min'.format(
        epoch, train_acc * 100, loss.data.item(), (time() - start) / 60))

# NOTE(review): './modle/...' looks like a typo for './model/...' — confirm the
# directory actually used on disk before renaming this runtime string.
torch.save(net.state_dict(), './modle/net{}-{}.pth'.format(epoch, step))
# Build one Client per data shard; each client reads its own train/test folder.
for i in range(CLIENT_NUM):
    client_name = 'client' + str(i)
    client_list.append(Client(client_name, train_data_root + client_name + '/', test_data_root + client_name + '/'))

# Snapshot the server ("center") weights used by the FedProx proximal term.
# BUG FIX: this is module-level code, so ``self`` does not exist here — the
# snapshot must come from the server model ``net`` (the same object used below
# as ``net.state_dict()``), not ``self.net``.
center_params_dict = dict()
for name, params in net.named_parameters():
    center_params_dict[name] = copy.deepcopy(params.data)

st = time.time()

# Federated training rounds: broadcast the server weights, collect each
# client's locally-trained state dict, then average them (FedAvg).
for t in range(ROUND_NUM):
    client_net_dict_list = []
    net_dict = net.state_dict()

    for i in range(CLIENT_NUM):
        client_net_dict_list.append(client_list[i].update(net_dict, center_params_dict))

    # NOTE(review): this aliases client 0's state dict and accumulates the
    # other clients into it in place before dividing — client 0's returned
    # dict is mutated by the averaging.
    client_average_net_dict = client_net_dict_list[0]
    for key in client_average_net_dict:
        for i in range(1, CLIENT_NUM):
            client_average_net_dict[key] += client_net_dict_list[i][key]
    for key in client_net_dict_list[0]:
        client_average_net_dict[key] /= CLIENT_NUM

    net.load_state_dict(client_average_net_dict)

    # Refresh the proximal-term center weights (loop body continues beyond
    # this view and is cut off here).
    for key in center_params_dict:
        tmp_params = center_params_dict[key]
예제 #4
0
            loss = criterion(prediction, y_batch)

            # Standard SGD step for this mini-batch.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_loss = loss.data.item()
            running_loss += batch_loss

        # End-of-epoch evaluation on both splits (eval mode disables
        # dropout/batch-norm updates), then back to train mode.
        lenet.eval()
        train_accuracy = accuracy(train_loader, lenet)
        test_accuracy = accuracy(test_loader, lenet)
        lenet.train()
        # Keep a checkpoint of the best test accuracy seen so far.
        if (test_accuracy >= best_result):
            best_result = test_accuracy
            torch.save(lenet.state_dict(),
                       os.path.join(opt.path, 'lenet_best.pt'))
        logprint(
            'Epoch [%d], Loss: %.4f, KL: %.4f, Train accuracy: %.4f, Test accuracy: %.4f, Best: %.4f'
            % (e, running_loss / num_batch, running_klloss / num_batch,
               train_accuracy, test_accuracy, best_result))

# Reload the best checkpoint and report its test accuracy as the baseline.
lenet_path = os.path.join(opt.path, "lenet_best.pt")
lenet_best = LeNet().cuda()
lenet_best.load_state_dict(torch.load(lenet_path))
baseline_acc = accuracy(test_loader, lenet_best)
logprint("loaded pretrained model. baseline acc = %.4f" % baseline_acc)

lenet_sbp = LeNet_SBP()
sbp_parameters = [
    {
        output[k] = torch.from_numpy(l)
    output = output.reshape([-1, 1, 28, 28])
    return output


# Federated rounds over three fixed clients: each client computes a gradient
# on its own shard of ``dataset_list`` against the current server weights.
for epoch in range(epochs):
    sum_loss = 0.0
    # Walk the batches; client c sees dataset_list[index + client_len * c].
    for index in range(client_len):
        # client 0
        print("training process---epochs:{}  iteration:{}".format(
            epoch + 1, index + 1))
        client_0_inputs, client_0_labels = dataset_list[index]
        client_0_inputs, client_0_labels = client_0_inputs.to(
            device), client_0_labels.to(device)
        net_dict = net.state_dict()  # grab the current server weights
        client_0_grad_dict = get_client_grad(client_0_inputs, client_0_labels,
                                             net_dict, client_0_net)
        # client 1
        client_1_inputs, client_1_labels = dataset_list[index + client_len]
        client_1_inputs, client_1_labels = client_1_inputs.to(
            device), client_1_labels.to(device)
        net_dict = net.state_dict()  # grab the current server weights
        client_1_grad_dict = get_client_grad(client_1_inputs, client_1_labels,
                                             net_dict, client_1_net)
        # client 2
        client_2_inputs, client_2_labels = dataset_list[index + client_len * 2]
        client_2_inputs, client_2_labels = client_2_inputs.to(
            device), client_2_labels.to(device)
        net_dict = net.state_dict()  # grab the current server weights
        client_2_grad_dict = get_client_grad(client_2_inputs, client_2_labels,
        m.weight.data.normal_(0, math.sqrt(2. / n))
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()


# Server-side model, initialised with the shared weight_init scheme.
net = LeNet()
net.apply(weight_init)

# Loss and server optimizer.
criterion = nn.CrossEntropyLoss()
optimizer_server = optim.SGD(net.parameters(), lr=LR, momentum=0.9)

# One local model per participating client.
client_0_net = LeNet().to(device)
client_1_net = LeNet().to(device)

model_parameters = net.state_dict()

# Record each parameter's element count and shape (useful for flattening and
# reconstructing per-parameter tensors later).
model_parameters_dict = collections.OrderedDict(
    (key, (torch.numel(value), value.shape))
    for key, value in model_parameters.items()
)


def get_client_encrypted_grad(client_inputs, client_labels, net_dict,
                              client_net):
    """Load the server weights into ``client_net`` and run one
    forward/backward pass on this client's batch.

    client_inputs/client_labels -- one mini-batch of this client's data.
    net_dict                    -- server ``state_dict`` to train from.
    client_net                  -- this client's local model instance.

    NOTE(review): the visible body ends after ``backward()``; the gradient
    extraction/encryption implied by the name is not visible in this view.
    """
    client_net.load_state_dict(net_dict)
    client_outputs = client_net(client_inputs)
    client_loss = criterion(client_outputs, client_labels)
    client_optimizer = optim.SGD(client_net.parameters(), lr=LR, momentum=0.9)
    client_optimizer.zero_grad()
    client_loss.backward()
예제 #7
0
        m.bias.data.zero_()


net = LeNet()
# Initialise the network parameters.
net.apply(weight_init)  # apply() recursively visits every sub-module and runs weight_init on each

# Loss function: cross-entropy, the usual choice for multi-class problems.
criterion = nn.CrossEntropyLoss()
optimizer_server = optim.SGD(net.parameters(), lr=LR, momentum=0.9)

# Per-client local models.
client_0_net = LeNet().to(device)
client_1_net = LeNet().to(device)

model_parameters = net.state_dict()  # snapshot of the server parameters

# Record each parameter's element count and shape.
model_parameters_dict = collections.OrderedDict()
for key, value in model_parameters.items():
    model_parameters_dict[key] = torch.numel(value), value.shape


# Client-side training step: compute (and, per the name, later encrypt) the
# gradient against the current server weights.
def get_client_encrypted_grad(client_inputs, client_labels, net_dict,
                              client_net):
    client_net.load_state_dict(net_dict)
    client_outputs = client_net(client_inputs)
    client_loss = criterion(client_outputs, client_labels)
    client_optimizer = optim.SGD(client_net.parameters(), lr=LR, momentum=0.9)
    client_optimizer.zero_grad()  # clear stale gradients
    client_loss.backward()  # compute gradients for this batch
예제 #8
0
        loss = loss_function(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.data

        if step % 1000 == 999:  # print every 1000 mini-batches
            #with torch.no_grad():
            for data_test in val_loader:
                val_image, val_labels = data_test
                val_image = val_image.cuda()
                val_labels = val_labels.cuda()
                outputs = net(val_image)
                outputs = outputs.cuda()

            # NOTE(review): ``outputs``/``val_labels`` here come from the LAST
            # validation batch only, so this "test_accuracy" covers one batch,
            # not the whole validation set.
            predict_y = torch.max(outputs, dim=1)[1]
            accuracy = (predict_y
                        == val_labels).sum().item() / val_labels.size(
                            0)  # count correct predictions; .item() extracts the Python number
            print((predict_y == val_labels).sum().item())
            print(val_labels.size(0))
            print('[%d, %5d] train_loss: %.3f  test_accuracy: %.3f' %
                  (epoch + 1, step + 1, running_loss / 1000, accuracy))
            running_loss = 0.0
print('Finished Training')

#-------------------save the model-----------------------------------------
# NOTE(review): ``path + './result/...'`` concatenates a base path with a
# leading './' — verify the intended on-disk location before changing.
save_path = path + './result/Lenet.pth'
torch.save(net.state_dict(), save_path)
def run_lenet(args):
    """Train LeNet on CIFAR-100 and print the final test accuracy.

    args.b  -- batch size for both data loaders.
    args.lr -- SGD learning rate.
    args.e  -- number of training epochs.

    Saves a checkpoint every 10 epochs under ``checkpoints/lenet/`` and the
    final weights to ``models/lenet.pth``.
    """
    train_set = datasets.CIFAR100(
        "./data/",
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(  # pre-computed
                (0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2762))
        ]))
    test_set = datasets.CIFAR100(
        "./data/",
        train=False,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(  # pre-computed
                (0.5088, 0.4874, 0.4419), (0.2683, 0.2574, 0.2771))
        ]))

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=args.b,
                                               shuffle=True)
    # Evaluation order is irrelevant, so don't shuffle the test set.
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=args.b,
                                              shuffle=False)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = LeNet().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
    criterion = nn.CrossEntropyLoss()  # renamed from ``loss`` to avoid shadowing

    checkpoints_dir = "checkpoints/lenet/"
    final_dir = 'models/'
    # Create the output directories up front so torch.save cannot fail later.
    os.makedirs(checkpoints_dir, exist_ok=True)
    os.makedirs(final_dir, exist_ok=True)

    for epoch in range(1, args.e + 1):
        loss_train = 0.0

        for images, labels in train_loader:
            images = images.to(device)
            labels = labels.to(device)

            predictions = model(images)
            batch_loss = criterion(predictions, labels)

            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()

            loss_train += batch_loss.item()

        print('{} Epoch {}, Training loss {}'.format(
            datetime.datetime.now(), epoch, loss_train / len(train_loader)))

        # Checkpoint every 10 epochs.  (The original also tested
        # ``epoch == 0``, which is unreachable since the loop starts at 1.)
        if epoch % 10 == 0:
            checkpoint_path = os.path.join(checkpoints_dir,
                                           'epoch_' + str(epoch) + '.pt')
            torch.save(
                {
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict()
                }, checkpoint_path)

    model_path = os.path.join(final_dir, 'lenet.pth')
    torch.save(model.state_dict(), model_path)

    # Final evaluation on the held-out test set.
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model(images)
            _, predicted = torch.max(outputs, dim=1)
            total += labels.shape[0]
            correct += (predicted == labels).sum().item()

        print("Accuracy = {}".format(100 * (correct / total)))
예제 #10
0

# Per-epoch bookkeeping for the differentially-private federated run.
losses = []
accs = []
hv = [[] for i in range(epochs)]
for epoch in range(epochs):
    sum_loss = 0.0
    judge = random.random()
    # Walk the batches; client c sees dataset_list[index + client_len * c].
    for index in range(client_len):
        clients_grad_dict = []
        for c in range(Num_client):
            client_inputs, client_labels = dataset_list[index + client_len * c]
            client_inputs, client_labels = client_inputs.to(
                device), client_labels.to(device)
            net_dict = net.state_dict()  # grab the current server weights
            t, tl = get_client_grad(client_inputs, client_labels, net_dict,
                                    clients_net[c], epoch)
            clients_grad_dict.append(t)
            sum_loss += tl
        # Average the clients' gradients per parameter, adding Laplace noise
        # for privacy.
        client_average_grad_dict = dict()
        for key in clients_grad_dict[0]:
            temp = []
            # Laplace noise (scale 0.375) shaped like this gradient tensor.
            noise0 = torch.from_numpy(
                sts.laplace.rvs(loc=0,
                                scale=0.375,
                                size=clients_grad_dict[0][key].shape)).type(
                                    torch.float).to(torch.device("cpu"))
            temp.append(clients_grad_dict[0][key] + noise0)
            t_vector = [
예제 #11
0
# Load model to device
logging.info("# Load model to %s" % (DEVICE))
model = model.to(DEVICE)

# training
logging.info("# Start training")
start_time = time.time()
for epoch in range(epoch_start, hp.epochs + 1):
    # (The original computed an unused ``num_batch`` here every epoch;
    # removed as dead work.)
    for i, data in enumerate(train_loader, 0):
        # data comes in the form of [src, target]
        src, target = data[0].to(DEVICE), data[1].to(DEVICE)
        # feed forward
        predicts = model(src)
        loss = xentropy(predicts, target)
        # back propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # validation (no gradients needed while scoring)
    with torch.no_grad():
        accur = validation(DEVICE, model)
    end_time = time.time()
    elapse = end_time - start_time
    # console visualization
    view_bar("training: ", epoch, hp.epochs, accur, elapse)
    # checkpoint every epoch so training can resume from ``epoch_start``
    torch.save({"epoch_start": epoch,
                "model_state_dict": model.state_dict()}, "./ckpt/ckpt_%s.pth" % (epoch))


            loss = criterion(prediction, y_batch)

            # Standard SGD step for this mini-batch.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # NOTE(review): ``loss.data[0]`` is the pre-0.4 PyTorch idiom; on
            # modern PyTorch this raises and should be ``loss.item()``.
            batch_loss = loss.data[0]
            running_loss += batch_loss

        # End-of-epoch evaluation; keep a checkpoint of the best test accuracy.
        lenet.eval()
        train_accuracy = accuracy(train_loader, lenet)
        test_accuracy = accuracy(test_loader, lenet)
        lenet.train()
        if (test_accuracy >= best_result):
            best_result = test_accuracy
            torch.save(lenet.state_dict(), os.path.join(path, 'lenet_best.pt'))
        print(
            'Epoch [%d], Loss: %.4f, KL: %.4f, Train accuracy: %.4f, Test accuracy: %.4f, Best: %.4f'
            % (e, running_loss / num_batch, running_klloss / num_batch,
               train_accuracy, test_accuracy, best_result))

# Reload the best checkpoint for the SBP (structured Bayesian pruning) stage.
alex_path = os.path.join(path, "lenet_best.pt")
lenet_best = LeNet()
lenet_best.load_state_dict(torch.load(alex_path))

# SBP model with per-layer parameter groups (list is cut off in this view).
lenet_sbp = LeNet_SBP()
sbp_learningrate = 2e-5
sbp_parameters = [
    {
        'params': lenet_sbp.conv1.weight
    },