class Client:
    """A federated-learning client that trains a local LeNet copy on its own data.

    Parameters
    ----------
    name : str
        Identifier for this client.
    train_data_dir, test_data_dir : str
        Directories laid out for ``torchvision.datasets.ImageFolder``.
    """

    def __init__(self, name, train_data_dir, test_data_dir):
        self.name = name

        transform = transforms.ToTensor()

        trainset = datasets.ImageFolder(train_data_dir, transform=transform)
        self.trainloader = torch.utils.data.DataLoader(
            trainset,
            batch_size=BATCH_SIZE,
            shuffle=True
        )

        testset = datasets.ImageFolder(test_data_dir, transform=transform)
        self.testloader = torch.utils.data.DataLoader(
            testset,
            batch_size=BATCH_SIZE,
            shuffle=False
        )

        # Number of batches per epoch.  len(DataLoader) gives this directly;
        # the original list(self.trainloader) loaded and transformed every
        # image just to count the batches.
        self.dataset_len = len(self.trainloader)

        self.net = LeNet().to(device)

        self.criterion = nn.CrossEntropyLoss()

    def update(self, net_dict, center_params_dict):
        """Run LOCAL_EPOCH_NUM epochs of proximal local SGD.

        Parameters
        ----------
        net_dict
            Global model ``state_dict`` to start the round from.
        center_params_dict
            Reference parameters the proximal term pulls toward
            (FedProx-style regularization).

        Returns
        -------
        dict
            The locally updated ``state_dict``.
        """
        self.net.load_state_dict(net_dict)

        # Create the optimizer once per round: the original rebuilt it for
        # every batch, which discarded the momentum buffers each step and
        # made momentum=0.9 a no-op.
        optimizer = optim.SGD(self.net.parameters(), lr=LR, momentum=0.9)

        for _ in range(LOCAL_EPOCH_NUM):
            for inputs, labels in self.trainloader:
                # Keep only the first channel (model expects 1-channel input).
                inputs = torch.index_select(inputs, 1, torch.LongTensor([0]))
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = self.net(inputs)
                loss = self.criterion(outputs, labels)
                optimizer.zero_grad()
                loss.backward()

                # Proximal term: bias each gradient toward the server's
                # reference copy of the parameter before stepping.
                for name, params in self.net.named_parameters():
                    params.grad += MU * (params.data - center_params_dict[name])

                optimizer.step()

        return self.net.state_dict()
class Client:
    """Federated client holding a key pair; ships fixed-point encrypted gradients."""

    def __init__(self, name, train_data_dir, test_data_dir, pk, sk):
        self.name = name
        self.pk = pk  # public key used to encrypt outgoing gradients
        self.sk = sk  # secret key (kept locally; decryption happens elsewhere)

        to_tensor = transforms.ToTensor()

        train_set = datasets.ImageFolder(train_data_dir, transform=to_tensor)
        self.trainloader = torch.utils.data.DataLoader(
            train_set, batch_size=BATCH_SIZE, shuffle=True)

        test_set = datasets.ImageFolder(test_data_dir, transform=to_tensor)
        self.testloader = torch.utils.data.DataLoader(
            test_set, batch_size=BATCH_SIZE, shuffle=False)

        # Number of training batches (materializes the loader once to count).
        self.dataset_len = len(list(self.trainloader))

        self.net = LeNet().to(device)
        self.criterion = nn.CrossEntropyLoss()

    def get_encrypted_grad(self, client_inputs, client_labels, net_dict):
        """Compute one batch's gradients and return them encrypted under self.pk.

        The per-parameter gradients are flattened into a single vector,
        shifted by ``bound`` and scaled by ``2**prec`` to obtain fixed-point
        integers, then encrypted with ``Enc``.
        """
        self.net.load_state_dict(net_dict)
        predictions = self.net(client_inputs)
        batch_loss = self.criterion(predictions, client_labels)

        sgd = optim.SGD(self.net.parameters(),
                        lr=LR,
                        momentum=0.9)
        sgd.zero_grad()
        batch_loss.backward()

        # Flatten every parameter's gradient into one long vector.
        flat_grads = [copy.deepcopy(p.grad).view(-1)
                      for _, p in self.net.named_parameters()]

        # Fixed-point encode: shift into the positive range, scale, truncate.
        params_grad = ((torch.cat(flat_grads, 0) + bound) * 2**prec).long().cuda()
        client_encrypted_grad = Enc(self.pk, params_grad)

        sgd.zero_grad()

        return client_encrypted_grad
Beispiel #3
0
class Client:
    """Federated client that returns plaintext per-parameter gradients.

    Parameters
    ----------
    name : str
        Identifier for this client.
    train_data_dir, test_data_dir : str
        Directories laid out for ``torchvision.datasets.ImageFolder``.
    """

    def __init__(self, name, train_data_dir, test_data_dir):
        self.name = name

        transform = transforms.ToTensor()

        trainset = datasets.ImageFolder(train_data_dir, transform=transform)
        self.trainloader = torch.utils.data.DataLoader(trainset,
                                                       batch_size=BATCH_SIZE,
                                                       shuffle=True)

        testset = datasets.ImageFolder(test_data_dir, transform=transform)
        self.testloader = torch.utils.data.DataLoader(testset,
                                                      batch_size=BATCH_SIZE,
                                                      shuffle=False)

        # len(DataLoader) is the number of batches; the original built
        # list(self.trainloader) -- loading and transforming every image at
        # construction time -- just to count them.
        self.dataset_len = len(self.trainloader)

        self.net = LeNet().to(device)

        self.criterion = nn.CrossEntropyLoss()

    def get_grad(self, client_inputs, client_labels, net_dict):
        """Compute one batch's gradients under the given global weights.

        Parameters
        ----------
        client_inputs, client_labels
            A batch of inputs and targets.
        net_dict
            Global model ``state_dict`` to load before the forward pass.

        Returns
        -------
        dict
            Parameter name -> detached copy of its gradient tensor.
        """
        self.net.load_state_dict(net_dict)
        client_outputs = self.net(client_inputs)
        client_loss = self.criterion(client_outputs, client_labels)
        client_optimizer = optim.SGD(self.net.parameters(),
                                     lr=LR,
                                     momentum=0.9)
        client_optimizer.zero_grad()
        client_loss.backward()

        # detach().clone() copies the gradient tensor without deepcopy's
        # generic object-graph traversal.
        client_grad_dict = {
            name: params.grad.detach().clone()
            for name, params in self.net.named_parameters()
        }
        client_optimizer.zero_grad()
        return client_grad_dict
Beispiel #4
0
                    epsilon=args.epsilon,
                    pro_num=args.pro_num,
                    batch_size=args.batchsize,
                    if_dropout=args.dropout)
        cnn.apply(conv_init)  # custom weight initialization for this model
    elif args.model == 'vgg':
        # VGG16 gets the same LAT/dropout configuration as the other models.
        cnn = VGG16(enable_lat=args.enable_lat,
                    epsilon=args.epsilon,
                    pro_num=args.pro_num,
                    batch_size=args.batchsize,
                    if_dropout=args.dropout)
    elif args.model == 'alexnet':
        cnn = AlexNet(enable_lat=args.enable_lat,
                      epsilon=args.epsilon, 
                      pro_num=args.pro_num, 
                      batch_size=args.batchsize, 
                      if_dropout=args.dropout)
    # NOTE(review): no visible fallback branch -- if args.model matches none
    # of the visible cases and none above this chunk, cnn may be unbound here.
    cnn.cuda()

    # Warm-start from a saved checkpoint when one exists; otherwise the model
    # keeps its fresh initialization and we only log the miss.
    if os.path.exists(real_model_path):
        cnn.load_state_dict(torch.load(real_model_path))
        print('load model.')
    else:
        print("load failed.")

    # Dispatch to evaluation or training based on the test flag.
    if args.test_flag:
        test_op(cnn)
    else:
        train_op(cnn)
        
# One communication round per iteration: broadcast the global weights,
# collect every client's local update, average, and move the proximal
# center parameters.
for t in range(ROUND_NUM):
    client_net_dict_list = []
    net_dict = net.state_dict()

    # Each client trains locally from the current global weights and the
    # proximal reference parameters.
    for i in range(CLIENT_NUM):
        client_net_dict_list.append(client_list[i].update(net_dict, center_params_dict))

    # NOTE(review): this binds client 0's state_dict by reference; the
    # in-place += and /= below therefore mutate client_net_dict_list[0] as
    # well. Verify that a copy/.clone() was not intended here.
    client_average_net_dict = client_net_dict_list[0]
    for key in client_average_net_dict:
        for i in range(1, CLIENT_NUM):
            client_average_net_dict[key] += client_net_dict_list[i][key]
    for key in client_net_dict_list[0]:
        client_average_net_dict[key] /= CLIENT_NUM

    net.load_state_dict(client_average_net_dict)

    # Drift update of the center parameters toward the clients' results.
    # NOTE(review): if the values are tensors, tmp_params aliases
    # center_params_dict[key], so the in-place += inside the loop moves the
    # reference for later clients. Confirm whether a .clone() snapshot was
    # intended before the loop.
    for key in center_params_dict:
        tmp_params = center_params_dict[key]
        for i in range(CLIENT_NUM):
            center_params_dict[key] += LR * MU * (client_net_dict_list[i][key] - tmp_params)

    with torch.no_grad():
        '''
        # test per client
        for i in range(CLIENT_NUM):
            correct = 0
            total = 0
            for data in client_list[i].testloader:
                images, labels = data
                images = torch.index_select(images, 1, torch.LongTensor([0]))
Beispiel #6
0
        # Evaluate in eval mode, checkpoint on a new best test accuracy,
        # then switch back to training mode.
        lenet.eval()
        train_accuracy = accuracy(train_loader, lenet)
        test_accuracy = accuracy(test_loader, lenet)
        lenet.train()
        if (test_accuracy >= best_result):
            best_result = test_accuracy
            torch.save(lenet.state_dict(),
                       os.path.join(opt.path, 'lenet_best.pt'))
        logprint(
            'Epoch [%d], Loss: %.4f, KL: %.4f, Train accuracy: %.4f, Test accuracy: %.4f, Best: %.4f'
            % (e, running_loss / num_batch, running_klloss / num_batch,
               train_accuracy, test_accuracy, best_result))

# Reload the best checkpoint and record its test accuracy as the baseline
# that the pruned/SBP model will be compared against.
lenet_path = os.path.join(opt.path, "lenet_best.pt")
lenet_best = LeNet().cuda()
lenet_best.load_state_dict(torch.load(lenet_path))
baseline_acc = accuracy(test_loader, lenet_best)
logprint("loaded pretrained model. baseline acc = %.4f" % baseline_acc)

# Per-layer parameter groups for the SBP variant of LeNet.
# NOTE(review): this list continues past the visible chunk.
lenet_sbp = LeNet_SBP()
sbp_parameters = [
    {
        'params': lenet_sbp.conv1.weight
    },
    {
        'params': lenet_sbp.conv2.weight
    },
    {
        'params': lenet_sbp.fc1.weight
    },
    {
Beispiel #7
0
# Predict labels for the test set with a trained LeNet checkpoint.
from LeNet import LeNet
from dataset import test_ds

# Hyper parameters
batch_size = 128
workers = 2  # subprocess number for load the image
module_dir = './modle/net299-329.pth'  # checkpoint path (directory name is 'modle' on disk)

pred_label = []

# dataset
test_dl = DataLoader(test_ds, batch_size, num_workers=workers)

# use cuda if you have GPU
net = LeNet().cuda()
net.load_state_dict(torch.load(module_dir))
net.eval()  # inference mode: freezes dropout / batch-norm statistics

# Predict results batch by batch.  The original enumerated the loader but
# never used the counter, and round-tripped through .data/.numpy().
for data in test_dl:

    data = data.cuda()

    with torch.no_grad():
        outputs = net(data)

    # argmax over the class dimension, moved to CPU as plain Python ints.
    pred_label += outputs.argmax(1).cpu().tolist()

# Write the prediction results to a csv file
with open('pred.csv', 'w') as file:
import numpy
import os
import sys

# Directory containing this script; resource paths are resolved relative
# to it so the script works from any working directory.
path = os.path.abspath(os.path.dirname(sys.argv[0]))

transform = transforms.Compose([
    transforms.Resize((32, 32)),  # the model expects 32x32 input
    transforms.ToTensor(),  # PIL image -> float tensor
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

net = LeNet()
net.cuda()

# os.path.join builds a valid path; the original `path + './result/...'`
# concatenation produced a malformed ".../<dir>./result/..." string.
net.load_state_dict(torch.load(os.path.join(path, 'result', 'Lenet.pth')))

image = Image.open(os.path.join(path, 'img', '2.png'))
image = transform(image)  # [C, H, W] channel height width
image = torch.unsqueeze(
    image, dim=0)  # add a batch dimension -> [N, C, H, W]
image = image.cuda()

with torch.no_grad():
    outputs = net(image)
    outputs = outputs.cpu()  # move back to CPU before extracting the result
    predict = torch.max(outputs, dim=1)[1]
print(classes[int(predict)])
Beispiel #9
0
from LeNet import LeNet
import torch
import torch.nn as nn
import torch.nn.functional as F

# Run-mode flags.
Training = False
Testing = True
loadState = True  # load weights from bestState.pth before running

# Read the training csv once and slice it; the original parsed the same
# file twice (two pd.read_csv calls on mnist_train.csv).
_mnist_train = np.array(pd.read_csv("mnist_train.csv"))
TrainingData = _mnist_train[0:50000, :]   # first 50k rows for training
ValidationData = _mnist_train[50000:, :]  # remainder for validation
TestingData = np.array(pd.read_csv("mnist_test.csv"))

model = LeNet()
if loadState:
    model.load_state_dict(torch.load("bestState.pth"))
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
loss_fn = nn.CrossEntropyLoss()

if Training:
    # NOTE(review): model.EpochRunner presumably gates how many passes to
    # run -- it is defined outside this chunk; confirm its semantics.
    while model.EpochRunner:
        trainloader = torch.utils.data.DataLoader(TrainingData, batch_size=250, shuffle=True, num_workers=8)
        for batch in trainloader:
            optimizer.zero_grad()

            # Column 0 of each csv row is the label; the remaining 784
            # columns are the 28x28 image pixels.
            batchLabel = batch[:,0]
            # NOTE(review): the hard-coded reshape(250, ...) only works when
            # the dataset size divides evenly by 250 (50000 rows do); a
            # partial final batch would raise here.
            batchData = batch[:,1:].reshape(250,1,28,28).float()

            y_pred = model.forward(batchData)

            loss = loss_fn(y_pred, batchLabel)
Beispiel #10
0
# [71, 12], [76, 12], [85, 12], [90, 12], [99, 12], [104, 12]
# Top-left corners [x, y] of each digit cell on the captured frame.
digits = [[8, 12], [12, 12], [21, 12], [26, 12], [35, 12], [40, 12], [44, 12],
          [49, 12], [71, 12], [76, 12], [85, 12], [90, 12], [99, 12],
          [104, 12]]

# [x_min, y_min, x_max, y_max] per digit, each cell sized d_w x d_h.
# Comprehension replaces the original append loop.
digit_areas = [[x, y, x + d_w, y + d_h] for x, y in digits]

# Grab one frame and save it for inspection/debugging.
ret, img = cap.read()
if ret:
    cv2.imwrite('images/img.png', img)

# Load the digit classifier on CPU and freeze it for inference.
model = LeNet()
model.load_state_dict(torch.load('lenet.pt', map_location='cpu'))
model = model.eval()

k = 0  # frame counter; digits are processed every 30th frame below
while True:
    ret, img = cap.read()

    if ret:
        if k % 30 == 0:
            # os.system(f'mkdir ./digits2_raw/{k}')

            # Crop each digit cell out of the frame and convert to grayscale.
            for idx, d in enumerate(digit_areas):
                # img = cv2.rectangle(img, (d[0], d[1]), (d[2], d[3]), (255, 255, 255), 1)
                tmp = img[d[1]:d[3], d[0]:d[2]]
                tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
# Model, loss and optimizer for LeNet training.
model = LeNet()
xentropy = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), momentum=0.9, lr=hp.lr)

# Resume from the newest checkpoint when one exists; otherwise start fresh
# from epoch 1.
ckpt_path = latest_ckpt(hp.ckpt)
if ckpt_path is not None:
    logging.info("Loading the latest checkpoint")
    ckpt = torch.load(ckpt_path)
    model.load_state_dict(ckpt['model_state_dict'])
    epoch_start = ckpt['epoch_start']
    model.train()
else:
    logging.info("Initializing from scratch")
    epoch_start = 1

# Move the model onto the configured device before training.
logging.info("# Load model to %s" % (DEVICE))
model = model.to(DEVICE)

# training
logging.info("# Start training")
start_time = time.time()
# Epochs are 1-indexed and resume from epoch_start when a checkpoint loaded.
for epoch in range(epoch_start, hp.epochs + 1):
    # NOTE(review): num_batch is recomputed each epoch although it is
    # loop-invariant.
    num_batch = math.floor(len(train_set) / hp.batch_size)
    for i, data in enumerate(train_loader, 0):
        # data comes in the form of [src, target]
        src, target = data[0].to(DEVICE), data[1].to(DEVICE)