Example #1
import numpy as np
import emcee

# `datasets`, `client`, and `SoftmaxModel` are project-local modules assumed
# to be in scope; `diffPriv13` and `expected_iters` are module-level globals.


def init(dataset, filename, epsilon, batch_size):

    global myclient

    D_in = datasets.get_num_features(dataset)
    D_out = datasets.get_num_classes(dataset)
    nParams = datasets.get_num_params(dataset)

    model = SoftmaxModel(D_in, D_out)
    train_cut = 0.8

    myclient = client.Client(dataset, filename, batch_size, model, train_cut)

    global samples
    samples = []

    global this_batch_size
    this_batch_size = batch_size

    # Log-density of the noise distribution: p(x) ∝ exp(-(alpha / 2) * ||x||)
    def lnprob(x, alpha):
        return -(alpha / 2) * np.linalg.norm(x)

    if epsilon > 0:

        if diffPriv13:

            # Draw the noise with an MCMC ensemble sampler
            # (emcee 2.x API: run_mcmc returns (pos, lnprob, state)).
            nwalkers = max(4 * nParams, 250)
            sampler = emcee.EnsembleSampler(nwalkers,
                                            nParams,
                                            lnprob,
                                            args=[epsilon])

            # Random initial positions; run 100 burn-in steps, discard them,
            # then draw the production samples.
            p0 = [np.random.rand(nParams) for _ in range(nwalkers)]
            pos, _, state = sampler.run_mcmc(p0, 100)

            sampler.reset()
            sampler.run_mcmc(pos, 1000, rstate0=state)

            print("Mean acceptance fraction:",
                  np.mean(sampler.acceptance_fraction))

            samples = sampler.flatchain

        else:

            # Gaussian noise instead: pre-generate one noise vector per
            # expected iteration, summed over the batch dimension.
            sigma = np.sqrt(2 * np.log(1.25)) / epsilon
            noise = sigma * np.random.randn(batch_size, expected_iters,
                                            nParams)
            samples = np.sum(noise, axis=0)

    else:

        # A non-positive epsilon disables privacy: use zero noise.
        samples = np.zeros((expected_iters, nParams))

    return nParams
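A minimal usage sketch (the dataset name and hyperparameters below are hypothetical; `init` also expects the module-level `diffPriv13` and `expected_iters` globals to be set beforehand):

# Hypothetical call: pre-generate noise for an epsilon = 1.0, batch-size-10 run.
nParams = init("mnist", "train", epsilon=1.0, batch_size=10)

# `samples` now holds one flattened noise vector per expected iteration;
# a training loop would presumably add samples[i] to the flattened
# gradient at iteration i.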
Example #2
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import transforms
from sklearn.metrics import accuracy_score

# `datasets` and `SoftmaxModel` (and the commented-out alternative models)
# are project-local modules assumed to be importable.


class Client():
    def __init__(self, dataset, filename, train_cut=.80):
        # initializes dataset
        self.batch_size = 4
        Dataset = datasets.get_dataset(dataset)
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        self.trainset = Dataset(filename,
                                "data/" + dataset,
                                is_train=True,
                                train_cut=train_cut,
                                transform=transform)
        self.testset = Dataset(filename,
                               "data/" + dataset,
                               is_train=False,
                               train_cut=train_cut,
                               transform=transform)
        self.trainloader = torch.utils.data.DataLoader(
            self.trainset, batch_size=self.batch_size, shuffle=True)
        self.testloader = torch.utils.data.DataLoader(self.testset,
                                                      batch_size=len(
                                                          self.testset),
                                                      shuffle=False)

        D_in = datasets.get_num_features(dataset)
        D_out = datasets.get_num_classes(dataset)

        self.model = SoftmaxModel(D_in, D_out)
        # self.model = MNISTCNNModel()
        # self.model = LFWCNNModel()

        # self.model = SVMModel(D_in, D_out)
        # self.criterion = nn.MultiLabelMarginLoss()
        ### Tunables ###
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=0.001,
                                   momentum=0.9,
                                   weight_decay=0.001)
        self.aggregatedGradients = []
        self.loss = 0.0

    # TODO: Add noise here for differential privacy.
    def getGrad(self):
        # Compute the gradient on a single batch; returns after the first
        # iteration of the train loader.
        for i, data in enumerate(self.trainloader, 0):
            # get the inputs
            inputs = data['image'].float()
            labels = data['label'].long()

            # for svm
            # padded_labels = torch.zeros(self.batch_size,10).long()
            # padded_labels.transpose(0,1)[labels] = 1
            # labels = padded_labels

            # zero the parameter gradients
            self.optimizer.zero_grad()

            # forward + backward + optimize
            outputs = self.model(inputs)
            loss = self.criterion(outputs, labels)
            loss.backward()
            # clip_grad_norm was renamed clip_grad_norm_ in PyTorch 0.4
            nn.utils.clip_grad_norm_(self.model.parameters(), 100)
            self.loss = loss.item()

            # TODO: Find a more efficient way to flatten the parameters.
            # Concatenate each layer's gradient into one flat vector.
            layers = np.zeros(0)
            for name, param in self.model.named_parameters():
                if param.requires_grad:
                    layers = np.concatenate(
                        (layers, param.grad.numpy().flatten()), axis=None)
            return layers

    # Called when an aggregator receives a new gradient
    def updateGrad(self, gradient):
        # Reshape into original tensor
        layers = self.model.reshape(gradient)
        self.aggregatedGradients.append(layers)

    # Step in the direction of provided gradient.
    # Used in BlockML when gradient is aggregated in Go
    def simpleStep(self, gradient):
        print("Simple step")
        layers = self.model.reshape(gradient)
        # Manually updates parameter gradients
        layer = 0
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                param.grad = layers[layer]
                layer += 1

        # Step in direction of parameter gradients
        self.optimizer.step()

    # Called once enough gradients have been aggregated to produce an update
    def step(self):
        # Sum all aggregated gradients into the first entry, layer by layer
        for i in range(1, len(self.aggregatedGradients)):
            gradients = self.aggregatedGradients[i]
            for g, gradient in enumerate(gradients):
                self.aggregatedGradients[0][g] += gradient

        # Average the sum; `/=` divides each tensor in place
        for g, gradient in enumerate(self.aggregatedGradients[0]):
            gradient /= len(self.aggregatedGradients)

        # Manually updates parameter gradients
        layer = 0
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                param.grad = self.aggregatedGradients[0][layer]
                layer += 1

        # Step in direction of parameter gradients
        self.optimizer.step()
        self.aggregatedGradients = []

    # Called when the aggregator shares the updated model
    def updateModel(self, modelWeights):
        layer = 0
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                param.data = modelWeights[layer]
                layer += 1

    def getModelWeights(self):
        layers = []
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                layers.append(param.data)
        return layers

    def getLoss(self):
        return self.loss

    def getModel(self):
        return self.model

    def getTestErr(self):
        # The test loader serves the whole test set as a single batch,
        # so this returns after one iteration.
        for i, data in enumerate(self.testloader, 0):
            inputs = data['image'].float()
            labels = data['label'].long()
            # Variable() is a no-op wrapper in PyTorch >= 0.4
            inputs, labels = Variable(inputs), Variable(labels)
            out = self.model(inputs)
            pred = np.argmax(out.detach().numpy(), axis=1)
            # Error rate = 1 - accuracy
            return 1 - accuracy_score(labels, pred)
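A rough usage sketch of one aggregation round. Everything below is hypothetical (dataset name, client count, round count), and it assumes `SoftmaxModel` provides the `reshape` helper that `updateGrad` relies on:

# Hypothetical driver: two clients exchange flattened gradients;
# client 0 plays the role of the aggregator.
clients = [Client("mnist", "train") for _ in range(2)]

for rnd in range(10):
    for c in clients:
        clients[0].updateGrad(c.getGrad())  # aggregator collects gradients
    clients[0].step()                       # average and apply the update
    weights = clients[0].getModelWeights()
    for c in clients[1:]:
        c.updateModel(weights)              # broadcast the updated model

print("test error:", clients[0].getTestErr())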
Example #3
def returnModel(D_in, D_out):
    model = SoftmaxModel(D_in, D_out)
    # model = MNISTCNNModel()
    return model
Example #4
def returnModel(D_in, D_out):
    model = SoftmaxModel(D_in, D_out)
    # model = CIFARCNNModel()
    return model
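Both factories return an untrained model; a caller might wire one up as below (the 784/10 dimensions are illustrative, MNIST-like, and the `nn`/`optim` imports from Example #2 are assumed):

# Hypothetical: 784 input features, 10 output classes.
model = returnModel(784, 10)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)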