# Example 1
def init(dataset, filename, epsilon, batch_size):
    """Set up the module-global client and pre-draw privacy-noise samples.

    Builds a SoftmaxModel sized for *dataset*, wraps it in a Client stored
    in the module-global ``myclient``, and fills the module-global
    ``samples``: MCMC draws (emcee) when the module flag ``diffPriv13`` is
    set, Gaussian noise otherwise, or zeros when ``epsilon`` is 0.

    Returns the number of model parameters for *dataset*.
    """
    global myclient
    global samples
    global this_batch_size

    num_features = datasets.get_num_features(dataset)
    num_classes = datasets.get_num_classes(dataset)
    num_params = datasets.get_num_params(dataset)

    myclient = client.Client(dataset, filename, batch_size,
                             SoftmaxModel(num_features, num_classes), 0.8)

    samples = []
    this_batch_size = batch_size

    # Log-density handed to the emcee sampler: -(alpha/2) * ||x||.
    def log_prob(x, alpha):
        return -(alpha / 2) * np.linalg.norm(x)

    if epsilon <= 0:
        # No privacy budget: no noise at all.
        samples = np.zeros((expected_iters, num_params))
    elif diffPriv13:
        # Draw noise via an ensemble MCMC walk.
        nwalkers = max(4 * num_params, 250)
        sampler = emcee.EnsembleSampler(nwalkers, num_params, log_prob,
                                        args=[epsilon])

        # Burn-in from a random start, then the production run.
        start = [np.random.rand(num_params) for _ in range(nwalkers)]
        pos, _, state = sampler.run_mcmc(start, 100)
        sampler.reset()
        sampler.run_mcmc(pos, 1000, rstate0=state)

        print("Mean acceptance fraction:",
              np.mean(sampler.acceptance_fraction))

        samples = sampler.flatchain
    else:
        # Gaussian-style noise summed over the batch axis — one noise
        # vector per expected iteration.
        sigma = np.sqrt(2 * np.log(1.25)) / epsilon
        noise = sigma * np.random.randn(batch_size, expected_iters,
                                        num_params)
        samples = np.sum(noise, axis=0)

    return num_params
# Example 2
    def __init__(self, dataset, filename, train_cut=.80):
        """Load train/test splits for *dataset* and build model + optimizer.

        Args:
            dataset: dataset name; also locates its files under ``data/``.
            filename: data file handed to the dataset class.
            train_cut: fraction of the data used for training.
        """
        self.batch_size = 4

        dataset_cls = datasets.get_dataset(dataset)
        # Normalize each channel to [-1, 1].
        preprocess = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

        root = "data/" + dataset
        self.trainset = dataset_cls(filename,
                                    root,
                                    is_train=True,
                                    train_cut=train_cut,
                                    transform=preprocess)
        self.testset = dataset_cls(filename,
                                   root,
                                   is_train=False,
                                   train_cut=train_cut,
                                   transform=preprocess)

        # Shuffled mini-batches for training; the whole test set in one batch.
        self.trainloader = torch.utils.data.DataLoader(
            self.trainset, batch_size=self.batch_size, shuffle=True)
        self.testloader = torch.utils.data.DataLoader(
            self.testset, batch_size=len(self.testset), shuffle=False)

        self.model = SoftmaxModel(datasets.get_num_features(dataset),
                                  datasets.get_num_classes(dataset))

        ### Tunables ###
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=0.001,
                                   momentum=0.9,
                                   weight_decay=0.001)

        self.aggregatedGradients = []
        self.loss = 0.0
# Example 3
def main(batchsize, epsilon, dataset, data_filename, dataclass):
    """Train a single client for one iteration and visualize its gradient.

    Creates one Client on *dataset*/*data_filename*, pulls a gradient from
    it, and renders the gradient as an image: unperturbed when *epsilon*
    is 0, otherwise perturbed at the given privacy level.

    Args:
        batchsize: mini-batch size (also published as global ``batch_size``).
        epsilon: differential-privacy budget; 0 means no noise is shown.
        dataset: dataset name used to size the model.
        data_filename: data file for the client.
        dataclass: class/label forwarded to the visualization helpers.
    """
    train_cut = 0.8
    iter_time = 1
    clients = []

    D_in = datasets.get_num_features(dataset)
    D_out = datasets.get_num_classes(dataset)

    # Published as globals for helpers elsewhere in the module that read them.
    global batch_size
    batch_size = batchsize

    global nParams
    nParams = datasets.get_num_params(dataset)

    print("Creating clients")
    model = returnModel(D_in, D_out)
    clients.append(Client(dataset, data_filename, batch_size, model,
                          train_cut))

    print("Training for iterations")
    for iteration in range(iter_time):
        # Calculate and aggregate gradients
        for i in range(1):
            grad = clients[i].getGrad()
            if epsilon == 0:
                showImage(grad, dataset, dataclass, batch_size, 0)
            else:
                # BUG FIX: was `data_class` (undefined) — the parameter is
                # `dataclass`, so the epsilon != 0 path raised NameError.
                varyEpsilonShowImage(grad, batch_size, epsilon, dataset,
                                     dataclass)
# Example 4
def main():
    """Federated MNIST training with six clean and four poisoned clients.

    Ten clients share gradients through client 0, which applies the
    aggregated update and broadcasts the new weights. Every 100 steps the
    average loss, test error, and the 1->7 attack success rate are
    reported; both curves are plotted at the end.
    """
    iter_time = 2000
    batch_size = 10
    train_cut = 0.8
    clients = []
    test_accuracy_rate = []
    average_loss = []

    D_in = datasets.get_num_features("mnist")
    D_out = datasets.get_num_classes("mnist")

    # Six clients on clean shards mnist0..mnist5 ...
    for shard in range(6):
        clients.append(
            Client("mnist", "mnist" + str(shard), batch_size,
                   returnModel(D_in, D_out), train_cut))

    # ... and four clients all training on the poisoned set.
    for _ in range(4):
        clients.append(
            Client("mnist", "mnist_bad_full", batch_size,
                   returnModel(D_in, D_out), train_cut))

    test_client = Client("mnist", "mnist_test", batch_size,
                         returnModel(D_in, D_out), 0)

    rejections = np.zeros(10)  # bookkeeping for the (disabled) RONI check

    for step in range(iter_time):
        modelWeights = clients[0].getModelWeights()

        # Client 0 aggregates every client's gradient.
        for i in range(10):
            grad = clients[i].getGrad()
            clients[0].updateGrad(grad)

        # Apply the aggregated update, then broadcast the new weights.
        clients[0].step()
        modelWeights = clients[0].getModelWeights()
        for i in range(10):
            clients[i].updateModel(modelWeights)

        # Periodic evaluation.
        if step % 100 == 0:
            loss = 0.0
            for i in range(10):
                loss += clients[i].getLoss()

            test_client.updateModel(modelWeights)
            test_err = test_client.getTestErr()
            attack_rate = test_client.get17AttackRate()

            print("Average loss is " + str(loss / len(clients)))
            print("Test error: " + str(test_err))
            print("Attack rate on 1s: " + str(attack_rate) + "\n")

            average_loss.append(loss / len(clients))
            test_accuracy_rate.append(attack_rate)

    # Plot loss and attack rate against evaluation index (one point per 100
    # iterations), sharing the x-axis with twin y-axes.
    x = range(1, iter_time // 100 + 1)
    fig, ax1 = plt.subplots()
    ax1.plot(x, average_loss, color='orangered', label='mnist_average_loss')
    plt.legend(loc=2)
    ax2 = ax1.twinx()
    ax2.plot(x,
             test_accuracy_rate,
             color='blue',
             label='mnist_test_accuracy_rate')
    plt.legend(loc=1)
    ax1.set_xlabel("iteration time / 100")
    ax1.set_ylabel("average_loss")
    ax2.set_ylabel("accuracy_rate")
    plt.title("mnist_graph")
    plt.legend()
    mp.show()

    # Final evaluation on the held-out test set.
    test_client.updateModel(modelWeights)
    test_err = test_client.getTestErr()
    print("Test error: " + str(test_err))
    accuracy_rate = 1 - test_err
    print("Accuracy rate: " + str(accuracy_rate))
# Example 5
def main():
    """Federated LFW gender-classification training across ten clients.

    Each client trains on its own shard (lfw_maleness_train0..9); all
    gradients are aggregated by client 0, which applies the update and
    broadcasts new weights. Loss and test accuracy are logged every 100
    iterations and plotted at the end.
    """
    iter_time = 1500
    batch_size = 4
    train_cut = 0.8
    clients = []
    average_loss = []
    test_accuracy_rate = []

    D_in = datasets.get_num_features("lfw")
    D_out = datasets.get_num_classes("lfw")

    print("Creating clients")
    for shard in range(10):
        clients.append(
            Client("lfw", "lfw_maleness_train" + str(shard), batch_size,
                   returnModel(D_in, D_out), train_cut))

    test_client = Client("lfw", "lfw_maleness_test", batch_size,
                         returnModel(D_in, D_out), 0)

    print("Training for iterations")
    for step in range(iter_time):
        # Client 0 aggregates every client's gradient.
        for i in range(10):
            clients[0].updateGrad(clients[i].getGrad())

        # Apply the aggregated update and broadcast the new weights.
        clients[0].step()
        modelWeights = clients[0].getModelWeights()
        for i in range(10):
            clients[i].updateModel(modelWeights)

        # Periodic evaluation.
        if step % 100 == 0:
            loss = 0.0
            for i in range(10):
                loss += clients[i].getLoss()
            print("Average loss is " + str(loss / len(clients)))

            test_client.updateModel(modelWeights)
            test_err = test_client.getTestErr()
            print("Test error: " + str(test_err))
            accuracy_rate = 1 - test_err
            print("Accuracy rate: " + str(accuracy_rate) + "\n")

            average_loss.append(loss / len(clients))
            test_accuracy_rate.append(accuracy_rate)

    # Plot loss and accuracy against evaluation index (one point per 100
    # iterations), sharing the x-axis with twin y-axes.
    x = range(1, iter_time // 100 + 1)
    fig, ax1 = plt.subplots()
    ax1.plot(x,
             average_loss,
             color='orangered',
             label='lfw_gender_average_loss')
    plt.legend(loc=2)
    ax2 = ax1.twinx()
    ax2.plot(x,
             test_accuracy_rate,
             color='blue',
             label='lfw_gender_test_accuracy_rate')
    plt.legend(loc=1)
    ax1.set_xlabel("iteration time / 100")
    ax1.set_ylabel("average_loss")
    ax2.set_ylabel("accuracy_rate")
    plt.title("lfw_gender_graph")
    plt.legend()
    mp.show()

    # Final evaluation on the held-out test set.
    test_client.updateModel(modelWeights)
    test_err = test_client.getTestErr()
    print("Test error: " + str(test_err))
    accuracy_rate = 1 - test_err
    print("Accuracy rate: " + str(accuracy_rate) + "\n")