Example #1
# Imports assumed by this snippet; optim, analysis, and sampling match the
# pyvacy API, and Classifier is defined elsewhere in the source project.
import numpy as np
import torch.nn as nn
from torch.utils.data import TensorDataset
from torchvision import datasets, transforms
from pyvacy import optim, analysis, sampling

def train(params):
    train_dataset = datasets.MNIST('data/mnist',
                                   train=True,
                                   download=True,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.5, ), (0.5, ))
                                   ]))

    classifier = Classifier(input_dim=np.prod(train_dataset[0][0].shape),
                            device=params['device'])

    optimizer = optim.DPSGD(
        l2_norm_clip=params['l2_norm_clip'],
        noise_multiplier=params['noise_multiplier'],
        minibatch_size=params['minibatch_size'],
        microbatch_size=params['microbatch_size'],
        params=classifier.parameters(),
        lr=params['lr'],
        weight_decay=params['l2_penalty'],
    )

    print('Achieves ({}, {})-DP'.format(
        analysis.epsilon(len(train_dataset), params['minibatch_size'],
                         params['noise_multiplier'], params['iterations'],
                         params['delta']),
        params['delta'],
    ))

    loss_function = nn.NLLLoss()

    minibatch_loader, microbatch_loader = sampling.get_data_loaders(
        params['minibatch_size'], params['microbatch_size'],
        params['iterations'])

    iteration = 0
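    # One DP step per sampled minibatch: accumulate clipped microbatch gradients, then a single noisy update.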
    for X_minibatch, y_minibatch in minibatch_loader(train_dataset):
        optimizer.zero_grad()
        for X_microbatch, y_microbatch in microbatch_loader(
                TensorDataset(X_minibatch, y_minibatch)):
            X_microbatch = X_microbatch.to(params['device'])
            y_microbatch = y_microbatch.to(params['device'])

            optimizer.zero_microbatch_grad()
            loss = loss_function(classifier(X_microbatch), y_microbatch)
            loss.backward()
            optimizer.microbatch_step()
        optimizer.step()

        if iteration % 10 == 0:
            print('[Iteration %d/%d] [Loss: %f]' %
                  (iteration, params['iterations'], loss.item()))
        iteration += 1

    return classifier
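
The snippet above reads every hyperparameter from a single params dict. A minimal sketch of calling it (all values are illustrative placeholders, not settings from the original project):

import torch

params = {
    'device': 'cuda' if torch.cuda.is_available() else 'cpu',
    'l2_norm_clip': 1.0,       # per-microbatch gradient norm bound
    'noise_multiplier': 1.1,   # noise std relative to the clipping bound
    'minibatch_size': 256,
    'microbatch_size': 1,      # 1 => per-example clipping
    'lr': 0.05,
    'l2_penalty': 0.0,
    'iterations': 1000,
    'delta': 1e-5,             # target delta for the (epsilon, delta)-DP report
}
classifier = train(params)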
Example #2
# (Same imports as Example #1 are assumed, plus torch itself and torchvision's
#  save_image; Generator and Discriminator are project-local.)
import torch
from torchvision.utils import save_image

def train(params):
    train_dataset = datasets.MNIST('data/mnist',
        train=True,
        download=True,
        transform=transforms.Compose([
           transforms.ToTensor(),
           transforms.Normalize((0.5,), (0.5,))
        ])
    )

    generator = Generator(
        input_dim=params['latent_dim'],
        output_dim=np.prod(train_dataset[0][0].shape),
        device=params['device'],
    )

    g_optimizer = torch.optim.RMSprop(
        params=generator.parameters(),
        lr=params['lr'],
        weight_decay=params['l2_penalty'],
    )

    discriminator = Discriminator(
        input_dim=np.prod(train_dataset[0][0].shape),
        device=params['device']
    )

    d_optimizer = optim.DPRMSprop(
        l2_norm_clip=params['l2_norm_clip'],
        noise_multiplier=params['noise_multiplier'],
        minibatch_size=params['minibatch_size'],
        microbatch_size=params['microbatch_size'],
        params=discriminator.parameters(),
        lr=params['lr'],
        weight_decay=params['l2_penalty'],
    )

    print('Achieves ({}, {})-DP'.format(
        analysis.epsilon(
            len(train_dataset),
            params['minibatch_size'],
            params['noise_multiplier'],
            params['iterations'],
            params['delta']
        ),
        params['delta'],
    ))

    minibatch_loader, microbatch_loader = sampling.get_data_loaders(
        params['minibatch_size'],
        params['microbatch_size'],
        params['iterations']
    )

    for iteration, (X_minibatch, _) in enumerate(minibatch_loader(train_dataset)):
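        # WGAN critic update, made differentially private by the DPRMSprop optimizer.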
        d_optimizer.zero_grad()
        for X_microbatch in microbatch_loader(X_minibatch):
            X_microbatch = X_microbatch.to(params['device'])

            z = torch.randn(X_microbatch.size(0), params['latent_dim'], device=params['device'])
            fake = generator(z).detach()
            d_optimizer.zero_microbatch_grad()
            d_loss = -torch.mean(discriminator(X_microbatch)) + torch.mean(discriminator(fake))
            d_loss.backward()
            d_optimizer.microbatch_step()
        d_optimizer.step()

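        # Standard WGAN weight clipping keeps the critic approximately Lipschitz.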
        for parameter in discriminator.parameters():
            parameter.data.clamp_(-params['clip_value'], params['clip_value'])

        if iteration % params['d_updates'] == 0:
            z = torch.randn(X_minibatch.size(0), params['latent_dim'], device=params['device'])
            fake = generator(z)
            g_optimizer.zero_grad()
            g_loss = -torch.mean(discriminator(fake))
            g_loss.backward()
            g_optimizer.step()

        if iteration % 100 == 0:
            print('[Iteration %d/%d] [D loss: %f] [G loss: %f]' % (iteration, params['iterations'], d_loss.item(), g_loss.item()))
            z = torch.randn(X_minibatch.size(0), params['latent_dim'], device=params['device'])
            fake = generator(z)
            save_image(fake.data[:25], "%d.png" % iteration, nrow=5, normalize=True)

    return generator
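
Once train returns, drawing samples from the private generator only needs fresh noise. A rough sketch (the 28x28 reshape assumes the flattened MNIST output used above; params is the same dict passed to train):

generator = train(params)
with torch.no_grad():
    z = torch.randn(25, params['latent_dim'], device=params['device'])
    samples = generator(z).view(-1, 1, 28, 28)
save_image(samples, 'samples.png', nrow=5, normalize=True)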
Example #3
# (Snippet begins mid-call; the opening of the DP optimizer construction is
#  reconstructed to match Examples #1 and #2. model, loss_function,
#  train_dataset and the hyperparameters are assumed to be defined above.)
optimizer = optim.DPSGD(
    l2_norm_clip=l2_norm_clip,
    noise_multiplier=noise_multiplier,
    minibatch_size=minibatch_size,
    microbatch_size=microbatch_size,
    params=model.parameters(),
    lr=lr,
)

minibatch_loader, microbatch_loader = sampling.get_data_loaders(
    minibatch_size,
    microbatch_size,
    iterations
)

print('Achieves ({}, {})-DP'.format(
    analysis.epsilon(
        len(train_dataset),
        minibatch_size,
        noise_multiplier,
        iterations,
        delta,
    ),
    delta,
))

for X_minibatch, y_minibatch in minibatch_loader(train_dataset):
    optimizer.zero_grad()
    for X_microbatch, y_microbatch in microbatch_loader(TensorDataset(X_minibatch, y_minibatch)):
        optimizer.zero_microbatch_grad()
        y_pred = model(X_microbatch)
        loss = loss_function(y_pred, y_microbatch)
        loss.backward()
        optimizer.microbatch_step()
    optimizer.step()
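
For intuition, here is roughly what the DP optimizers above do with the minibatch/microbatch split: clip each microbatch gradient, sum the clipped gradients, add calibrated noise, and take one averaged step. A conceptual sketch only, not pyvacy's actual implementation:

import torch

def dp_sgd_update(parameters, microbatch_grads, l2_norm_clip, noise_multiplier, lr):
    # microbatch_grads: one list of per-parameter gradient tensors per microbatch.
    accumulated = [torch.zeros_like(p) for p in parameters]
    for grads in microbatch_grads:
        # Scale the whole microbatch gradient so its global norm is <= l2_norm_clip.
        total_norm = torch.sqrt(sum(g.pow(2).sum() for g in grads))
        clip_coef = torch.clamp(l2_norm_clip / (total_norm + 1e-6), max=1.0)
        for acc, g in zip(accumulated, grads):
            acc.add_(g * clip_coef)
    for p, acc in zip(parameters, accumulated):
        # Gaussian noise scaled to the clipping bound, then an averaged SGD step.
        noise = torch.randn_like(acc) * noise_multiplier * l2_norm_clip
        p.data.add_(acc + noise, alpha=-lr / len(microbatch_grads))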
Example #4
def train(params):
    # (Snippet begins mid-call; the missing header and these opening lines are
    #  reconstructed to match Example #1.)
    train_dataset = datasets.MNIST('data/mnist',
                                   train=True,
                                   download=True,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.5, ), (0.5, ))
                                   ]))

    test_dataset = datasets.MNIST('data/mnist',
                                  train=False,
                                  download=True,
                                  transform=transforms.Compose([
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.5, ), (0.5, ))
                                  ]))

    epsilon = analysis.epsilon(len(train_dataset), params['minibatch_size'],
                               params['noise_multiplier'],
                               params['iterations'], params['delta'])
    print('Achieves ({}, {})-DP'.format(
        epsilon,
        params['delta'],
    ))

    result_folder = './results/mnist/noiseMPL{}/'.format(
        params['noise_multiplier'])
    if not os.path.exists(result_folder):
        os.makedirs(result_folder)
    result_model_folder = result_folder + 'models/'
    if not os.path.exists(result_model_folder):
        os.makedirs(result_model_folder)

    aggregate_result = np.zeros([len(test_dataset), 10 + 1], dtype=int)  # np.int was removed from NumPy
Example #5
# (DATASET_SIZE, MAX_TRAJ_LENGTH, create_tensorset, plot_and_save, model and
#  model_tiny are project-local definitions not shown in this snippet.)
def main(fpath, batch_size, noise_size, hidden_size, epochs, n_critic,
         n_generator, D_lr, G_lr, epoch_sample_cycle, epoch_sample_count,
         save_path, noise_multiplier, l2_norm_clip, weight_decay, tiny):

    global model
    if tiny:
        model = model_tiny

    epsilon = analysis.epsilon(N=DATASET_SIZE,
                               batch_size=batch_size,
                               iterations=epochs,
                               noise_multiplier=noise_multiplier)
    print("Epsilon: %.4f" % epsilon)

    tset = create_tensorset(fpath, MAX_TRAJ_LENGTH)

    if not os.path.isdir(save_path):
        os.makedirs(save_path)

    modo_path = os.path.join(save_path, "I%05d.pkl")
    save_path = os.path.join(save_path, "I%05d.png")

    device = ["cpu", "cuda"][torch.cuda.is_available()]
    print("Using: %s" % device)

    G = model.Generator(noise_size, hidden_size, MAX_TRAJ_LENGTH).to(device)
    D = model.Discriminator(MAX_TRAJ_LENGTH, hidden_size).to(device)

    D_optim = optim.DPAdam(params=D.parameters(),
                           lr=D_lr,
                           betas=(0, 0.999),
                           l2_norm_clip=l2_norm_clip,
                           microbatch_size=1,
                           minibatch_size=batch_size,
                           noise_multiplier=noise_multiplier)
    G_optim = torch.optim.Adam(G.parameters(), lr=G_lr, betas=(0, 0.999))

    minibatch_loader, microbatch_loader = sampling.get_data_loaders(
        minibatch_size=batch_size, microbatch_size=1, iterations=epochs)

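    # One DP critic step per iteration; the generator updates every n_critic iterations.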
    for i, (X, ) in enumerate(minibatch_loader(tset), 1):

        z = torch.randn(X.size(0), noise_size)

        D_optim.zero_grad()

        losses = []

        for Xi, zi in microbatch_loader(torch.utils.data.TensorDataset(X, z)):
            Xi = Xi.to(device)
            zi = zi.to(device)

            with torch.no_grad():
                G.eval()
                Xh = G(zi)

            D.train()
            loss = D(Xh).mean() - D(Xi).mean()

            D_optim.zero_microbatch_grad()
            loss.backward()
            D_optim.microbatch_step()

            losses.append(loss.item())

        D_optim.step()

        print("[E%05d] %.4f" % (i, sum(losses) / len(losses)))

        if i % n_critic == 0:

            G.train()
            D.eval()

            for j in range(n_generator):
                z = torch.randn(batch_size, noise_size)
                G_optim.zero_grad()
                (-D(G(z.to(device)))).mean().backward()
                G_optim.step()

        if i % epoch_sample_cycle == 0:

            with torch.no_grad():
                G.eval()
                z = torch.randn(epoch_sample_count, noise_size).to(device)
                Xh = G(z)
                plot_and_save(Xh, save_path % i)

            modo = modo_path % i
            torch.save(G.state_dict(), modo)

            print("Saved %s" % modo)