# Example 1
def main():
    """Evaluate adversarial attacks (FGSM, DeepFool, universal) against a
    classifier protected by an autoencoder-based defender.

    Loads a pretrained fully-connected classifier and a pretrained
    autoencoder, moves both to ``device``, and runs each attack type
    through ``adv_attack``.
    """
    batch_size = 1  # NOTE: DeepFool requires batch_size == 1

    train_loader, test_loader = get_dataloader(batch_size)

    # Base model: used for classification and for crafting adversarial samples.
    fc_model = FC_model()
    # Defender: input is passed through this autoencoder before the base model.
    defender = AE()

    load_model(fc_model, './pretrained_models/fc_model.pth')
    load_model(defender, './pretrained_models/autoencoder_pretrained.pth')

    fc_model.to(device)
    defender.to(device)

    criterion = nn.CrossEntropyLoss()

    # Optional experiment: craft adversarial examples for epsilon in [0, 1]
    # at a step size of 0.05.
    # acc_list = [adv_attack(fc_model, defender, test_loader, criterion, i * 0.05)
    #             for i in range(21)]
    # print(acc_list)

    # Set defender = None to evaluate the undefended base model instead.

    # FGSM attack
    adv_attack(fc_model, defender, test_loader, criterion, attack_type="fgsm")

    # DeepFool attack
    adv_attack(fc_model, defender, test_loader, criterion, attack_type="deepfool")

    # Universal attack
    adv_attack(fc_model, defender, test_loader, criterion, attack_type="universal")
# Example 2
            data = data.to(device).view(-1, 28 * 28)
            label = label.to(device).view(-1, 28 * 28)
            _, recons_x = model(data)
            loss = criterion(recons_x, label)
            test_loss += loss.item()
            total += label.size(0)

    avg_loss = test_loss / total

    print('===> Test Average loss: {:.7f}\n'.format(avg_loss))

    return avg_loss


# --- Script: train the autoencoder used as an adversarial-input defender. ---
ae_model = AE()
ae_model.to(device)
# Adam with the standard betas; lr matches the classifier training above.
optimizer = optim.Adam(ae_model.parameters(), lr=0.001, betas=(0.9, 0.999))
criterion = nn.MSELoss()  # reconstruction loss between output and target
batch_size = 256

# Dataset of (input, target) tensor pairs saved to disk.
# NOTE(review): presumably (noisy/adversarial, clean) image pairs for
# reconstruction training — confirm against the code that produced the file.
train_loader, test_loader = get_customDataLoader(
    './data_for_ae/data_for_autoencoder.pth', batch_size=batch_size)
trainer(ae_model,
        train_loader,
        test_loader,
        optimizer,
        criterion,
        save_path='./pretrained_models/autoencoder_pretrained_1.pth')
# To evaluate a previously trained model instead of retraining:
#load_model(ae_model, './autoencoder_pretrained.pth')
#test(ae_model, test_loader)
# Example 3
def train(*,
          folder=None,
          dataset='mnist',
          patch_size=8,
          resume=False,
          log_interval=1,
          device='cpu',
          objective='vae',
          batch_size=64,
          nz=100,
          lr=0.001,
          num_workers=1,
          nb_filters=64,
          nb_draw_layers=1,
          nb_epochs=3000):
    """Train a (patch-wise) autoencoder/VAE on *dataset*.

    Sample grids (real / reconstructed / generated) and the model
    checkpoint ``net.th`` are written to *folder*, which defaults to
    ``results/<dataset>/<P>x<P>``.

    Parameters
    ----------
    folder : str or None
        Output directory; created if missing.
    dataset : str
        Dataset name understood by ``load_dataset``.
    patch_size : int or None
        If not None, train on square patches of this size.
        NOTE(review): with ``patch_size=None`` the default *folder* and the
        AE ``w=`` argument both receive ``None`` — confirm that path is
        actually supported before relying on it.
    resume : bool
        Reload ``<folder>/net.th`` instead of building a fresh model.
    nb_draw_layers : int
        Unused here; kept for interface compatibility with callers.
    nb_epochs : int
        Number of passes over the data (default 3000, the previously
        hard-coded value).
    """
    if folder is None:
        folder = f'results/{dataset}/{patch_size}x{patch_size}'
    # exist_ok replaces the former broad try/except pass, which also hid
    # real failures such as permission errors.
    os.makedirs(folder, exist_ok=True)
    act = 'sigmoid'  # output activation; the tanh rescaling below is then a no-op
    train_set = load_dataset(dataset, split='train')
    if patch_size is not None:
        patch_size = int(patch_size)
        train_set = PatchDataset(train_set, patch_size)
    x0, _ = train_set[0]
    nc = x0.size(0)  # channel count, taken from the first example
    dataloader = torch.utils.data.DataLoader(
        train_set,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
    )
    if resume:
        # SECURITY: torch.load unpickles arbitrary objects — only resume
        # from checkpoints you trust.
        net = torch.load('{}/net.th'.format(folder))
    else:
        net = AE(
            latent_size=nz,
            nc=nc,
            w=patch_size,
            ndf=nb_filters,
            act=act,
            objective=objective,
        )
    opt = optim.Adam(net.parameters(), lr=lr)
    net = net.to(device)
    niter = 0
    for epoch in range(nb_epochs):
        for X, _ in dataloader:
            net.zero_grad()
            X = X.to(device)
            Xrec, mu, logvar = net(X)
            rec, kld = net.loss_function(X, Xrec, mu, logvar)
            loss = rec + kld
            loss.backward()
            opt.step()
            if niter % log_interval == 0:
                print(
                    f'Epoch: {epoch:05d}/{nb_epochs:05d} iter: {niter:05d} loss: {loss.item():.2f} rec: {rec.item():.2f} kld:{kld.item():.2f}'
                )
            if niter % 100 == 0:
                # Periodically dump sample grids and checkpoint the model.
                Xsamples = net.sample(nb_examples=100)
                # Map a tanh output range [-1, 1] back to [0, 1] for saving.
                X = 0.5 * (X + 1) if act == 'tanh' else X
                Xrecs = 0.5 * (Xrec + 1) if act == 'tanh' else Xrec
                Xsamples = 0.5 * (Xsamples + 1) if act == 'tanh' else Xsamples
                X = X.detach().to('cpu').numpy()
                Xrecs = Xrecs.detach().to('cpu').numpy()
                Xsamples = Xsamples.detach().to('cpu').numpy()
                imsave(f'{folder}/real_samples.png', grid_of_images_default(X))
                imsave(f'{folder}/rec_samples.png',
                       grid_of_images_default(Xrecs))
                imsave(f'{folder}/fake_samples.png',
                       grid_of_images_default(Xsamples))
                torch.save(net, '{}/net.th'.format(folder))
            niter += 1