Example #1
import os

import torch
from tqdm import tqdm


def train():

    # Adversarial training loop (logit pairing)
    for epoch in range(args.epoch):

        # switch the network to training mode
        net.train()

        print('\n\n[LP/Epoch] : {}'.format(epoch+1))

        total_cross_loss = 0
        correct = 0
        total = 0

        for batch_idx, (inputs, targets) in enumerate(tqdm(trainloader)):

            # move the batch to the GPU
            inputs, targets = inputs.cuda(), targets.cuda()

            # generate adversarial examples and update the network parameters
            optimizer.zero_grad()
            adv_x = attack(inputs, targets) if args.eps != 0 else inputs
            adv_logits = net(adv_x)
            logits = net(inputs)
            loss = (0.5 * cross_entropy(adv_logits, targets)
                    + 0.5 * cross_entropy(logits, targets)
                    + mse_loss(adv_logits, logits))
            loss.backward()
            optimizer.step()

            # track training accuracy on the adversarial examples
            pred = torch.max(net(adv_x).detach(), dim=1)[1]
            correct += torch.sum(pred.eq(targets)).item()
            total += targets.numel()

            # accumulate the combined loss for logging
            total_cross_loss += loss.item()

            if batch_idx % 50 == 0 and batch_idx != 0:
                print('[LP/Train] Iter: {}, Acc: {:.3f}, Loss: {:.3f}'.format(
                    batch_idx, # Iter
                    100.*correct / total, # Acc
                    total_cross_loss / (batch_idx+1) # mean combined loss
                    )
                )

        # step the StepLR learning-rate scheduler
        scheduler.step()

        # Adversarial validation
        adversarial_test()

        # Save checkpoint file
        torch.save({
            'epoch': epoch+1,
            'model_state_dict': net.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'total_cross_entropy_loss': total_cross_loss / (batch_idx+1)
            }, os.path.join(args.save_dir, checkpoint_name))

        # argument print
        argument_print(args, checkpoint_name)
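
Example #1 uses module-level `cross_entropy` and `mse_loss` callables that the snippet never defines. A minimal sketch of how they might be set up, assuming the standard PyTorch criteria (an assumption for illustration, not taken from the source):

import torch.nn as nn

# Assumed criteria for the combined loss above:
#   0.5 * CE(adv_logits, targets) + 0.5 * CE(logits, targets) + MSE(adv_logits, logits)
cross_entropy = nn.CrossEntropyLoss()  # mean cross-entropy over the batch
mse_loss = nn.MSELoss()                # logit-pairing term: mean squared difference
                                       # between adversarial and clean logits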
Example #2
from datetime import datetime

import torch

trainloader, testloader = dataset_loader(args)
net = network_loader(args, mean=args.mean, std=args.std).cuda()
attack = attack_loader(args, net)

# SGD optimizer with momentum and weight decay, plus StepLR learning-rate scheduling
optimizer = torch.optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-2)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.2)

# Setting checkpoint date time
date_time = datetime.today().strftime("%m%d%H%M")

# checkpoint_name
checkpoint_name = 'LP_'+args.network+'_'+args.dataset+'_'+date_time+'.pth'

# argument print
argument_print(args, checkpoint_name)


def train():

    # Adversarial training loop (logit pairing)
    for epoch in range(args.epoch):

        # switch the network to training mode
        net.train()

        print('\n\n[LP/Epoch] : {}'.format(epoch+1))

        total_cross_loss = 0
        correct = 0
        total = 0
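
Both examples call `attack(inputs, targets)` without showing what `attack_loader` returns. A minimal sketch of one possibility, assuming a standard L-infinity PGD attack driven by the model's cross-entropy loss (the function name and the `eps`/`alpha`/`steps` defaults are illustrative assumptions, not the source's implementation):

import torch
import torch.nn.functional as F

def pgd_attack(net, x, y, eps=8/255, alpha=2/255, steps=10):
    # Start from a random point inside the L-inf eps-ball around x.
    x_adv = (x + torch.empty_like(x).uniform_(-eps, eps)).clamp(0, 1).detach()
    for _ in range(steps):
        x_adv.requires_grad_(True)
        loss = F.cross_entropy(net(x_adv), y)
        grad, = torch.autograd.grad(loss, x_adv)
        # Signed-gradient ascent step, then project back into the eps-ball around x.
        x_adv = x_adv.detach() + alpha * grad.sign()
        x_adv = torch.min(torch.max(x_adv, x - eps), x + eps).clamp(0, 1).detach()
    return x_adv

# Usage in place of attack(inputs, targets):
#   adv_x = pgd_attack(net, inputs, targets, eps=args.eps)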