コード例 #1
0
def train(dataloader, model, criterion, optimizer, scheduler, epoch):
    """Train `model` for one epoch on inputs perturbed by a blend of
    Gaussian noise and targeted adversarial-gradient noise.

    For every batch, a per-sample permutation of all *wrong* classes is
    built; for each wrong class the negated input-loss gradient (via ART's
    PyTorchClassifier) is scaled by `args.eps_step` and accumulated into a
    targeted-noise tensor, which is mixed with Gaussian noise using
    `args.k_value` before the normal training step.

    Args:
        dataloader: yields (x, y) batches of images and integer labels.
        model: network being trained (assumed already on `device`).
        criterion: classification loss for the actual training step.
        optimizer: optimizer updating `model`'s parameters.
        scheduler: LR scheduler, stepped once per epoch.
        epoch: current epoch index (logging only).

    Returns:
        Tuple of (mean loss per batch, mean accuracy per batch).
    """
    model.train()
    print('epoch ' + str(epoch))

    train_loss = 0.0
    train_acc = 0.0
    total = len(dataloader)
    start = time.time()

    # The ART wrapper is used only to compute loss gradients w.r.t. the
    # input. It keeps a reference to `model` (NOTE(review): ART does not
    # copy the network), so building it once outside the batch loop still
    # sees up-to-date weights and avoids re-constructing a throwaway SGD
    # optimizer every batch.
    tmp_criterion = nn.CrossEntropyLoss()
    tmp_optimizer = optim.SGD(model.parameters(), lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    classifier = PyTorchClassifier(
        model=model,
        clip_values=(min_pixel_value, max_pixel_value),
        loss=tmp_criterion,
        optimizer=tmp_optimizer,
        input_shape=(3, 32, 32),
        nb_classes=10,
    )

    num_classes = get_num_classes()

    for batch_num, (x, y) in enumerate(dataloader):
        x = x.to(device)
        y = y.to(device)

        # Gaussian noise component.
        gauss_noise = torch.randn_like(x, device=device) * args.noise_sd

        # For each sample, a random permutation of every class except its
        # true label: shape (batch, num_classes - 1).
        y_np = y.cpu().numpy()
        targets = np.stack([
            np.random.permutation(np.delete(np.arange(num_classes), label))
            for label in y_np
        ])

        # Accumulate the negated loss gradient toward each wrong class.
        mix_noise = torch.zeros_like(x)
        for t in range(targets.shape[1]):
            # One-hot encode this round's target class for each sample.
            y_oh = np.eye(num_classes)[targets[:, t]]

            grad = classifier.loss_gradient(x=x.cpu().numpy(), y=y_oh) * (-1.0)
            scaled_grad = torch.Tensor(grad * args.eps_step).to(device)
            mix_noise += scaled_grad

            # Clear gradients left behind by the ART gradient computation.
            model.zero_grad()
            tmp_optimizer.zero_grad()

        # Blend Gaussian and targeted noise, weighted by k_value.
        x_combine = x + (gauss_noise * (1.0 - args.k_value)) + (mix_noise * args.k_value)

        model.zero_grad()

        output = model(x_combine)
        loss = criterion(output, y)
        acc = accuracy(output, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        train_acc += acc

    scheduler.step()
    end = time.time()
    print('training time:', end - start, 'sec, loss: ', train_loss / total,
          'acc: ', train_acc / total)
    return train_loss / total, train_acc / total
コード例 #2
0
ファイル: train_target.py プロジェクト: yxzi/at-field
def train(dataloader, model, criterion, optimizer, scheduler, epoch):
    """Train `model` for one epoch on inputs perturbed by a blend of
    Gaussian noise and a single targeted adversarial-gradient step.

    For every batch, ART's `random_targets` picks one random wrong class
    per sample; the negated input-loss gradient toward that class, scaled
    by `args.eps_step`, is mixed with Gaussian noise using `args.k_value`
    before the normal training step.

    Args:
        dataloader: yields (x, y) batches of images and integer labels.
        model: network being trained (assumed already on `device`).
        criterion: classification loss for the actual training step.
        optimizer: optimizer updating `model`'s parameters.
        scheduler: LR scheduler, stepped once per epoch.
        epoch: current epoch index (logging only).

    Returns:
        Tuple of (mean loss per batch, mean accuracy per batch).
    """
    model.train()
    print('epoch ' + str(epoch))

    train_loss = 0.0
    train_acc = 0.0
    total = len(dataloader)
    start = time.time()

    # The ART wrapper is used only to compute loss gradients w.r.t. the
    # input. It keeps a reference to `model` (NOTE(review): ART does not
    # copy the network), so building it once outside the batch loop still
    # sees up-to-date weights and avoids re-constructing a throwaway SGD
    # optimizer every batch.
    tmp_criterion = nn.CrossEntropyLoss()
    tmp_optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    classifier = PyTorchClassifier(
        model=model,
        clip_values=(min_pixel_value, max_pixel_value),
        loss=tmp_criterion,
        optimizer=tmp_optimizer,
        input_shape=(3, 32, 32),
        nb_classes=10,
    )

    for batch_num, (x, y) in enumerate(dataloader):
        x = x.to(device)
        y = y.to(device)

        # Gaussian noise component.
        gauss_noise = torch.randn_like(x, device=device) * args.noise_sd

        # One random wrong class per sample (one-hot encoded by ART).
        targets = art.utils.random_targets(y.cpu().numpy(), get_num_classes())

        # Negated loss gradient toward the random target, scaled to a step.
        grad = classifier.loss_gradient(x=x.cpu().numpy(), y=targets) * (-1.0)
        scaled_grad = torch.Tensor(grad * args.eps_step).to(device)

        # Blend Gaussian and targeted noise, weighted by k_value.
        x_combine = x + (gauss_noise *
                         (1.0 - args.k_value)) + (scaled_grad * args.k_value)

        model.zero_grad()

        output = model(x_combine)
        loss = criterion(output, y)
        acc = accuracy(output, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        train_acc += acc

    scheduler.step()
    end = time.time()
    print('training time:', end - start, 'sec, loss: ', train_loss / total,
          'acc: ', train_acc / total)
    return train_loss / total, train_acc / total