Example #1 — adversarial-training loop body with shape/attention auxiliary loss
        # Shape/attention auxiliary loss: once training reaches epoch
        # `args.shape`, build a Gaussian-perturbed copy of the batch and add an
        # attention-based cross-entropy term on top of the adversarial loss.
        if args.shape is not None and epoch >= args.shape:
            # Freeze model/grads while constructing the perturbed view.
            model.eval()
            requires_grad_(m, False)
            # Noise magnitude controlled by args.noise_sd.
            # NOTE(review): device='cuda' is hard-coded — assumes CUDA training;
            # images is presumably already on the same device — confirm.
            noise = torch.randn_like(images, device='cuda') * args.noise_sd
            image_shape = images + noise
            #if args.max_norm:
            # image_shape = torch.renorm(image_shape - images, p=2, dim=0, maxnorm=args.max_norm) + images
            requires_grad_(m, True)
            model.train()
            print('238')  # NOTE(review): debug leftover — consider removing.

            #logits_shape = model(image_shape.detach())
            #loss_shape = F.cross_entropy(logits_shape, labels)

            # Joint forward over the clean and noise-perturbed views; both
            # inputs are detached so no gradient flows back into their creation.
            logits = model.forward_attention(images.detach(),
                                             image_shape.detach())
            loss_attention = F.cross_entropy(logits, labels)
            #attention
            #loss=loss_adv+loss_attention
            #attention+plp
            # loss_adv is computed earlier in the loop (outside this view).
            loss = loss_adv + loss_attention  #+ 0.2*F.mse_loss(logits_adv,logits_clean)+0.2*F.mse_loss(logits_clean,logits_shape)

        #loss = loss+ loss_adv + 0.5*F.mse_loss(logits_adv,logits)
        # Standard optimizer step on the combined loss.
        # NOTE(review): if the branch above did not run, `loss` (and `logits`
        # below) must have been assigned earlier in the loop — confirm upstream.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Track batch accuracy (argmax over class dim) and raw loss value.
        accs.append((logits.argmax(1) == labels).float().mean().item())
        losses.append(loss.item())

        # Periodic logging hook; body continues beyond this view.
        if CALLBACK and not ((i + 1) % args.print_freq):
Example #2 — same loop body, with additional logit/feature consistency (MSE) terms
            #loss=loss+ loss_adv #+ 0.5*F.mse_loss(logits_adv,logits)

        # Shape/attention auxiliary loss (variant with consistency terms):
        # after epoch `args.shape`, perturb the batch with Gaussian noise and
        # combine adversarial CE, attention CE, and logit/feature MSE penalties.
        if args.shape is not None and epoch >= args.shape:
            # Freeze model/grads while constructing the perturbed view.
            model.eval()
            requires_grad_(m, False)
            # Noise magnitude controlled by args.noise_sd.
            # NOTE(review): device='cuda' is hard-coded — assumes CUDA training.
            noise = torch.randn_like(images, device='cuda') * args.noise_sd
            image_shape = images + noise
            #if args.max_norm:
            # image_shape = torch.renorm(image_shape - images, p=2, dim=0, maxnorm=args.max_norm) + images
            requires_grad_(m, True)
            model.train()

            #logits_shape = model(image_shape.detach())
            #loss_shape = F.cross_entropy(logits_shape, labels)

            # Here forward_attention returns a (logits, fused-feature) pair,
            # unlike the single-return variant elsewhere in this file.
            # NOTE(review): feature_mix is unpacked but never used in this
            # span — confirm whether it is needed or the unpack can be dropped.
            logits, feature_mix = model.forward_attention(
                images.detach(), image_shape.detach())
            loss_attention = F.cross_entropy(logits, labels)
            #attention
            #loss=loss_adv+loss_attention
            #attention+plp
            # Combined objective: adversarial CE + attention CE
            # + 0.2 * logit-consistency MSE + 0.1 * feature-consistency MSE.
            # loss_adv / logits_adv / logits_clean / feature_adv / feature_clean
            # are all computed earlier in the loop (outside this view).
            loss = loss_adv + loss_attention + 0.2 * F.mse_loss(
                logits_adv, logits_clean) + 0.1 * F.mse_loss(
                    feature_adv, feature_clean)

        #loss = loss+ loss_adv + 0.5*F.mse_loss(logits_adv,logits)
        # Standard optimizer step on the combined loss.
        # NOTE(review): if the branch above did not run, `loss` and `logits`
        # must have been assigned earlier in the loop — confirm upstream.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Track batch accuracy (argmax over class dim) and raw loss value.
        accs.append((logits.argmax(1) == labels).float().mean().item())
        losses.append(loss.item())