Code example #1
    scheduler.step()
    cudnn.benchmark = True
    model.train()
    requires_grad_(m, True)
    accs = AverageMeter()
    losses = AverageMeter()
    attack_norms = AverageMeter()

    length = len(train_loader)
    for i, (images, labels) in enumerate(tqdm.tqdm(train_loader, ncols=80)):

        images, labels = images.to(DEVICE), labels.to(DEVICE)
        # loss on the clean (original) images
        #logits_clean = model.forward(images)
        logits_clean, feature_clean = model.forward(images)
        #loss = F.cross_entropy(logits_clean, labels)

        if args.adv is not None and epoch >= args.adv:
            model.eval()
            requires_grad_(m, False)
            adv = attacker.attack(model, images, labels)
            l2_norms = (adv - images).view(args.batch_size, -1).norm(2, 1)
            mean_norm = l2_norms.mean()
            if args.max_norm:
                adv = torch.renorm(
                    adv - images, p=2, dim=0, maxnorm=args.max_norm) + images
            attack_norms.append(mean_norm.item())
            requires_grad_(m, True)
            model.train()
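These snippets rely on a few helpers that are not shown here (requires_grad_, an AverageMeter with an append method, and the attacker object). A minimal sketch of what the first two might look like, assuming utilities similar to common adversarial-training codebases; the project's own implementations may differ:

    # Hypothetical helper utilities (assumed, not taken from the project).
    import torch

    def requires_grad_(module: torch.nn.Module, flag: bool) -> None:
        """Toggle gradient tracking for every parameter of a module."""
        for param in module.parameters():
            param.requires_grad_(flag)

    class AverageMeter:
        """Collects scalar values and reports their running average."""
        def __init__(self):
            self.values = []

        def append(self, value: float) -> None:
            self.values.append(value)

        @property
        def avg(self) -> float:
            return sum(self.values) / max(len(self.values), 1)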
Code example #2
    model.train()
    requires_grad_(m, True)
    accs = AverageMeter()
    losses = AverageMeter()
    attack_norms = AverageMeter()

    length = len(train_loader)
    for i, (images, labels) in enumerate(tqdm.tqdm(train_loader, ncols=80)):

        images, labels = images.to(DEVICE), labels.to(DEVICE)
        # augment inputs with noise
        noise = torch.randn_like(images, device='cuda') * args.noise_sd
        images_noise = images + noise
        # loss on the clean (noise-augmented) images
        #logits_clean = model.forward(images)
        logits_clean = model.forward(images_noise)
        loss = F.cross_entropy(logits_clean, labels)

        if args.adv is not None and epoch >= args.adv:
            model.eval()
            requires_grad_(m, False)
            adv = attacker.attack(model, images, labels, noise=noise)
            l2_norms = (adv - images).view(args.batch_size, -1).norm(2, 1)
            mean_norm = l2_norms.mean()
            if args.max_norm:
                adv = torch.renorm(
                    adv - images, p=2, dim=0, maxnorm=args.max_norm) + images
            attack_norms.append(mean_norm.item())
            requires_grad_(m, True)
            model.train()
            adv_noise = adv + noise
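Example #2 stops right after forming adv_noise. A hedged sketch of how the loop might continue, feeding the noise-augmented adversarial batch back through the model; the variable name logits_adv and the equal weighting of the two loss terms are assumptions, not taken from the project:

            # Assumed continuation of the loop above (not from the project):
            # evaluate the model on the noise-augmented adversarial batch and
            # mix that loss with the clean-noise loss computed earlier.
            logits_adv = model(adv_noise)
            loss_adv = F.cross_entropy(logits_adv, labels)
            loss = 0.5 * loss + 0.5 * loss_adv  # equal weighting is an assumption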
Code example #3
File: cifar10_ALP.py  Project: a-zuoxi/tianchi_f
    scheduler.step()
    cudnn.benchmark = True
    model.train()
    requires_grad_(m, True)
    accs = AverageMeter()
    losses = AverageMeter()
    attack_norms = AverageMeter()

    length = len(train_loader)
    for i, (images, labels) in enumerate(tqdm.tqdm(train_loader, ncols=80)):

        images, labels = images.to(DEVICE), labels.to(DEVICE)
        # loss on the clean (original) images
        #logits_clean = model.forward(images)
        logits = model.forward(images)
        loss = F.cross_entropy(logits, labels)

        if args.adv is not None and epoch >= args.adv:

            model.eval()
            requires_grad_(m, False)
            adv = attacker.attack(model, images, labels)
            l2_norms = (adv - images).view(args.batch_size, -1).norm(2, 1)
            mean_norm = l2_norms.mean()
            if args.max_norm:
                adv = torch.renorm(
                    adv - images, p=2, dim=0, maxnorm=args.max_norm) + images
            attack_norms.append(mean_norm.item())
            requires_grad_(m, True)
            model.train()
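Example #3 is cut off before the adversarial batch is used. Given the file name cifar10_ALP.py, the missing part presumably adds an Adversarial Logit Pairing term; the sketch below shows the usual ALP formulation, where logits_adv, alp_weight, and its value are assumptions rather than the project's code:

            # Assumed ALP-style continuation (not from the project):
            logits_adv = model(adv)
            loss_adv = F.cross_entropy(logits_adv, labels)
            alp_weight = 0.5  # pairing coefficient, assumed value
            # Logit pairing: pull clean and adversarial logits together.
            loss = loss_adv + alp_weight * F.mse_loss(logits_adv, logits)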
Code example #4
File: cifar10_shape.py  Project: a-zuoxi/tianchi_f
        ETA(), ' ',
        FileTransferSpeed()
    ]
    pbar = ProgressBar(widgets=widgets)
    length = len(train_loader)
    i = 0
    sigma = 0.5
    for batch_data in pbar(train_loader):

        images, labels = batch_data['image'].to(
            DEVICE), batch_data['label_idx'].to(DEVICE)
        #images, labels = images.to(DEVICE), labels.to(DEVICE)

        # loss on the clean (original) images
        logits_clean = model.forward(images)
        loss = F.cross_entropy(logits_clean, labels)

        if args.adv is not None and epoch >= args.adv:
            model.eval()
            requires_grad_(m, False)
            adv = attacker.attack(model, images, labels)
            l2_norms = (adv - images).view(args.batch_size, -1).norm(2, 1)
            mean_norm = l2_norms.mean()
            if args.max_norm:
                adv = torch.renorm(
                    adv - images, p=2, dim=0, maxnorm=args.max_norm) + images

            attack_norms.append(mean_norm.item())
            requires_grad_(m, True)
            model.train()
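All four examples clamp the adversarial perturbation with torch.renorm before reusing it. A small standalone illustration of what that projection does (the shapes and the max_norm value here are illustrative only):

    # Standalone check of the per-sample L2 projection used in the examples.
    import torch

    batch_size = 4
    images = torch.rand(batch_size, 3, 32, 32)
    adv = images + 0.3 * torch.randn_like(images)

    max_norm = 1.0
    # torch.renorm rescales every sub-tensor along dim=0 (each sample's
    # perturbation) whose L2 norm exceeds maxnorm down to exactly maxnorm.
    delta = torch.renorm(adv - images, p=2, dim=0, maxnorm=max_norm)
    adv_clamped = images + delta

    print((adv_clamped - images).view(batch_size, -1).norm(2, 1))  # all <= max_norm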