def test(testloader, model, params, config):
    """Evaluate model accuracy on the test set under a PGD attack.

    Args:
        testloader: iterable yielding (x, y) batches.
        model: network whose forward pass returns (scores, features).
        params: unused here; kept for signature consistency with train().
        config: PGD attack configuration forwarded to attack.AttackPGD.

    Prints the adversarial accuracy in percent.
    """
    pgd = attack.AttackPGD(config)
    model.eval()
    correct, total = 0, 0
    for i, (x, y) in enumerate(testloader):
        x, y = x.cuda(), y.cuda()
        # Craft adversarial examples; the attack needs gradients w.r.t. x,
        # so the loop is deliberately NOT wrapped in torch.no_grad().
        x = pgd.attack(model, x, y)
        # Call the module itself rather than model.forward() so that any
        # registered forward hooks are executed (PyTorch convention).
        scores, _ = model(x)
        predicted = torch.argmax(scores, 1)
        correct += (predicted == y).sum().item()
        total += predicted.size(0)
    print('Accuracy {:f}'.format(100. * correct / total))
def train(trainloader, model, optimization, start_epoch, stop_epoch, params, config):
    """Train `model`, optionally on PGD-perturbed inputs, saving checkpoints.

    Args:
        trainloader: iterable yielding (x, y, gt) batches; gt is unused here.
        model: network with .feature and .classifier submodules; forward
            returns (scores, features).
        optimization: optimizer name; only 'Adam' is supported.
        start_epoch, stop_epoch: half-open epoch range to train over.
        params: namespace providing .method, .checkpoint_dir, .save_freq.
        config: PGD attack configuration forwarded to attack.AttackPGD.

    Returns:
        The trained model.

    Raises:
        ValueError: if `optimization` is not 'Adam'.
    """
    if optimization == 'Adam':
        optimizer = torch.optim.Adam(model.parameters())
    else:
        raise ValueError('Unknown optimization, please define by yourself')
    loss_fn = nn.CrossEntropyLoss()
    pgd = attack.AttackPGD(config)
    for epoch in range(start_epoch, stop_epoch):
        t = 0  # cumulative wall-clock seconds spent in forward/backward
        model.train()
        print_freq = 50
        avg_loss = 0
        correct, total = 0, 0
        # Loader yields (x, y, gt); the gt mask is ignored by this variant.
        for i, (x, y, gt) in enumerate(trainloader):
            x, y = x.cuda(), y.cuda()
            a = time.time()
            if params.method == 'nadv':
                x = pgd.attack4(model, x, y)
            optimizer.zero_grad()
            # Call the module, not model.forward(), so forward hooks run.
            scores, _ = model(x)
            predicted = torch.argmax(scores, 1)
            correct += (predicted == y).sum().item()
            total += predicted.size(0)
            loss = loss_fn(scores, y)
            loss.backward()
            optimizer.step()
            b = time.time()
            t += b - a
            # loss.item(), not the deprecated loss.data.item()
            avg_loss = avg_loss + loss.item()
            if i % print_freq == 0:
                print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Train Acc {:f}'
                      .format(epoch, i, len(trainloader),
                              avg_loss / float(i + 1), 100. * correct / total))
        print(t)
        if not os.path.isdir(params.checkpoint_dir):
            os.makedirs(params.checkpoint_dir)
        if (epoch % params.save_freq == 0) or (epoch == stop_epoch - 1):
            outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
            state_dict = {}
            state_dict['epoch'] = epoch
            state_dict['feature'] = model.feature.state_dict()
            state_dict['classifier'] = model.classifier.state_dict()
            torch.save(state_dict, outfile)
    return model
def test_adv(testloader, model, params, config):
    """Sweep PGD attack strengths and report test accuracy at each epsilon.

    For every epsilon listed in the module-level `test_dict['adv']`, a fresh
    attacker is built with that budget and the whole test set is evaluated.
    Prints the adversarial accuracy (percent) per epsilon.
    """
    model.eval()
    _, epsilons = test_dict['adv']
    for eps in epsilons:
        print('Testing at epsilon {:f}'.format(eps))
        # Rebuild the attacker so it picks up the new perturbation budget.
        config['epsilon'] = eps
        attacker = attack.AttackPGD(config)
        correct, total = 0, 0
        for inputs, labels in testloader:
            inputs, labels = inputs.cuda(), labels.cuda()
            inputs = attacker.attack(model, inputs, labels)
            logits, _ = model(inputs)
            preds = torch.argmax(logits, 1)
            correct += (preds == labels).sum().item()
            total += preds.size(0)
        print('Accuracy {:f}'.format(100. * correct / total))
def train(trainloader, model, teacher, optimization, start_epoch, stop_epoch, params, config):
    """Train `model` using explanation maps from `teacher` to guide perturbations.

    Per batch, a saliency map is computed from the teacher with the method
    selected by `params.exp`, then each pixel's perturbation direction is
    sampled (Bernoulli with probability p, annealed over epochs) between the
    saliency map and random sign noise. The perturbed images are either built
    directly (FGSM-style step of size params.e/255) or via the PGD attacker
    when params.method == 'adv'.

    Args:
        trainloader: iterable yielding (x, y) batches.
        model: student network; forward returns (scores, features).
        teacher: frozen network the explanation method is attached to.
        optimization: unused; Adam is always constructed.
        start_epoch, stop_epoch: half-open epoch range.
        params: namespace with .exp, .method, .e, .a, .checkpoint_dir, .save_freq.
        config: PGD attack configuration forwarded to attack.AttackPGD.

    Returns:
        The trained model.

    Raises:
        ValueError: if params.exp names an unknown explanation method.
    """
    if params.exp == 'gbp':
        gbp = GuidedBackprop(teacher)
    elif params.exp == 'sgrad':
        gbp = SmoothGrad(teacher)
    elif params.exp == 'igrad':
        gbp = IntegratedGrad(teacher)
    elif params.exp == 'gcam++':
        gbp = GradCAMpp('resnet', 'layer4', teacher)
    elif params.exp == 'ggcam++':
        gbp = GuidedGradCAM('resnet', 'layer4', teacher)
    else:
        # Fail fast: without this, an unknown params.exp left `gbp` undefined
        # and the loop crashed later with a confusing NameError.
        raise ValueError('Unknown explanation method: {}'.format(params.exp))
    attacker = attack.AttackPGD(config)
    optimizer = torch.optim.Adam(model.parameters())
    loss_fn = nn.CrossEntropyLoss()
    epsilon = params.e / 255.
    for epoch in range(start_epoch, stop_epoch):
        model.train()
        print_freq = 50
        avg_closs = 0
        correct, total = 0, 0
        # Mixing probability anneals toward 1 as epochs progress (params.a < 1).
        p = 1. - np.power(params.a, epoch / 10.0)
        for i, (x, y) in enumerate(trainloader):
            x, y = x.cuda(), y.cuda()
            # Teacher saliency map, one image at a time.
            target_mask = []
            for j in range(x.size(0)):
                m = gbp.forward(x[j].unsqueeze(0))
                target_mask.append(m)
            target_mask = torch.cat(target_mask, 0)
            # Debug artifact: dump the current batch's masks for inspection.
            torchvision.utils.save_image(target_mask.cpu(), 'target.jpg', nrow=16)
            noise = torch.sign(torch.FloatTensor(x.size()).uniform_(-1, 1)).cuda()
            prob = torch.Tensor(x.size()).fill_(p)
            m = torch.distributions.bernoulli.Bernoulli(prob)
            w = m.sample()
            w = w.cuda()
            # Per-pixel blend: saliency direction with prob p, random sign otherwise.
            grad = (1 - w) * noise + w * target_mask
            x_ = x.clone()
            if params.method == 'adv':
                x_ = attacker.attack2(model, x_, y, target_mask)
            else:
                x_ = x_ - epsilon * grad
                x_ = torch.clamp(x_, 0, 1)
            scores, _ = model(x_)
            closs = loss_fn(scores, y)
            predicted = torch.argmax(scores, 1)
            correct += (predicted == y).sum().item()
            total += predicted.size(0)
            optimizer.zero_grad()
            closs.backward()
            optimizer.step()
            # closs.item(), not the deprecated closs.data.item()
            avg_closs += closs.item()
            if i % print_freq == 0:
                print('Epoch {:d} | Batch {:d}/{:d} | CLoss {:f} | Train Acc {:f}'
                      .format(epoch, i, len(trainloader),
                              avg_closs / float(i + 1), 100. * correct / total))
        if not os.path.isdir(params.checkpoint_dir):
            os.makedirs(params.checkpoint_dir)
        if (epoch % params.save_freq == 0) or (epoch == stop_epoch - 1):
            outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
            state_dict = {}
            state_dict['epoch'] = epoch
            state_dict['feature'] = model.feature.state_dict()
            state_dict['classifier'] = model.classifier.state_dict()
            torch.save(state_dict, outfile)
    return model
def train(trainloader, model, teacher, optimization, start_epoch, stop_epoch, params, config):
    """Train `model` with precomputed saliency masks supplied by the loader.

    Same scheme as the teacher-guided variant, except the per-image saliency
    mask arrives as the third element of each batch instead of being computed
    online, which is why `teacher` is unused here.

    Args:
        trainloader: iterable yielding (x, y, target_mask) batches.
        model: student network; forward returns (scores, features).
        teacher: unused; kept for signature consistency with the online variant.
        optimization: unused; Adam is always constructed.
        start_epoch, stop_epoch: half-open epoch range.
        params: namespace with .method, .e, .a, .checkpoint_dir, .save_freq.
        config: PGD attack configuration forwarded to attack.AttackPGD.

    Returns:
        The trained model.
    """
    attacker = attack.AttackPGD(config)
    optimizer = torch.optim.Adam(model.parameters())
    loss_fn = nn.CrossEntropyLoss()
    epsilon = params.e / 255.
    for epoch in range(start_epoch, stop_epoch):
        t = 0  # cumulative wall-clock seconds spent per epoch in the hot loop
        model.train()
        print_freq = 50
        avg_closs = 0
        correct, total = 0, 0
        # Mixing probability anneals toward 1 as epochs progress (params.a < 1).
        p = 1. - np.power(params.a, epoch / 10.0)
        for i, (x, y, target_mask) in enumerate(trainloader):
            x, y = x.cuda(), y.cuda()
            a = time.time()
            noise = torch.sign(torch.FloatTensor(x.size()).uniform_(-1, 1))
            prob = torch.Tensor(x.size()).fill_(p)
            m = torch.distributions.bernoulli.Bernoulli(prob)
            w = m.sample()
            # Per-pixel blend (on CPU): saliency with prob p, random sign otherwise.
            grad = (1 - w) * noise + w * target_mask
            grad = grad.cuda()
            x_ = x.clone()
            if params.method == 'adv':
                x_ = attacker.attack3(model, x_, y, grad)
            else:
                x_ = x_ - epsilon * grad
                x_ = torch.clamp(x_, 0, 1)
            scores, _ = model(x_)
            closs = loss_fn(scores, y)
            predicted = torch.argmax(scores, 1)
            correct += (predicted == y).sum().item()
            total += predicted.size(0)
            optimizer.zero_grad()
            closs.backward()
            optimizer.step()
            b = time.time()
            # closs.item(), not the deprecated closs.data.item()
            avg_closs += closs.item()
            t += float(b - a)
            if i % print_freq == 0:
                print('Epoch {:d} | Batch {:d}/{:d} | CLoss {:f} | Train Acc {:f}'
                      .format(epoch, i, len(trainloader),
                              avg_closs / float(i + 1), 100. * correct / total))
        print(t)
        if not os.path.isdir(params.checkpoint_dir):
            os.makedirs(params.checkpoint_dir)
        if (epoch % params.save_freq == 0) or (epoch == stop_epoch - 1):
            outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
            state_dict = {}
            state_dict['epoch'] = epoch
            state_dict['feature'] = model.feature.state_dict()
            state_dict['classifier'] = model.classifier.state_dict()
            torch.save(state_dict, outfile)
    return model
stop_epoch, params, config): if params.exp == 'gbp': gbp1 = GuidedBackprop(teacher1) elif params.exp == 'sgrad': gbp1 = SmoothGrad(teacher1) elif params.exp == 'gcam++': gbp1 = GradCAMpp('resnet', 'layer4', teacher1) if params.exp2 == 'gbp': gbp2 = GuidedBackprop(teacher2) elif params.exp2 == 'sgrad': gbp2 = SmoothGrad(teacher2) elif params.exp2 == 'gcam++': gbp2 = GradCAMpp('resnet', 'layer4', teacher2) attacker = attack.AttackPGD(config) optimizer = torch.optim.Adam(model.parameters()) loss_fn = nn.CrossEntropyLoss() loss_fn2 = nn.MSELoss() epsilon = params.e / 255. for epoch in range(start_epoch, stop_epoch): model.train() print_freq = 50 avg_closs = 0 avg_mloss = 0 correct, total = 0, 0 p = 1. - np.power(params.a, epoch / 10.0) m = torch.distributions.bernoulli.Bernoulli(torch.tensor(0.5)) r = int(m.sample())