Beispiel #1
0
def infer_minibatch(valid_queue, model, criterion):
    """Evaluate `model` on adversarial examples built from `valid_queue`.

    Each validation batch is perturbed with iterative FGSM (`ifgsm`, driven
    by args.niters / args.eps) before being scored; loss and top-1 / top-5
    accuracy are averaged over the whole queue.

    Args:
        valid_queue: iterable of (input, target) batches.
        model: network under evaluation (moved to eval mode here).
        criterion: loss function applied to (logits, target).

    Returns:
        (top1.avg, objs.avg): average top-1 accuracy and average loss.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()

    for step, (input, target) in enumerate(valid_queue):
        input = Variable(input).cuda()
        # FIX: `async=True` is a SyntaxError on Python >= 3.7 (`async` became
        # a keyword); `non_blocking=True` is the supported spelling.
        target = Variable(target).cuda(non_blocking=True)
        # Craft adversarial inputs; ifgsm needs gradients w.r.t. the input,
        # so this must run outside any no_grad() context.
        input = ifgsm(model,
                      input,
                      target,
                      niters=args.niters,
                      epsilon=args.eps)

        # Cut the attack's graph so evaluation does not retain it.
        input = input.detach()
        target = target.detach()

        logits = model(input)
        loss = criterion(logits, target)

        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        # FIX: indexing a 0-dim tensor with `.data[0]` was removed after
        # PyTorch 0.3; `.item()` is the portable scalar extraction.
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)

        if step % args.report_freq == 0:
            logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg,
                         top5.avg)

        # Release batch tensors eagerly to keep GPU memory flat.
        del input, target

    return top1.avg, objs.avg
Beispiel #2
0
def adversarial_workbench(network_list=model_pair):
    """Attack every network in `network_list` with I-FGSM and measure how the
    adversarial examples transfer to each network in the same list.

    Args:
        network_list: list of (model, name) pairs. Defaults to the
            module-level `model_pair` (evaluated once at def time).

    Returns:
        (fool_ratio, pert_acc, ori_acc): three parallel 2-D lists indexed
        [source_model][target_model], holding fields 1-3 of each
        `test_adv_examples_across_full_models` result row.
    """
    fool_ratio_dict = []
    pert_acc_dict = []
    ori_acc_dict = []
    # FIX: the original iterated the global `model_pair` and passed
    # `network_list=model_pair`, silently ignoring the `network_list`
    # argument; use the parameter consistently.
    for i, (base_model, base_model_name) in enumerate(network_list):
        net = base_model.cuda()
        net.eval()

        adv_examples = ifgsm(net, test_ims, test_lbls, niters=10, epsilon=0.03)
        transfer_results = test_adv_examples_across_full_models(
            adv_examples,
            test_ims,
            test_lbls,
            network_list=network_list,
        )

        fool_ratio_dict.append([])
        pert_acc_dict.append([])
        ori_acc_dict.append([])
        for result in transfer_results:
            fool_ratio_dict[i].append(result[1])
            pert_acc_dict[i].append(result[2])
            ori_acc_dict[i].append(result[3])

    return fool_ratio_dict, pert_acc_dict, ori_acc_dict
Beispiel #3
0
def train(train_queue, valid_queue, model, architect, criterion, optimizer,
          lr):
    """Run one epoch of architecture search training.

    For each training batch, a batch drawn from `valid_queue` is perturbed
    with I-FGSM and used to update the architecture parameters via
    `architect.step`; the model weights are then updated on the clean
    training batch.

    Args:
        train_queue: iterable of (input, target) training batches.
        valid_queue: loader the architecture-update batches are drawn from.
        model: supernet being trained.
        architect: object performing the architecture (alpha) update.
        criterion: loss applied to (logits, target).
        optimizer: weight optimizer.
        lr: current learning rate, forwarded to `architect.step`.

    Returns:
        (top1.avg, objs.avg): average top-1 accuracy and average loss.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()

    for step, (input, target) in enumerate(train_queue):
        model.train()
        n = input.size(0)

        input = Variable(input, requires_grad=False).cuda()
        # FIX: `async=True` is a SyntaxError on Python >= 3.7; PyTorch's
        # supported keyword is `non_blocking`.
        target = Variable(target, requires_grad=False).cuda(non_blocking=True)

        # Get a random minibatch from the search queue with replacement.
        # NOTE(review): `next(iter(valid_queue))` builds a brand-new iterator
        # every step — random only if the loader shuffles, and it pays the
        # iterator start-up cost each time; consider caching the iterator.
        input_search, target_search = next(iter(valid_queue))
        input_search = Variable(input_search).cuda()
        target_search = Variable(target_search).cuda(non_blocking=True)
        # Perturb the search batch so the architecture update sees
        # adversarial inputs.
        input_search = ifgsm(model,
                             input_search,
                             target_search,
                             niters=args.niters,
                             epsilon=args.eps)

        architect.step(input,
                       target,
                       input_search,
                       target_search,
                       lr,
                       optimizer,
                       unrolled=args.unrolled)

        optimizer.zero_grad()
        logits = model(input)
        loss = criterion(logits, target)

        loss.backward()
        # FIX: `clip_grad_norm` was renamed `clip_grad_norm_` (in-place
        # convention) in PyTorch 0.4 — same release that introduced
        # `non_blocking` above.
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()

        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        # FIX: `.data[0]` on 0-dim tensors was removed; use `.item()`.
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)

        if step % args.report_freq == 0:
            logging.info('train %03d %e %f %f', step, objs.avg, top1.avg,
                         top5.avg)

    return top1.avg, objs.avg
Beispiel #4
0
def infer_minibatch(valid_queue, model, criterion):
    """Evaluate `model` on a single adversarial minibatch from `valid_queue`.

    Draws one batch, perturbs it with `ifgsm` (the attack's default
    iteration count / epsilon), and logs loss plus top-1 / top-5 accuracy.

    Args:
        valid_queue: loader to draw the single batch from.
        model: network under evaluation (moved to eval mode here).
        criterion: loss function applied to (logits, target).

    Returns:
        (top1.avg, objs.avg): top-1 accuracy and loss on this one batch.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()

    input, target = next(iter(valid_queue))
    input = Variable(input).cuda()
    # FIX: `async=True` is a SyntaxError on Python >= 3.7; use
    # `non_blocking=True` (PyTorch >= 0.4 keyword).
    target = Variable(target).cuda(non_blocking=True)
    input = ifgsm(model, input, target)
    logits = model(input)
    loss = criterion(logits, target)

    prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
    n = input.size(0)

    # FIX: `.data[0]` indexing of 0-dim tensors was removed after
    # PyTorch 0.3; `.item()` extracts the Python scalar.
    objs.update(loss.item(), n)
    top1.update(prec1.item(), n)
    top5.update(prec5.item(), n)

    logging.info('valid %03d %e %f %f', 0, objs.avg, top1.avg, top5.avg)

    return top1.avg, objs.avg
Beispiel #5
0
# Build the transfer set: every known model except the attack's source model.
transfer_model_names = [x for x in all_model_names if x != opt.modeltype]
transfer_models = [load_model(x) for x in transfer_model_names]

print('Loaded model...')

# Pre-process the input image.
# Per-channel normalization statistics (the standard ImageNet values).
mean, stddev = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
transform_resize = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224)])
transform_norm = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, stddev)])
img_pil = Image.open(opt.imagepath)
img_pil_resize = transform_resize(img_pil.copy())
# unsqueeze(0) adds the batch dimension the models expect; result is 1xCxHxW.
img = transform_norm(img_pil_resize.copy()).cuda().unsqueeze(0)
lbl = torch.tensor([opt.imagelabel]).cuda()

# Run the baseline I-FGSM attack on the source model.
img_ifgsm = ifgsm(model, img, lbl, niters=opt.niters_baseline, dataset='imagenet')

source_layers = get_source_layers(opt.modeltype, model)
# ILA uses a (separately generated) I-FGSM example as a guide direction and
# re-optimizes the perturbation against an intermediate source-model layer.
# NOTE(review): `source_layers[opt.layerindex][1][1]` presumably selects the
# layer module from a nested (index, (name, module)) entry — confirm against
# get_source_layers.
ifgsm_guide = ifgsm(model, img, lbl, learning_rate=0.008, epsilon=opt.epsilon, niters=opt.niters_ila, dataset='imagenet')
img_ila = ILA(model, img, ifgsm_guide, lbl, source_layers[opt.layerindex][1][1], learning_rate=0.01, epsilon=opt.epsilon, niters=opt.niters_ila, dataset='imagenet')

# Predicted labels on the source model for the clean / I-FGSM / ILA images.
model.eval()
orig_pred_label, ifgsm_pred_label, ila_pred_label = model(img).max(dim=1)[1].item(), model(img_ifgsm).max(dim=1)[1].item(), model(img_ila).max(dim=1)[1].item()

# Predicted labels on every transfer model, as (clean, ifgsm, ila) tuples.
transfer_labs = []
for mod in transfer_models:
    mod.eval()
    o, f, i = mod(img).max(dim=1)[1].item(), mod(img_ifgsm).max(dim=1)[1].item(), mod(img_ila).max(dim=1)[1].item()
    transfer_labs.append((o, f, i))