def train(epoch, train_loader, model, classifier, criterion, optimizer, opt):
    """
    one epoch training
    """

    model.eval()
    classifier.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    end = time.time()
    for idx, (x, y) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        x = x.cuda(non_blocking=True)
        y = y.cuda(non_blocking=True)

        # ===================forward=====================
        with torch.no_grad():
            feat = model(x, opt.layer)

        output = classifier(feat)
        loss = criterion(output, y)

        acc1, acc5 = accuracy(output, y, topk=(1, 5))
        losses.update(loss.item(), x.size(0))
        top1.update(acc1[0], x.size(0))
        top5.update(acc5[0], x.size(0))

        # ===================backward=====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # ===================meters=====================
        batch_time.update(time.time() - end)
        end = time.time()

        # print info
        if opt.local_rank == 0:
            if idx % opt.print_freq == 0:
                lr = optimizer.param_groups[0]['lr']
                print(f'Epoch: [{epoch}][{idx}/{len(train_loader)}]\t'
                      f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      f'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      f'Lr {lr:.3f} \t'
                      f'Loss {losses.val:.4f} ({losses.avg:.4f})\t'
                      f'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      f'Acc@5 {top5.val:.3f} ({top5.avg:.3f})')

    return top1.avg, top5.avg, losses.avg
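These examples rely on AverageMeter and accuracy helpers that are defined elsewhere in their repositories. A minimal sketch of that assumed interface, modeled on the PyTorch ImageNet reference script (the exact originals may differ):

import torch

class AverageMeter(object):
    """Tracks the latest value and a running average."""
    def __init__(self):
        self.val = self.avg = self.sum = self.count = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def accuracy(output, target, topk=(1,)):
    """Top-k accuracy (in percent) of output logits against target labels."""
    with torch.no_grad():
        maxk = max(topk)
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        return [correct[:k].reshape(-1).float().sum(0, keepdim=True)
                .mul_(100.0 / target.size(0)) for k in topk]

Each returned value is a one-element tensor, which is why the callers above index it as acc1[0] / prec1[0].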
Example #2
def _test(model, data_loader, return_probability=False):

    # assertions
    assert isinstance(model, torch.nn.Module)
    assert isinstance(data_loader, torch.utils.data.dataloader.DataLoader)

    # are we on CPU or GPU? (infer it from where the model parameters live)
    is_gpu = next(model.parameters()).is_cuda

    # loop over data:
    model.eval()
    precs1, precs5, num_batches, num_total = [], [], 0, 0
    probs, all_targets = None, None
    bar = progressbar.ProgressBar(len(data_loader))
    bar.start()
    for num_batches, (imgs, targets) in enumerate(data_loader):

        # keep a CPU copy of the targets for the accuracy computation:
        cpu_targets = targets
        # copy data to GPU:
        if is_gpu:
            targets = targets.cuda(non_blocking=True)
            # make sure the imgs are converted to a cuda tensor too
            imgs = imgs.cuda(non_blocking=True)

        # perform prediction (no gradients needed at test time):
        with torch.no_grad():
            output = model(imgs.squeeze())
        pred = output.cpu()

        if return_probability:
            probs = pred if probs is None else torch.cat((probs, pred), dim=0)
            all_targets = targets if all_targets is None else (torch.cat(
                (all_targets, targets), dim=0))

        # measure accuracy:
        prec1, prec5 = accuracy(pred, cpu_targets, topk=(1, 5))
        precs1.append(prec1[0] * targets.size(0))
        precs5.append(prec5[0] * targets.size(0))
        num_total += imgs.size(0)
        bar.update(num_batches)

    if return_probability:
        return probs, all_targets
    else:
        # return average accuracy (@ 1 and 5):
        return sum(precs1) / num_total, sum(precs5) / num_total
Example #3
def validate(val_loader, model, classifier, criterion, args):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()
    classifier.eval()

    with torch.no_grad():
        end = time.time()
        for idx, (x, y) in enumerate(val_loader):
            x = x.cuda(non_blocking=True)
            y = y.cuda(non_blocking=True)

            # compute output
            feat = model(x, args.layer)
            output = classifier(feat)
            loss = criterion(output, y)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, y, topk=(1, 5))
            losses.update(loss.item(), x.size(0))
            top1.update(acc1[0], x.size(0))
            top5.update(acc5[0], x.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if idx % args.print_freq == 0:
                print(f'Test: [{idx}/{len(val_loader)}]\t'
                      f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      f'Loss {losses.val:.4f} ({losses.avg:.4f})\t'
                      f'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      f'Acc@5 {top5.val:.3f} ({top5.avg:.3f})')

        print(f' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}')

    return top1.avg, top5.avg, losses.avg
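A sketch of how the train() and validate() functions above are typically wired together for linear evaluation of a frozen backbone. The toy backbone, the option values, and the random data below are illustrative assumptions rather than part of the original code, and the AverageMeter/accuracy helpers must be in scope:

import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

class ToyBackbone(nn.Module):
    # Stand-in for the frozen encoder; the real model returns features
    # for the layer selected via opt.layer.
    def forward(self, x, layer):
        return x.flatten(1)

opt = argparse.Namespace(layer=5, print_freq=10, local_rank=0)
backbone = ToyBackbone().cuda()
classifier = nn.Linear(3 * 32 * 32, 10).cuda()
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(classifier.parameters(), lr=0.1, momentum=0.9)

train_loader = DataLoader(TensorDataset(torch.randn(256, 3, 32, 32),
                                        torch.randint(0, 10, (256,))),
                          batch_size=32, shuffle=True)
val_loader = DataLoader(TensorDataset(torch.randn(64, 3, 32, 32),
                                      torch.randint(0, 10, (64,))),
                        batch_size=32)

for epoch in range(1, 11):
    train(epoch, train_loader, backbone, classifier, criterion, optimizer, opt)
    validate(val_loader, backbone, classifier, criterion, opt)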
Example #4
def _test(model, data_loader, return_probability=False):

    # assertions
    assert isinstance(model, torch.nn.Module)
    assert isinstance(data_loader, torch.utils.data.dataloader.DataLoader)

    # loop over data:
    model.eval()
    precs1, precs5, num_batches, num_total = [], [], 0, 0
    probs, all_targets = None, None
    bar = progressbar.ProgressBar(len(data_loader))
    bar.start()
    for num_batches, (imgs, targets) in enumerate(data_loader):

        # perform prediction (no gradients needed at test time):
        with torch.no_grad():
            output = model(imgs.squeeze())
        pred = output.cpu()

        if return_probability:
            probs = pred if probs is None else torch.cat((probs, pred), dim=0)
            all_targets = targets if all_targets is None else (
                torch.cat((all_targets, targets), dim=0))

        # measure accuracy:
        prec1, prec5 = accuracy(pred, targets, topk=(1, 5))
        precs1.append(prec1[0] * targets.size(0))
        precs5.append(prec5[0] * targets.size(0))
        num_total += imgs.size(0)
        bar.update(num_batches)

    if return_probability:
        return probs, all_targets
    else:
        # return average accuracy (@ 1 and 5):
        return sum(precs1) / num_total, sum(precs5) / num_total
Example #5
    with tf.name_scope("fc2"):
        fc_w2 = tf.Variable(tf.truncated_normal((512, 10)), name="w2")
        fc_b2 = tf.Variable(np.ones((10)) * 0.1,
                            dtype=tf.float32,
                            name="fc_b2")
        fc_ouptut2 = tf.matmul(fc_ouptut1, fc_w2) + fc_b2

    with tf.name_scope("softmax"):
        predict = tf.nn.softmax(fc_ouptut2)

    with tf.name_scope("metrics"):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=fc_ouptut2, labels=labels),
                              name="loss")
        acc = util.accuracy(predict, labels)

    with tf.name_scope("main_train"):
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            opt = tf.train.AdamOptimizer(0.0001)
            grad_and_var = opt.compute_gradients(loss)
            train_step = opt.apply_gradients(grad_and_var,
                                             global_step=global_step)

gen_ops = []
gen_loss_op = []
with tf.name_scope("inspect"):
    gen_image = tf.Variable(tf.truncated_normal((1, 28, 28, 1)),
                            name="gen_image")
    conv2d_output_x = tf.nn.conv2d(gen_image, filter1, [1, 1, 1, 1],
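The TensorFlow snippet above also calls a util.accuracy(predict, labels) helper that is not shown. A minimal sketch of what such a helper usually computes, assuming one-hot labels (the real util module may differ):

import tensorflow as tf

def accuracy(predict, labels):
    # Fraction of examples whose arg-max prediction matches the one-hot label.
    correct = tf.equal(tf.argmax(predict, axis=1), tf.argmax(labels, axis=1))
    return tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")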
Example #6
def train(model,
          criterion,
          optimizer,
          data_loader_hook=None,
          start_epoch_hook=None,
          end_epoch_hook=None,
          start_epoch=0,
          end_epoch=90,
          learning_rate=0.1):

    # assertions:
    assert isinstance(model, nn.Module)
    assert isinstance(criterion, nn.modules.loss._Loss)
    assert isinstance(optimizer, torch.optim.Optimizer)
    assert type(start_epoch) == int and start_epoch >= 0
    assert type(end_epoch) == int and end_epoch >= start_epoch
    assert type(learning_rate) == float and learning_rate > .0
    if start_epoch_hook is not None:
        assert callable(start_epoch_hook)
    if end_epoch_hook is not None:
        assert callable(end_epoch_hook)
    assert data_loader_hook is not None
    assert callable(data_loader_hook)

    # are we on CPU or GPU? (infer it from where the model parameters live)
    is_gpu = next(model.parameters()).is_cuda

    # train the model:
    model.train()
    for epoch in range(start_epoch, end_epoch):

        data_loader = data_loader_hook(epoch)
        assert isinstance(data_loader, torch.utils.data.dataloader.DataLoader)

        # start-of-epoch hook:
        if start_epoch_hook is not None:
            start_epoch_hook(epoch, model, optimizer)

        # loop over training data:
        model.train()
        precs1, precs5, num_batches, num_total = [], [], 0, 0
        bar = progressbar.ProgressBar(len(data_loader))
        bar.start()
        for num_batches, (imgs, targets) in enumerate(data_loader):

            # keep a CPU copy of the targets for the accuracy computation:
            cpu_targets = targets
            # copy data to GPU:
            if is_gpu:
                targets = targets.cuda(non_blocking=True)
                # make sure the imgs are converted to a cuda tensor too
                imgs = imgs.cuda(non_blocking=True)

            # perform forward pass:
            out = model(imgs)
            loss = criterion(out, targets)

            # measure accuracy:
            prec1, prec5 = accuracy(out.detach().cpu(), cpu_targets, topk=(1, 5))
            precs1.append(prec1[0] * targets.size(0))
            precs5.append(prec5[0] * targets.size(0))
            num_total += imgs.size(0)

            # compute gradient and do SGD step:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            bar.update(num_batches)

        # end-of-epoch hook:
        if end_epoch_hook is not None:
            prec1 = sum(precs1) / num_total
            prec5 = sum(precs5) / num_total
            end_epoch_hook(epoch, model, optimizer, prec1=prec1, prec5=prec5)

    # return trained model:
    return model
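A hypothetical call into the hook-based train() above, using a toy dataset and model purely for illustration; it assumes the accuracy and progressbar helpers used inside train() are importable, and the checkpoint filename is made up:

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

data = TensorDataset(torch.randn(512, 3, 32, 32), torch.randint(0, 10, (512,)))
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

def data_loader_hook(epoch):
    # Rebuild the loader every epoch (e.g. to reshuffle or change augmentation).
    return DataLoader(data, batch_size=64, shuffle=True)

def end_epoch_hook(epoch, model, optimizer, prec1=None, prec5=None):
    print('epoch %d: top-1 %.2f, top-5 %.2f' % (epoch, float(prec1), float(prec5)))
    torch.save(model.state_dict(), 'classifier_%d.pt' % epoch)

model = train(model, criterion, optimizer,
              data_loader_hook=data_loader_hook,
              end_epoch_hook=end_epoch_hook,
              start_epoch=0, end_epoch=5, learning_rate=0.1)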
Example #7
def classify_images(args):

    # assertions
    assert args.ensemble is None or args.ensemble in ENSEMBLE_TYPE, \
        "{} is not a supported ensemble type. Supported types are {}".format(
            args.ensemble, ENSEMBLE_TYPE)
    if not args.ensemble:
        assert args.ncrops is None or (len(args.ncrops) == 1
                                       and args.ncrops[0] == 1)
    if args.defenses is not None:
        for d in args.defenses:
            assert DefenseType.has_value(d), \
                "\"{}\" defense not defined".format(d)
        # crops expected for each defense
        assert (args.ncrops is None or len(args.ncrops) == len(
            args.defenses)), ("Number of crops for each defense is expected")
        assert (args.crop_type is None or len(args.crop_type) == len(
            args.defenses)), ("crop_type for each defense is expected")
        # assert (len(args.crop_frac) == len(args.defenses)), (
        #     "crop_frac for each defense is expected")
    elif args.ncrops is not None:
        # no crop ensembling when defense is None
        assert len(args.ncrops) == 1
        assert args.crop_frac is not None and len(args.crop_frac) == 1, \
            "Only one crop_frac is expected as there is no defense"
        assert args.crop_type is not None and len(args.crop_type) == 1, \
            "Only one crop_type is expected as there is no defense"

    if args.defenses is None or len(args.defenses) == 0:
        defenses = [None]
    else:
        defenses = args.defenses

    all_defense_probs = None
    for idx, defense_name in enumerate(defenses):
        # initialize dataset
        defense = get_defense(defense_name, args)
        # Read preset params for adversary based on args
        adv_params = constants.get_adv_params(args, idx)
        print("| adv_params: ", adv_params)
        # setup crop
        ncrops = 1
        crop_type = None
        crop_frac = 1.0
        if args.ncrops:
            crop_type = args.crop_type[idx]
            crop_frac = args.crop_frac[idx]
            if crop_type == 'sliding':
                ncrops = 9
            else:
                ncrops = args.ncrops[idx]
        # Init custom crop function
        crop = transforms.Crop(crop_type, crop_frac)
        # initialize dataset
        dataset = load_dataset(args, 'valid', defense, adv_params, crop)
        # load model
        model, _, _ = get_model(args,
                                load_checkpoint=True,
                                defense_name=defense_name)

        # get crop probabilities for crops for current defense
        probs, targets = _eval_crops(args, dataset, model, defense, crop,
                                     ncrops, crop_type)

        if all_defense_probs is None:
            all_defense_probs = torch.zeros(len(defenses), len(dataset),
                                            probs.size(2))
        # Ensemble crop probabilities
        if args.ensemble == 'max':
            probs = torch.max(probs, dim=0)[0]
        elif args.ensemble == 'avg':  # for average ensembling
            probs = torch.mean(probs, dim=0)
        else:  # for no ensembling
            assert all_defense_probs.size(0) == 1
            probs = probs[0]
        all_defense_probs[idx, :, :] = probs

        # free memory
        dataset = None
        model = None

    # Ensemble defense probabilities
    if args.ensemble == 'max':
        all_defense_probs = torch.max(all_defense_probs, dim=0)[0]
    elif args.ensemble == 'avg':  # for average ensembling
        all_defense_probs = torch.mean(all_defense_probs, dim=0)
    else:  # for no ensembling
        assert all_defense_probs.size(0) == 1
        all_defense_probs = all_defense_probs[0]
    # Calculate top1 and top5 accuracy
    prec1, prec5 = accuracy(all_defense_probs, targets, topk=(1, 5))
    print('=' * 50)
    print('Results for model={}, attack={}, ensemble_type={} '.format(
        args.model, args.adversary, args.ensemble))
    prec1 = prec1[0]
    prec5 = prec5[0]
    print('| classification accuracy @1: %2.5f' % (prec1))
    print('| classification accuracy @5: %2.5f' % (prec5))
    print('| classification error @1: %2.5f' % (100. - prec1))
    print('| classification error @5: %2.5f' % (100. - prec5))
    print('| done.')
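The 'max' and 'avg' branches above both reduce a (defenses x images x classes) probability tensor along its first dimension. A self-contained sketch of just that reduction, with made-up sizes:

import torch

# Stack of per-defense class probabilities: (num_defenses, num_images, num_classes).
all_defense_probs = torch.rand(3, 5, 10)

max_ensemble = torch.max(all_defense_probs, dim=0)[0]   # element-wise max over defenses
avg_ensemble = torch.mean(all_defense_probs, dim=0)     # element-wise mean over defenses
print(max_ensemble.shape, avg_ensemble.shape)           # both: torch.Size([5, 10])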