def evaluate(loader, model, apply_func, log, verbose):
    """Run a clean (non-adversarial) evaluation pass over `loader`.

    Args:
        loader: iterable of (X, y) batches.
        model: network under evaluation.
        apply_func: optional callable (model, X, y) -> (X, y) applied to
            each batch before the forward pass.
        log: file-like object; flushed every batch.
        verbose: print progress; newline every `verbose` batches.

    Returns:
        (average cross-entropy loss, average error rate).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()

    tic = time.time()

    for batch_idx, (X, y) in enumerate(loader):
        X = X.cuda()
        y = y.cuda().long()
        if apply_func is not None:
            X, y = apply_func(model, X, y)
        # Collapse (N, 1) label tensors to (N,) for CrossEntropyLoss.
        if y.dim() == 2:
            y = y.squeeze(1)

        with torch.no_grad():
            logits = model(Variable(X))
            ce = nn.CrossEntropyLoss()(logits, Variable(y))
            err = (logits.max(1)[1] != y).float().sum() / X.size(0)

        # Record loss / error weighted by batch size.
        n = X.size(0)
        losses.update(ce.item(), n)
        errors.update(err.item(), n)

        # Track per-batch wall time.
        batch_time.update(time.time() - tic)
        tic = time.time()

        if verbose:
            endline = '\n' if batch_idx % verbose == 0 else '\r'
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {errors.val:.3f} ({errors.avg:.3f})'.format(
                      batch_idx,
                      len(loader),
                      batch_time=batch_time,
                      loss=losses,
                      errors=errors),
                  end=endline)
        log.flush()

        # Free batch tensors promptly to keep GPU memory flat.
        del X, y, logits, ce, err
        if DEBUG and batch_idx == 10:
            break
    return losses.avg, errors.avg
def evaluate_robust(loader, model, epsilon, log, verbose):
    """Evaluate the robust (certified) loss and error of `model` on `loader`.

    Args:
        loader: iterable of (X, y) batches.
        model: network under evaluation.
        epsilon: perturbation radius forwarded to `robust_loss`.
        log: file-like object; flushed every batch.
        verbose: print progress; newline every `verbose` batches.

    Returns:
        (average robust loss, average robust error).
    """
    batch_time = AverageMeter()
    robust_losses = AverageMeter()
    robust_errors = AverageMeter()

    end = time.time()

    # Use a `no_grad` context instead of set_grad_enabled(False)/(True):
    # the original left gradients globally disabled if an exception escaped
    # the loop before the trailing re-enable call.
    with torch.no_grad():
        for i, (X, y) in enumerate(loader):
            X, y = X.cuda(), y.cuda().long()
            # Collapse (N, 1) label tensors to (N,).
            if y.dim() == 2:
                y = y.squeeze(1)

            robust_ce, robust_err = robust_loss(model, epsilon, X, y)

            # measure accuracy and record loss
            robust_losses.update(robust_ce.item(), X.size(0))
            robust_errors.update(robust_err, X.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if verbose:
                endline = '\n' if i % verbose == 0 else '\r'
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Robust loss {rloss.val:.3f} ({rloss.avg:.3f})\t'
                      'Robust error {rerrors.val:.3f} ({rerrors.avg:.3f})'.format(
                          i,
                          len(loader),
                          batch_time=batch_time,
                          rloss=robust_losses,
                          rerrors=robust_errors),
                      end=endline)
            log.flush()

            del X, y, robust_ce
            if DEBUG and i == 10:
                break
    torch.cuda.empty_cache()
    return robust_losses.avg, robust_errors.avg
def evaluate_transfer_robust(loader,
                             t_model,
                             model,
                             epsilon,
                             adaptive_vp_rate,
                             log,
                             verbose,
                             real_time=False,
                             evaluate=False,
                             clip_grad=None,
                             **kwargs):
    """Evaluate transfer-robust loss/error of `model` against `t_model`.

    Delegates per-batch computation to `robust_loss_transfer` and tracks its
    robust cross-entropy, robust error, and the two point-rate statistics it
    reports.

    Returns:
        (avg robust loss, avg robust error, avg VP rate, avg INVP rate).
    """
    batch_time = AverageMeter()
    robust_losses = AverageMeter()
    robust_errors = AverageMeter()
    vp_rates = AverageMeter()
    invp_rates = AverageMeter()

    last_tick = time.time()
    for batch_idx, (X, y) in enumerate(loader):
        X = X.cuda()
        y = y.cuda().long()
        # Collapse (N, 1) label tensors to (N,).
        if y.dim() == 2:
            y = y.squeeze(1)

        (robust_ce, robust_err, _, _,
         v_point_rate, eta, inv_point_rate) = robust_loss_transfer(
             model, t_model, epsilon, Variable(X), Variable(y), **kwargs)

        # Record all statistics weighted by batch size.
        n = X.size(0)
        robust_losses.update(robust_ce.detach().item(), n)
        robust_errors.update(robust_err, n)
        vp_rates.update(v_point_rate, n)
        invp_rates.update(inv_point_rate, n)

        # Track per-batch wall time.
        batch_time.update(time.time() - last_tick)
        last_tick = time.time()

        if verbose:
            endline = '\n' if batch_idx % verbose == 0 else '\r'
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'T Robust loss {rloss.val:.4f} ({rloss.avg:.4f})\t'
                  'T Robust error {rerrors.val:.3f} ({rerrors.avg:.3f})\t'
                  'VP Rate {vp_rate.val: .3f} ({vp_rate.avg:.3f})\t'
                  'INVP Rate {invp_rate.val: .3f} ({invp_rate.avg:.3f})\t'.
                  format(batch_idx,
                         len(loader),
                         batch_time=batch_time,
                         vp_rate=vp_rates,
                         invp_rate=invp_rates,
                         rloss=robust_losses,
                         rerrors=robust_errors),
                  end=endline)

        log.flush()

        # Drop batch tensors before the next iteration.
        del X, y, robust_ce, robust_err
        if DEBUG and batch_idx == 10:
            break
    torch.cuda.empty_cache()
    return robust_losses.avg, robust_errors.avg, vp_rates.avg, invp_rates.avg
def test_transferability_subset(loader, attack_method, epsilon, torch_model1,
                                torch_model2, verbose, batch_size):
    """Measure adversarial-example transferability between two PyTorch models.

    Adversarial examples are crafted (via cleverhans) against each model and
    re-evaluated on the other.  `err12` is the fraction of examples that fool
    model1 AND also fool model2, conditioned on model1 being fooled
    (symmetrically for `err21`).

    Args:
        loader: iterable of (xs, ys) numpy-compatible batches.
        attack_method: one of 'CW', 'PGD', 'FGSM'.
        epsilon: perturbation budget (PGD/FGSM).
        torch_model1, torch_model2: the two PyTorch models.
        verbose: print progress; newline every `verbose` batches.
        batch_size: batch size passed to the CW attack.

    Returns:
        (average transfer error 1->2, average transfer error 2->1).
    """
    batch_time = AverageMeter()
    err12s = AverageMeter()
    err21s = AverageMeter()

    end = time.time()

    # Attack parameters. Validate `attack_method` BEFORE creating the TF
    # session so an invalid argument does not leak a session.
    if attack_method == 'CW':
        params = {
            'binary_search_steps': 1,
            'max_iterations': CW_ATTACK_ITERATIONS,
            'learning_rate': CW_LEARNING_RATE,
            'batch_size': batch_size,
            'initial_const': 10
        }
    elif attack_method == 'PGD':
        params = {
            'eps': epsilon,
            'clip_min': 0.,
            'clip_max': 1.,
            'eps_iter': 0.005,
            'nb_iter': 100,
            'rand_init': False
        }
    elif attack_method == 'FGSM':
        params = {'eps': epsilon, 'clip_min': 0., 'clip_max': 1.}
    else:
        # BUG FIX: the original used '...%s'.format(...), which rendered a
        # literal '%s' instead of the offending method name.
        raise Exception('Unknown attack method {}'.format(attack_method))

    # Dispatch table replaces the original six repeated if-statements.
    attack_classes = {
        'CW': CarliniWagnerL2,
        'PGD': ProjectedGradientDescent,
        'FGSM': FastGradientMethod,
    }

    sess = tf.Session(config=config)
    try:
        # Inputs assumed to be 1x28x28 (MNIST-shaped).
        x_op = tf.placeholder(tf.float32, shape=(
            None,
            1,
            28,
            28,
        ))

        # Convert pytorch models to tf models and wrap them in cleverhans.
        tf_model_fn1 = convert_pytorch_model_to_tf(torch_model1)
        tf_model_fn2 = convert_pytorch_model_to_tf(torch_model2)
        cleverhans_model1 = CallableModelWrapper(tf_model_fn1,
                                                 output_layer='logits')
        cleverhans_model2 = CallableModelWrapper(tf_model_fn2,
                                                 output_layer='logits')

        # One attack instance per source model.
        attack_cls = attack_classes[attack_method]
        attk1 = attack_cls(cleverhans_model1, sess=sess)
        attk2 = attack_cls(cleverhans_model2, sess=sess)

        adv_x_op1 = attk1.generate(x_op, **params)
        adv_x_op2 = attk2.generate(x_op, **params)

        # Predictions of both models on both sets of adversarial examples.
        adv_preds_op11 = tf_model_fn1(adv_x_op1)
        adv_preds_op12 = tf_model_fn2(adv_x_op1)
        adv_preds_op21 = tf_model_fn1(adv_x_op2)
        adv_preds_op22 = tf_model_fn2(adv_x_op2)

        for i, (xs, ys) in enumerate(loader):
            (adv_preds11, adv_preds12) = sess.run(
                (adv_preds_op11, adv_preds_op12), feed_dict={x_op: xs})
            (adv_preds21, adv_preds22) = sess.run(
                (adv_preds_op21, adv_preds_op22), feed_dict={x_op: xs})
            # Count of examples that fool the source model itself.
            cnt11 = int((np.argmax(adv_preds11, axis=1) != ys).sum())
            cnt22 = int((np.argmax(adv_preds22, axis=1) != ys).sum())
            if cnt11 > 0:
                # P(fools model2 | fools model1)
                err12 = float(
                    ((np.argmax(adv_preds12, axis=1) != ys) *
                     (np.argmax(adv_preds11, axis=1) != ys)).sum()) / float(cnt11)
                err12s.update(err12, cnt11)
            if cnt22 > 0:
                # P(fools model1 | fools model2)
                err21 = float(
                    ((np.argmax(adv_preds22, axis=1) != ys) *
                     (np.argmax(adv_preds21, axis=1) != ys)).sum()) / float(cnt22)
                err21s.update(err21, cnt22)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if verbose:
                endline = '\n' if i % verbose == 0 else '\r'
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'error 1->2 {err12.val:.3f} ({err12.avg:.3f})\t'
                      'error 2->1 {err21.val:.3f} ({err21.avg:.3f})\t'.format(
                          i,
                          len(loader),
                          batch_time=batch_time,
                          err12=err12s,
                          err21=err21s),
                      end=endline)
    finally:
        # Always release the TF session, even if attack construction or a
        # sess.run fails (the original only closed it on the success path).
        sess.close()
    return err12s.avg, err21s.avg
# Beispiel #5 (0) -- scraped sample separator, commented out: a bare
# identifier here would raise NameError at import time.
def evaluate_trans(loader,
                   dataset,
                   model,
                   epoch,
                   epsilon,
                   ref_model,
                   clip_min=0.,
                   clip_max=1.,
                   eps_iter=0.005,
                   nb_iter=100,
                   rand_init=False,
                   verbose=20):
    """Evaluate `model` on PGD adversarial examples crafted against `ref_model`.

    Adversarial examples are generated with cleverhans PGD targeting the
    fixed `ref_model`, then fed to `model` (a transfer-attack evaluation).

    Args:
        loader: iterable of (X, y) batches (numpy-compatible for TF feed).
        dataset, epoch: unused here; kept for call-site compatibility.
        epsilon, clip_min, clip_max, eps_iter, nb_iter, rand_init: PGD params.
        verbose: newline every `verbose` batches in the progress line.

    Returns:
        (average loss, average error) of `model` on the transferred examples.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()

    params = {
        'eps': epsilon,
        'clip_min': clip_min,
        'clip_max': clip_max,
        'eps_iter': eps_iter,
        'nb_iter': nb_iter,
        'rand_init': rand_init
    }

    sess = tf.Session(config=config)
    try:
        # Inputs assumed to be 1x28x28 (MNIST-shaped).
        x_op = tf.placeholder(tf.float32, shape=(
            None,
            1,
            28,
            28,
        ))

        model.eval()
        ref_model.eval()
        tf_model = convert_pytorch_model_to_tf(ref_model)
        cleverhans_model = CallableModelWrapper(tf_model, output_layer='logits')
        attk = ProjectedGradientDescent(cleverhans_model, sess=sess)
        adv_x_op = attk.generate(x_op, **params)

        end = time.time()
        for i, (X, y) in enumerate(loader):

            X_adv = sess.run(adv_x_op, feed_dict={x_op: X})
            X, y = torch.tensor(X_adv).cuda(), y.cuda()

            # Evaluation only: skip autograd graph construction (consistent
            # with `evaluate` / `evaluate_robust`).
            with torch.no_grad():
                out = model(X)
                ce = nn.CrossEntropyLoss()(out, y)
                err = (out.max(1)[1] != y).float().sum() / X.size(0)

            # measure accuracy and record loss
            losses.update(ce.item(), X.size(0))
            errors.update(err.item(), X.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            endline = '\n' if i % verbose == 0 else '\r'
            print('Adv test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {error.val:.3f} ({error.avg:.3f})'.format(
                      i,
                      len(loader),
                      batch_time=batch_time,
                      loss=losses,
                      error=errors),
                  end=endline)

            if DEBUG and i == 10:
                break
    finally:
        # BUG FIX: the original never closed the TF session, leaking one
        # session (and its graph memory) per call.
        sess.close()

    print('\n * Error {error.avg:.3f}'.format(error=errors))
    return losses.avg, errors.avg
# Beispiel #6 (0) -- scraped sample separator, commented out: a bare
# identifier here would raise NameError at import time.
def evaluate_clean(loader, model, epoch, verbose=20):
    """Evaluate `model` on clean (unperturbed) data.

    Args:
        loader: iterable of (X, y) batches.
        model: network under evaluation (switched to eval mode).
        epoch: unused here; kept for call-site compatibility.
        verbose: newline every `verbose` batches in the progress line.

    Returns:
        (average cross-entropy loss, average error rate).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()

    model.eval()

    end = time.time()
    for i, (X, y) in enumerate(loader):
        X, y = X.cuda(), y.cuda()

        # Evaluation only: skip autograd graph construction (consistent with
        # `evaluate`; the original retained activations needlessly).
        with torch.no_grad():
            out = model(X)
            ce = nn.CrossEntropyLoss()(out, y)
            err = (out.max(1)[1] != y).float().sum() / X.size(0)

        # measure accuracy and record loss
        losses.update(ce.item(), X.size(0))
        errors.update(err.item(), X.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        endline = '\n' if i % verbose == 0 else '\r'
        print('Test: [{0}/{1}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Error {error.val:.3f} ({error.avg:.3f})'.format(
                  i,
                  len(loader),
                  batch_time=batch_time,
                  loss=losses,
                  error=errors),
              end=endline)

        if DEBUG and i == 10:
            break

    print('\n * Error {error.avg:.3f}'.format(error=errors))
    return losses.avg, errors.avg
# Beispiel #7 (0) -- scraped sample separator, commented out: a bare
# identifier here would raise NameError at import time.
def trans_reg_train(loader,
                    model,
                    opt,
                    epoch,
                    epsilon,
                    ref_model,
                    lbda,
                    verbose=20):
    """One epoch of training `model` with a gradient-alignment penalty.

    In addition to cross-entropy on `model`, penalizes the dot product
    between the normalized input gradients of `model` and of `ref_model`,
    weighted by `lbda` (discourages gradient alignment between the two
    models).

    Args:
        loader: iterable of (X, y) batches.
        model: network being trained.
        opt: optimizer for `model`'s parameters.
        epoch: current epoch (display only).
        epsilon: unused here; kept for call-site compatibility.
        ref_model: reference network providing the second gradient field.
        lbda: weight of the gradient dot-product penalty.
        verbose: print progress every `verbose` batches.

    Returns:
        (avg loss, avg error, avg gradient dot-product).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    dps = AverageMeter()

    model.train()
    ref_model.train()

    end = time.time()
    for i, (X, y) in enumerate(loader):

        X, y = Variable(X.cuda(), requires_grad=True), Variable(y).cuda()
        data_time.update(time.time() - end)

        # Normalized input gradient of the reference model.
        pred2 = ref_model(X)
        loss2 = F.cross_entropy(pred2, y)
        (grad2, ) = grad(loss2, X, create_graph=True)
        grad2 = grad2.view(len(X), -1)
        grad2 = F.normalize(grad2)
        # Detach so the penalty only backpropagates through grad1.
        # (Replaces the original .cpu().numpy() round-trip, which produced
        # the same detached values but via a pointless GPU<->CPU copy.)
        grad2 = grad2.detach()

        # Normalized input gradient of the trained model; create_graph=True
        # keeps it differentiable so the penalty can be backpropagated.
        pred1 = model(X)
        loss1 = F.cross_entropy(pred1, y)
        (grad1, ) = grad(loss1, X, create_graph=True)
        grad1 = grad1.view(len(X), -1)
        grad1 = F.normalize(grad1)

        X.requires_grad_(False)

        # Mean per-example dot product between the two gradient fields.
        dp = torch.sum(torch.mul(grad1, grad2), dim=1)
        dp = torch.mean(dp)

        loss = loss1 + dp * lbda
        opt.zero_grad()
        loss.backward()
        opt.step()

        err = (pred1.data.max(1)[1] != y).float().sum() / X.size(0)

        batch_time.update(time.time() - end)
        end = time.time()
        losses.update(loss.item(), X.size(0))
        errors.update(err.item(), X.size(0))
        # BUG FIX: record a Python float, not the tensor -- storing `dp`
        # itself kept the double-backward graph (create_graph=True) alive
        # inside the meter for the whole epoch.
        dps.update(dp.item(), X.size(0))

        if verbose and i % verbose == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {errors.val:.3f} ({errors.avg:.3f})\t'
                  'Dp {dps.val:.3f} ({dps.avg:.3f})'.format(
                      epoch,
                      i,
                      len(loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      errors=errors,
                      dps=dps))

        if DEBUG and i == 10:
            break

    return losses.avg, errors.avg, dps.avg
# Beispiel #8 (0) -- scraped sample separator, commented out: a bare
# identifier here would raise NameError at import time.
def trans_train(loader,
                model,
                opt,
                epoch,
                epsilon,
                ref_model,
                clip_min=0.,
                clip_max=1.,
                eps_iter=0.005,
                nb_iter=100,
                rand_init=False,
                verbose=20):
    """One epoch of training `model` on PGD examples transferred from `ref_model`.

    Adversarial examples are crafted against the fixed `ref_model` with
    cleverhans PGD, and `model` is trained on them.

    Args:
        loader: iterable of (X, y) batches (numpy-compatible for TF feed).
        model: network being trained.
        opt: optimizer for `model`'s parameters.
        epoch: current epoch (display only).
        epsilon, clip_min, clip_max, eps_iter, nb_iter, rand_init: PGD params.
        ref_model: fixed source model the attack targets.
        verbose: print progress every `verbose` batches.

    Returns:
        (average loss, average error) for the epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()

    model.train()

    params = {
        'eps': epsilon,
        'clip_min': clip_min,
        'clip_max': clip_max,
        'eps_iter': eps_iter,
        'nb_iter': nb_iter,
        'rand_init': rand_init
    }

    sess = tf.Session(config=config)
    try:
        # Inputs assumed to be 1x28x28 (MNIST-shaped).
        x_op = tf.placeholder(tf.float32, shape=(
            None,
            1,
            28,
            28,
        ))

        tf_model = convert_pytorch_model_to_tf(ref_model)
        cleverhans_model = CallableModelWrapper(tf_model, output_layer='logits')
        attk = ProjectedGradientDescent(cleverhans_model, sess=sess)
        adv_x_op = attk.generate(x_op, **params)

        end = time.time()
        for i, (X, y) in enumerate(loader):
            X_adv = sess.run(adv_x_op, feed_dict={x_op: X})

            X, y = Variable(torch.tensor(X_adv)).cuda(), y.cuda()
            data_time.update(time.time() - end)

            out = model(Variable(X))
            ce = nn.CrossEntropyLoss()(out, Variable(y))
            err = (out.data.max(1)[1] != y).float().sum() / X.size(0)

            opt.zero_grad()
            ce.backward()
            opt.step()

            batch_time.update(time.time() - end)
            end = time.time()
            losses.update(ce.item(), X.size(0))
            errors.update(err.item(), X.size(0))

            if verbose and i % verbose == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Error {errors.val:.3f} ({errors.avg:.3f})'.format(
                          epoch,
                          i,
                          len(loader),
                          batch_time=batch_time,
                          data_time=data_time,
                          loss=losses,
                          errors=errors))

            if DEBUG and i == 10:
                break
    finally:
        # BUG FIX: the original never closed the TF session, leaking one
        # session (and its graph memory) per call.
        sess.close()

    return losses.avg, errors.avg
# Beispiel #9 (0) -- scraped sample separator, commented out: a bare
# identifier here would raise NameError at import time.
def train(loader, model, opt, epoch, verbose):
    """One epoch of standard (clean) training.

    Args:
        loader: iterable of (X, y) batches.
        model: network being trained (switched to train mode).
        opt: optimizer for `model`'s parameters.
        epoch: current epoch (display only).
        verbose: print progress every `verbose` batches (0/False disables).

    Returns:
        (average loss, average error) for the epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()

    model.train()

    criterion = nn.CrossEntropyLoss()
    tic = time.time()
    for step, (inputs, targets) in enumerate(loader):
        inputs, targets = inputs.cuda(), targets.cuda()
        data_time.update(time.time() - tic)

        # Forward pass and per-batch error rate.
        logits = model(Variable(inputs))
        loss = criterion(logits, Variable(targets))
        n = inputs.size(0)
        err = (logits.data.max(1)[1] != targets).float().sum() / n

        # Standard SGD step.
        opt.zero_grad()
        loss.backward()
        opt.step()

        batch_time.update(time.time() - tic)
        tic = time.time()
        losses.update(loss.item(), n)
        errors.update(err.item(), n)

        if verbose and step % verbose == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {errors.val:.3f} ({errors.avg:.3f})'.format(
                      epoch,
                      step,
                      len(loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      errors=errors))

        if DEBUG and step == 10:
            break

    return losses.avg, errors.avg