예제 #1
0
def train_robust(loader, model, opt, epsilon, epoch, log):
    """Train `model` for one epoch against the certified robust loss.

    Per-batch robust/clean loss and error are written to `log` and echoed
    to stdout. Uses the legacy (pre-0.4) PyTorch API (Variable, .data[0]).
    """
    model.train()
    if epoch == 0:
        # Snapshot of the pristine optimizer state; never read in this
        # function — presumably kept for a reset elsewhere (TODO confirm).
        blank_state = opt.state_dict()

    for i, (X, y) in enumerate(loader):
        X, y = X.cuda(), y.cuda()

        # Certified robust cross-entropy and provable error bound.
        robust_ce, robust_err = robust_loss(model, epsilon, Variable(X),
                                            Variable(y))
        # Clean forward pass — tracked for monitoring only, not optimized.
        out = model(Variable(X))
        ce = nn.CrossEntropyLoss()(out, Variable(y))
        err = (out.data.max(1)[1] != y).float().sum() / X.size(0)

        # Optimize the robust objective only.
        opt.zero_grad()
        robust_ce.backward()
        opt.step()

        print(epoch,
              i,
              robust_ce.data[0],
              robust_err[0],
              ce.data[0],
              err,
              file=log)
        print(epoch, i, robust_ce.data[0], robust_err[0], ce.data[0], err)
        log.flush()
예제 #2
0
def evaluate_robust(loader, model, epsilon, epoch, log, verbose):
    """Evaluate certified robust and clean loss/error over `loader`.

    Every batch is logged to `log`; every `verbose`-th batch is also
    echoed to stdout. Uses the legacy (pre-0.4) PyTorch Variable API.
    """
    model.eval()
    for batch_idx, (X, y) in enumerate(loader):
        X = X.cuda()
        y = y.cuda().long()
        # Collapse (N, 1) label tensors to (N,).
        if y.dim() == 2:
            y = y.squeeze(1)

        # Certified robust loss/error; volatile => no autograd bookkeeping.
        robust_ce, robust_err = robust_loss(model,
                                            epsilon,
                                            Variable(X, volatile=True),
                                            Variable(y, volatile=True),
                                            alpha_grad=True,
                                            scatter_grad=True)

        # Clean forward pass for the standard loss/error statistics.
        out = model(Variable(X))
        ce = nn.CrossEntropyLoss()(out, Variable(y))
        mistakes = (out.data.max(1)[1] != y).float().sum()
        err = mistakes / X.size(0)

        print(epoch, batch_idx, robust_ce.data[0], robust_err,
              ce.data[0], err, file=log)
        if batch_idx % verbose == 0:
            print(epoch, batch_idx, robust_ce.data[0], robust_err,
                  ce.data[0], err)
        log.flush()

        # Free batch tensors eagerly to keep GPU memory bounded.
        del X, y, robust_ce, out, ce
예제 #3
0
def evaluate_robust(loader, model, epsilon, epoch, log):
    """Evaluate certified robust and clean loss/error of `model` over `loader`.

    Each batch's statistics are written to `log` and echoed to stdout.
    Uses the legacy (pre-0.4) PyTorch API (Variable, .data[0]).
    """
    model.eval()
    for i, (X, y) in enumerate(loader):
        X, y = X.cuda(), y.cuda()
        robust_ce, robust_err = robust_loss(model, epsilon,
                                            Variable(X), Variable(y))
        out = model(Variable(X))
        ce = nn.CrossEntropyLoss()(out, Variable(y))
        err = (out.data.max(1)[1] != y).float().sum() / X.size(0)

        # BUG FIX: `log` was passed as a positional argument (and printed as a
        # value) instead of being the output stream; use the `file=` keyword
        # as the sibling train/evaluate functions do.
        print(epoch, i, robust_ce.data[0], robust_err[0], ce.data[0], err,
              file=log)
        print(epoch, i, robust_ce.data[0], robust_err[0], ce.data[0], err)
        log.flush()
예제 #4
0
def evaluate_robust(loader, model, epsilon, log, verbose):
    """Evaluate the certified robust loss/error over `loader`.

    Runs with autograd globally disabled and returns
    (mean robust loss, mean robust error).
    NOTE(review): unlike the other evaluate_robust variants in this file,
    this one never calls model.eval() — confirm that is intentional.
    """
    batch_time = AverageMeter()
    robust_losses = AverageMeter()
    robust_errors = AverageMeter()

    end = time.time()

    torch.set_grad_enabled(False)
    for i, (X, y) in enumerate(loader):
        X, y = X.cuda(), y.cuda().long()
        # Collapse (N, 1) label tensors to (N,).
        if y.dim() == 2:
            y = y.squeeze(1)

        robust_ce, robust_err = robust_loss(model, epsilon, X, y)

        # measure accuracy and record loss
        robust_losses.update(robust_ce.item(), X.size(0))
        robust_errors.update(robust_err, X.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # print(i, robust_ce.item(), robust_err, file=log)

        if verbose:
            # '\r' redraws the progress line in place; a real newline is
            # emitted every `verbose` batches.
            endline = '\n' if i % verbose == 0 else '\r'
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Robust loss {rloss.val:.3f} ({rloss.avg:.3f})\t'
                  'Robust error {rerrors.val:.3f} ({rerrors.avg:.3f})'.format(
                      i,
                      len(loader),
                      batch_time=batch_time,
                      rloss=robust_losses,
                      rerrors=robust_errors),
                  end=endline)
        log.flush()

        # Free batch tensors eagerly to keep GPU memory bounded.
        del X, y, robust_ce
        if DEBUG and i == 10:
            break
    torch.set_grad_enabled(True)
    torch.cuda.empty_cache()
    return robust_losses.avg, robust_errors.avg
예제 #5
0
def train_robust(loader, model, opt, epsilon, epoch, log, verbose, alpha_grad,
                 scatter_grad, l1_proj):
    """One epoch of certified-robust training.

    Per-batch statistics go to `log`; every `verbose`-th batch is also
    echoed to stdout. Uses the legacy (pre-0.4) PyTorch Variable API.
    """
    model.train()
    if epoch == 0:
        # Snapshot of the pristine optimizer state (unused here).
        blank_state = opt.state_dict()

    for step, (X, y) in enumerate(loader):
        X = X.cuda()
        y = y.cuda().long()
        # Collapse (N, 1) label tensors to (N,).
        if y.dim() == 2:
            y = y.squeeze(1)

        # Certified robust objective — the quantity actually optimized.
        robust_ce, robust_err = robust_loss(model,
                                            epsilon,
                                            Variable(X),
                                            Variable(y),
                                            alpha_grad=alpha_grad,
                                            scatter_grad=scatter_grad)

        # Clean forward statistics, tracked for monitoring only.
        out = model(Variable(X))
        ce = nn.CrossEntropyLoss()(out, Variable(y))
        mistakes = (out.data.max(1)[1] != y).float().sum()
        err = mistakes / X.size(0)

        opt.zero_grad()
        robust_ce.backward()
        opt.step()

        print(epoch, step, robust_ce.data[0], robust_err, ce.data[0], err,
              file=log)

        if step % verbose == 0:
            print(epoch, step, robust_ce.data[0], robust_err, ce.data[0], err)
        log.flush()

        # Free batch tensors eagerly to keep GPU memory bounded.
        del X, y, robust_ce, out, ce
예제 #6
0
def attack(loader, model, epsilon, verbose=False, atk=None, robust=False):
    """Run attack `atk` over every batch in `loader`.

    When `robust` is set, also computes the certified robust error per batch.
    Returns (clean errors, attacked errors, robust errors) as per-batch lists.
    """
    total_err, total_fgs, total_robust = [], [], []
    if verbose:
        print("Requiring no gradients for parameters.")
    # Freeze all model parameters: gradients are only needed on the inputs.
    for param in model.parameters():
        param.requires_grad = False

    for batch_idx, (X, y) in enumerate(loader):
        X = Variable(X.cuda(), requires_grad=True)
        y = Variable(y.cuda().long())

        # Collapse (N, 1) label tensors to (N,).
        if y.dim() == 2:
            y = y.squeeze(1)

        if robust:
            robust_ce, robust_err = robust_loss(model, epsilon, X, y, False,
                                                False)

        # The attack callable reports (clean error, attacked error).
        err, err_fgs = atk(model, X, y, epsilon)

        total_err.append(err)
        total_fgs.append(err_fgs)
        if robust:
            total_robust.append(robust_err)
        if verbose:
            if robust:
                print('err: {} | attack: {} | robust: {}'.format(
                    err, err_fgs, robust_err))
            else:
                print('err: {} | attack: {}'.format(err, err_fgs))

    if robust:
        print('[TOTAL] err: {} | attack: {} | robust: {}'.format(
            mean(total_err), mean(total_fgs), mean(total_robust)))
    else:
        print('[TOTAL] err: {} | attack: {}'.format(mean(total_err),
                                                    mean(total_fgs)))
    return total_err, total_fgs, total_robust
예제 #7
0
def evaluate_robust(loader, model, epsilon, epoch, log, verbose, 
                    real_time=False, parallel=False, **kwargs):
    """Evaluate certified robust and clean loss/error over `loader`.

    Extra **kwargs are forwarded to robust_loss. Per-batch stats are written
    to `log`; a progress line is printed every `verbose` batches. Returns the
    mean certified robust error.
    NOTE(review): `real_time` and `parallel` are accepted but never read here.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    robust_losses = AverageMeter()
    robust_errors = AverageMeter()

    model.eval()

    end = time.time()

    torch.set_grad_enabled(False)
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda().long()
        # Collapse (N, 1) label tensors to (N,).
        if y.dim() == 2: 
            y = y.squeeze(1)

        robust_ce, robust_err = robust_loss(model, epsilon, X, y, **kwargs)

        # Clean forward pass for the standard loss/error statistics.
        out = model(Variable(X))
        ce = nn.CrossEntropyLoss()(out, Variable(y))
        err = (out.max(1)[1] != y).float().sum()  / X.size(0)

        # _,pgd_err = _pgd(model, Variable(X), Variable(y), epsilon)

        # measure accuracy and record loss
        losses.update(ce.item(), X.size(0))
        errors.update(err, X.size(0))
        robust_losses.update(robust_ce.item(), X.size(0))
        robust_errors.update(robust_err, X.size(0))

        # measure elapsed time
        batch_time.update(time.time()-end)
        end = time.time()

        print(epoch, i, robust_ce.item(), robust_err, ce.item(), err.item(),
           file=log)
        if verbose: 
            # print(epoch, i, robust_ce.data[0], robust_err, ce.data[0], err)
            # '\r' redraws the progress line in place; a real newline is
            # emitted every `verbose` batches.
            endline = '\n' if i % verbose == 0 else '\r'
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Robust loss {rloss.val:.3f} ({rloss.avg:.3f})\t'
                  'Robust error {rerrors.val:.3f} ({rerrors.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {error.val:.3f} ({error.avg:.3f})'.format(
                      i, len(loader), batch_time=batch_time, 
                      loss=losses, error=errors, rloss = robust_losses, 
                      rerrors = robust_errors), end=endline)
        log.flush()

        # Free batch tensors eagerly to keep GPU memory bounded.
        del X, y, robust_ce, out, ce
        if DEBUG and i ==10: 
            break
    torch.set_grad_enabled(True)
    torch.cuda.empty_cache()
    print('')
    print(' * Robust error {rerror.avg:.3f}\t'
          'Error {error.avg:.3f}'
          .format(rerror=robust_errors, error=errors))
    return robust_errors.avg
예제 #8
0
def train_robust(loader, model, opt, epsilon, epoch, log, verbose, 
                real_time=False, clip_grad=None, **kwargs):
    """Train `model` for one epoch on the certified robust loss.

    Extra **kwargs are forwarded to robust_loss. Per-batch stats are written
    to `log`; a progress line is echoed every `verbose` batches (every batch,
    redrawn in place, when `real_time` is set). If `clip_grad` is truthy,
    gradients are norm-clipped to that value before each optimizer step.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    robust_losses = AverageMeter()
    robust_errors = AverageMeter()

    model.train()

    end = time.time()
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda().long()
        # Collapse (N, 1) label tensors to (N,).
        if y.dim() == 2: 
            y = y.squeeze(1)
        data_time.update(time.time() - end)

        # Clean forward statistics only — no gradients needed here.
        with torch.no_grad(): 
            out = model(Variable(X))
            ce = nn.CrossEntropyLoss()(out, Variable(y))
            err = (out.max(1)[1] != y).float().sum()  / X.size(0)


        # Certified robust objective — the quantity actually optimized.
        robust_ce, robust_err = robust_loss(model, epsilon, 
                                             Variable(X), Variable(y), 
                                             **kwargs)
        opt.zero_grad()
        robust_ce.backward()


        if clip_grad: 
            nn.utils.clip_grad_norm_(model.parameters(), clip_grad)

        opt.step()

        # measure accuracy and record loss
        losses.update(ce.item(), X.size(0))
        errors.update(err.item(), X.size(0))
        robust_losses.update(robust_ce.detach().item(), X.size(0))
        robust_errors.update(robust_err, X.size(0))

        # measure elapsed time
        batch_time.update(time.time()-end)
        end = time.time()

        print(epoch, i, robust_ce.detach().item(), 
                robust_err, ce.item(), err.item(), file=log)

        if verbose and (i % verbose == 0 or real_time): 
            # '\r' redraws the progress line in place; a real newline is
            # emitted every `verbose` batches.
            endline = '\n' if i % verbose == 0 else '\r'
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Robust loss {rloss.val:.4f} ({rloss.avg:.4f})\t'
                  'Robust error {rerrors.val:.3f} ({rerrors.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {errors.val:.3f} ({errors.avg:.3f})'.format(
                   epoch, i, len(loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, errors=errors, 
                   rloss = robust_losses, rerrors = robust_errors), end=endline)
        log.flush()

        # Free batch tensors eagerly to keep GPU memory bounded.
        del X, y, robust_ce, out, ce, err, robust_err
        if DEBUG and i ==10: 
            break
    print('')
    torch.cuda.empty_cache()
예제 #9
0
def evaluate_ensemble(model_paths):
    """Evaluate unanimity/majority ensembles of certified models on MNIST.

    Loads the models in `model_paths` plus a parameter-averaged model, then
    for epsilon = 0.01 .. 0.20 prints one CSV row of normal errors/rejections
    and robust certification counts for the unanimous ensemble, the majority
    ensemble, the averaged model, and each individual model. Evaluation is
    restricted to a fixed random subset of 1000 test indices.
    """
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)

    test_loader = get_mnist_test_loader()

    models = read_models(model_paths)
    avg_model = averaging_model(models)
    models.append(avg_model)

    # Fixed subset of 1000 test-set indices for comparability across runs.
    rows = list(csv.reader(open('examples/random_indices.csv')))
    idx = set([int(row[0]) for row in rows[1:1001]])

    header = [
        'eps', 'unanm_norm_err', 'unanm_norm_rej', 'unanm_rob_certs',
        'maj_norm_err', 'maj_norm_rej', 'maj_rob_certs', 'avg_norm_err'
    ]
    header += [
        'model_' + str(i + 1) + '_rob_certs' for i in range(len(models) - 1)
    ]
    print(','.join([col for col in header]))

    for k in range(1, 21):
        unanm = {'norm_errs': 0, 'norm_rejs': 0, 'rob_certs': 0}
        maj = {'norm_errs': 0, 'norm_rejs': 0, 'rob_certs': 0}
        avg_norm_errs = 0
        indiv_rob_certs = [0 for _ in range(len(models) - 1)]
        eps = k / 100.0
        for i, (X, y) in enumerate(test_loader):
            if i not in idx:
                continue
            X, y = X.cuda(), y.cuda()

            true_class = y.data.item()
            # Votes of the individual models (the averaged model is last and
            # excluded from voting).
            preds = [model(X).data for model in models[:-1]]
            pred_classes = [pred.max(1)[1].item() for pred in preds]
            counter = Counter(pred_classes)
            max_class = -1
            max_count = 0
            for c in counter:
                if counter[c] > max_count:
                    max_count = counter[c]
                    max_class = c

            # Unanimous ensemble: correct / wrong only on full agreement,
            # otherwise rejected.
            if max_count == len(preds) and max_class == true_class:
                pass
            elif max_count == len(preds) and max_class != true_class:
                unanm['norm_errs'] += 1
            else:
                unanm['norm_rejs'] += 1

            # Majority ensemble: strict majority decides, otherwise rejected.
            if max_count >= len(preds) // 2 + 1 and max_class == true_class:
                pass
            elif max_count >= len(preds) // 2 + 1 and max_class != true_class:
                maj['norm_errs'] += 1
            else:
                maj['norm_rejs'] += 1

            preds_avg = models[-1](X).data
            if preds_avg.max(1)[1].item() == true_class:
                pass
            else:
                avg_norm_errs += 1

            # Robust certification per model (1 = NOT certified at eps).
            # FIX: the loop variables below were named `i`, shadowing the
            # dataset index `i` used by `if i not in idx` above — renamed to
            # avoid the latent hazard (no behavior change).
            robust_errs = []
            for m, model in enumerate(models):
                _, robust_err = robust_loss(model,
                                            eps,
                                            X,
                                            y,
                                            bounded_input=False)
                robust_errs.append(int(robust_err))

            if sum(robust_errs) < len(robust_errs):
                unanm['rob_certs'] += 1
            elif sum(robust_errs[:-1]) < len(robust_errs[:-1]) // 2 + 1:
                maj['rob_certs'] += 1

            for j in range(len(indiv_rob_certs)):
                indiv_rob_certs[j] += (1 - robust_errs[j])

        lst = [(k / 100.0), unanm['norm_errs'], unanm['norm_rejs'],
               unanm['rob_certs'], maj['norm_errs'], maj['norm_rejs'],
               maj['rob_certs'], avg_norm_errs]
        lst += indiv_rob_certs
        print(','.join([str(t) for t in lst]))
예제 #10
0
# Training loop: warm up with standard cross-entropy on epoch 0 (or in debug
# mode), then switch to the certified robust loss at eps = 3/255, heavily
# down-weighted by 1e-4.
print("train")
for epoch in range(nbepoch):
    print("epoch=", epoch, "/", nbepoch)
    net.train()
    total, correct = 0, 0
    for _, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)

        if debug or epoch == 0:
            # Plain cross-entropy warm-up.
            outputs = net(inputs)
            loss = criterion(outputs, targets)
        else:
            # Clean forward pass is only needed for accuracy statistics.
            with torch.no_grad():
                outputs = net(inputs)
            loss, _ = convex_adversarial.robust_loss(net, 3.0 / 255, inputs, targets)

        meanloss.append(loss.cpu().data.numpy())

        # Scale the robust loss way down after the warm-up epoch — presumably
        # to stabilize training; TODO confirm the intent of the 1e-4 factor.
        if epoch > 0:
            loss *= 0.0001

        optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(net.parameters(), 1)
        optimizer.step()

        # Running accuracy bookkeeping over the epoch.
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
예제 #11
0
def robust_loss_cascade(models, epsilon, X, y, **kwargs):
    """Cascade evaluation over a sequence of models.

    Each stage certifies the examples it can (w.r.t. its own prediction) and
    passes the uncertified remainder to the next model; the final model
    handles whatever is left. Statistics are accumulated over all stages and
    averaged over the full batch.

    Returns (robust_ce, robust_err, ce, err, I), where I holds the original
    batch indices of examples the last model could not certify (None when
    everything was certified earlier or by the last model).
    """
    total_robust_ce = 0.
    total_ce = 0.
    total_robust_err = 0.
    total_err = 0.

    batch_size = float(X.size(0))

    # Original batch indices of the examples still flowing through the cascade.
    I = torch.arange(X.size(0)).type_as(y.data)

    for j, model in enumerate(models[:-1]):

        out = model(X)
        ce = nn.CrossEntropyLoss(reduce=False)(out, y)
        # Certify w.r.t. the model's own predictions (out.max(1)[1]), not the
        # labels: `uncertified` marks examples this stage cannot certify.
        _, uncertified = robust_loss(model,
                                     epsilon,
                                     X,
                                     out.max(1)[1],
                                     size_average=False,
                                     **kwargs)
        certified = ~uncertified
        # NOTE(review): `l` is appended to but never read — looks vestigial.
        l = []
        if certified.sum() == 0:
            pass
            # print("Warning: Cascade stage {} has no certified values.".format(j+1))
        else:
            # Restrict to the certified subset and score it against the true
            # labels with this stage's model.
            X_cert = X[Variable(certified.nonzero()[:, 0])]
            y_cert = y[Variable(certified.nonzero()[:, 0])]

            ce = ce[Variable(certified.nonzero()[:, 0])]
            out = out[Variable(certified.nonzero()[:, 0])]
            err = (out.data.max(1)[1] != y_cert.data).float()
            robust_ce, robust_err = robust_loss(model,
                                                epsilon,
                                                X_cert,
                                                y_cert,
                                                size_average=False,
                                                **kwargs)
            # add statistics for certified examples
            total_robust_ce += robust_ce.sum()
            total_ce += ce.data.sum()
            total_robust_err += robust_err.sum()
            total_err += err.sum()
            l.append(certified.sum())
            # reduce data set to uncertified examples
            if uncertified.sum() > 0:
                X = X[Variable(uncertified.nonzero()[:, 0])]
                y = y[Variable(uncertified.nonzero()[:, 0])]
                I = I[uncertified.nonzero()[:, 0]]
            else:
                # Everything certified before reaching the last model:
                # average the accumulated statistics and return early.
                robust_ce = total_robust_ce / batch_size
                ce = total_ce / batch_size
                robust_err = total_robust_err.item() / batch_size
                err = total_err.item() / batch_size
                return robust_ce, robust_err, ce, err, None
        ####################################################################
    # compute normal ce and robust ce for the last model
    out = models[-1](X)
    ce = nn.CrossEntropyLoss(reduce=False)(out, y)
    err = (out.data.max(1)[1] != y.data).float()

    robust_ce, robust_err = robust_loss(models[-1],
                                        epsilon,
                                        X,
                                        y,
                                        size_average=False,
                                        **kwargs)

    # update statistics with the remaining model and take the average
    total_robust_ce += robust_ce.sum()
    total_ce += ce.data.sum()
    total_robust_err += robust_err.sum()
    total_err += err.sum()

    robust_ce = total_robust_ce / batch_size
    ce = total_ce / batch_size
    robust_err = total_robust_err.item() / batch_size
    err = total_err.item() / batch_size

    # Re-certify against the last model's predictions to report which
    # examples remain uncertified at the end of the cascade.
    _, uncertified = robust_loss(models[-1],
                                 epsilon,
                                 X,
                                 out.max(1)[1],
                                 size_average=False,
                                 **kwargs)
    if uncertified.sum() > 0:
        I = I[uncertified.nonzero()[:, 0]]
    else:
        I = None

    return robust_ce, robust_err, ce, err, I
예제 #12
0
            # Fragment of a benchmarking loop (enclosing scope not visible):
            # builds a small 1-conv-layer network with `j` channels and times
            # 10 robust_loss evaluations, with or without l1 projection.
            # torch.manual_seed(1)
            robust_net = nn.Sequential(nn.Conv2d(1, j, 3, stride=1, padding=1),
                                       nn.ReLU(), Flatten(),
                                       nn.Linear(j * 28 * 28, 2)).cuda()
            data = []
            opt = optim.Adam(robust_net.parameters(), lr=1e-3)

            ts = []

            for i in range(10):
                start_time = time.time()
                if PROJ:
                    # Approximate bound with a rank-50 l1 (median) projection.
                    robust_ce, robust_err = robust_loss(robust_net,
                                                        epsilon,
                                                        X,
                                                        y,
                                                        parallel=False,
                                                        l1_proj=50,
                                                        l1_type='median')
                else:
                    # Exact bound, no projection.
                    robust_ce, robust_err = robust_loss(robust_net,
                                                        epsilon,
                                                        X,
                                                        y,
                                                        parallel=False)

                out = robust_net(X)
                l2 = nn.CrossEntropyLoss()(out, y).item()
                err = (out.max(1)[1] != y).float().mean().item()
                data.append([l2, robust_ce.item(), err, robust_err])
                # if i % 100 == 0:
예제 #13
0
    # Keep the candidate point only if it is at Chebyshev distance > 3r from
    # every already-accepted point (enclosing sampling loop not visible).
    if min(np.max(np.abs(p - a)) for a in x) > 3 * r:
        x.append(p)
X = tr.tensor(np.array(x), dtype=tr.float32)
tr.manual_seed(1)
# Random binary labels in {0, 1}.
y = (tr.rand(m) + 0.5).long()

# Initialize a neural network
tr.manual_seed(1)
net = nn.Sequential(nn.Linear(2, 100), nn.ReLU(), nn.Linear(100, 100),
                    nn.ReLU(), nn.Linear(100, 100), nn.ReLU(),
                    nn.Linear(100, 100), nn.ReLU(), nn.Linear(100, 2))
epoch = 1000
learning_rate = 1e-3
opt = optim.Adam(net.parameters(), lr=learning_rate)
# Train directly on the certified robust loss at radius r.
for i in range(epoch):
    loss, err = robust_loss(net, r, X, y)
    print('Epoch : %d, error : %e' % (i + 1, loss.item()), end='\r')
    #    err = (out.max(1)[1].item != y).float().mean()
    opt.zero_grad()
    loss.backward()
    opt.step()

print('Error : ' + str(loss.item()))
print('Training :' + str(x))
print('Targets :' + str(y))
# Upper-bound the network's Lipschitz constant by the product of the
# per-layer max row-wise l1 norms of the weight matrices.
l = 1
for par in net.parameters():
    if len(par.data.shape) > 1:
        l = l * par.data.norm(1, 1).max()

print('Lipschitz norm of the network : ' + str(l.item()))
예제 #14
0
# Script fragment: train 3 epochs with a plain loss, then a final epoch on
# the certified robust loss at eps_max; afterwards sweep test epsilons.
eps_max = 0.05
print('Optimization method : ')
print(opt)
epochs = 4
print('Start Learning Process...')
for epoch in range(epochs):
    total_loss = 0.0
    # NOTE(review): train_iter is created but never used below.
    train_iter = iter(trainloader)
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        # Training relu network
        if epoch < 3:
            out = net(inputs)
            loss = lossy_fn(out, labels)
        else:
            # Final epoch: switch to the certified robust objective.
            loss, err = robust_loss(net, eps_max, inputs, labels)
        opt.zero_grad()
        loss.backward()
        opt.step()
        if i % 20 == 19:
            print(' Epoch : %d, training object : %5d, local error  : %.2e '
                  % (epoch + 1, (i + 1)*batch_size, loss.item()), end='\r')
        total_loss += loss.item()
    print()
    print(' Epoch : %d, main error  : %e' % (epoch + 1, total_loss/((i+1)*batch_size)))
print('End of Training.')
print(' ')
print('Start of Testing...')

# Test epsilons: 0.001..0.009 then 0.01..0.1.
eps_set = (0.001*(1+np.arange(9))).tolist() + (0.01*(1+np.arange(10))).tolist()
corr_unit = list(0. for i in range(len(eps_set)))
예제 #15
0
def train_robust(loader, model, opt, epsilon, epoch, log, verbose, 
                real_time=False, clip_grad=None, **kwargs):
    """Train one epoch on the certified robust loss, zeroing NaN gradients.

    Like the plain train_robust, but after backward() any NaN gradient
    entries are replaced with 0 and counted; the total NaN occurrence count
    is reported at the end of the epoch. Extra **kwargs are forwarded to
    robust_loss.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    robust_losses = AverageMeter()
    robust_errors = AverageMeter()

    model.train()

    end = time.time()
    # NAN_EXIST: per-batch flag that a NaN gradient was seen;
    # NAN_count: number of batches in which that happened.
    NAN_EXIST = False
    NAN_count = 0
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda().long()
        # Collapse (N, 1) label tensors to (N,).
        if y.dim() == 2: 
            y = y.squeeze(1)
        data_time.update(time.time() - end)

        # Clean forward statistics only — no gradients needed here.
        with torch.no_grad(): 
            out = model(Variable(X))
            ce = nn.CrossEntropyLoss()(out, Variable(y))
            err = (out.max(1)[1] != y).float().sum()  / X.size(0)


        robust_ce, robust_err = robust_loss(model, epsilon, 
                                             Variable(X), Variable(y), 
                                             **kwargs)
        ###############################################################
        # Loud warnings when either loss itself has gone NaN.
        if np.isnan(ce.item()):
            print('Natural Loss goes to NAN VALUE!!!')
        if np.isnan(robust_ce.detach().item()):
            print('Adversarial Loss goes to NAN VALUE!!!')
            
        opt.zero_grad()
        robust_ce.backward()
        ############################################################
        # Zero out any NaN entries in the gradients, reporting the first
        # occurrence of the epoch.
        for p in model.parameters():
            #nan_idx = np.argwhere(np.isnan(p.grad))
            if torch.sum(torch.isnan(p.grad))>0:
                if NAN_EXIST == False:
                    #
                    NAN_EXIST = True
                    if NAN_count == 0:
                        print('!!!!!!!!!!!!!!!!!!!!!!!!! Number of NAN elements: ', torch.sum(torch.isnan(p.grad)))
                #break
                p.grad[torch.isnan(p.grad)] = 0
            #print(torch.sum(p.grad>10))
            
            #nan_idx = np.argwhere(p.grad<=100)
            #print(p.grad.shape)
            #print(nan_idx.shape)
            #p.grad[nan_idx] = 0
        '''
        if NAN_EXIST == False:
            for p in model.parameters():
                if np.isnan(p.grad).any():
                    print('Gradient just goes to NAN VALUE!!!!!!!!!!!!!!!!!!!!!!!!!!')
                    NAN_EXIST = True
                    break
        if NAN_EXIST == True:
            break
        '''
        ############################################################
        # NOTE(review): both branches call opt.step(); the only difference is
        # that gradient clipping is skipped when a NaN was found — confirm
        # the step is intended rather than skipping the batch.
        if NAN_EXIST == False:
            if clip_grad: 
                nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
            
            opt.step()
        else:
            opt.step()
            NAN_count += 1
            NAN_EXIST = False

        # measure accuracy and record loss
        losses.update(ce.item(), X.size(0))
        errors.update(err.item(), X.size(0))
        robust_losses.update(robust_ce.detach().item(), X.size(0))
        robust_errors.update(robust_err, X.size(0))

        # measure elapsed time
        batch_time.update(time.time()-end)
        end = time.time()

        print(epoch, i, robust_ce.detach().item(), 
                robust_err, ce.item(), err.item(), file=log)

        if verbose and (i % verbose == 0 or real_time): 
            # '\r' redraws the progress line in place; a real newline is
            # emitted every `verbose` batches.
            endline = '\n' if i % verbose == 0 else '\r'
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Robust loss {rloss.val:.4f} ({rloss.avg:.4f})\t'
                  'Robust error {rerrors.val:.3f} ({rerrors.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {errors.val:.3f} ({errors.avg:.3f})'.format(
                   epoch, i, len(loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, errors=errors, 
                   rloss = robust_losses, rerrors = robust_errors), end=endline)
        log.flush()

        # Free batch tensors eagerly to keep GPU memory bounded.
        del X, y, robust_ce, out, ce, err, robust_err
        if DEBUG and i ==10: 
            break
        #if i==100:
        #    break
    print('In totoal, the number of NAN gradient elements happen is : ', NAN_count)
    print('')
    torch.cuda.empty_cache()
예제 #16
0
        break

    # Benchmark fragment (enclosing scope not visible): time the convex-dual
    # robust_loss bound when requested by --method or --compare_all.
    if args.method == "convexdual" or args.compare_all:
        from convex_adversarial import robust_loss
        '''
		if(PARALLEL):
			# The first run with parallel will take a few seconds
			# to warm up.
			eric_loss, eric_err = robust_loss(model,\
						 epsilon, X, y, parallel=PARALLEL)
			del eric_loss, eric_err
		'''

        start = time.time()
        # Parallel bound with inputs clamped to [0, 1].
        eric_loss, eric_err = robust_loss(model,\
          epsilon, X, y,parallel=True,\
          bounded_input={0, 1})
        #norm_type="l1_median", proj=20)
        #eric_loss, eric_err = robust_loss(model,\
        #epsilon, X, y, parallel=PARALLEL)

        print("eric loss", eric_loss)
        print("eric err:", eric_err)
        print("eric time per sample:", (time.time() - start) / X.shape[0])
        del eric_loss, eric_err
        print()

    if args.method == "baseline" or args.compare_all:

        #if(method == BASELINE):
        start = time.time()
예제 #17
0
def plot_certificates():
    """Plot fraction of certified MNIST examples per training epoch.

    Compares AMLS certification results (loaded from a pickle, sigma in
    {0.1, 0.2, 0.3}) against Wong & Kolter robust_loss certificates at
    epsilon = 0.1 computed from saved model checkpoints, and saves the
    figure as an SVG.
    """
    # Load data
    # The bounds in NN-space
    x_min = 0.
    x_max = 1.

    # Fix random seed for reproducibility
    seed = 0
    np.random.seed(seed)
    torch.manual_seed(seed)

    _, test_loader = pblm.mnist_loaders(50)

    # Get data into arrays for convenience
    # Only the first batch (50 examples) is used.
    for idx, (data, target) in enumerate(test_loader):
        if CUDA:
            data, target = data.float().cuda(), target.long().cuda()
        else:
            data, target = data.float(), target.long()
        #print(data.size())
        #raise Exception()

        data = data.view(-1, 1, 28, 28)
        x = data
        y = target
        break

    #print(x.size(), y.size())
    #raise Exception()

    #epochs = np.array([-1,0,2,4,6,9,14,19,24,32,41,49,61,74,86,99])
    epochs = np.array([32, 41, 49, 61, 74, 86, 99])
    robust_errs = []
    for epoch in epochs:
        epsilon = 0.1

        model = pblm.mnist_model()
        # epoch == -1 selects the non-robust baseline checkpoint (unused with
        # the current `epochs` array).
        if epoch == -1:
            model.load_state_dict(
                torch.load(
                    './snapshots/mnist_baseline_batch_size_50_epochs_100_lr_0.001_opt_adam_real_time_False_seed_0_checkpoint_99.pth'
                ))
        else:
            model.load_state_dict(
                torch.load(
                    f'./snapshots/mnist_robustified_robust_batch_size_50_epochs_100_epsilon_0.1_l1_test_exact_l1_train_exact_lr_0.001_opt_adam_real_time_False_schedule_length_50_seed_0_starting_epsilon_0.01_checkpoint_{epoch}.pth'
                ))
        if CUDA:
            model.cuda()

        _, robust_err = robust_loss(model, epsilon, x, y)
        robust_errs.append(robust_err)

    robust_errs = np.array(robust_errs)

    results = pickle.load(
        open(f'./snapshots/mnist_extracted_exp_results.pickle', 'rb'))
    # Epochs at which the AMLS results were collected (length 16).
    xs = np.array([-1, 0, 2, 4, 6, 9, 14, 19, 24, 32, 41, 49, 61, 74, 86, 99])

    # Per-sigma fraction of NOT-certified samples over 50 sample ids.
    our_results = {}
    for sigma in [0.1, 0.2, 0.3]:
        sigma_lg_ps = []
        our_results[sigma] = np.zeros(16)

        for sample_id in range(50):
            #print('sample id', sample_id)
            #if sample_id == 24:
            #  print([r.shape for r in results[(sample_id,sigma)]])
            #  lg_ps = np.array(results[(sample_id,sigma)])
            #  print('lg_ps', lg_ps.shape)
            #
            #  input()

            lg_ps = np.array(results[(sample_id, sigma)])

            #print('lg_ps', lg_ps.shape)
            #input()

            #print(lg_ps.shape)
            #if len(lg_ps.shape) == 1:
            #  lg_ps = lg_ps.reshape((-1,1))
            #  #print(lg_ps.shape)
            mean_ps = np.mean(lg_ps, axis=1)

            #print(mean_ps.shape, lg_ps.shape)

            # -250.0 appears to be the sentinel for "certified" — a sample
            # counts as uncertified when its mean log-p differs from it.
            our_results[sigma] += mean_ps != -250.0

            #print(mean_ps)
            #input()
            #raise Exception()

            #print(mean_ps.shape)
            #raise Exception()

        our_results[sigma] /= 50.0

    fig = plt.figure(figsize=(cm2inch(8.0), cm2inch(6.0)))
    ax = fig.add_subplot(1,
                         1,
                         1,
                         xlabel='epoch',
                         ylabel='fraction certified',
                         ylim=(-0.05, 1.0))

    # Plot 1 - uncertified fraction = certified fraction per method.
    ax.plot(xs,
            1. - our_results[0.3],
            color='navy',
            marker='.',
            linewidth=1.0,
            label=r'AMLS $\epsilon=0.3$')
    ax.plot(xs,
            1. - our_results[0.2],
            color='seagreen',
            marker='.',
            linewidth=1.0,
            label=r'AMLS $\epsilon=0.2$')
    ax.plot(xs,
            1. - our_results[0.1],
            color='firebrick',
            marker='.',
            linewidth=1.0,
            label=r'AMLS $\epsilon=0.1$')
    ax.plot(epochs,
            1. - robust_errs,
            color='grey',
            marker='.',
            linestyle='--',
            linewidth=1.0,
            label=r'W\&K $\epsilon=0.1$')

    #ax.legend(loc='lower right')
    ax.legend(bbox_to_anchor=(0.9, 0.15),
              loc="lower right",
              bbox_transform=fig.transFigure)
    ax.xaxis.set_tick_params(width=0.5)
    ax.yaxis.set_tick_params(width=0.5)
    ax.spines['left'].set_linewidth(0.5)
    ax.spines['bottom'].set_linewidth(0.5)
    sns.despine()

    #fig.savefig(f'mnist_test_robust_losses.pdf', bbox_inches='tight')
    fig.savefig(f'./results/robust/mnist_certificates.svg',
                bbox_inches='tight')
    plt.close(fig)