Example #1
    def init_kw_bounds_full(self, domain):
        lower_bounds = [domain[:,0]]
        upper_bounds = [domain[:,1]]
        
        if min(domain[:, 0]) < 0 or max(domain[:, 1]) > 1:
            print('for a ball not contained in [0, 1], please use get_lower_bounds')
            return

        
        dual = DualNetwork(self.net, self.x, self.ball_eps, bounded_input=True)
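        # dual.dual_net holds one dual layer per primal layer; each DualReLU
        # stores the pre-activation bounds zl / zu computed when the dual
        # network was constructed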
        for layer in dual.dual_net[1:]:
            if type(layer) is DualReLU:
                # K&W treats these as input bounds to the ReLU, but our
                # code reasons in terms of output bounds of a layer.
                # We grab these bounds and enqueue them; they correspond
                # to the output of the layer preceding the ReLU.
                
                lower_bounds[-1] = layer.zl.squeeze()
                upper_bounds[-1] = layer.zu.squeeze()
                lower_bounds.append(F.relu(lower_bounds[-1]))
                upper_bounds.append(F.relu(upper_bounds[-1]))
                
            elif isinstance(layer, DualReshape):
                # dual of a Flatten layer: bounds are simply flattened
                lower_bounds.append(lower_bounds[-1].view(-1))
                upper_bounds.append(upper_bounds[-1].view(-1))
            else:
                lower_bounds.append([])
                upper_bounds.append([])


        # Also add the bounds on the network's final output
        lower_bounds.append(dual(torch.ones(1,1,1)).view(-1))
        upper_bounds.append(-dual(-torch.ones(1,1,1)).view(-1))
        #self.nf = dual.nf 
        return lower_bounds, upper_bounds
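
Below is a minimal sketch (not part of the original file) of the domain layout this helper assumes: column 0 holds per-coordinate lower bounds, column 1 upper bounds, clipped to the [0, 1] ball that the guard above checks for.

import torch

x_flat = torch.rand(784)                      # a flattened input point
eps = 0.1
domain = torch.stack([(x_flat - eps).clamp(min=0.0),
                      (x_flat + eps).clamp(max=1.0)], dim=1)  # shape (784, 2)
# domain[:, 0] are lower bounds, domain[:, 1] upper bounds, as read above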
Example #2
    def build_approximation_v2(self, domain):
        # This works only in the case where what we are optimizing over is an
        # L_inf eps-ball, with potentially added margins.
        domain_lb = domain.min()
        domain_ub = domain.max()

        lbs = domain.select(-1, 0)
        ubs = domain.select(-1, 1)
        rng = ubs - lbs
        eps = (rng.max() / 2).item()

        # Try to identify the centre x:
        # if a coordinate's range is exactly 2*eps, neither side was clipped,
        #   so the point is (ub + lb) / 2;
        # if its lb was clipped to domain_lb, the point is ub - eps;
        # otherwise its ub was clipped to domain_ub and the point is lb + eps.
        x = torch.where((rng - 2*eps).abs() <= 1e-6,
                        (ubs + lbs) / 2,
                        torch.where(lbs == domain_lb,
                                    ubs - eps,
                                    lbs + eps))

        # Rebuild the lower and upper bounds from the deduced x to verify
        # that we have identified the setup correctly.
        rebuild_domain = torch.stack([torch.clamp(x - eps, domain_lb, None),
                                      torch.clamp(x + eps, None, domain_ub)], -1)
        assert (rebuild_domain - domain).abs().max() < 1e-6

        if x.dim() == 3 or x.dim() == 1:
            x = x.unsqueeze(0)
        # The K&W code doesn't give a way to change those bounds, but so far
        # our problem fits.
        if ((rng - 2*eps).abs() < 1e-6).sum() == rng.numel():
            # Assume that no bounds are enforced: all the
            # coordinates carry only the epsilon constraint.
            dual = DualNetwork(nn.Sequential(*self.layers), x, eps, bounded_input=False)
        else:
            assert domain_lb == 0
            assert domain_ub == 1
            dual = DualNetwork(nn.Sequential(*self.layers), x, eps, bounded_input=True)


        return dual
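
A small standalone check (toy numbers, assuming domain_lb = 0 and domain_ub = 1) of the centre-recovery logic above:

import torch

lbs = torch.tensor([0.0, 0.3, 0.7])   # lb clipped / interior / ub clipped
ubs = torch.tensor([0.2, 0.7, 1.0])
eps = 0.2
rng = ubs - lbs
x = torch.where((rng - 2 * eps).abs() <= 1e-6,
                (ubs + lbs) / 2,
                torch.where(lbs == 0.0, ubs - eps, lbs + eps))
print(x)  # tensor([0.0000, 0.5000, 0.9000])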
Example #3
    def build_approximation(self, domains):
        # Okay, this is a disgusting, disgusting hack. This DEFINITELY should
        # be replaced by something more proper in practice but I'm doing this
        # for a quick experiment.

        # The code from https://github.com/locuslab/convex_adversarial only
        # works in the case of adversarial examples, that is, it assumes the
        # domain is centered around a point, and is limited by an infinity norm
        # constraint. Rather than properly implementing the general
        # optimization, I'm just going to convert the problem into this form,
        # by adding a fake linear at the beginning. This is definitely not
        # clean :)
        batched = domains.shape[0] > 1

        domain_lb = domains.select(2, 0)
        domain_ub = domains.select(2, 1)

        with torch.no_grad():
            x = (domain_ub + domain_lb) / 2
            domain_radius = (domain_ub - domain_lb) / 2

            if batched:
                # Verify that we can use the same epsilon for both parts
                assert (domain_radius[0] - domain_radius[1]).abs().sum() < 1e-6
                # We have written the code assuming that the batch size would
                # be limited to 2, check that it is the case.
                assert domains.shape[0] <= 2

            domain_radius = domain_radius[0]

            # Disgusting hack number 2:
            # In certain cases we don't want to allow a variable to move at all.
            # Let's just allow it to move a tiny bit.
            domain_radius[domain_radius == 0] = 1e-6

            bias = x[0].clone()
            x[0].fill_(0)
            if batched:
                x[1] = (x[1] - bias) / domain_radius

            inp_layer = nn.Linear(domains.size(1), domains.size(1), bias=True)
            inp_layer.weight.copy_(torch.diag(domain_radius))
            inp_layer.bias.copy_(bias)
            fake_net = nn.Sequential(*simplify_network([inp_layer] +
                                                       self.layers))

            dual = DualNetwork(fake_net, x, 1)

        return dual
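
A quick sanity check of the reparameterisation trick above: the prepended linear layer maps the unit l_inf ball around 0 onto the original box [lb, ub] (toy numbers, not from the original code):

import torch
import torch.nn as nn

lb = torch.tensor([0.0, 0.2])
ub = torch.tensor([0.4, 1.0])
x, r = (ub + lb) / 2, (ub - lb) / 2

inp_layer = nn.Linear(2, 2, bias=True)
with torch.no_grad():
    inp_layer.weight.copy_(torch.diag(r))
    inp_layer.bias.copy_(x)
    # the corners of the eps = 1 ball around 0 land exactly on the box
    print(inp_layer(torch.tensor([-1.0, -1.0])))  # [0.0, 0.2] == lb
    print(inp_layer(torch.tensor([1.0, 1.0])))    # [0.4, 1.0] == ub

With this affine reparameterisation, the generic box constraint becomes the eps = 1 case that DualNetwork already supports, which is why the code above can call DualNetwork(fake_net, x, 1).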
Example #4
def Train(model, t, loader, eps_scheduler, max_eps, norm, logger, verbose,
          train, opt, method, **kwargs):
    # if train=True, use training mode
    # if train=False, use test mode, no back prop

    num_class = 10
    losses = AverageMeter()
    l1_losses = AverageMeter()
    errors = AverageMeter()
    robust_errors = AverageMeter()
    regular_ce_losses = AverageMeter()
    robust_ce_losses = AverageMeter()
    relu_activities = AverageMeter()
    bound_bias = AverageMeter()
    bound_diff = AverageMeter()
    unstable_neurons = AverageMeter()
    dead_neurons = AverageMeter()
    alive_neurons = AverageMeter()
    batch_time = AverageMeter()
    batch_multiplier = kwargs.get("batch_multiplier", 1)
    kappa = 1
    beta = 1
    if train:
        model.train()
    else:
        model.eval()
    # pregenerate the array for specifications, will be used for scatter
    sa = np.zeros((num_class, num_class - 1), dtype=np.int32)
    for i in range(sa.shape[0]):
        for j in range(sa.shape[1]):
            if j < i:
                sa[i][j] = j
            else:
                sa[i][j] = j + 1
    sa = torch.LongTensor(sa)
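    # row i of sa lists the (num_class - 1) class indices other than i; it is
    # the index matrix used below to scatter margin bounds back to full width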
    batch_size = loader.batch_size * batch_multiplier
    if batch_multiplier > 1 and train:
        logger.log(
            'Warning: Large batch training. The equivalent batch size is {} * {} = {}.'
            .format(batch_multiplier, loader.batch_size, batch_size))
    # per-channel std and mean
    std = torch.tensor(loader.std).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
    mean = torch.tensor(loader.mean).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)

    model_range = 0.0
    end_eps = eps_scheduler.get_eps(t + 1, 0)
    if end_eps < np.finfo(np.float32).tiny:
        logger.log('eps {} close to 0, using natural training'.format(end_eps))
        method = "natural"
    for i, (data, labels) in enumerate(loader):
        start = time.time()
        eps = eps_scheduler.get_eps(t, int(i // batch_multiplier))
        if train and i % batch_multiplier == 0:
            opt.zero_grad()
        # generate specifications
        c = torch.eye(num_class).type_as(data)[labels].unsqueeze(
            1) - torch.eye(num_class).type_as(data).unsqueeze(0)
        # remove specifications to self
        I = (~(labels.data.unsqueeze(1) == torch.arange(num_class).type_as(
            labels.data).unsqueeze(0)))
        c = (c[I].view(data.size(0), num_class - 1, num_class))
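        # c has shape (batch, num_class - 1, num_class); row k of c[b] is
        # e_label - e_k, so multiplying the logits by c[b] yields the margins
        # between the ground-truth class and every other class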
        # scatter matrix to avoid compute margin to self
        sa_labels = sa[labels]
        # storing computed lower bounds after scatter
        lb_s = torch.zeros(data.size(0), num_class)
        ub_s = torch.zeros(data.size(0), num_class)

        # FIXME: Assume unnormalized data is from range 0 - 1
        if kwargs["bounded_input"]:
            if norm != np.inf:
                raise ValueError(
                    "bounded input only makes sense for Linf perturbation. "
                    "Please set the bounded_input option to false.")
            data_max = torch.reshape((1. - mean) / std, (1, -1, 1, 1))
            data_min = torch.reshape((0. - mean) / std, (1, -1, 1, 1))
            data_ub = torch.min(data + (eps / std), data_max)
            data_lb = torch.max(data - (eps / std), data_min)
        else:
            if norm == np.inf:
                data_ub = data + (eps / std)
                data_lb = data - (eps / std)
            else:
                # For other norms, eps will be used instead.
                data_ub = data_lb = data

        if list(model.parameters())[0].is_cuda:
            data = data.cuda()
            data_ub = data_ub.cuda()
            data_lb = data_lb.cuda()
            labels = labels.cuda()
            c = c.cuda()
            sa_labels = sa_labels.cuda()
            lb_s = lb_s.cuda()
            ub_s = ub_s.cuda()
        # convert epsilon to a tensor
        eps_tensor = data.new(1)
        eps_tensor[0] = eps

        # omit the regular cross entropy, since we use robust error
        output = model(data,
                       method_opt="forward",
                       disable_multi_gpu=(method == "natural"))
        regular_ce = CrossEntropyLoss()(output, labels)
        regular_ce_losses.update(regular_ce.cpu().detach().numpy(),
                                 data.size(0))
        errors.update(
            torch.sum(
                torch.argmax(output, dim=1) != labels).cpu().detach().numpy() /
            data.size(0), data.size(0))
        # get range statistic
        model_range = output.max().detach().cpu().item() - output.min().detach().cpu().item()
        '''
        torch.set_printoptions(threshold=5000)
        print('prediction:  ', output)
        ub, lb, _, _, _, _ = model(norm=norm, x_U=data_ub, x_L=data_lb, eps=eps, C=c, method_opt="interval_range")
        lb = lb_s.scatter(1, sa_labels, lb)
        ub = ub_s.scatter(1, sa_labels, ub)
        print('interval ub: ', ub)
        print('interval lb: ', lb)
        ub, _, lb, _ = model(norm=norm, x_U=data_ub, x_L=data_lb, eps=eps, C=c, upper=True, lower=True, method_opt="backward_range")
        lb = lb_s.scatter(1, sa_labels, lb)
        ub = ub_s.scatter(1, sa_labels, ub)
        print('crown-ibp ub: ', ub)
        print('crown-ibp lb: ', lb) 
        ub, _, lb, _ = model(norm=norm, x_U=data_ub, x_L=data_lb, eps=eps, C=c, upper=True, lower=True, method_opt="full_backward_range")
        lb = lb_s.scatter(1, sa_labels, lb)
        ub = ub_s.scatter(1, sa_labels, ub)
        print('full-crown ub: ', ub)
        print('full-crown lb: ', lb)
        input()
        '''

        if verbose or method != "natural":
            if kwargs["bound_type"] == "convex-adv":
                # Wong and Kolter's bound, or equivalently Fast-Lin
                if kwargs["convex-proj"] is not None:
                    proj = kwargs["convex-proj"]
                    if norm == np.inf:
                        norm_type = "l1_median"
                    elif norm == 2:
                        norm_type = "l2_normal"
                    else:
                        raise (ValueError(
                            "Unsupported norm {} for convex-adv".format(norm)))
                else:
                    proj = None
                    if norm == np.inf:
                        norm_type = "l1"
                    elif norm == 2:
                        norm_type = "l2"
                    else:
                        raise (ValueError(
                            "Unsupported norm {} for convex-adv".format(norm)))
                if loader.std == [1] or loader.std == [1, 1, 1]:
                    convex_eps = eps
                else:
                    convex_eps = eps / np.mean(loader.std)
                    # for CIFAR we are roughly / 0.2
                    # FIXME this is due to a bug in convex_adversarial, we cannot use per-channel eps
                if norm == np.inf:
                    # bounded input is only for Linf
                    if kwargs["bounded_input"]:
                        # FIXME the bounded projection in convex_adversarial has a bug, data range must be positive
                        assert loader.std == [1, 1, 1] or loader.std == [1]
                        data_l = 0.0
                        data_u = 1.0
                    else:
                        data_l = -np.inf
                        data_u = np.inf
                else:
                    data_l = data_u = None
                f = DualNetwork(model,
                                data,
                                convex_eps,
                                proj=proj,
                                norm_type=norm_type,
                                bounded_input=kwargs["bounded_input"],
                                data_l=data_l,
                                data_u=data_u)
                lb = f(c)
            elif kwargs["bound_type"] == "interval":
                ub, lb, relu_activity, unstable, dead, alive = model(
                    norm=norm,
                    x_U=data_ub,
                    x_L=data_lb,
                    eps=eps,
                    C=c,
                    method_opt="interval_range")
            elif kwargs["bound_type"] == "crown-full":
                _, _, lb, _ = model(norm=norm,
                                    x_U=data_ub,
                                    x_L=data_lb,
                                    eps=eps,
                                    C=c,
                                    upper=False,
                                    lower=True,
                                    method_opt="full_backward_range")
                unstable = dead = alive = relu_activity = torch.tensor([0])
            elif kwargs["bound_type"] == "crown-interval":
                # Enable multi-GPU only for the computationally expensive CROWN-IBP bounds,
                # not for regular forward propagation and IBP because the communication overhead can outweigh benefits, giving little speedup.
                ub, ilb, relu_activity, unstable, dead, alive = model(
                    norm=norm,
                    x_U=data_ub,
                    x_L=data_lb,
                    eps=eps,
                    C=c,
                    method_opt="interval_range")
                crown_final_beta = kwargs['final-beta']
                beta = (max_eps - eps * (1.0 - crown_final_beta)) / max_eps
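                # beta goes linearly from 1 at eps = 0 down to final-beta at
                # eps = max_eps, shifting weight from the CROWN bound to the
                # cheaper IBP bound as eps grows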
                if beta < 1e-5:
                    lb = ilb
                else:
                    if kwargs["runnerup_only"]:
                        # regenerate a smaller c, with just the runner-up prediction
                        # mask the ground-truth label's output, select the second-largest class
                        # print(output)
                        # torch.set_printoptions(threshold=5000)
                        masked_output = output.detach().scatter(
                            1, labels.unsqueeze(-1), -100)
                        # print(masked_output)
                        # location of the runner up prediction
                        runner_up = masked_output.max(1)[1]
                        # print(runner_up)
                        # print(labels)
                        # get the margin from the ground-truth class to the runner-up only
                        runnerup_c = torch.eye(num_class).type_as(data)[labels]
                        # print(runnerup_c)
                        # set the runner-up location to -1
                        runnerup_c.scatter_(1, runner_up.unsqueeze(-1), -1)
                        runnerup_c = runnerup_c.unsqueeze(1).detach()
                        # print(runnerup_c)
                        # get the bound for runnerup_c
                        _, _, clb, bias = model(norm=norm,
                                                x_U=data_ub,
                                                x_L=data_lb,
                                                eps=eps,
                                                C=runnerup_c,
                                                method_opt="backward_range")
                        clb = clb.expand(clb.size(0), num_class - 1)
                    else:
                        # get the CROWN bound using interval bounds
                        _, _, clb, bias = model(norm=norm,
                                                x_U=data_ub,
                                                x_L=data_lb,
                                                eps=eps,
                                                C=c,
                                                method_opt="backward_range")
                        bound_bias.update(bias.sum() / data.size(0))
                    # how much better is crown-ibp than ibp?
                    diff = (clb - ilb).sum().item()
                    bound_diff.update(diff / data.size(0), data.size(0))
                    # lb = torch.max(lb, clb)
                    lb = clb * beta + ilb * (1 - beta)
            else:
                raise RuntimeError("Unknown bound_type " +
                                   kwargs["bound_type"])
            lb = lb_s.scatter(1, sa_labels, lb)
            robust_ce = CrossEntropyLoss()(-lb, labels)
            if kwargs["bound_type"] != "convex-adv":

                relu_activities.update(
                    relu_activity.sum().detach().cpu().item() / data.size(0),
                    data.size(0))
                unstable_neurons.update(
                    unstable.sum().detach().cpu().item() / data.size(0),
                    data.size(0))
                dead_neurons.update(
                    dead.sum().detach().cpu().item() / data.size(0),
                    data.size(0))
                alive_neurons.update(
                    alive.sum().detach().cpu().item() / data.size(0),
                    data.size(0))

        if method == "robust":
            loss = robust_ce
        elif method == "robust_activity":
            loss = robust_ce + kwargs["activity_reg"] * relu_activity.sum()
        elif method == "natural":
            loss = regular_ce
        elif method == "robust_natural":
            natural_final_factor = kwargs["final-kappa"]
            kappa = (max_eps - eps * (1.0 - natural_final_factor)) / max_eps
            loss = (1 - kappa) * robust_ce + kappa * regular_ce
        else:
            raise ValueError("Unknown method " + method)

        if train and kwargs["l1_reg"] > np.finfo(np.float32).tiny:
            reg = kwargs["l1_reg"]
            l1_loss = 0.0
            for name, param in model.named_parameters():
                if 'bias' not in name:
                    l1_loss = l1_loss + torch.sum(torch.abs(param))
            l1_loss = reg * l1_loss
            loss = loss + l1_loss
            l1_losses.update(l1_loss.cpu().detach().numpy(), data.size(0))
        if train:
            loss.backward()
            if i % batch_multiplier == 0 or i == len(loader) - 1:
                opt.step()

        losses.update(loss.cpu().detach().numpy(), data.size(0))

        if verbose or method != "natural":
            robust_ce_losses.update(robust_ce.cpu().detach().numpy(),
                                    data.size(0))
            # robust_ce_losses.update(robust_ce, data.size(0))
            robust_errors.update(
                torch.sum(
                    (lb < 0).any(dim=1)).cpu().detach().numpy() / data.size(0),
                data.size(0))

        batch_time.update(time.time() - start)
        if i % 50 == 0 and train:
            logger.log(
                '[{:2d}:{:4d}]: eps {:4f}  '
                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})  '
                'Total Loss {loss.val:.4f} ({loss.avg:.4f})  '
                'L1 Loss {l1_loss.val:.4f} ({l1_loss.avg:.4f})  '
                'CE {regular_ce_loss.val:.4f} ({regular_ce_loss.avg:.4f})  '
                'RCE {robust_ce_loss.val:.4f} ({robust_ce_loss.avg:.4f})  '
                'Err {errors.val:.4f} ({errors.avg:.4f})  '
                'Rob Err {robust_errors.val:.4f} ({robust_errors.avg:.4f})  '
                'Uns {unstable.val:.1f} ({unstable.avg:.1f})  '
                'Dead {dead.val:.1f} ({dead.avg:.1f})  '
                'Alive {alive.val:.1f} ({alive.avg:.1f})  '
                'Tightness {tight.val:.5f} ({tight.avg:.5f})  '
                'Bias {bias.val:.5f} ({bias.avg:.5f})  '
                'Diff {diff.val:.5f} ({diff.avg:.5f})  '
                'R {model_range:.3f}  '
                'beta {beta:.3f} ({beta:.3f})  '
                'kappa {kappa:.3f} ({kappa:.3f})  '.format(
                    t,
                    i,
                    eps,
                    batch_time=batch_time,
                    loss=losses,
                    errors=errors,
                    robust_errors=robust_errors,
                    l1_loss=l1_losses,
                    regular_ce_loss=regular_ce_losses,
                    robust_ce_loss=robust_ce_losses,
                    unstable=unstable_neurons,
                    dead=dead_neurons,
                    alive=alive_neurons,
                    tight=relu_activities,
                    bias=bound_bias,
                    diff=bound_diff,
                    model_range=model_range,
                    beta=beta,
                    kappa=kappa))

    logger.log('[FINAL RESULT epoch:{:2d} eps:{:.4f}]: '
               'Time {batch_time.val:.3f} ({batch_time.avg:.3f})  '
               'Total Loss {loss.val:.4f} ({loss.avg:.4f})  '
               'L1 Loss {l1_loss.val:.4f} ({l1_loss.avg:.4f})  '
               'CE {regular_ce_loss.val:.4f} ({regular_ce_loss.avg:.4f})  '
               'RCE {robust_ce_loss.val:.4f} ({robust_ce_loss.avg:.4f})  '
               'Uns {unstable.val:.3f} ({unstable.avg:.3f})  '
               'Dead {dead.val:.1f} ({dead.avg:.1f})  '
               'Alive {alive.val:.1f} ({alive.avg:.1f})  '
               'Tight {tight.val:.5f} ({tight.avg:.5f})  '
               'Bias {bias.val:.5f} ({bias.avg:.5f})  '
               'Diff {diff.val:.5f} ({diff.avg:.5f})  '
               'Err {errors.val:.4f} ({errors.avg:.4f})  '
               'Rob Err {robust_errors.val:.4f} ({robust_errors.avg:.4f})  '
               'R {model_range:.3f}  '
               'beta {beta:.3f} ({beta:.3f})  '
               'kappa {kappa:.3f} ({kappa:.3f})  \n'.format(
                   t,
                   eps,
                   batch_time=batch_time,
                   loss=losses,
                   errors=errors,
                   robust_errors=robust_errors,
                   l1_loss=l1_losses,
                   regular_ce_loss=regular_ce_losses,
                   robust_ce_loss=robust_ce_losses,
                   unstable=unstable_neurons,
                   dead=dead_neurons,
                   alive=alive_neurons,
                   tight=relu_activities,
                   bias=bound_bias,
                   diff=bound_diff,
                   model_range=model_range,
                   kappa=kappa,
                   beta=beta))
    for i, l in enumerate(
            model if isinstance(model, BoundSequential) else model.module):
        if isinstance(l, BoundLinear) or isinstance(l, BoundConv2d):
            norm = l.weight.data.detach().view(l.weight.size(0),
                                               -1).abs().sum(1).max().cpu()
            logger.log('layer {} norm {}'.format(i, norm))
    if method == "natural":
        return errors.avg, errors.avg
    else:
        return robust_errors.avg, errors.avg
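
A standalone illustration (toy sizes, not from the original file) of the scatter step used above: the (num_class - 1) margin lower bounds are placed back into a num_class-wide row via sa, leaving the ground-truth column at zero, so that CrossEntropyLoss()(-lb, labels) penalises any margin that can become negative:

import torch

num_class = 4
sa = torch.tensor([[1, 2, 3], [0, 2, 3], [0, 1, 3], [0, 1, 2]])
labels = torch.tensor([2])
lb = torch.tensor([[0.5, -0.1, 0.3]])   # margin bounds vs classes 0, 1, 3
lb_s = torch.zeros(1, num_class)
print(lb_s.scatter(1, sa[labels], lb))  # tensor([[ 0.5000, -0.1000,  0.0000,  0.3000]])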
Example #5
def Train(model, t, loader, start_eps, end_eps, max_eps, logger, verbose, train, opt, method, **kwargs):
    # if train=True, use training mode
    # if train=False, use test mode, no back prop
    num_class = 10
    losses = AverageMeter()
    l1_losses = AverageMeter()
    errors = AverageMeter()
    robust_errors = AverageMeter()
    regular_ce_losses = AverageMeter()
    robust_ce_losses = AverageMeter()
    relu_activities = AverageMeter()
    bound_bias = AverageMeter()
    bound_diff = AverageMeter()
    unstable_neurons = AverageMeter()
    dead_neurons = AverageMeter()
    alive_neurons = AverageMeter()
    batch_time = AverageMeter()
    # initial 
    kappa = 1
    factor = 1
    if train:
        model.train()
    else:
        model.eval()
    # pregenerate the array for specifications, will be used for scatter
    sa = np.zeros((num_class, num_class - 1), dtype = np.int32)
    for i in range(sa.shape[0]):
        for j in range(sa.shape[1]):
            if j < i:
                sa[i][j] = j
            else:
                sa[i][j] = j + 1
    sa = torch.LongTensor(sa)
    total = len(loader.dataset)
    batch_size = loader.batch_size
    std = torch.tensor(loader.std).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)

    batch_eps = np.linspace(start_eps, end_eps, (total // batch_size) + 1)
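    # one eps value per batch, ramping linearly from start_eps to end_eps over the epoch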
    model_range = 0.0
    if end_eps < 1e-6:
        logger.log('eps {} close to 0, using natural training'.format(end_eps))
        method = "natural"
    for i, (data, labels) in enumerate(loader): 
        start = time.time()
        eps = batch_eps[i]
        if train:   
            opt.zero_grad()
        # generate specifications
        c = torch.eye(num_class).type_as(data)[labels].unsqueeze(1) - torch.eye(num_class).type_as(data).unsqueeze(0) 
        # remove specifications to self
        I = (~(labels.data.unsqueeze(1) == torch.arange(num_class).type_as(labels.data).unsqueeze(0)))
        c = (c[I].view(data.size(0),num_class-1,num_class))
        # scatter matrix to avoid compute margin to self
        sa_labels = sa[labels]
        # storing computed lower bounds after scatter
        lb_s = torch.zeros(data.size(0), num_class)

        # FIXME: Assume data is from range 0 - 1
        if kwargs["bounded_input"]:
            assert loader.std == [1,1,1] or loader.std == [1]
            data_ub = (data + eps).clamp(max=1.0)
            data_lb = (data - eps).clamp(min=0.0)
        else:
            data_ub = data + (eps / std)
            data_lb = data - (eps / std)

        if list(model.parameters())[0].is_cuda:
            data = data.cuda()
            data_ub = data_ub.cuda()
            data_lb = data_lb.cuda()
            labels = labels.cuda()
            c = c.cuda()
            sa_labels = sa_labels.cuda()
            lb_s = lb_s.cuda()
        # convert epsilon to a tensor
        eps_tensor = data.new(1)
        eps_tensor[0] = eps

        # omit the regular cross entropy, since we use robust error
        output = model(data)
        regular_ce = CrossEntropyLoss()(output, labels)
        regular_ce_losses.update(regular_ce.cpu().detach().numpy(), data.size(0))
        errors.update(torch.sum(torch.argmax(output, dim=1)!=labels).cpu().detach().numpy()/data.size(0), data.size(0))
        # get range statistic
        model_range = output.max().detach().cpu().item() - output.min().detach().cpu().item()
        
        """
        ub, lb, _, _, _, _ = model.interval_range(data_lb, data_ub, c)
        lb = lb_s.scatter(1, sa_labels, lb)
        print('interval ub: ', ub)
        print('interval lb: ', lb)
        lb, _ = model.backward_range(data_lb, data_ub, c)
        lb = lb_s.scatter(1, sa_labels, lb)
        print('full lb: ', lb)
        input()
        """

        if verbose or method != "natural":
            if kwargs["bound_type"] == "convex-adv":
                # Wong and Kolter's bound, or equivalently Fast-Lin
                if kwargs["convex-proj"] is not None:
                    proj = kwargs["convex-proj"]
                    norm_type = "l1_median"
                else:
                    proj = None
                    norm_type = "l1"
                if loader.std == [1] or loader.std == [1, 1, 1]:
                    convex_eps = eps
                else:
                    convex_eps = eps / np.mean(loader.std)
                    # for CIFAR we are roughly / 0.2
                    # FIXME this is due to a bug in convex_adversarial, we cannot use per-channel eps
                if kwargs["bounded_input"]:
                    # FIXME the bounded projection in convex_adversarial has a bug, data range must be positive
                    data_l = 0.0
                    data_u = 1.0
                else:
                    data_l = -np.inf
                    data_u = np.inf
                f = DualNetwork(model, data, convex_eps, proj = proj, norm_type = norm_type, bounded_input = kwargs["bounded_input"], data_l = data_l, data_u = data_u)
                lb = f(c)
            elif kwargs["bound_type"] == "interval":
                ub, lb, relu_activity, unstable, dead, alive = model.interval_range(data_lb, data_ub, c)
            elif kwargs["bound_type"] == "crown-interval":
                ub, ilb, relu_activity, unstable, dead, alive = model.interval_range(data_lb, data_ub, c)
                crown_final_factor = kwargs['final-beta']
                factor = (max_eps - eps * (1.0 - crown_final_factor)) / max_eps
                if factor < 1e-5:
                    lb = ilb
                else:
                    if kwargs["runnerup_only"]:
                        # regenerate a smaller c, with just the runner-up prediction
                        # mask the ground-truth label's output, select the second-largest class
                        # print(output)
                        # torch.set_printoptions(threshold=5000)
                        masked_output = output.detach().scatter(1, labels.unsqueeze(-1), -100)
                        # print(masked_output)
                        # location of the runner up prediction
                        runner_up = masked_output.max(1)[1]
                        # print(runner_up)
                        # print(labels)
                        # get the margin from the ground-truth class to the runner-up only
                        runnerup_c = torch.eye(num_class).type_as(data)[labels]
                        # print(runnerup_c)
                        # set the runner-up location to -1
                        runnerup_c.scatter_(1, runner_up.unsqueeze(-1), -1)
                        runnerup_c = runnerup_c.unsqueeze(1).detach()
                        # print(runnerup_c)
                        # get the bound for runnerup_c
                        clb, bias = model.backward_range(data_lb, data_ub, runnerup_c)
                        clb = clb.expand(clb.size(0), num_class - 1)
                    else:
                        # get the CROWN bound using interval bounds
                        clb, bias = model.backward_range(data_lb, data_ub, c)
                        bound_bias.update(bias.sum() / data.size(0))
                    # how much better is crown-ibp than ibp?
                    diff = (clb - ilb).sum().item()
                    bound_diff.update(diff / data.size(0), data.size(0))
                    # lb = torch.max(lb, clb)
                    lb = clb * factor + ilb * (1 - factor)
            else:
                raise RuntimeError("Unknown bound_type " + kwargs["bound_type"])

            lb = lb_s.scatter(1, sa_labels, lb)
            robust_ce = CrossEntropyLoss()(-lb, labels)
            if kwargs["bound_type"] != "convex-adv":
                relu_activities.update(relu_activity.detach().cpu().item() / data.size(0), data.size(0))
                unstable_neurons.update(unstable / data.size(0), data.size(0))
                dead_neurons.update(dead / data.size(0), data.size(0))
                alive_neurons.update(alive / data.size(0), data.size(0))

        if method == "robust":
            loss = robust_ce
        elif method == "robust_activity":
            loss = robust_ce + kwargs["activity_reg"] * relu_activity
        elif method == "natural":
            loss = regular_ce
        elif method == "robust_natural":
            natural_final_factor = kwargs["final-kappa"]
            kappa = (max_eps - eps * (1.0 - natural_final_factor)) / max_eps
            loss = (1-kappa) * robust_ce + kappa * regular_ce
        else:
            raise ValueError("Unknown method " + method)

        if "l1_reg" in kwargs:
            reg = kwargs["l1_reg"]
            l1_loss = 0.0
            for name, param in model.named_parameters():
                if 'bias' not in name:
                    l1_loss = l1_loss + (reg * torch.sum(torch.abs(param)))
            loss = loss + l1_loss
            l1_losses.update(l1_loss.cpu().detach().numpy(), data.size(0))
        if train:
            loss.backward()
            opt.step()

        batch_time.update(time.time() - start)
        losses.update(loss.cpu().detach().numpy(), data.size(0))

        if verbose or method != "natural":
            robust_ce_losses.update(robust_ce.cpu().detach().numpy(), data.size(0))
            # robust_ce_losses.update(robust_ce, data.size(0))
            robust_errors.update(torch.sum((lb<0).any(dim=1)).cpu().detach().numpy() / data.size(0), data.size(0))
        if i % 50 == 0 and train:
            logger.log(  '[{:2d}:{:4d}]: eps {:4f}  '
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})  '
                    'Total Loss {loss.val:.4f} ({loss.avg:.4f})  '
                    'L1 Loss {l1_loss.val:.4f} ({l1_loss.avg:.4f})  '
                    'CE {regular_ce_loss.val:.4f} ({regular_ce_loss.avg:.4f})  '
                    'RCE {robust_ce_loss.val:.4f} ({robust_ce_loss.avg:.4f})  '
                    'Err {errors.val:.4f} ({errors.avg:.4f})  '
                    'Rob Err {robust_errors.val:.4f} ({robust_errors.avg:.4f})  '
                    'Uns {unstable.val:.1f} ({unstable.avg:.1f})  '
                    'Dead {dead.val:.1f} ({dead.avg:.1f})  '
                    'Alive {alive.val:.1f} ({alive.avg:.1f})  '
                    'Tightness {tight.val:.5f} ({tight.avg:.5f})  '
                    'Bias {bias.val:.5f} ({bias.avg:.5f})  '
                    'Diff {diff.val:.5f} ({diff.avg:.5f})  '
                    'R {model_range:.3f}  '
                    'beta {factor:.3f} ({factor:.3f})  '
                    'kappa {kappa:.3f} ({kappa:.3f})  '.format(
                    t, i, eps, batch_time=batch_time,
                    loss=losses, errors=errors, robust_errors = robust_errors, l1_loss = l1_losses,
                    regular_ce_loss = regular_ce_losses, robust_ce_loss = robust_ce_losses, 
                    unstable = unstable_neurons, dead = dead_neurons, alive = alive_neurons,
                    tight = relu_activities, bias = bound_bias, diff = bound_diff,
                    model_range = model_range, 
                    factor=factor, kappa = kappa))
    
                    
    logger.log(  '[FINAL RESULT epoch:{:2d} eps:{:.4f}]: '
        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})  '
        'Total Loss {loss.val:.4f} ({loss.avg:.4f})  '
        'L1 Loss {l1_loss.val:.4f} ({l1_loss.avg:.4f})  '
        'CE {regular_ce_loss.val:.4f} ({regular_ce_loss.avg:.4f})  '
        'RCE {robust_ce_loss.val:.4f} ({robust_ce_loss.avg:.4f})  '
        'Uns {unstable.val:.3f} ({unstable.avg:.3f})  '
        'Dead {dead.val:.1f} ({dead.avg:.1f})  '
        'Alive {alive.val:.1f} ({alive.avg:.1f})  '
        'Tight {tight.val:.5f} ({tight.avg:.5f})  '
        'Bias {bias.val:.5f} ({bias.avg:.5f})  '
        'Diff {diff.val:.5f} ({diff.avg:.5f})  '
        'Err {errors.val:.4f} ({errors.avg:.4f})  '
        'Rob Err {robust_errors.val:.4f} ({robust_errors.avg:.4f})  '
        'R {model_range:.3f}  '
        'beta {factor:.3f} ({factor:.3f})  '
        'kappa {kappa:.3f} ({kappa:.3f})  \n'.format(
        t, eps, batch_time=batch_time,
        loss=losses, errors=errors, robust_errors = robust_errors, l1_loss = l1_losses,
        regular_ce_loss = regular_ce_losses, robust_ce_loss = robust_ce_losses, 
        unstable = unstable_neurons, dead = dead_neurons, alive = alive_neurons,
        tight = relu_activities, bias = bound_bias, diff = bound_diff,
        model_range = model_range, 
        kappa = kappa, factor=factor))
    for i, l in enumerate(model):
        if isinstance(l, BoundLinear) or isinstance(l, BoundConv2d):
            norm = l.weight.data.detach().view(l.weight.size(0), -1).abs().sum(1).max().cpu()
            logger.log('layer {} norm {}'.format(i, norm))
    if method == "natural":
        return errors.avg, errors.avg
    else:
        return robust_errors.avg, errors.avg
Example #6
        c = (pred == labels).squeeze().sum()
        corr_unit[j] += c
        total[j] += batch_size
        # Create the network's adversarial examples (FGSM step)
        loss = lossy_fn(out, labels)
        loss.backward()
        perturbation = images.grad.sign()
        images_adv = images.detach() + eps*perturbation
        images_adv = tr.clamp(images_adv, 0, 1)
        out = net(images_adv)
        _, pred = tr.max(out, 1)
        c = (pred == labels).squeeze().sum()
        corr_unit_adv[j] += c
        total_adv[j] += batch_size
        # Compute the dual bound (as a minimum over all possible adv examples)
        dual_net = DualNetwork(net, images, eps)
        Labtest = tr.zeros(batch_size, 9, 10)
        for i in range(batch_size):
            label = labels[i]
            Labtest[i, :, :] = Adv_Label[label, :, :]
        test, _ = dual_net(Labtest).min(1)
        c = (test > 0).squeeze().sum()
        corr_unit_dual[j] += c
        total_dual[j] += batch_size
    print('%.2f' % eps)


for i in range(len(eps_set)):
    print('Certified epsilon %.2f | Predicted correctly  : %2d %% | Adversarial Examples: %2d %% | '
          'Proved Robust %2d %%'
          % (eps_set[i], 100 * corr_unit[i] / total[i], 100 - 100 * corr_unit_adv[i] / total_adv[i],
             100 * corr_unit_dual[i] / total_dual[i]))
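
Adv_Label is defined outside this excerpt; judging from how it is consumed above, each slice Adv_Label[l] is presumably the (9, 10) specification matrix whose rows put +1 on class l and -1 on one competing class, e.g.:

import torch as tr

Adv_Label = tr.zeros(10, 9, 10)
for label in range(10):
    for row, other in enumerate(k for k in range(10) if k != label):
        Adv_Label[label, row, label] = 1    # ground-truth class
        Adv_Label[label, row, other] = -1   # competing class
# a positive dual_net(Labtest).min(1) value then certifies all 9 margins at once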
Example #7
    def init_kw_debug(self, bounded, pre_lb_all=None, pre_ub_all=None, decision=None, choice=None):
        '''
        ALWAYS create a new DualNetwork instance.

        Returns intermediate K&W bounds.

        Input: mask and pre-computed intermediate bounds, consisting only of the
        linear layers right before ReLU layers.
        Since changing the ReLU mask only affects bounds for layers after
        the changed ReLU layer, we directly pass these values and only compute
        bounds for later layers. However, we do create a new dual instance every
        time we compute bounds.
        '''
        if pre_lb_all is None and pre_ub_all is None:        
            lower_bounds = []
            upper_bounds = []
            
            dual = DualNetwork(self.net, self.x, self.ball_eps, bounded_input=bounded)
            self.pre_relu_indices = []
            idx = -1
            for layer in dual.dual_net[0:]:
                if type(layer) is DualReLU:
                    # K&W treats these as input bounds to the ReLU, but our
                    # code reasons in terms of output bounds of a layer.
                    # We grab these bounds and enqueue them; they correspond
                    # to the output of the layer preceding the ReLU.
                    lower_bounds[-1] = layer.zl.squeeze()
                    upper_bounds[-1] = layer.zu.squeeze()
                    lower_bounds.append([])
                    upper_bounds.append([])
                    self.pre_relu_indices.append(idx)
                    

                else:
                    lower_bounds.append([])
                    upper_bounds.append([])
                idx += 1

            # Also add the bounds on the network's final output
            #lower_bounds.append(dual(torch.ones(1,1,1)).view(-1))
            #upper_bounds.append(-dual(-torch.ones(1,1,1)).view(-1))
            #self.nf = dual.nf 
            dual_info = [dual.dual_net, dual.last_layer]
            #self.dual_info = dual_info
            
            # debug
            final_lower, sub_values = self.last_layer_objective_debug(torch.ones(1,1,1), dual_info[0], dual_info[1])
            final_upper, _  = self.last_layer_objective_debug(-torch.ones(1,1,1), dual_info[0], dual_info[1])

            lower_bounds.append(final_lower.view(-1))
            upper_bounds.append(-final_upper.view(-1))

            return lower_bounds, upper_bounds, self.pre_relu_indices, dual_info, sub_values

        else:
            #assert min(pre_lb_all[0].view(-1))>=0 or max(pre_ub_all[0].view(-1))<=1, 'for non 0-1 bounded ball please use get_lower_bounds'
            pre_relu_lb = [pre_lb_all[i].clone() for i in self.pre_relu_indices]
            pre_relu_ub = [pre_ub_all[i].clone() for i in self.pre_relu_indices]
            if decision is not None:
                # one of the chosen ReLU's pre-activation bounds is forced to 0
                if choice == 0:
                    pre_relu_ub[decision[0]].view(-1)[decision[1]] = 0
                else:
                    pre_relu_lb[decision[0]].view(-1)[decision[1]] = 0
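            # the split above fixes the chosen ReLU's phase: choice == 0 makes
            # it inactive (z <= 0), any other choice makes it active (z >= 0)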
            dual = DualNetwork(self.net, self.x, self.ball_eps, bounded_input=bounded, provided_zl=pre_relu_lb, provided_zu=pre_relu_ub)
        #    #dual = DualNetwork(self.net, self.x, self.ball_eps, bounded_input=True, mask =mask)
            lower_bounds = [pre_lb_all[0]]
            upper_bounds = [pre_ub_all[0]]
            #bounds_index = 0
            #changed_bounds_index = self.pre_relu_indices[-1]
            #bounds_unchanged = True
            for layer in dual.dual_net[1:]:
                if type(layer) is DualReLU:
                    # K&W treats these as input bounds to the ReLU, but our
                    # code reasons in terms of output bounds of a layer.
                    # We grab these bounds and enqueue them; they correspond
                    # to the output of the layer preceding the ReLU.
                    lower_bounds[-1] = layer.zl.squeeze()
                    upper_bounds[-1] = layer.zu.squeeze()
                    lower_bounds.append(F.relu(lower_bounds[-1]))
                    upper_bounds.append(F.relu(upper_bounds[-1]))
                    
                elif isinstance(layer, DualReshape):
                    # dual of a Flatten layer: bounds are simply flattened
                    lower_bounds.append(lower_bounds[-1].view(-1))
                    upper_bounds.append(upper_bounds[-1].view(-1))
                else:
                    lower_bounds.append([])
                    upper_bounds.append([])
                #bounds_index += 1

            # Also add the bounds on the network's final output
            #lower_bounds.append(max(dual(torch.ones(1,1,1)).view(-1), pre_lb_all[-1]))
            #upper_bounds.append(min(-dual(-torch.ones(1,1,1)).view(-1), pre_ub_all[-1]))
            dual_info = [dual.dual_net, dual.last_layer]


            # debug
            final_lower, sub_values = self.last_layer_objective_debug(torch.ones(1,1,1), dual_info[0], dual_info[1])
            final_upper, _  = self.last_layer_objective_debug(-torch.ones(1,1,1), dual_info[0], dual_info[1])
            lower_bounds.append(max(final_lower.view(-1), pre_lb_all[-1]))
            upper_bounds.append(min(-final_upper.view(-1), pre_ub_all[-1]))


            return lower_bounds, upper_bounds, dual_info, sub_values
Example #8
def attack(model, model_name, loader, start_eps, end_eps, max_eps, norm,
           logger, verbose, method, **kwargs):
    torch.manual_seed(6247423)
    num_class = 10
    losses = AverageMeter()
    l1_losses = AverageMeter()
    errors = AverageMeter()
    robust_errors = AverageMeter()
    regular_ce_losses = AverageMeter()
    robust_ce_losses = AverageMeter()
    relu_activities = AverageMeter()
    bound_bias = AverageMeter()
    bound_diff = AverageMeter()
    unstable_neurons = AverageMeter()
    dead_neurons = AverageMeter()
    alive_neurons = AverageMeter()
    batch_time = AverageMeter()
    # initial
    model.eval()
    duplicate_rgb = True
    # pregenerate the array for specifications, will be used for scatter
    sa = np.zeros((num_class, num_class - 1), dtype=np.int32)
    for i in range(sa.shape[0]):
        for j in range(sa.shape[1]):
            if j < i:
                sa[i][j] = j
            else:
                sa[i][j] = j + 1
    sa = torch.LongTensor(sa)
    total = len(loader.dataset)
    batch_size = loader.batch_size
    print(batch_size)
    std = torch.tensor(loader.std).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
    total_steps = 300

    batch_eps = np.linspace(start_eps, end_eps, (total // batch_size) + 1)
    if end_eps < 1e-6:
        logger.log('eps {} close to 0, using natural training'.format(end_eps))
        method = "natural"

    exp_name = 'outputs/[{}:{}]'.format(get_exp_name(), model_name)
    # real_i = 0
    for i, (init_data, init_labels) in enumerate(loader):
        # labels = torch.zeros_like(init_labels)
        init_data = init_data.cuda()
        tv_eps, tv_lam, reg_lam = get_args(duplicate_rgb=duplicate_rgb)
        attacker = Shadow(init_data, init_labels, tv_lam, reg_lam, tv_eps)
        success = np.zeros(len(init_data))
        # saved_advs = torch.zeros_like(init_data).cuda()
        for t_i in range(9):

            attacker.iterate_labels_not_equal_to(init_labels)
            attacker.renew_t()
            labels = attacker.labels

            for rep in range(total_steps):
                ct = attacker.get_ct()
                data = init_data + ct
                data.data = get_normal(get_unit01(data))
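                # get_unit01 / get_normal are helpers defined elsewhere in this
                # script; presumably they clamp the perturbed image back into
                # [0, 1] and re-apply the dataset normalization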

                # ========================== The rest of the code is taken from the CROWN-IBP repo
                start = time.time()
                eps = batch_eps[i]
                c = torch.eye(num_class).type_as(data)[labels].unsqueeze(
                    1) - torch.eye(num_class).type_as(data).unsqueeze(0)
                # remove specifications to self
                eye = (~(labels.data.unsqueeze(1)
                         == torch.arange(num_class).type_as(
                             labels.data).unsqueeze(0)))
                c = (c[eye].view(data.size(0), num_class - 1, num_class))
                # scatter matrix to avoid compute margin to self
                sa_labels = sa[labels]
                # storing computed lower bounds after scatter
                lb_s = torch.zeros(data.size(0), num_class)

                # FIXME: Assume data is from range 0 - 1
                if kwargs["bounded_input"]:
                    assert loader.std == [1, 1, 1] or loader.std == [1]
                    # bounded input only makes sense for Linf perturbation
                    assert norm == np.inf
                    data_ub = (data + eps).clamp(max=1.0)
                    data_lb = (data - eps).clamp(min=0.0)
                else:
                    if norm == np.inf:
                        data_ub = data.cpu() + (eps / std)
                        data_lb = data.cpu() - (eps / std)
                    else:
                        data_ub = data_lb = data

                if list(model.parameters())[0].is_cuda:
                    data = data.cuda()
                    data_ub = data_ub.cuda()
                    data_lb = data_lb.cuda()
                    labels = labels.cuda()
                    c = c.cuda()
                    sa_labels = sa_labels.cuda()
                    lb_s = lb_s.cuda()
                # convert epsilon to a tensor
                eps_tensor = data.new(1)
                eps_tensor[0] = eps

                # omit the regular cross entropy, since we use robust error
                output = model(data)
                regular_ce = torch.nn.CrossEntropyLoss()(output, labels)
                regular_ce_losses.update(regular_ce.cpu().detach().numpy(),
                                         data.size(0))
                errors.update(
                    torch.sum(torch.argmax(output, dim=1) != labels).cpu().
                    detach().numpy() / data.size(0), data.size(0))
                # get range statistic

                if verbose or method != "natural":
                    if kwargs["bound_type"] == "convex-adv":
                        # Wong and Kolter's bound, or equivalently Fast-Lin
                        if kwargs["convex-proj"] is not None:
                            proj = kwargs["convex-proj"]
                            if norm == np.inf:
                                norm_type = "l1_median"
                            elif norm == 2:
                                norm_type = "l2_normal"
                            else:
                                raise (ValueError(
                                    "Unsupported norm {} for convex-adv".
                                    format(norm)))
                        else:
                            proj = None
                            if norm == np.inf:
                                norm_type = "l1"
                            elif norm == 2:
                                norm_type = "l2"
                            else:
                                raise (ValueError(
                                    "Unsupported norm {} for convex-adv".
                                    format(norm)))
                        if loader.std == [1] or loader.std == [1, 1, 1]:
                            convex_eps = eps
                        else:
                            convex_eps = eps / np.mean(loader.std)
                            # for CIFAR we are roughly / 0.2
                            # FIXME this is due to a bug in convex_adversarial, we cannot use per-channel eps
                        if norm == np.inf:
                            # bounded input is only for Linf
                            if kwargs["bounded_input"]:
                                # FIXME the bounded projection in convex_adversarial has a bug, data range must be positive
                                data_l = 0.0
                                data_u = 1.0
                            else:
                                data_l = -np.inf
                                data_u = np.inf
                        else:
                            data_l = data_u = None
                        f = DualNetwork(model,
                                        data,
                                        convex_eps,
                                        proj=proj,
                                        norm_type=norm_type,
                                        bounded_input=kwargs["bounded_input"],
                                        data_l=data_l,
                                        data_u=data_u)
                        lb = f(c)
                    elif kwargs["bound_type"] == "interval":
                        ub, lb, relu_activity, unstable, dead, alive = model.interval_range(
                            norm=norm, x_U=data_ub, x_L=data_lb, eps=eps, C=c)
                    elif kwargs["bound_type"] == "crown-interval":
                        ub, ilb, relu_activity, unstable, dead, alive = model.interval_range(
                            norm=norm, x_U=data_ub, x_L=data_lb, eps=eps, C=c)
                        crown_final_factor = kwargs['final-beta']
                        factor = (max_eps - eps *
                                  (1.0 - crown_final_factor)) / max_eps
                        if factor < 1e-5:
                            lb = ilb
                        else:
                            if kwargs["runnerup_only"]:
                                masked_output = output.detach().scatter(
                                    1, labels.unsqueeze(-1), -100)
                                runner_up = masked_output.max(1)[1]
                                runnerup_c = torch.eye(num_class).type_as(
                                    data)[labels]
                                runnerup_c.scatter_(1, runner_up.unsqueeze(-1),
                                                    -1)
                                runnerup_c = runnerup_c.unsqueeze(1).detach()
                                clb, bias = model.backward_range(norm=norm,
                                                                 x_U=data_ub,
                                                                 x_L=data_lb,
                                                                 eps=eps,
                                                                 C=runnerup_c)
                                clb = clb.expand(clb.size(0), num_class - 1)
                            else:
                                clb, bias = model.backward_range(norm=norm,
                                                                 x_U=data_ub,
                                                                 x_L=data_lb,
                                                                 eps=eps,
                                                                 C=c)
                                bound_bias.update(bias.sum() / data.size(0))
                            diff = (clb - ilb).sum().item()
                            bound_diff.update(diff / data.size(0),
                                              data.size(0))
                            lb = clb * factor + ilb * (1 - factor)
                    else:
                        raise RuntimeError("Unknown bound_type " +
                                           kwargs["bound_type"])

                    lb = lb_s.scatter(1, sa_labels, lb)
                    robust_ce = torch.nn.CrossEntropyLoss()(-lb, labels)
                    if kwargs["bound_type"] != "convex-adv":
                        relu_activities.update(
                            relu_activity.detach().cpu().item() / data.size(0),
                            data.size(0))
                        unstable_neurons.update(unstable / data.size(0),
                                                data.size(0))
                        dead_neurons.update(dead / data.size(0), data.size(0))
                        alive_neurons.update(alive / data.size(0),
                                             data.size(0))

                if method == "robust":
                    loss = robust_ce
                elif method == "robust_activity":
                    loss = robust_ce + kwargs["activity_reg"] * relu_activity
                elif method == "natural":
                    loss = regular_ce
                elif method == "robust_natural":
                    natural_final_factor = kwargs["final-kappa"]
                    kappa = (max_eps - eps *
                             (1.0 - natural_final_factor)) / max_eps
                    loss = (1 - kappa) * robust_ce + kappa * regular_ce
                else:
                    raise ValueError("Unknown method " + method)

                if "l1_reg" in kwargs:
                    reg = kwargs["l1_reg"]
                    l1_loss = 0.0
                    for name, param in model.named_parameters():
                        if 'bias' not in name:
                            l1_loss = l1_loss + (reg *
                                                 torch.sum(torch.abs(param)))
                    loss = loss + l1_loss
                    l1_losses.update(l1_loss.cpu().detach().numpy(),
                                     data.size(0))

                # =========================================== The rest is from the breaking paper, not from the CROWN-IBP repo
                c_loss = -loss
                attacker.back_prop(c_loss, rep)

                batch_time.update(time.time() - start)
                losses.update(loss.cpu().detach().numpy(), data.size(0))

                if (verbose or method != "natural") and rep == total_steps - 1:
                    robust_ce_losses.update(robust_ce.cpu().detach().numpy(),
                                            data.size(0))
                    certified = (lb < 0).any(dim=1).cpu().numpy()
                    success = success + np.ones(len(success)) - certified
                    # saved_advs[certified == False] = data[certified == False].data
            torch.cuda.empty_cache()
            to_print = '{}\t{}\t{}'.format((success > 0).sum(), t_i,
                                           attacker.log)
            print(to_print, flush=True)
            attacker.labels = attacker.labels + 1
        # save_images(get_unit01(torch.cat((saved_advs, init_data), dim=-1)), success.astype(np.bool), real_i, exp_name)
        # real_i += len(saved_advs)
        robust_errors.update((success > 0).sum() / len(success), len(success))
        print('====', robust_errors.avg, '===', flush=True)
    for i, l in enumerate(model):
        if isinstance(l, BoundLinear) or isinstance(l, BoundConv2d):
            norm = l.weight.data.detach().view(l.weight.size(0),
                                               -1).abs().sum(1).max().cpu()
            logger.log('layer {} norm {}'.format(i, norm))
    if method == "natural":
        return errors.avg, errors.avg
    else:
        return robust_errors.avg, errors.avg
Example #9
ZZ = (y0[:, 0] - y0[:, 1]).view(100, 100).numpy()

fig, ax = plt.subplots(figsize=(8, 8))
ax.contourf(XX, YY, -ZZ, cmap="Spectral", levels=np.linspace(-1000, 1000, 3))
ax.scatter(X.numpy()[:, 0],
           X.numpy()[:, 1],
           c=y.numpy(),
           cmap="Spectral",
           s=70)
ax.axis("equal")
ax.axis([0, 1, 0, 1])
plt.axis('off')
plt.savefig('./images/2Dpoints' + str(m) + '.png')

# testing the net
dual_net = DualNetwork(net, X, r)
Y = tr.zeros(m, 1, 2)
for i in range(m):
    aux_i = y[i].item()
    Y[i, 0, aux_i] = 1
    Y[i, 0, 1 - aux_i] = -1
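# each row Y[i, 0] encodes the margin f_{y_i}(x) - f_{1 - y_i}(x); a positive
# dual bound below therefore certifies point i within its radius-r box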
test = dual_net(Y)
for i in range(m):
    ax.annotate(str(round(test[i, 0].item(), 2)), X[i, :])
plt.savefig('./images/2Dpoints' + str(m) + 'testing.png')
for i in range(m):
    ax.add_patch(
        patches.Rectangle((X[i, 0] - r, X[i, 1] - r), 2 * r, 2 * r,
                          fill=False))
plt.savefig('./images/2Dpoints' + str(m) + 'tested.png')