Example #1
def step(model, ptb, batch, eps=args.eps, train=False):
    ptb.set_eps(eps)
    X, y = model.get_input(batch)
    X = BoundedTensor(X, ptb)
    logits = model.core(X)

    num_class = args.num_classes
    c = torch.eye(num_class).type_as(X)[y].unsqueeze(1) - \
        torch.eye(num_class).type_as(X).unsqueeze(0)
    I = (~(y.data.unsqueeze(1) == torch.arange(num_class).type_as(
        y.data).unsqueeze(0)))
    c = (c[I].view(X.size(0), num_class - 1, num_class))

    lb, ub = model.core.compute_bounds(IBP=True,
                                       C=c,
                                       method='backward',
                                       bound_upper=False)

    lb_padded = torch.cat((torch.zeros(
        size=(lb.size(0), 1), dtype=lb.dtype, device=lb.device), lb),
                          dim=1)
    fake_labels = torch.zeros(size=(lb.size(0), ),
                              dtype=torch.int64,
                              device=lb.device)
    acc = (torch.argmax(logits, dim=-1) == y).float().mean()
    acc_robust = 1 - torch.mean((lb < 0).any(dim=1).float())
    loss = nn.CrossEntropyLoss()(-lb_padded, fake_labels)

    if train:
        loss.backward()

    return acc.detach(), acc_robust.detach(), loss.detach()
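
The `c` tensor above encodes one linear specification per wrong class: each row computes logit(ground truth) minus logit(other class), so a positive lower bound on these margins certifies the prediction. A minimal standalone sketch of the same construction with toy sizes (illustrative values, independent of the model above):

import torch

num_class = 3
y = torch.tensor([0, 2])  # toy labels for a batch of two

# Row y of the identity minus the full identity yields all pairwise margins.
c = torch.eye(num_class)[y].unsqueeze(1) - torch.eye(num_class).unsqueeze(0)
# Mask out the trivial "true minus true" specification for each example.
I = ~(y.unsqueeze(1) == torch.arange(num_class).unsqueeze(0))
c = c[I].view(y.size(0), num_class - 1, num_class)

print(c.shape)  # torch.Size([2, 2, 3])
print(c[0])     # [[1., -1., 0.], [1., 0., -1.]]: class 0 vs. classes 1 and 2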
Example #2
 def actor_bound(self, phi_lb, phi_ub, beta=1.0, eps=None, norm=np.inf, upper=True, lower=True, phi = None, center = None):
     if self.use_loss_fusion: # Use loss fusion (not typically enabled)
         assert center is not None
         ptb = PerturbationLpNorm(norm=norm, eps=eps, x_L=phi_lb, x_U=phi_ub)
         x = BoundedTensor(phi, ptb)
         val = self.fc_action(x, center.detach())
         ilb, iub = self.fc_action.compute_bounds(IBP=True, method=None)
         if beta > 1e-10:
             clb, cub = self.fc_action.compute_bounds(IBP=False, method="backward", bound_lower=False, bound_upper=True)
             ub = cub * beta + iub * (1.0 - beta)
             return ub
         else:
             return iub
     else:
         assert center is None
         # Invoke auto_LiRPA for convex relaxation.
         ptb = PerturbationLpNorm(norm=norm, eps=eps, x_L=phi_lb, x_U=phi_ub)
         x = BoundedTensor(phi, ptb)
         if self.use_full_backward:
             clb, cub = self.fc_action.compute_bounds(x=(x,), IBP=False, method="backward")
             return cub, clb
         else:
             ilb, iub = self.fc_action.compute_bounds(x=(x,), IBP=True, method=None)
             if beta > 1e-10:
                 clb, cub = self.fc_action.compute_bounds(IBP=False, method="backward")
                 ub = cub * beta + iub * (1.0 - beta)
                 lb = clb * beta + ilb * (1.0 - beta)
                 return ub, lb
             else:
                 return iub, ilb
Example #3
 def get_kl_bound(model, x, means, eps, beta=None, stdev=None, use_full_backward=False):
     # Set each layer's perturbation eps and log_stdev's perturbation.
     x = BoundedTensor(x, ptb=PerturbationLpNorm(norm=np.inf, eps=eps)).requires_grad_(False)
     if forward_one:
         inputs = (x, )
     else:
         inputs = (x, means)
     if use_full_backward:
         # Full backward method, tightest bound.
         ilb, iub = model.compute_bounds(inputs, IBP=False, C=None, method="backward", bound_lower=True, bound_upper=True)
         # Fake beta, avoid backward below.
         beta = 1.0
     else:
         # IBP Pass.
         ilb, iub = model.compute_bounds(inputs, IBP=True, C=None, method=None, bound_lower=True, bound_upper=True)
     if beta is None or (1 - beta) > 1e-20:
         # CROWN Pass.
         clb, cub = model.compute_bounds(x=None, IBP=False, C=None, method='backward', bound_lower=True, bound_upper=True)
     if beta is None:
         # Bound final output neuron.
         ikl = intermediate_to_kl(ilb, iub, means, stdev=stdev)
         ckl = intermediate_to_kl(clb, cub, means, stdev=stdev)
         return ikl, ckl
     else:
         # Beta schedule is from 0 to 1.
         if 1 - beta < 1e-20:
             lb = ilb
             ub = iub
         else:
             lb = beta * ilb + (1 - beta) * clb
             ub = beta * iub + (1 - beta) * cub
         kl = intermediate_to_kl(lb, ub, means, stdev=stdev)
         return kl
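
Examples #2 and #3 both blend the cheap interval (IBP) bounds with the tighter backward-mode (CROWN) bounds through a scalar beta. A minimal sketch of that convex combination as a standalone helper, following Example #2's convention where beta weights the CROWN term (note that get_kl_bound above uses the opposite convention and weights the IBP term):

def blend_bounds(ilb, iub, clb, cub, beta):
    # beta = 1.0 keeps the pure CROWN bounds; beta = 0.0 keeps pure IBP.
    lb = beta * clb + (1.0 - beta) * ilb
    ub = beta * cub + (1.0 - beta) * iub
    return lb, ub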
Example #4
    def test(self):
        model_oris = [
            models.model_resnet(width=1, mult=2),
            models.ResNet18(in_planes=2)
        ]
        self.result = []

        for model_ori in model_oris:
            conv_mode = 'patches'  # conv_mode can be set as 'matrix' or 'patches'

            normalize = torchvision.transforms.Normalize(
                mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])
            test_data = torchvision.datasets.CIFAR10(
                "./data",
                train=False,
                download=True,
                transform=torchvision.transforms.Compose(
                    [torchvision.transforms.ToTensor(), normalize]))
            N = 1
            n_classes = 10

            image = torch.Tensor(test_data.data[:N]).reshape(N, 3, 32, 32)
            image = image.to(torch.float32) / 255.0

            model = BoundedModule(model_ori,
                                  image,
                                  bound_opts={"conv_mode": conv_mode})

            ptb = PerturbationLpNorm(norm=np.inf, eps=0.03)
            image = BoundedTensor(image, ptb)
            pred = model(image)
            lb, ub = model.compute_bounds(IBP=False, C=None, method='backward')
            self.result += [lb, ub]

        self.check()
Example #5
def test():
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)

    model = cnn_MNIST()
    checkpoint = torch.load("../examples/vision/pretrain/mnist_cnn_small.pth",
                            map_location="cpu")
    model.load_state_dict(checkpoint)

    N = 2
    n_classes = 10
    image = torch.randn(N, 1, 28, 28)
    image = image.to(torch.float32) / 255.0

    model = BoundedModule(model, torch.empty_like(image), device="cpu")
    eps = 0.3
    norm = np.inf
    ptb = PerturbationLpNorm(norm=norm, eps=eps)
    image = BoundedTensor(image, ptb)
    pred = model(image)
    lb, ub = model.compute_bounds()

    assert lb.shape == ub.shape == torch.Size((2, 10))

    path = 'data/constant_test_data'
    if args.gen_ref:
        torch.save((lb, ub), path)
    else:
        lb_ref, ub_ref = torch.load(path)
        print(lb)
        print(lb_ref)
        assert torch.allclose(lb, lb_ref)
        assert torch.allclose(ub, ub_ref)
Example #6
    def compute_and_compare_bounds(self, eps, norm, IBP, method):
        input_data = torch.randn((N, 256))
        model = BoundedModule(self.original_model,
                              torch.empty_like(input_data))
        ptb = PerturbationLpNorm(norm=norm, eps=eps)
        ptb_data = BoundedTensor(input_data, ptb)
        pred = model(ptb_data)
        label = torch.argmax(pred, dim=1).cpu().detach().numpy()
        # Compute bounds.
        lb, ub = model.compute_bounds(IBP=IBP, method=method)
        # Compute dual norm.
        if norm == 1:
            q = np.inf
        elif norm == np.inf:
            q = 1.0
        else:
            q = 1.0 / (1.0 - (1.0 / norm))
        # Compute reference manually.
        weight, bias = list(model.parameters())
        norm = weight.norm(p=q, dim=1)
        expected_pred = input_data.matmul(weight.t()) + bias
        expected_ub = eps * norm + expected_pred
        expected_lb = -eps * norm + expected_pred

        # Check equivalence.
        self.assertEqual(expected_pred, pred)
        self.assertEqual(expected_ub, ub)
        self.assertEqual(expected_lb, lb)
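
The manual reference above uses the closed form for a single linear layer under an Lp-ball input perturbation: each output w·x + b can shift by at most eps * ||w||_q, where q is the dual norm of p. A small self-contained numeric sketch of the Linf case (q = 1), independent of the test fixture:

import torch

torch.manual_seed(0)
W, b = torch.randn(3, 4), torch.randn(3)
x = torch.randn(1, 4)
eps = 0.1

pred = x.matmul(W.t()) + b
slack = eps * W.norm(p=1, dim=1)  # dual of Linf is L1, taken per output row
lb, ub = pred - slack, pred + slack
# Every x' with max|x' - x| <= eps satisfies lb <= x'.matmul(W.t()) + b <= ub.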
Example #7
def get_logits_lower_bound(model, state, state_ub, state_lb, eps, C, beta):
    ptb = PerturbationLpNorm(norm=np.inf, eps=eps, x_L=state_lb, x_U=state_ub)
    bnd_state = BoundedTensor(state, ptb)
    pred = model.features(bnd_state, method_opt="forward")
    logits_ilb, _ = model.features.compute_bounds(C=C, IBP=True, method=None)
    if beta < 1e-5:
        logits_lb = logits_ilb
    else:
        logits_clb, _ = model.features.compute_bounds(IBP=False, C=C, method="backward", bound_upper=False)
        logits_lb = beta * logits_clb + (1-beta) * logits_ilb
    return logits_lb
Example #8
def bab(model_ori,
        data,
        target,
        norm,
        eps,
        args,
        data_max=None,
        data_min=None):

    if norm == np.inf:
        if data_max is None:
            # data_max = torch.reshape((1. - loader.mean) / loader.std, (1, -1, 1, 1))
            # data_min = torch.reshape((0. - loader.mean) / loader.std, (1, -1, 1, 1))
            data_ub = data + eps  # torch.min(data + eps, data_max)  # eps is already normalized
            data_lb = data - eps  # torch.max(data - eps, data_min)
        else:
            data_ub = torch.min(data + eps, data_max)
            data_lb = torch.max(data - eps, data_min)
    else:
        data_ub = data_lb = data

    pred = torch.argmax(model_ori(data), dim=1)
    # LiRPA wrapper
    model = LiRPAConvNet(model_ori,
                         pred,
                         target,
                         solve_slope=args.solve_slope,
                         device=args.device,
                         in_size=data.shape)

    if list(model.net.parameters())[0].is_cuda:
        data = data.cuda()
        data_lb, data_ub = data_lb.cuda(), data_ub.cuda()

    ptb = PerturbationLpNorm(norm=norm, eps=eps, x_L=data_lb, x_U=data_ub)
    x = BoundedTensor(data, ptb).to(data_lb.device)
    domain = torch.stack([data_lb.squeeze(0), data_ub.squeeze(0)], dim=-1)

    # with torch.autograd.set_detect_anomaly(True):
    print('beta splits:', not args.no_beta)
    min_lb, min_ub, ub_point, nb_states = relu_bab_parallel(
        model,
        domain,
        x,
        batch=args.batch_size,
        no_LP=True,
        decision_thresh=args.decision_thresh,
        beta=not args.no_beta,
        max_subproblems_list=args.max_subproblems_list,
        timeout=args.timeout)

    if isinstance(min_lb, torch.Tensor):
        min_lb = min_lb.item()
    return min_lb, nb_states
Example #9
 def get_kl_bound(model, x, means, eps):
     # Set each layer's perturbation eps and log_stdev's perturbation.
     x = BoundedTensor(x, ptb=PerturbationLpNorm(norm=np.inf, eps=eps))
     if forward_one:
         inputs = (x, )
     else:
         inputs = (x, means)
     # IBP Pass.
     _, iub = model.compute_bounds(inputs, IBP=True, C=None, method=None, bound_lower=False, bound_upper=True)
     # CROWN Pass.
     _, cub = model.compute_bounds(x=None, IBP=False, C=None, method='backward', bound_lower=False, bound_upper=True)
     # iub = cub
     return iub, cub
Example #10
def step(model, ptb, batch, eps=args.eps, train=False):
    # We increase the perturbation each batch.
    ptb.set_eps(eps)
    # We create a BoundedTensor object with current batch of data.
    X, y = model.get_input(batch)
    X = BoundedTensor(X, ptb)
    logits = model.core(X)

    # Form the linear specifications, i.e., the margins between the ground-truth class and the other classes.
    num_class = args.num_classes
    c = torch.eye(num_class).type_as(X)[y].unsqueeze(1) - \
        torch.eye(num_class).type_as(X).unsqueeze(0)
    I = (~(y.data.unsqueeze(1) == torch.arange(num_class).type_as(
        y.data).unsqueeze(0)))
    c = (c[I].view(X.size(0), num_class - 1, num_class))

    # Compute CROWN-IBP (IBP+backward) bounds for training. We only need the lower bound.
    # Here we can omit the x=(X,) argument because we have just used X for forward propagation.
    lb, ub = model.core.compute_bounds(C=c,
                                       method='CROWN-IBP',
                                       bound_upper=False)

    # Compute robust cross entropy loss.
    lb_padded = torch.cat((torch.zeros(
        size=(lb.size(0), 1), dtype=lb.dtype, device=lb.device), lb),
                          dim=1)
    fake_labels = torch.zeros(size=(lb.size(0), ),
                              dtype=torch.int64,
                              device=lb.device)
    loss = nn.CrossEntropyLoss()(-lb_padded, fake_labels)

    # Report accuracy and robust accuracy.
    acc = (torch.argmax(logits, dim=-1) == y).float().mean()
    acc_robust = 1 - torch.mean((lb < 0).any(dim=1).float())

    if train:
        loss.backward()

    return acc.detach(), acc_robust.detach(), loss.detach()
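
The padded-lower-bound trick above appears throughout these examples: prepending a zero margin for the ground-truth class and feeding -lb_padded with target 0 into CrossEntropyLoss evaluates to log(1 + sum_j exp(-lb_j)), a smooth surrogate for the worst-case classification loss. A minimal check of that identity with toy values:

import torch
import torch.nn as nn

lb = torch.tensor([[0.5, -0.2, 1.0]])  # toy margin lower bounds
lb_padded = torch.cat((torch.zeros(1, 1), lb), dim=1)
loss = nn.CrossEntropyLoss()(-lb_padded, torch.zeros(1, dtype=torch.long))
manual = torch.log(1 + torch.exp(-lb).sum(dim=1)).mean()
assert torch.allclose(loss, manual)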
Example #11
 def critic_bound(self, phi_lb, phi_ub, a_lb, a_ub, beta=1.0, eps=None, phi=None, action=None, norm=np.inf, upper=True, lower=True):
     x_L = torch.cat([phi_lb, a_lb], dim=1)
     x_U = torch.cat([phi_ub, a_ub], dim=1)
     ptb = PerturbationLpNorm(norm=norm, eps=eps, x_L=x_L, x_U=x_U)
     x = BoundedTensor(torch.cat([phi, action], dim=1), ptb)
     ilb, iub = self.fc_critic.compute_bounds(x=(x,), IBP=True, method=None)
     if beta > 1e-10:
         clb, cub = self.fc_critic.compute_bounds(IBP=False, method="backward")
         ub = cub * beta + iub * (1.0 - beta)
         lb = clb * beta + ilb * (1.0 - beta)
         return ub, lb
     else:
         return iub, ilb
Example #12
def test():
    net = ResNet18()
    N = 2
    n_classes = 10
    x = torch.randn(N, 3, 32, 32)
    y = net(x)

    device = 'cpu'
    if device == 'cuda':
        x = x.cuda()
        y = y.cuda()

    model = BoundedModule(net,
                          torch.empty_like(x),
                          bound_opts={"conv_mode": "patches"},
                          device=device)
    print("Model structure: \n", str(net))
    eps = 0.3
    norm = np.inf
    ptb = PerturbationLpNorm(norm=norm, eps=eps)
    image = BoundedTensor(x, ptb)
    pred = model(image)
    lb, ub = model.compute_bounds()

    model = BoundedModule(net,
                          torch.empty_like(x),
                          bound_opts={"conv_mode": "matrix"},
                          device=device)
    eps = 0.3
    norm = np.inf
    ptb = PerturbationLpNorm(norm=norm, eps=eps)
    image = BoundedTensor(x, ptb)
    pred = model(image)
    lb_ref, ub_ref = model.compute_bounds()

    # assert lb.shape == ub.shape == torch.Size((N, n_classes))
    print((lb - lb_ref).sum(), (ub - ub_ref).sum())
    assert torch.allclose(lb, lb_ref)
    assert torch.allclose(ub, ub_ref)
Example #13
def compute_perturbations(model, x, means, perturbations):
    use_ibp = True
    method = 'backward'
    x = BoundedTensor(x, ptb=PerturbationLpNorm(norm=np.inf, eps=0))
    inputs = (x, means)
    for p in perturbations:
        x.ptb.eps = p
        lb, ub = model.compute_bounds(inputs, IBP=use_ibp, C=None, method=method, bound_lower=True, bound_upper=True)
        lb = lb.detach().cpu().numpy().squeeze()
        ub = ub.detach().cpu().numpy().squeeze()
        print("eps={:.4f}, lb={}, ub={}".format(p, lb, ub))
    x.ptb.eps = 0.0
    lb, ub = model.compute_bounds(inputs, IBP=use_ibp, C=None, method=method, bound_lower=True, bound_upper=True)
    lb = lb.detach().cpu().numpy().squeeze()
    ub = ub.detach().cpu().numpy().squeeze()
    print("eps=0.0000, lb={}, ub={}".format(lb, ub))
Example #14
def test():
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)

    models = [2, 3]
    paddings = [1, 2]
    strides = [1, 3]

    N = 2
    n_classes = 10
    image = torch.randn(N, 1, 28, 28)
    image = image.to(torch.float32) / 255.0

    for layer_num in models:
        for padding in paddings:
            for stride in strides:
                # print(layer_num, padding, stride)
                try:
                    model_ori = cnn_model(layer_num, padding, stride)
                except Exception:
                    # Skip layer/padding/stride combinations that cannot build a valid model.
                    continue

                model = BoundedModule(model_ori,
                                      torch.empty_like(image),
                                      device="cpu",
                                      bound_opts={"conv_mode": "patches"})
                eps = 0.3
                norm = np.inf
                ptb = PerturbationLpNorm(norm=norm, eps=eps)
                image = BoundedTensor(image, ptb)
                pred = model(image)
                lb, ub = model.compute_bounds()

                model = BoundedModule(model_ori,
                                      torch.empty_like(image),
                                      device="cpu",
                                      bound_opts={"conv_mode": "matrix"})
                pred = model(image)
                lb_ref, ub_ref = model.compute_bounds()

                assert lb.shape == ub.shape == torch.Size((N, n_classes))
                assert torch.allclose(lb, lb_ref)
                assert torch.allclose(ub, ub_ref)
Example #15
 def compute_and_compare_bounds(self, eps, norm, IBP, method):
     input_data = torch.randn((N, 1, input_dim, input_dim))
     model = BoundedModule(self.original_model,
                           torch.empty_like(input_data))
     ptb = PerturbationLpNorm(norm=norm, eps=eps)
     ptb_data = BoundedTensor(input_data, ptb)
     pred = model(ptb_data)
     label = torch.argmax(pred, dim=1).cpu().detach().numpy()
     # Compute bounds.
     lb, ub = model.compute_bounds(IBP=IBP, method=method)
     # Compute reference.
     conv_weight, conv_bias = list(model.parameters())
     conv_bias = conv_bias.view(1, out_channel, 1, 1)
     matrix_eye = torch.eye(input_dim * input_dim).view(
         input_dim * input_dim, 1, input_dim, input_dim)
     # Obtain equivalent weight and bias for convolution.
     weight = self.original_model.conv(
         matrix_eye
     ) - conv_bias  # Output is (batch, channel, height, width).
     weight = weight.view(
         input_dim * input_dim,
         -1)  # Dimension is (flattened_input, flattened_output).
     bias = conv_bias.repeat(1, 1, input_dim // 2, input_dim // 2).view(-1)
     flattened_data = input_data.view(N, -1)
     # Compute dual norm.
     if norm == 1:
         q = np.inf
     elif norm == np.inf:
         q = 1.0
     else:
         q = 1.0 / (1.0 - (1.0 / norm))
     # Manually compute bounds.
     norm = weight.t().norm(p=q, dim=1)
     expected_pred = flattened_data.matmul(weight) + bias
     expected_ub = eps * norm + expected_pred
     expected_lb = -eps * norm + expected_pred
     # Check equivalence.
     if method == 'backward' or method == 'forward':
         self.assertEqual(expected_pred, pred)
         self.assertEqual(expected_ub, ub)
         self.assertEqual(expected_lb, lb)
Example #16
    def get_output_range(self, input_range, verbose=False):
        from auto_LiRPA import PerturbationLpNorm, BoundedTensor

        center = (input_range[..., 1] + input_range[..., 0]) / 2.0
        radius = ((input_range[..., 1] - input_range[..., 0]) / 2.0).astype(
            np.float32)

        # Define perturbation
        ptb = PerturbationLpNorm(norm=np.inf, eps=radius)
        # Make the input a BoundedTensor with perturbation
        my_input = BoundedTensor(torch.Tensor([center]), ptb)
        # Forward propagation using BoundedTensor
        prediction = self.network(my_input)
        # Compute LiRPA bounds
        lb, ub = self.network.compute_bounds()

        num_outputs = lb.shape[-1]
        output_range = np.empty((num_outputs, 2))
        output_range[:, 0] = lb.data.numpy().squeeze()
        output_range[:, 1] = ub.data.numpy().squeeze()

        return output_range, {}
Example #17
    def test(self):
        model = cnn_MNIST()
        checkpoint = torch.load(
            "../examples/vision/pretrain/mnist_cnn_small.pth",
            map_location="cpu")
        model.load_state_dict(checkpoint)

        N = 2
        n_classes = 10
        image = torch.randn(N, 1, 28, 28)
        image = image.to(torch.float32) / 255.0

        model = BoundedModule(model, torch.empty_like(image), device="cpu")
        eps = 0.3
        norm = np.inf
        ptb = PerturbationLpNorm(norm=norm, eps=eps)
        image = BoundedTensor(image, ptb)
        pred = model(image)
        lb, ub = model.compute_bounds()

        assert lb.shape == ub.shape == torch.Size((2, 10))

        self.result = (lb, ub)
        self.check()
Example #18
def step(model, ptb, batch, eps=1.0, train=False):
    model_bound = model.model_from_embeddings
    if train:
        model.train()
        model_bound.train()
        grad = torch.enable_grad()
        if args.loss_fusion:
            model_loss.train()
    else:
        model.eval()
        model_bound.eval()
        grad = torch.no_grad()
    if args.auto_test:
        grad = torch.enable_grad()

    with grad:
        ptb.set_eps(eps)
        ptb.set_train(train)
        embeddings_unbounded, mask, tokens, labels = model.get_input(batch)
        aux = (tokens, batch)
        if args.robust and eps > 1e-9:
            embeddings = BoundedTensor(embeddings_unbounded, ptb)
        else:
            embeddings = embeddings_unbounded.detach().requires_grad_(True)

        robust = args.robust and eps > 1e-6

        if train and robust and args.loss_fusion:
            # loss_fusion loss
            if args.method == 'IBP+backward_train':
                lb, ub = model_loss.compute_bounds(x=(labels, embeddings,
                                                      mask),
                                                   aux=aux,
                                                   IBP=True,
                                                   C=None,
                                                   method='backward',
                                                   bound_lower=False)
            else:
                raise NotImplementedError
            loss_robust = torch.log(ub).mean()
            loss = acc = acc_robust = -1  # unknown
        else:
            # regular loss
            logits = model_bound(embeddings, mask)
            loss = CrossEntropyLoss()(logits, labels)
            acc = (torch.argmax(logits, dim=1) == labels).float().mean()

            if robust:
                num_class = args.num_classes
                c = torch.eye(num_class).type_as(embeddings)[labels].unsqueeze(1) - \
                    torch.eye(num_class).type_as(embeddings).unsqueeze(0)
                I = (~(labels.data.unsqueeze(1)
                       == torch.arange(num_class).type_as(
                           labels.data).unsqueeze(0)))
                c = (c[I].view(embeddings.size(0), num_class - 1, num_class))
                if args.method == 'IBP':
                    lb, ub = model_bound.compute_bounds(aux=aux,
                                                        IBP=True,
                                                        C=c,
                                                        method=None)
                elif args.method == 'IBP+backward':
                    lb, ub = model_bound.compute_bounds(aux=aux,
                                                        IBP=True,
                                                        C=c,
                                                        method='backward',
                                                        bound_upper=False)
                elif args.method == 'IBP+backward_train':
                    if 1 - eps > 1e-4:
                        lb, ub = model_bound.compute_bounds(aux=aux,
                                                            IBP=True,
                                                            C=c,
                                                            method='backward',
                                                            bound_upper=False)
                        ilb, iub = model_bound.compute_bounds(aux=aux,
                                                              IBP=True,
                                                              C=c,
                                                              method=None,
                                                              reuse_ibp=True)
                        lb = eps * ilb + (1 - eps) * lb
                    else:
                        lb, ub = model_bound.compute_bounds(aux=aux,
                                                            IBP=True,
                                                            C=c,
                                                            method=None)
                elif args.method == 'forward':
                    lb, ub = model_bound.compute_bounds(aux=aux,
                                                        IBP=False,
                                                        C=c,
                                                        method='forward',
                                                        bound_upper=False)
                elif args.method == 'forward+backward':
                    lb, ub = model_bound.compute_bounds(aux=aux,
                                                        IBP=False,
                                                        forward=True,
                                                        C=c,
                                                        method='backward',
                                                        bound_upper=False)
                else:
                    raise NotImplementedError
                lb_padded = torch.cat((torch.zeros(size=(lb.size(0), 1),
                                                   dtype=lb.dtype,
                                                   device=lb.device), lb),
                                      dim=1)
                fake_labels = torch.zeros(size=(lb.size(0), ),
                                          dtype=torch.int64,
                                          device=lb.device)
                loss_robust = robust_ce = CrossEntropyLoss()(-lb_padded,
                                                             fake_labels)
                acc_robust = 1 - torch.mean((lb < 0).any(dim=1).float())
            else:
                acc_robust, loss_robust = acc, loss

    if train or args.auto_test:
        loss_robust.backward()
        grad_embed = torch.autograd.grad(embeddings_unbounded,
                                         model.word_embeddings.weight,
                                         grad_outputs=embeddings.grad)[0]
        if model.word_embeddings.weight.grad is None:
            model.word_embeddings.weight.grad = grad_embed
        else:
            model.word_embeddings.weight.grad += grad_embed

    if args.auto_test:
        with open('res_test.pkl', 'wb') as file:
            pickle.dump((float(acc), float(loss), float(acc_robust),
                         float(loss_robust), grad_embed.detach().numpy()),
                        file)

    return acc, loss, acc_robust, loss_robust
Example #19
for batch_idx, (inputs, targets) in enumerate(testloader):
    inputs, targets = inputs.cuda(), targets.cuda()

    if batch_idx < 2:

        ## Step 3: wrap model with auto_LiRPA
        # The second parameter is for constructing the trace of the computational graph, and its content is not important.

        model = BoundedModule(model, inputs, device="cuda")

        ## Step 4: Compute bounds using LiRPA given a perturbation
        eps = 0.3
        norm = np.inf
        ptb = PerturbationLpNorm(norm=norm, eps=eps)
        image = BoundedTensor(inputs, ptb)
        # Get model prediction as usual
        pred = model(image)
        label = torch.argmax(pred, dim=1).cpu().numpy()
        # Compute bounds
        lb, ub = model.compute_bounds()

        ## Step 5: Final output
        pred = pred.detach().cpu().numpy()
        lb = lb.detach().cpu().numpy()
        ub = ub.detach().cpu().numpy()
        for i in range(N):
            print("Image {} top-1 prediction {}".format(i, label[i]))
            for j in range(n_classes):
                print(
                    "f_{j}(x_0) = {fx0:8.3f},   {l:8.3f} <= f_{j}(x_0+delta) <= {u:8.3f}"
Example #20
n_classes = 10
image = test_data.data[:N].view(N, 1, 28, 28).cuda()
# Convert to float
image = image.to(torch.float32) / 255.0

## Step 3: wrap model with auto_LiRPA
# The second parameter is for constructing the trace of the computational graph, and its content is not important.
image_1, image_2 = torch.split(torch.empty_like(image), [14, 14], dim=2)
model = BoundedModule(model, (image_1, image_2), device="cuda")

## Step 4: Compute bounds using LiRPA given a perturbation
eps = 0.3
norm = np.inf
ptb = PerturbationLpNorm(norm=norm, eps=eps)
image_1, image_2 = torch.split(image, [14, 14], dim=2)
image_1 = BoundedTensor(image_1, ptb)
image_2 = BoundedTensor(image_2, ptb)
# Get model prediction as usual
pred = model(image_1, image_2)
label = torch.argmax(pred, dim=1).cpu().numpy()
# Compute bounds
lb, ub = model.compute_bounds()

## Step 5: Final output
pred = pred.detach().cpu().numpy()
lb = lb.detach().cpu().numpy()
ub = ub.detach().cpu().numpy()
for i in range(N):
    print("Image {} top-1 prediction {}".format(i, label[i]))
    for j in range(n_classes):
        print(
            "f_{j}(x_0) = {fx0:8.3f},   {l:8.3f} <= f_{j}(x_0+delta) <= {u:8.3f}".format(
                j=j, fx0=pred[i][j], l=lb[i][j], u=ub[i][j]))
Example #21
test_batches = get_batches(data_test, args.batch_size)

# Set all random seeds.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)

# Create a LSTM sequence classifier.
logger.info("Creating LSTM model...")
model = LSTM(args).to(args.device)
X, y = model.get_input(test_batches[0])
# Create the perturbation object once here so that we can reuse it.
ptb = PerturbationLpNorm(norm=args.norm, eps=args.eps)
# Convert the LSTM to BoundedModule
X = BoundedTensor(X, ptb)
model.core = BoundedModule(model.core, (X, ), device=args.device)
optimizer = model.build_optimizer()

# Average accuracy, robust accuracy, and loss.
avg_acc, avg_acc_robust, avg_loss = avg = [AverageMeter() for i in range(3)]

# Main training loop.
for t in range(model.checkpoint, args.num_epochs):
    train(t + 1)
    test(t + 1, test_batches)

# If the loaded model has already reached the last epoch, test it directly.
if model.checkpoint == args.num_epochs:
    test(args.num_epochs, test_batches)
Example #22
def Train(model,
          t,
          loader,
          eps_scheduler,
          norm,
          train,
          opt,
          bound_type,
          method='robust',
          loss_fusion=True,
          final_node_name=None):
    num_class = 200
    meter = MultiAverageMeter()
    if train:
        model.train()
        eps_scheduler.train()
        eps_scheduler.step_epoch()
        eps_scheduler.set_epoch_length(
            int((len(loader.dataset) + loader.batch_size - 1) /
                loader.batch_size))
    else:
        model.eval()
        eps_scheduler.eval()

    exp_module = get_exp_module(model)

    def get_bound_loss(x=None, c=None):
        if loss_fusion:
            bound_lower, bound_upper = False, True
        else:
            bound_lower, bound_upper = True, False

        if bound_type == 'IBP':
            lb, ub = model(method_opt="compute_bounds",
                           x=x,
                           IBP=True,
                           C=c,
                           method=None,
                           final_node_name=final_node_name,
                           no_replicas=True)
        elif bound_type == 'CROWN':
            lb, ub = model(method_opt="compute_bounds",
                           x=x,
                           IBP=False,
                           C=c,
                           method='backward',
                           bound_lower=bound_lower,
                           bound_upper=bound_upper)
        elif bound_type == 'CROWN-IBP':
            # lb, ub = model.compute_bounds(ptb=ptb, IBP=True, x=data, C=c, method='backward')  # pure IBP bound
            # We use mixed IBP and CROWN-IBP bounds, which leads to better performance (Zhang et al., ICLR 2020).
            factor = (eps_scheduler.get_max_eps() -
                      eps_scheduler.get_eps()) / eps_scheduler.get_max_eps()
            ilb, iub = model(method_opt="compute_bounds",
                             x=x,
                             IBP=True,
                             C=c,
                             method=None,
                             final_node_name=final_node_name,
                             no_replicas=True)
            if factor < 1e-50:
                lb, ub = ilb, iub
            else:
                clb, cub = model(method_opt="compute_bounds",
                                 IBP=False,
                                 C=c,
                                 method='backward',
                                 bound_lower=bound_lower,
                                 bound_upper=bound_upper,
                                 final_node_name=final_node_name,
                                 no_replicas=True)
                if loss_fusion:
                    ub = cub * factor + iub * (1 - factor)
                else:
                    lb = clb * factor + ilb * (1 - factor)

        if loss_fusion:
            if isinstance(model, BoundDataParallel):
                max_input = model(get_property=True,
                                  node_class=BoundExp,
                                  att_name='max_input')
            else:
                max_input = exp_module.max_input
            return None, torch.mean(torch.log(ub) + max_input)
        else:
            # Pad zero at the beginning for each example, and use fake label '0' for all examples
            lb_padded = torch.cat((torch.zeros(
                size=(lb.size(0), 1), dtype=lb.dtype, device=lb.device), lb),
                                  dim=1)
            fake_labels = torch.zeros(size=(lb.size(0), ),
                                      dtype=torch.int64,
                                      device=lb.device)
            robust_ce = CrossEntropyLoss()(-lb_padded, fake_labels)
            return lb, robust_ce

    for i, (data, labels) in enumerate(loader):
        start = time.time()
        eps_scheduler.step_batch()
        eps = eps_scheduler.get_eps()
        # For small eps just use natural training, no need to compute LiRPA bounds
        batch_method = method
        if eps < 1e-50:
            batch_method = "natural"
        if train:
            opt.zero_grad()
        # Bound the input; only used for the Linf norm.
        if norm == np.inf:
            data_max = torch.reshape((1. - loader.mean) / loader.std,
                                     (1, -1, 1, 1))
            data_min = torch.reshape((0. - loader.mean) / loader.std,
                                     (1, -1, 1, 1))
            data_ub = torch.min(data + (eps / loader.std).view(1, -1, 1, 1),
                                data_max)
            data_lb = torch.max(data - (eps / loader.std).view(1, -1, 1, 1),
                                data_min)
        else:
            data_ub = data_lb = data

        if list(model.parameters())[0].is_cuda:
            data, labels = data.cuda(), labels.cuda()
            data_lb, data_ub = data_lb.cuda(), data_ub.cuda()

        ptb = PerturbationLpNorm(norm=norm, eps=eps, x_L=data_lb, x_U=data_ub)
        x = BoundedTensor(data, ptb)
        if loss_fusion:
            if batch_method == 'natural' or not train:
                output = model(x, labels)
                regular_ce = torch.mean(torch.log(output))
            else:
                model(x, labels)
                regular_ce = torch.tensor(0., device=data.device)
            meter.update('CE', regular_ce.item(), x.size(0))
            x = (x, labels)
            c = None
        else:
            c = torch.eye(num_class).type_as(data)[labels].unsqueeze(
                1) - torch.eye(num_class).type_as(data).unsqueeze(0)
            # remove specifications to self
            I = (~(labels.data.unsqueeze(1) == torch.arange(num_class).type_as(
                labels.data).unsqueeze(0)))
            c = (c[I].view(data.size(0), num_class - 1, num_class))
            x = (x, labels)
            output = model(x, final_node_name=final_node_name)
            regular_ce = CrossEntropyLoss()(
                output, labels)  # regular CrossEntropyLoss used for warming up
            meter.update('CE', regular_ce.item(), x[0].size(0))
            meter.update(
                'Err',
                torch.sum(torch.argmax(output, dim=1) != labels).item() /
                x[0].size(0), x[0].size(0))

        if batch_method == 'robust':
            # print(data.sum())
            lb, robust_ce = get_bound_loss(x=x, c=c)
            loss = robust_ce
        elif batch_method == 'natural':
            loss = regular_ce

        if train:
            loss.backward()

            if args.clip_grad_norm:
                grad_norm = torch.nn.utils.clip_grad_norm_(
                    model.parameters(), max_norm=args.clip_grad_norm)
                meter.update('grad_norm', grad_norm)

            if isinstance(eps_scheduler, AdaptiveScheduler):
                eps_scheduler.update_loss(loss.item() - regular_ce.item())
            opt.step()
        meter.update('Loss', loss.item(), data.size(0))

        if batch_method != 'natural':
            meter.update('Robust_CE', robust_ce.item(), data.size(0))
            if not loss_fusion:
                # For an example, if the lower bounds of the margins are > 0 for all classes, the output is verifiably correct.
                # If any margin is < 0, the example is counted as an error.
                meter.update(
                    'Verified_Err',
                    torch.sum((lb < 0).any(dim=1)).item() / data.size(0),
                    data.size(0))
        meter.update('Time', time.time() - start)

        if (i + 1) % 250 == 0 and train:
            logger.log('[{:2d}:{:4d}]: eps={:.12f} {}'.format(
                t, i + 1, eps, meter))

    logger.log('[{:2d}:{:4d}]: eps={:.12f} {}'.format(t, i + 1, eps, meter))
    return meter
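
In the CROWN-IBP branch above, the mixing factor (max_eps - eps) / max_eps starts at 1 while eps is small, so the bounds come from the backward CROWN pass, and decays to 0 as the scheduler ramps eps up to its final value, leaving pure IBP. A tiny sketch of the schedule with illustrative numbers:

max_eps = 0.3
for eps in (0.0, 0.1, 0.2, 0.3):
    factor = (max_eps - eps) / max_eps
    print('eps={:.2f} -> factor={:.2f}'.format(eps, factor))
# eps=0.00 -> factor=1.00 ... eps=0.30 -> factor=0.00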
Example #23
    def update_bounds_parallel(self,
                               pre_lb_all=None,
                               pre_ub_all=None,
                               decision=None,
                               slopes=None,
                               beta=True,
                               early_stop=True,
                               opt_choice="default",
                               iteration=20,
                               history=[],
                               decision_thresh=0,
                               layer_set_bound=True):
        # Update optimized-CROWN bounds in parallel.
        total_batch = len(decision)
        decision = np.array(decision)

        layers_need_change = np.unique(decision[:, 0])
        layers_need_change.sort()

        # Initialize results as empty lists.
        ret_l = [[] for _ in range(len(decision) * 2)]
        ret_u = [[] for _ in range(len(decision) * 2)]
        masks = [[] for _ in range(len(decision) * 2)]
        ret_s = [[] for _ in range(len(decision) * 2)]

        pre_lb_all_cp = copy.deepcopy(pre_lb_all)
        pre_ub_all_cp = copy.deepcopy(pre_ub_all)

        for idx in layers_need_change:
            # Iteratively update upper and lower bounds from earlier to later layers.
            tmp_d = np.argwhere(decision[:, 0] == idx)  # .squeeze()
            # idx is the index of relu layers, change_idx is the index of all layers
            change_idx = self.pre_relu_indices[idx]

            batch = len(tmp_d)
            select_history = [
                history[idx] for idx in tmp_d.squeeze().reshape(-1)
            ]

            if beta:
                # update beta mask, put it after reset_beta
                # reset beta according to the shape of batch
                self.reset_beta(self.net, batch)

                # print("select history", select_history)

                bound_relus = []
                for m in self.net._modules.values():
                    if isinstance(m, BoundRelu):
                        bound_relus.append(m)
                        m.beta_mask.data = m.beta_mask.data.view(batch * 2, -1)

                for bi in range(batch):
                    d = tmp_d[bi][0]
                    # assign current decision to each point of a batch
                    bound_relus[int(decision[d][0])].beta_mask.data[
                        bi, int(decision[d][1])] = 1
                    bound_relus[int(decision[d][0])].beta_mask.data[
                        bi + batch, int(decision[d][1])] = -1
                    # print("assign", bi, decision[d], 1, bound_relus[decision[d][0]].beta_mask.data[bi, decision[d][1]])
                    # print("assign", bi+batch, decision[d], -1, bound_relus[decision[d][0]].beta_mask.data[bi+batch, decision[d][1]])
                    # assign history decision according to select_history
                    for (hid, hl), hc in select_history[bi]:
                        bound_relus[hid].beta_mask.data[bi, hl] = int(
                            (hc - 0.5) * 2)
                        bound_relus[hid].beta_mask.data[bi + batch, hl] = int(
                            (hc - 0.5) * 2)
                        # print("assign", bi, [hid, hl], hc, bound_relus[hid].beta_mask.data[bi, hl])
                        # print("assign", bi+batch, [hid, hl], hc, bound_relus[hid].beta_mask.data[bi+batch, hl])

                # sanity check: beta_mask should only be assigned for split nodes
                for m in bound_relus:
                    m.beta_mask.data = m.beta_mask.data.view(m.beta[0].shape)

            slope_select = [i[tmp_d.squeeze()].clone() for i in slopes]

            pre_lb_all = [i[tmp_d.squeeze()].clone() for i in pre_lb_all_cp]
            pre_ub_all = [i[tmp_d.squeeze()].clone() for i in pre_ub_all_cp]

            if batch == 1:
                pre_lb_all = [i.clone().unsqueeze(0) for i in pre_lb_all]
                pre_ub_all = [i.clone().unsqueeze(0) for i in pre_ub_all]
                slope_select = [i.clone().unsqueeze(0) for i in slope_select]

            upper_bounds = [i.clone() for i in pre_ub_all[:change_idx + 1]]
            lower_bounds = [i.clone() for i in pre_lb_all[:change_idx + 1]]
            upper_bounds_cp = copy.deepcopy(upper_bounds)
            lower_bounds_cp = copy.deepcopy(lower_bounds)

            for i in range(batch):
                d = tmp_d[i][0]
                upper_bounds[change_idx].view(batch, -1)[i][decision[d][1]] = 0
                lower_bounds[change_idx].view(batch, -1)[i][decision[d][1]] = 0

            pre_lb_all = [torch.cat(2 * [i]) for i in pre_lb_all]
            pre_ub_all = [torch.cat(2 * [i]) for i in pre_ub_all]

            # merge the inactive and active splits together
            new_candidate = {}
            for i, (l, uc, lc, u) in enumerate(
                    zip(lower_bounds, upper_bounds_cp, lower_bounds_cp,
                        upper_bounds)):
                # we set lower = 0 in first half batch, and upper = 0 in second half batch
                new_candidate[self.name_dict[i]] = [
                    torch.cat((l, lc), dim=0),
                    torch.cat((uc, u), dim=0)
                ]

            if not layer_set_bound:
                new_candidate_p = {}
                for i, (l,
                        u) in enumerate(zip(pre_lb_all[:-2], pre_ub_all[:-2])):
                    # we set lower = 0 in first half batch, and upper = 0 in second half batch
                    new_candidate_p[self.name_dict[i]] = [l, u]

            # create new_x here since batch may change
            ptb = PerturbationLpNorm(
                norm=self.x.ptb.norm,
                eps=self.x.ptb.eps,
                x_L=self.x.ptb.x_L.repeat(batch * 2, 1, 1, 1),
                x_U=self.x.ptb.x_U.repeat(batch * 2, 1, 1, 1))
            new_x = BoundedTensor(self.x.data.repeat(batch * 2, 1, 1, 1), ptb)
            self.net(
                new_x
            )  # batch may change, so we need to do forward to set some shapes here

            if len(slope_select) > 0:
                # set slope here again
                self.set_slope(self.net, slope_select)

            torch.cuda.empty_cache()
            if layer_set_bound:
                # we fix the intermediate bounds before change_idx-th layer by using normal CROWN
                if self.solve_slope and change_idx >= self.pre_relu_indices[-1]:
                    # we split the ReLU at last layer, directly use Optimized CROWN
                    self.net.set_bound_opts({
                        'ob_start_idx':
                        sum(change_idx <= x for x in self.pre_relu_indices),
                        'ob_beta':
                        beta,
                        'ob_update_by_layer':
                        layer_set_bound,
                        'ob_iteration':
                        iteration
                    })
                    lb, ub, = self.net.compute_bounds(
                        x=(new_x, ),
                        IBP=False,
                        C=None,
                        method='CROWN-Optimized',
                        new_interval=new_candidate,
                        return_A=False,
                        bound_upper=False)
                else:
                    # we split the ReLU before the last layer, calculate intermediate bounds by using normal CROWN
                    self.net.set_relu_used_count(
                        sum(change_idx <= x for x in self.pre_relu_indices))
                    with torch.no_grad():
                        lb, ub, = self.net.compute_bounds(
                            x=(new_x, ),
                            IBP=False,
                            C=None,
                            method='backward',
                            new_interval=new_candidate,
                            bound_upper=False,
                            return_A=False)

                # we don't care about the upper bound of the last layer
                lower_bounds_new, upper_bounds_new = self.get_candidate_parallel(
                    self.net, lb, lb + 99, batch * 2)

                if change_idx < self.pre_relu_indices[-1]:
                    # Check whether we had better bounds before, and preset all intermediate bounds.
                    for i, (l, u) in enumerate(
                            zip(lower_bounds_new[change_idx + 2:-1],
                                upper_bounds_new[change_idx + 2:-1])):
                        new_candidate[self.name_dict[i + change_idx + 2]] = [
                            torch.max(l, pre_lb_all[i + change_idx + 2]),
                            torch.min(u, pre_ub_all[i + change_idx + 2])
                        ]

                    if self.solve_slope:
                        self.net.set_bound_opts({
                            'ob_start_idx':
                            sum(change_idx <= x
                                for x in self.pre_relu_indices),
                            'ob_beta':
                            beta,
                            'ob_update_by_layer':
                            layer_set_bound,
                            'ob_iteration':
                            iteration
                        })
                        lb, ub, = self.net.compute_bounds(
                            x=(new_x, ),
                            IBP=False,
                            C=None,
                            method='CROWN-Optimized',
                            new_interval=new_candidate,
                            return_A=False,
                            bound_upper=False)
                    else:
                        self.net.set_relu_used_count(
                            sum(change_idx <= x
                                for x in self.pre_relu_indices))
                        with torch.no_grad():
                            lb, ub, = self.net.compute_bounds(
                                x=(new_x, ),
                                IBP=False,
                                C=None,
                                method='backward',
                                new_interval=new_candidate,
                                bound_upper=False,
                                return_A=False)

            else:
                # All intermediate bounds are re-calculated by optimized CROWN.
                self.net.set_bound_opts({
                    'ob_start_idx': 99,
                    'ob_beta': beta,
                    'ob_update_by_layer': layer_set_bound,
                    'ob_iteration': iteration
                })
                lb, ub, = self.net.compute_bounds(x=(new_x, ),
                                                  IBP=False,
                                                  C=None,
                                                  method='CROWN-Optimized',
                                                  new_interval=new_candidate_p,
                                                  return_A=False,
                                                  bound_upper=False)

            # print('best results of parent nodes', pre_lb_all[-1].repeat(2, 1))
            # print('finally, after optimization:', lower_bounds_new[-1])

            # primal = self.get_primals(A_dict, return_x=True)
            lower_bounds_new, upper_bounds_new = self.get_candidate_parallel(
                self.net, lb, lb + 99, batch * 2)

            lower_bounds_new[-1] = torch.max(lower_bounds_new[-1],
                                             pre_lb_all[-1])
            upper_bounds_new[-1] = torch.min(upper_bounds_new[-1],
                                             pre_ub_all[-1])

            mask = self.get_mask_parallel(self.net)
            if len(slope_select) > 0:
                slope = self.get_slope(self.net)

            # reshape the results
            for i in range(len(tmp_d)):
                ret_l[int(tmp_d[i])] = [j[i] for j in lower_bounds_new]
                ret_l[int(tmp_d[i] + total_batch)] = [
                    j[i + batch] for j in lower_bounds_new
                ]

                ret_u[int(tmp_d[i])] = [j[i] for j in upper_bounds_new]
                ret_u[int(tmp_d[i] + total_batch)] = [
                    j[i + batch] for j in upper_bounds_new
                ]

                masks[int(tmp_d[i])] = mask[i]
                masks[int(tmp_d[i] + total_batch)] = mask[i + batch]
                if len(slope_select) > 0:
                    ret_s[int(tmp_d[i])] = slope[i]
                    ret_s[int(tmp_d[i] + total_batch)] = slope[i + batch]

        return ret_l, ret_u, masks, ret_s
Example #24
class mynet(nn.Module):
    def __init__(self):
        super(mynet, self).__init__()
        self.features = nn.Sequential(nn.Linear(5, 10), nn.ReLU(),
                                      nn.Linear(10, 3))

    def forward(self, input):
        return self.features(input)


input_vec = torch.randn(5, 5)  # batch of 5 vectors with input dimension 5, matching nn.Linear(5, 10)
raw_model = mynet()
bound_model = BoundedModule(raw_model, input_vec)
num_actions = 3
batchsize = 5
label = torch.tensor([0, 2, 1, 1, 0])
bnd_state = BoundedTensor(input_vec, PerturbationLpNorm(norm=np.inf, eps=0.1))

c = torch.eye(3).type_as(input_vec)[label].unsqueeze(1) - torch.eye(3).type_as(
    input_vec).unsqueeze(0)
I = (~(label.data.unsqueeze(1) == torch.arange(3).type_as(
    label.data).unsqueeze(0)))
c = (c[I].view(input_vec.size(0), 2, 3))

pred = bound_model(input_vec)
basic_bound, _ = bound_model.compute_bounds(IBP=False, method='backward')
advance_bound, _ = bound_model.compute_bounds(C=c,
                                              IBP=False,
                                              method='backward')
print(basic_bound.detach().numpy())
print(advance_bound.detach().numpy())
Example #25
def test():
    torch.manual_seed(1234)
    torch.cuda.manual_seed_all(1234)
    random.seed(1234)
    np.random.seed(123)

    model_ori = cnn_4layer_test().eval()
    model_ori.load_state_dict(data['model'])
    dummy_input = data['data']
    inputs = (dummy_input, )

    model = BoundedModule(model_ori, inputs)
    forward_ret = model(dummy_input)
    model_ori.eval()

    assert torch.isclose(model_ori(dummy_input), model(dummy_input),
                         1e-8).all()

    # Linf
    ptb = PerturbationLpNorm(norm=np.inf, eps=0.01)
    x = BoundedTensor(dummy_input, ptb)
    x.requires_grad_()

    verify_bounds(model,
                  x,
                  IBP=True,
                  method=None,
                  forward_ret=forward_ret,
                  lb_name='l_inf_IBP_lb',
                  ub_name='l_inf_IBP_ub')  # IBP
    verify_bounds(model,
                  x,
                  IBP=True,
                  method='backward',
                  forward_ret=forward_ret,
                  lb_name='l_inf_CROWN-IBP_lb',
                  ub_name='l_inf_CROWN-IBP_ub')  # CROWN-IBP
    verify_bounds(model,
                  x,
                  IBP=False,
                  method='backward',
                  forward_ret=forward_ret,
                  lb_name='l_inf_CROWN_lb',
                  ub_name='l_inf_CROWN_ub')  # CROWN

    # L2
    ptb = PerturbationLpNorm(norm=2, eps=0.01)
    x = BoundedTensor(dummy_input, ptb)
    x.requires_grad_()

    verify_bounds(model,
                  x,
                  IBP=True,
                  method=None,
                  forward_ret=forward_ret,
                  lb_name='l_2_IBP_lb',
                  ub_name='l_2_IBP_ub')  # IBP
    verify_bounds(model,
                  x,
                  IBP=True,
                  method='backward',
                  forward_ret=forward_ret,
                  lb_name='l_2_CROWN-IBP_lb',
                  ub_name='l_2_CROWN-IBP_ub')  # CROWN-IBP
    verify_bounds(model,
                  x,
                  IBP=False,
                  method='backward',
                  forward_ret=forward_ret,
                  lb_name='l_2_CROWN_lb',
                  ub_name='l_2_CROWN_ub')  # CROWN
Example #26
dummy_embeddings = torch.zeros(1, args.max_sent_length,
                               args.embedding_size,
                               device=args.device)
dummy_labels = torch.zeros(1, dtype=torch.long, device=args.device)

if args.model == 'transformer':
    dummy_mask = torch.zeros(1, 1, 1, args.max_sent_length, device=args.device)
    model = Transformer(args, data_train)
elif args.model == 'lstm':
    dummy_mask = torch.zeros(1, args.max_sent_length, device=args.device)
    model = LSTM(args, data_train)

dev_batches = get_batches(data_dev, args.batch_size)
test_batches = get_batches(data_test, args.batch_size)

ptb = PerturbationSynonym(budget=args.budget)
dummy_embeddings = BoundedTensor(dummy_embeddings, ptb)
model_ori = model.model_from_embeddings
bound_opts = {'relu': args.bound_opts_relu, 'exp': 'no-max-input'}
if isinstance(model_ori, BoundedModule):
    model_bound = model_ori
else:
    model_bound = BoundedModule(model_ori, (dummy_embeddings, dummy_mask),
                                bound_opts=bound_opts,
                                device=args.device)
model.model_from_embeddings = model_bound
if args.loss_fusion:
    bound_opts['loss_fusion'] = True
    model_loss = BoundedModule(
        CrossEntropyWrapperMultiInput(model_ori),
        (torch.zeros(1, dtype=torch.long), dummy_embeddings, dummy_mask),
        bound_opts=bound_opts,
        device=args.device)
Example #27
if device == 'cuda':
    image = image.cuda()

## Step 3: wrap model with auto_LiRPA
# The second parameter is for constructing the trace of the computational graph, and its content is not important.
# The new "patches" conv_mode provides an more efficient implementation for convolutional neural networks.
model = BoundedModule(model_ori,
                      image,
                      bound_opts={"conv_mode": conv_mode},
                      device=device)

## Step 4: Compute bounds using LiRPA given a perturbation
eps = 0.03
norm = np.inf
ptb = PerturbationLpNorm(norm=norm, eps=eps)
image = BoundedTensor(image, ptb)
# Get model prediction as usual
pred = model(image)

# Compute bounds
torch.cuda.empty_cache()
print('Using {} mode to compute convolution.'.format(conv_mode))
lb, ub = model.compute_bounds(IBP=False, C=None, method='backward')

## Step 5: Final output
# pred = pred.detach().cpu().numpy()
lb = lb.detach().cpu().numpy()
ub = ub.detach().cpu().numpy()
for i in range(N):
    # print("Image {} top-1 prediction {}".format(i, label[i]))
    for j in range(n_classes):
Example #28
def Train(model,
          t,
          loader,
          eps_scheduler,
          norm,
          train,
          opt,
          bound_type,
          method='robust'):
    num_class = 10
    meter = MultiAverageMeter()
    if train:
        model.train()
        eps_scheduler.train()
        eps_scheduler.step_epoch()
        eps_scheduler.set_epoch_length(
            int((len(loader.dataset) + loader.batch_size - 1) /
                loader.batch_size))
    else:
        model.eval()
        eps_scheduler.eval()

    for i, (data, labels) in enumerate(loader):
        start = time.time()
        eps_scheduler.step_batch()
        eps = eps_scheduler.get_eps()
        # For small eps just use natural training, no need to compute LiRPA bounds
        batch_method = method
        if eps < 1e-20:
            batch_method = "natural"
        if train:
            opt.zero_grad()
        # generate specifications
        c = torch.eye(num_class).type_as(data)[labels].unsqueeze(
            1) - torch.eye(num_class).type_as(data).unsqueeze(0)
        # remove specifications to self
        I = (~(labels.data.unsqueeze(1) == torch.arange(num_class).type_as(
            labels.data).unsqueeze(0)))
        c = (c[I].view(data.size(0), num_class - 1, num_class))
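        # For example, with num_class = 3 and label y = 0, the rows kept for
        # that example are [1, -1, 0] and [1, 0, -1], so c @ logits yields the
        # margins f_y(x) - f_j(x) for every j != y; the example is verified
        # when all margin lower bounds are positive.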
        # element-wise input bounds are only needed for the Linf norm
        if norm == np.inf:
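            # Inputs are normalized as (x - mean) / std, so the valid pixel
            # range [0, 1] maps to [(0 - mean) / std, (1 - mean) / std] and
            # eps (given in pixel scale) must also be divided by std.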
            data_max = torch.reshape((1. - loader.mean) / loader.std,
                                     (1, -1, 1, 1))
            data_min = torch.reshape((0. - loader.mean) / loader.std,
                                     (1, -1, 1, 1))
            data_ub = torch.min(data + (eps / loader.std).view(1, -1, 1, 1),
                                data_max)
            data_lb = torch.max(data - (eps / loader.std).view(1, -1, 1, 1),
                                data_min)
        else:
            data_ub = data_lb = data

        if next(model.parameters()).is_cuda:
            data, labels, c = data.cuda(), labels.cuda(), c.cuda()
            data_lb, data_ub = data_lb.cuda(), data_ub.cuda()

        # Specify Lp norm perturbation.
        # For the Linf norm we pass the element-wise bounds x_L and x_U computed above; eps is then ignored.
        ptb = PerturbationLpNorm(norm=norm, eps=eps, x_L=data_lb, x_U=data_ub)
        x = BoundedTensor(data, ptb)

        output = model(x)
        regular_ce = CrossEntropyLoss()(
            output, labels)  # regular CrossEntropyLoss used for warming up
        meter.update('CE', regular_ce.item(), x.size(0))
        meter.update(
            'Err',
            torch.sum(
                torch.argmax(output, dim=1) != labels).cpu().detach().numpy() /
            x.size(0), x.size(0))

        if batch_method == "robust":
            if bound_type == "IBP":
                lb, ub = model.compute_bounds(IBP=True, C=c, method=None)
            elif bound_type == "CROWN":
                lb, ub = model.compute_bounds(IBP=False,
                                              C=c,
                                              method="backward",
                                              bound_upper=False)
            elif bound_type == "CROWN-IBP":
                # lb, ub = model.compute_bounds(ptb=ptb, IBP=True, x=data, C=c, method="backward")  # pure IBP bound
                # We use mixed IBP and CROWN-IBP bounds, which leads to better performance (Zhang et al., ICLR 2020)
                factor = (eps_scheduler.get_max_eps() -
                          eps) / eps_scheduler.get_max_eps()
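                # factor decays from 1 (eps = 0) to 0 (eps = max_eps): training
                # starts from the tighter CROWN-IBP bound and transitions to
                # the cheaper pure IBP bound as eps ramps up.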
                ilb, iub = model.compute_bounds(IBP=True, C=c, method=None)
                if factor < 1e-5:
                    lb = ilb
                else:
                    clb, cub = model.compute_bounds(IBP=False,
                                                    C=c,
                                                    method="backward",
                                                    bound_upper=False)
                    lb = clb * factor + ilb * (1 - factor)

            # Pad a zero in front of each example's margin lower bounds and use the fake label 0 for all examples
            lb_padded = torch.cat((torch.zeros(
                size=(lb.size(0), 1), dtype=lb.dtype, device=lb.device), lb),
                                  dim=1)
            fake_labels = torch.zeros(size=(lb.size(0), ),
                                      dtype=torch.int64,
                                      device=lb.device)
            robust_ce = CrossEntropyLoss()(-lb_padded, fake_labels)
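            # With the padded zero, CrossEntropyLoss()(-lb_padded, 0) equals
            # log(1 + sum_j exp(-lb_j)), an upper bound on the worst-case
            # cross-entropy because each true margin is at least lb_j.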
        if batch_method == "robust":
            loss = robust_ce
        elif batch_method == "natural":
            loss = regular_ce
        if train:
            loss.backward()
            eps_scheduler.update_loss(loss.item() - regular_ce.item())
            opt.step()
        meter.update('Loss', loss.item(), data.size(0))
        if batch_method != "natural":
            meter.update('Robust_CE', robust_ce.item(), data.size(0))
            # For each example, if the lower bounds of the margins are > 0 for all other classes,
            # the output is verifiably correct; if any margin lower bound is < 0, the example counts as a verified error.
            meter.update('Verified_Err',
                         torch.sum((lb < 0).any(dim=1)).item() / data.size(0),
                         data.size(0))
        meter.update('Time', time.time() - start)
        if i % 50 == 0 and train:
            print('[{:2d}:{:4d}]: eps={:.8f} {}'.format(t, i, eps, meter))
    print('[{:2d}:{:4d}]: eps={:.8f} {}'.format(t, i, eps, meter))
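
# A minimal driver sketch for Train (illustration only: the scheduler class
# and option string are assumptions about auto_LiRPA's eps_scheduler module,
# and model/loaders come from the surrounding setup code).
from auto_LiRPA.eps_scheduler import LinearScheduler

eps_scheduler = LinearScheduler(0.3, "start=2,length=80")  # assumed signature
opt = torch.optim.Adam(model.parameters(), lr=5e-4)
for t in range(1, 101):
    Train(model, t, train_loader, eps_scheduler, np.inf, train=True,
          opt=opt, bound_type="CROWN-IBP")
    with torch.no_grad():
        Train(model, t, test_loader, eps_scheduler, np.inf, train=False,
              opt=None, bound_type="CROWN-IBP")
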
def cert(args,
         model,
         model_ori,
         epoch,
         epoch_progress,
         data,
         labels,
         eps,
         data_max,
         data_min,
         std,
         robust=False,
         reg=False,
         loss_fusion=False,
         eps_scheduler=None,
         train=False,
         meter=None):
    if not robust and reg:
        eps = max(eps, args.min_eps_reg)
    if isinstance(eps, float):
        eps = (eps / std).view(1, -1, 1, 1)
    else:  # [batch_size, channels]
        eps = (eps.view(*eps.shape, 1, 1) / std.view(1, -1, 1, 1))

    data_ub = torch.min(data + eps, data_max)
    data_lb = torch.max(data - eps, data_min)
    ptb = PerturbationLpNorm(norm=np.inf, eps=eps, x_L=data_lb, x_U=data_ub)
    x = BoundedTensor(data, ptb)

    if loss_fusion:
        x = (x, labels)
        output = model(*x)
        regular_ce = torch.mean(
            torch.log(output) + get_exp_module(model).max_input)
        regular_err = None
    else:
        output = model(x)
        regular_ce = ce_loss(
            output, labels)  # regular CrossEntropyLoss used for warming up
        regular_err = torch.sum(
            torch.argmax(output, dim=1) != labels).item() / x.size(0)
        x = (x, )

    if robust or reg or args.xiao_reg or args.vol_reg:
        b_res, robust_loss = get_bound_loss(args,
                                            model,
                                            loss_fusion,
                                            eps_scheduler,
                                            x=(x if loss_fusion else None),
                                            data=data,
                                            labels=labels,
                                            eps=eps,
                                            meter=meter,
                                            train=train)
        robust_err = torch.sum((b_res < 0).any(
            dim=1)).item() / data.size(0) if not loss_fusion else None
    else:
        robust_loss = robust_err = None

    if robust_loss is not None and torch.isnan(robust_loss):
        robust_err = 100.

    return regular_ce, robust_loss, regular_err, robust_err
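
# cert returns (regular_ce, robust_loss, regular_err, robust_err). A caller
# sketch (hypothetical names, for illustration only):
#     regular_ce, robust_loss, regular_err, robust_err = cert(
#         args, model_bound, model_ori, epoch, i / len(loader), data, labels,
#         eps, data_max, data_min, std, robust=True,
#         eps_scheduler=eps_scheduler, train=True, meter=meter)
#     loss = robust_loss if robust_loss is not None else regular_ce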