    def set_rcam(self):
        print("Creating CAM for {}".format(self.args.model))
        if 'resnet' in type(self.model).__name__.lower():
            last_conv = 'layer4'
        else:
            print("Model not implemented. Setting rcam=False by default.")
            return

        self.weights = EvaluationMetrics(list(range(self.args.num_classes)))

        # The hook appends the max-pooled activations of the last conv block
        # to `weights`, a local list rebound on every loop iteration below.
        weights = []
        def hook_weights(module, input, output):
            weights.append(F.adaptive_max_pool2d(output, (1, 1)))
        handle = self.model._modules.get(last_conv).register_forward_hook(hook_weights)

        train_loader, _ = get_loader(self.args.dataset,
            batch_size=1,
            num_workers=self.args.workers
        )
        for i, (image, label) in enumerate(train_loader):
            weights = []
            _ = self.model(to_var(image, volatile=True))
            weights = weights[0].squeeze()
            label = label.squeeze()[0]
            self.weights.update(label, weights)
            if (i+1)%1000 == 0:
                print("{:5.1f}% ({}/{})".format((i+1)/len(train_loader)*100, i+1, len(train_loader)))
        handle.remove()
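These listings lean on small helpers (to_var, to_np) that are never shown. A minimal sketch of what they plausibly look like, assuming the pre-0.4 PyTorch Variable API used throughout (the project's real helpers may differ; to_var is even called with a cuda flag in Example #8):

import torch
from torch.autograd import Variable

def to_var(x, volatile=False):
    # Hypothetical helper: wrap a tensor in a Variable,
    # moving it to the GPU when one is available.
    if torch.cuda.is_available():
        x = x.cuda()
    return Variable(x, volatile=volatile)

def to_np(x):
    # Hypothetical helper: bring a Variable (or tensor)
    # back to a NumPy array on the CPU.
    if isinstance(x, Variable):
        x = x.data
    return x.cpu().numpy()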
Example #2
    def __init__(self,
                 model,
                 inner_iter=100,
                 outer_iter=10,
                 c0=1e-3,
                 max_clip=1,
                 min_clip=0,
                 tau0=1.0,
                 kappa=0,
                 norm='L2',
                 target=None,
                 c_rate=10,
                 tau_rate=0.9,
                 max_eps=0.01,
                 args=None,
                 **kwargs):
        self.model = model
        self.inner_iter = inner_iter
        self.outer_iter = outer_iter
        self.c0 = c0
        self.max_clip = max_clip
        self.min_clip = min_clip
        self.tau0 = tau0
        self.kappa = kappa
        self.norm = norm
        self.target = target
        self.c_rate = c_rate
        self.tau_rate = tau_rate
        self.max_eps = max_eps
        self.args = args
        if args is not None and args.domain_restrict:
            self.mask = to_var(kwargs.get('artifact'))
        else:
            self.mask = 1
Example #3
    def binary_search(self, c, update):
        # Samples whose attack succeeded at the current c shrink their
        # upper bound; the rest raise their lower bound.
        update = update.byte()
        self.upper[update] = torch.min(self.upper, c.data)[update]
        self.lower[~update] = torch.max(self.lower, c.data)[~update]
        # A sample that has never succeeded (upper still at 1e10) grows c
        # geometrically; otherwise c bisects the [lower, upper] bracket.
        init = ~update * (self.upper == 1e10)
        c[init] = c[init] * self.c_rate
        c[~init] = to_var((self.upper + self.lower) / 2)[~init]
        return c
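Concretely, one binary_search update does the following; this is a hedged standalone rundown written against the modern bool-tensor API rather than the byte tensors above:

import torch

# Two samples: the first found an adversarial example at the current c
# (update=True), the second never has.
c      = torch.tensor([1e-3, 1e-3])
upper  = torch.tensor([1e10, 1e10])
lower  = torch.tensor([0.0, 0.0])
update = torch.tensor([True, False])
c_rate = 10

upper[update]  = torch.min(upper, c)[update]    # success: tighten upper -> [1e-3, 1e10]
lower[~update] = torch.max(lower, c)[~update]   # failure: raise lower  -> [0.0, 1e-3]

init = ~update & (upper == 1e10)                # never succeeded -> [False, True]
c[init]  = c[init] * c_rate                     # sample 2: grow c to 1e-2
c[~init] = ((upper + lower) / 2)[~init]         # sample 1: bisect to 5e-4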
Example #4
    def generate(self, images, labels):
        def_imgs = [
            self.generate_sample(image, label)
            for (image, label) in zip(images, labels)
        ]
        def_imgs = torch.stack(def_imgs)
        def_outputs = self.model(to_var(def_imgs))
        def_probs, def_labels = torch.max(def_outputs, 1)

        return def_imgs, def_labels
Example #5
    def generate(self, images, labels):
        """
        Args:
            images (Tensor): batch of input images.
            labels (Tensor): ground-truth labels for the batch.
        """
        self.original_shape = images[0].shape

        def_imgs = [self.generate_sample(image, label) for (image, label)
                    in zip(images, labels)]
        def_imgs = torch.stack(def_imgs)
        def_outputs = self.model(to_var(def_imgs, volatile=True))
        def_probs, def_labels = torch.max(def_outputs, 1)

        return def_imgs, def_labels
Example #6
    def generate(self, images, labels):
        # Run m randomly perturbed forward passes and take a majority vote
        # over the predicted labels.
        ensemble = []
        for _ in range(self.m):
            noise = 2 * self.r * (torch.rand(*images.shape) - 0.5)  # U(-r, r)
            if self.args.cuda:
                noise = noise.cuda()
            def_outputs = self.model(to_var(images + noise))
            ensemble.append(to_np(torch.max(def_outputs, 1)[1]))
        ensemble = np.asarray(ensemble)
        def_labels, _ = stats.mode(ensemble)

        def_images = images
        def_labels = torch.LongTensor(def_labels).squeeze(0)
        if self.args.cuda:
            def_labels = def_labels.cuda()

        return def_images, def_labels
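The majority vote relies on scipy.stats.mode reducing over the ensemble axis. A standalone illustration of that call (assuming SciPy < 1.11, where axis=0 is the default and the reduced dimension is kept, which is why the code above needs squeeze(0)):

import numpy as np
from scipy import stats

# Three noisy forward passes over a batch of four images:
# each row holds the predicted labels of one pass.
ensemble = np.array([[3, 1, 7, 2],
                     [3, 1, 0, 2],
                     [3, 4, 7, 2]])

labels, counts = stats.mode(ensemble)   # vote per column (axis=0)
print(labels)   # [[3 1 7 2]] -- shape (1, 4)
print(counts)   # [[3 2 2 3]]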
Example #7
    def get_rcam(self, image, k=1):
        size = image.shape[-2:]
        if not hasattr(self, 'weights'):
            return torch.zeros(size)
        if 'resnet' in type(self.model).__name__.lower():
            last_conv = 'layer4'
        else:
            return torch.zeros(size)

        features = []
        def hook_feature(module, input, output):
            features.append(output)
        handle = self.model._modules.get(last_conv).register_forward_hook(hook_feature)
        outputs = self.model(to_var(image.unsqueeze(0), volatile=True))
        outputs = to_np(outputs).squeeze()
        handle.remove()

        features = features[0]
        weights = self.weights.avg

        _, nc, h, w = features.shape
        cams = []
        for label in range(self.args.num_classes):
            cam = weights[label] @ features.view(nc, h*w)
            cam = cam.view(h, w)
            cam = (cam - torch.min(cam)) / (torch.max(cam) - torch.min(cam))
            cam = cam.view(1, 1, *cam.shape)
            cams.append(F.upsample(cam, size, mode='bilinear'))
        rcam = 0
        # Accumulate the CAMs of the k lowest-scoring classes
        # (np.argsort sorts ascending) with geometrically decaying weights.
        for idx, label in enumerate(np.argsort(outputs)):
            if idx >= k:
                break
            rcam += cams[label] / float(2**(idx + 1))
        rcam = (rcam - torch.min(rcam))/(torch.max(rcam) - torch.min(rcam))
        rcam = to_np(rcam).squeeze()

        return rcam
Example #8
    def generate_sample(self, image, label):
        # PGD Obfuscated Gradients setting
        alpha = 0.1
        max_clip = 0.031
        adv_img = image.clone()
        criterion = nn.CrossEntropyLoss()
        label = torch.LongTensor([label])

        if isinstance(adv_img, Variable):
            adv_img = adv_img.data
        adv_img = Variable(adv_img, requires_grad=True)  # Start of graph

        for _ in range(self.args.eot_iter):

            # Expectation over transformation: average the gradient across
            # nsamples stochastic passes through the defense.
            ensemble_images = torch.cat(
                [self.defense.generate(adv_img.unsqueeze(0), label)[0]
                 for _ in range(self.nsamples)], dim=0)
            ensemble_labels = to_var(label.repeat(self.nsamples), self.cuda)

            ensemble_outputs = self.model(ensemble_images)
            ensemble_loss = criterion(ensemble_outputs, ensemble_labels)
            if adv_img.grad is not None:
                adv_img.grad.data.zero_()
            ensemble_loss.backward()

            if self.args.eot_norm == 'linf':
                adv_img.grad.data.sign_()  # in-place: keep only the gradient sign
            elif self.args.eot_norm == 'l2':
                L2_norm = torch.norm(adv_img.grad.view(label.size(0), -1), p=2, dim=1)
                adv_img.grad = adv_img.grad / L2_norm.view(-1, 1, 1)
            else:
                raise ValueError("eot_norm must be 'linf' or 'l2'")

            adv_img = adv_img + alpha * adv_img.grad
            diff = torch.clamp(denormalize(adv_img, self.args.dataset) - denormalize(Variable(image), self.args.dataset), -max_clip, max_clip)
            adv_img = torch.clamp(denormalize(image, self.args.dataset) + diff.data, 0, 1)
            adv_img = Variable(normalize(adv_img, self.args.dataset)[0], requires_grad=True)

        return adv_img
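The last three lines of the loop are an L-infinity projection carried out in pixel space. Stripped of the dataset-specific normalize/denormalize helpers, the projection reduces to this hedged sketch:

import torch

def project_linf(adv, clean, eps=0.031):
    # Keep the perturbation inside the eps-ball around the clean image,
    # then keep pixels inside the valid [0, 1] range.
    diff = torch.clamp(adv - clean, -eps, eps)
    return torch.clamp(clean + diff, 0, 1)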
Example #9
    def __init__(self,
                 in_channels=3,
                 depth=5,
                 start_filts=64,
                 up_mode='transpose',
                 merge_mode='concat',
                 args=None,
                 **kwargs):
        """
        Arguments:
            in_channels: int, number of channels in the input tensor.
                Default is 3 for RGB images.
            depth: int, number of MaxPools in the U-Net.
            start_filts: int, number of convolutional filters for the
                first conv.
            up_mode: string, type of upconvolution. Choices: 'transpose'
                for transpose convolution or 'upsample' for nearest neighbour
                upsampling.
        """
        super(UNet, self).__init__()
        self.std = to_var(torch.FloatTensor(data_stats(args.dataset)[1]))
        self.std = self.std.view(1, -1, 1, 1)

        if up_mode in ('transpose', 'upsample'):
            self.up_mode = up_mode
        else:
            raise ValueError("\"{}\" is not a valid mode for "
                             "upsampling. Only \"transpose\" and "
                             "\"upsample\" are allowed.".format(up_mode))

        if merge_mode in ('concat', 'add'):
            self.merge_mode = merge_mode
        else:
            raise ValueError("\"{}\" is not a valid mode for "
                             "merging up and down paths. "
                             "Only \"concat\" and "
                             "\"add\" are allowed.".format(merge_mode))

        # NOTE: up_mode 'upsample' is incompatible with merge_mode 'add'
        if self.up_mode == 'upsample' and self.merge_mode == 'add':
            raise ValueError("up_mode \"upsample\" is incompatible "
                             "with merge_mode \"add\" at the moment "
                             "because it doesn't make sense to use "
                             "nearest neighbour to reduce "
                             "depth channels (by half).")

        self.in_channels = in_channels
        self.start_filts = start_filts
        self.depth = depth

        self.down_convs = []
        self.up_convs = []

        # create the encoder pathway and add to a list
        for i in range(depth):
            ins = self.in_channels if i == 0 else outs
            outs = self.start_filts * (2**i)
            pooling = i < depth - 1

            down_conv = DownConv(ins, outs, pooling=pooling)
            self.down_convs.append(down_conv)

        # create the decoder pathway and add to a list
        # - careful! decoding only requires depth-1 blocks
        for i in range(depth - 1):
            ins = outs
            outs = ins // 2
            up_conv = UpConv(ins, outs, up_mode=up_mode, merge_mode=merge_mode)
            self.up_convs.append(up_conv)

        # add the list of modules to current module
        self.first_conv = nn.Conv2d(self.in_channels,
                                    self.in_channels,
                                    kernel_size=1,
                                    stride=2)
        self.down_convs = nn.ModuleList(self.down_convs)
        self.up_convs = nn.ModuleList(self.up_convs)
        self.last_conv = nn.ConvTranspose2d(self.start_filts,
                                            self.in_channels,
                                            kernel_size=3,
                                            stride=2,
                                            padding=1,
                                            output_padding=1)
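A hedged usage sketch for this constructor: DownConv, UpConv, and data_stats are not part of the listing, and the args namespace is assumed to carry at least a dataset field because __init__ reads data_stats(args.dataset).

import argparse

# Hypothetical setup; only args.dataset is required here.
args = argparse.Namespace(dataset='cifar10')
model = UNet(in_channels=3, depth=5, start_filts=64,
             up_mode='transpose', merge_mode='concat', args=args)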
Example #10
    def generate(self, images, labels):
        if self.target is not None:
            if self.target == -1:  # Least likely method
                _, labels = torch.min(self.model(to_var(images)).data, dim=1)
            else:
                labels = self.target * torch.ones_like(labels)
        labels = to_var(labels)
        # images = to_var(images*self.std.data + self.mean.data)
        images = denormalize(to_var(images), self.args.dataset)
        outer_adv_images = images.data.clone()
        outer_Lp = torch.ones(images.size(0)) * 1e10
        if self.args.cuda: outer_Lp = outer_Lp.cuda()

        self.lower = torch.zeros(self.args.batch_size)
        self.upper = torch.ones(self.args.batch_size) * 1e10
        if self.args.cuda:
            self.lower = self.lower.cuda()
            self.upper = self.upper.cuda()
        c = to_var(torch.ones(self.args.batch_size) * self.c0)
        tau = to_var(torch.ones(self.args.batch_size) * self.tau0)

        # perform binary search for the best c, i.e. constant for confidence loss
        for binary_step in range(self.outer_iter):

            update = torch.zeros(images.size(0))
            if self.args.cuda: update = update.cuda()
            valid = to_var(
                torch.ones(images.size(0), 1, images.size(2), images.size(3)))

            # variables used only inside the binary search loop
            inner_adv_grad = to_var(self.unclip(images.data))
            inner_adv_grad.requires_grad = True
            inner_adv_nograd = to_var(self.unclip(images.data))
            inner_adv_latent = inner_adv_grad * self.mask + inner_adv_nograd * (
                1 - self.mask)

            inner_adv_images = self.clip(inner_adv_latent)
            inner_adv_out = self.model(
                normalize(inner_adv_images, self.args.dataset))
            inner_Lp = torch.ones(images.size(0)) * 1e10
            inner_grad = torch.zeros_like(images.data)
            if self.args.cuda: inner_Lp = inner_Lp.cuda()

            optimizer = get_optimizer(self.args.optimizer, [inner_adv_grad],
                                      self.args)

            for step in range(self.inner_iter):
                diff = (inner_adv_images - images).view(images.size(0), -1)
                if self.norm == 'Li':
                    Lp = torch.max(torch.abs(diff), tau.view(-1, 1))
                    Lp = torch.sum(Lp, dim=1)
                else:
                    Lp = torch.norm(diff, p=2, dim=1)**2
                Lp_loss = torch.sum(Lp)

                # CW confidence loss: Z_t is the logit of the (target) label,
                # Z_nt the largest logit among all other classes.
                Z_t = inner_adv_out.gather(1, labels.view(-1, 1)).squeeze(1)
                Z_nt, _ = torch.max(
                    inner_adv_out.scatter(1, labels.view(-1, 1), -1e10), dim=1)
                Z_diff = Z_nt - Z_t
                if self.target is None:
                    Z_diff = -Z_diff
                conf_loss = torch.max(Z_diff,
                                      torch.ones_like(Z_diff) * (-self.kappa))

                loss = Lp_loss + torch.dot(c, conf_loss)
                optimizer.zero_grad()
                loss.backward(retain_graph=True)
                optimizer.step()

                grad = inner_adv_grad.grad
                inner_adv_latent = inner_adv_grad * self.mask + inner_adv_nograd * (
                    1 - self.mask)
                inner_adv_images = self.clip(
                    inner_adv_latent) * valid + images * (1 - valid)
                # inner_adv_out = self.model((inner_adv_images - self.mean)/self.std)
                inner_adv_out = self.model(
                    normalize(inner_adv_images, self.args.dataset))
                success = (torch.max(inner_adv_out, dim=1)[1] == labels).data
                if self.target is None:
                    success = ~success
                inner_update = ((inner_Lp > Lp.data) * success).float()
                outer_update = ((outer_Lp > Lp.data) * success).float()
                update = update + inner_update

                inner_Lp += inner_update * (Lp.data - inner_Lp)
                outer_Lp += outer_update * (Lp.data - outer_Lp)

                inner_update = inner_update.view(-1, 1, 1, 1)
                inner_grad += inner_update * (grad.data - inner_grad)

                outer_update = outer_update.view(-1, 1, 1, 1)
                outer_adv_images = outer_update * inner_adv_images.data + \
                                   (1 - outer_update) * outer_adv_images

            c = self.binary_search(c, update)
            abs_diff = torch.abs(inner_adv_images - images)
            if self.norm == 'L0':
                totalchange = torch.sum(abs_diff.data * torch.abs(inner_grad),
                                        dim=1)
                valid = to_var((totalchange > self.max_eps).float())
                valid = valid.view(
                    (images.size(0), 1, images.size(2), images.size(3)))
            elif self.norm == 'Li':
                actual_tau, _ = torch.max(abs_diff.view(images.size(0), -1),
                                          dim=1)
                tau = self.reduce_tau(tau, actual_tau, update)

        # adv_images = (outer_adv_images - self.mean.data) / self.std.data
        adv_images = normalize(to_var(outer_adv_images), self.args.dataset)
        return adv_images.data, labels
Example #11
    def reduce_tau(self, tau, actual_tau, update):
        # Shrink tau toward the largest observed perturbation for samples
        # whose attack succeeded; leave the rest unchanged.
        update = to_var(update.float())
        tau = torch.min(tau, actual_tau) * self.tau_rate * update + tau * (1 - update)
        return tau
Example #12
    def generate(self, images, labels):
        """Generate adversarial images
        """
        preds = np.argmax(to_np(self.model(to_var(images))), axis=1)
        images = denormalize(images, self.args.dataset) * 255
        #self.n_pix = int(images.size(2)*images.size(3)*self.args.gamma)

        bounds = [(0, images[0].size(1) - 1), (0, images[0].size(2) - 1),
                  (0, 255), (0, 255), (0, 255)] * self.n_pix

        adv_images = []
        adv_labels = []

        for i in range(len(images)):
            self.image = images[i]
            self.label = int(preds[i])

            if self.target is not None:
                self.label = self.target
            self.convergence = False

            if self.init == 'normal':
                x_loc = np.random.uniform(0, images[0].size(1),
                                          self.n_pix * self.popsize)
                y_loc = np.random.uniform(0, images[0].size(2),
                                          self.n_pix * self.popsize)
                val = np.array(
                    np.split(
                        np.random.normal(128, 127,
                                         self.n_pix * self.popsize * 3), 3))
                init = np.array(
                    np.split(np.vstack((x_loc, y_loc, val)),
                             self.n_pix,
                             axis=1))
                init = np.transpose(init.reshape(-1, self.popsize))

            else:
                init = self.init

            self.step = 0
            if self.args.domain_restrict:
                self.mapping = self.create_map(self.args.gamma,
                                               self.kwargs.get('artifact'))
            else:
                self.mapping = lambda x, y: (x, y)

            result = differential_evolution(self.optimize,
                                            bounds,
                                            init=init,
                                            strategy=self.strategy,
                                            maxiter=self.max_iter,
                                            popsize=self.popsize,
                                            seed=self.args.seed,
                                            callback=self.callback,
                                            mutation=0.5,
                                            recombination=1,
                                            polish=False,
                                            tol=0,
                                            atol=-1)

            adv_image = self.perturb(result.x).squeeze(0)
            adv_images.append(adv_image)
            adv_labels.append(self.label)

            self.step_meter.update(self.step - 1)

        #print("Average step per iter: {}".format(self.step_meter.avg))

        return torch.stack(adv_images), torch.LongTensor(
            adv_labels)  #, torch.FloatTensor(steps)
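For reference, scipy.optimize.differential_evolution can be exercised standalone with the same style of bounds list, one (low, high) pair per optimized variable. A minimal toy call:

from scipy.optimize import differential_evolution

# Minimize a toy objective over two variables in [0, 5] x [0, 5].
result = differential_evolution(
    lambda x: (x[0] - 1)**2 + (x[1] - 3)**2,
    bounds=[(0, 5), (0, 5)],
    seed=0, polish=False)
print(result.x)   # close to [1., 3.]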