    def evaluate(self):
        self.model.eval()

        std_loss = Accumulator('std_loss')
        adv_loss = Accumulator('adv_loss')
        std_corr = Accumulator('std_corr')
        adv_corr = Accumulator('adv_corr')
        std_logits = Accumulator('std_logits')
        adv_logits = Accumulator('adv_logits')

        seen_classes = []
        adv_images = Accumulator('adv_images')
        first_batch_images = Accumulator('first_batch_images')

        # The standard-accuracy pass over self.val_loader[0] is disabled in this
        # variant; the next example shows the active version.

        for batch_idx, (data, target) in enumerate(self.val_loader[1]):

            # data is normalized at this point

            if self.cuda:
                data, target = data.cuda(non_blocking=True), target.cuda(
                    non_blocking=True)

            # Sample a random label that is guaranteed to differ from the true one.
            rand_target = torch.randint(0,
                                        self.nb_classes - 1,
                                        target.size(),
                                        dtype=target.dtype,
                                        device=target.device)
            rand_target = torch.remainder(target + rand_target + 1,
                                          self.nb_classes)

            data_cpy = data.clone().detach()

            for idx in range(len(data_cpy)):
                # Undo the normalization, re-apply the image transforms on the
                # raw pixels, then normalize again for the model.
                unnormalized = reverse_normalization(data[idx])
                transformed = applyTransforms(
                    np.swapaxes(
                        np.array(unnormalized.cpu().clone().detach()) * 255.0,
                        0, 2))
                data_cpy[idx] = transforms.functional.normalize(
                    transformed.clone().cpu(), IMAGENET_MEAN,
                    IMAGENET_STD).to(data.device)

            data_adv = self.attack(self.model,
                                   data_cpy,
                                   rand_target,
                                   avoid_target=False,
                                   scale_eps=False)

            with torch.no_grad():
                output_adv = self.model(data_adv)
                adv_logits.update(output_adv.cpu())
                loss = F.cross_entropy(output_adv, target,
                                       reduction='none').cpu()
                adv_loss.update(loss)
                corr = correct(output_adv, target)
                corr = corr.view(corr.size()[0]).cpu()
                adv_corr.update(corr)

            run_output = {'adv_loss': adv_loss.avg, 'adv_acc': adv_corr.avg}
            print('Adv Batch', batch_idx)
            print(run_output)

        # The standard pass is disabled in this variant, so only adversarial
        # metrics are reported.
        summary_dict = {
            'adv_acc': adv_corr.avg.item()
        }
        print(adv_loss.avg, adv_corr.avg)
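
For reference, a minimal sketch of the Accumulator and correct helpers that these examples rely on; the project's actual implementations may differ in detail.

import torch

class Accumulator:
    """Hypothetical stand-in: collects per-batch values and exposes a running average."""

    def __init__(self, name):
        self.name = name
        self.vals = []

    def update(self, val):
        self.vals.append(val)

    @property
    def avg(self):
        # Flatten everything accumulated so far and average it.
        return torch.cat([v.reshape(-1).float() for v in self.vals]).mean()


def correct(output, target):
    """Returns an (N, 1) tensor of 0/1 flags marking correct top-1 predictions."""
    pred = output.argmax(dim=1, keepdim=True)
    return pred.eq(target.view_as(pred)).float()
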
Example #2
    def evaluate(self):
        self.model.eval()

        std_loss = Accumulator('std_loss')
        adv_loss = Accumulator('adv_loss')
        std_corr = Accumulator('std_corr')
        adv_corr = Accumulator('adv_corr')
        std_logits = Accumulator('std_logits')
        adv_logits = Accumulator('adv_logits')

        seen_classes = []
        adv_images = Accumulator('adv_images')
        first_batch_images = Accumulator('first_batch_images')

        for batch_idx, (data, target) in enumerate(self.val_loader[0]):
            if self.cuda:
                data, target = data.cuda(non_blocking=True), target.cuda(
                    non_blocking=True)
            with torch.no_grad():
                # std_cpy is a copy of the batch, which already has the standard
                # transforms applied; it is used for the standard accuracy.
                std_cpy = data.clone().detach()
                output = self.model(std_cpy)
                std_logits.update(output.cpu())
                loss = F.cross_entropy(output, target, reduction='none').cpu()
                std_loss.update(loss)
                corr = correct(output, target)
                corr = corr.view(corr.size()[0]).cpu()
                std_corr.update(corr)

            run_output = {'std_loss': std_loss.avg, 'std_acc': std_corr.avg}
            print('Standard Batch', batch_idx)
            print(run_output)

        for batch_idx, (data, target) in enumerate(self.val_loader[1]):

            # data is normalized at this point

            if self.cuda:
                data, target = data.cuda(non_blocking=True), target.cuda(
                    non_blocking=True)

            rand_target = torch.randint(0,
                                        self.nb_classes - 1,
                                        target.size(),
                                        dtype=target.dtype,
                                        device=target.device)
            rand_target = torch.remainder(target + rand_target + 1,
                                          self.nb_classes)

            data_cpy = data.clone().detach()

            for idx in range(len(data_cpy)):
                # Un-normalize, re-apply the image transforms, then re-normalize.
                unnormalized = reverse_normalization(data[idx])
                transformed = applyTransforms(
                    np.swapaxes(
                        np.array(unnormalized.cpu().clone().detach()) * 255.0,
                        0, 2))
                data_cpy[idx] = transforms.functional.normalize(
                    transformed.clone().cpu(), IMAGENET_MEAN,
                    IMAGENET_STD).to(data.device)

            data_adv = self.attack(self.model,
                                   data_cpy,
                                   rand_target,
                                   avoid_target=False,
                                   scale_eps=False)

            with torch.no_grad():
                output_adv = self.model(data_adv)
                adv_logits.update(output_adv.cpu())
                loss = F.cross_entropy(output_adv, target,
                                       reduction='none').cpu()
                adv_loss.update(loss)
                corr = correct(output_adv, target)
                corr = corr.view(corr.size()[0]).cpu()
                adv_corr.update(corr)

            run_output = {'adv_loss': adv_loss.avg, 'adv_acc': adv_corr.avg}
            print('Adv Batch', batch_idx)
            print(run_output)

        summary_dict = {
            'std_acc': std_corr.avg.item(),
            'adv_acc': adv_corr.avg.item()
        }
        print(std_loss.avg, std_corr.avg, adv_loss.avg, adv_corr.avg)
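
Examples #1 and #2 undo the input normalization before re-applying image transforms. A hedged sketch of what reverse_normalization and the norm_to_pil_image debug helper could look like, assuming the usual ImageNet statistics for IMAGENET_MEAN and IMAGENET_STD:

import torch
from PIL import Image

# Assumed values; the project defines IMAGENET_MEAN / IMAGENET_STD elsewhere.
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]

def reverse_normalization(img):
    """Maps a normalized CHW tensor back to the [0, 1] pixel range."""
    mean = torch.tensor(IMAGENET_MEAN, device=img.device).view(3, 1, 1)
    std = torch.tensor(IMAGENET_STD, device=img.device).view(3, 1, 1)
    return img * std + mean

def norm_to_pil_image(img):
    """Converts a normalized CHW tensor into a PIL image for visual inspection."""
    unnorm = reverse_normalization(img).clamp(0.0, 1.0)
    arr = (unnorm.cpu().numpy() * 255.0).astype('uint8').transpose(1, 2, 0)
    return Image.fromarray(arr)
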
Example #3
    def evaluate(self):
        self.model.eval()

        std_loss = Accumulator('std_loss')
        adv_loss = Accumulator('adv_loss')
        std_corr = Accumulator('std_corr')
        adv_corr = Accumulator('adv_corr')
        std_logits = Accumulator('std_logits')
        adv_logits = Accumulator('adv_logits')

        seen_classes = []
        adv_images = Accumulator('adv_images')
        first_batch_images = Accumulator('first_batch_images')

        for batch_idx, (data, target) in enumerate(self.val_loader):
            if self.cuda:
                data, target = data.cuda(non_blocking=True), target.cuda(
                    non_blocking=True)
            with torch.no_grad():
                output = self.model(data)
                std_logits.update(output.cpu())
                loss = F.cross_entropy(output, target, reduction='none').cpu()
                std_loss.update(loss)
                corr = correct(output, target)
                corr = corr.view(corr.size()[0]).cpu()
                std_corr.update(corr)

            rand_target = torch.randint(0,
                                        self.nb_classes - 1,
                                        target.size(),
                                        dtype=target.dtype,
                                        device=target.device)
            rand_target = torch.remainder(target + rand_target + 1,
                                          self.nb_classes)
            data_adv = self.attack(self.model,
                                   data,
                                   rand_target,
                                   avoid_target=False,
                                   scale_eps=False)

            for idx in range(target.size()[0]):
                # Keep one (original, adversarial) image pair for each class seen.
                if target[idx].item() not in seen_classes:
                    seen_classes.append(target[idx].item())
                    orig_image = norm_to_pil_image(data[idx].detach().cpu())
                    adv_image = norm_to_pil_image(data_adv[idx].detach().cpu())
                    adv_images.update(
                        (orig_image, adv_image, target[idx].item()))

            if batch_idx == 0:
                for idx in range(target.size()[0]):
                    orig_image = norm_to_pil_image(data[idx].detach().cpu())
                    adv_image = norm_to_pil_image(data_adv[idx].detach().cpu())
                    first_batch_images.update((orig_image, adv_image))

            with torch.no_grad():
                output_adv = self.model(data_adv)
                adv_logits.update(output_adv.cpu())
                loss = F.cross_entropy(output_adv, target,
                                       reduction='none').cpu()
                adv_loss.update(loss)
                corr = correct(output_adv, target)
                corr = corr.view(corr.size()[0]).cpu()
                adv_corr.update(corr)

            run_output = {
                'std_loss': std_loss.avg,
                'std_acc': std_corr.avg,
                'adv_loss': adv_loss.avg,
                'adv_acc': adv_corr.avg
            }
            print('Batch', batch_idx)
            print(run_output)
            if batch_idx % 20 == 0:
                self.logger.log(run_output, batch_idx)

        summary_dict = {
            'std_acc': std_corr.avg.item(),
            'adv_acc': adv_corr.avg.item()
        }
        self.logger.log_summary(summary_dict)
        for orig_img, adv_img, target in adv_images.vals:
            self.logger.log_image(orig_img, 'orig_{}.png'.format(target))
            self.logger.log_image(adv_img, 'adv_{}.png'.format(target))
        for idx, imgs in enumerate(first_batch_images.vals):
            orig_img, adv_img = imgs
            self.logger.log_image(orig_img, 'init_orig_{}.png'.format(idx))
            self.logger.log_image(adv_img, 'init_adv_{}.png'.format(idx))

        self.logger.end()
        print(std_loss.avg, std_corr.avg, adv_loss.avg, adv_corr.avg)
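
Every variant builds rand_target the same way: an offset drawn from [0, nb_classes - 2] is added to the true label plus one, modulo nb_classes, so the sampled label always stays in range and never equals the ground truth. A small standalone check of that property:

import torch

nb_classes = 10
target = torch.randint(0, nb_classes, (1000,))
offset = torch.randint(0, nb_classes - 1, target.size(), dtype=target.dtype)
rand_target = torch.remainder(target + offset + 1, nb_classes)

# The sampled labels stay within [0, nb_classes) and never collide with target.
assert rand_target.min() >= 0 and rand_target.max() < nb_classes
assert not (rand_target == target).any()
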
Example #4
    def evaluate(self):
        self.model.eval()

        std_loss = Accumulator('std_loss')
        adv_loss = Accumulator('adv_loss')
        std_corr = Accumulator('std_corr')
        adv_corr = Accumulator('adv_corr')
        std_logits = Accumulator('std_logits')
        adv_logits = Accumulator('adv_logits')

        seen_classes = []
        adv_images = Accumulator('adv_images')
        first_batch_images = Accumulator('first_batch_images')

        for batch_idx, (data, target) in enumerate(self.val_loader[0]):
            if self.cuda:
                data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
            with torch.no_grad():
                std_cpy = data.clone().detach()
                output = self.model(std_cpy)
                std_logits.update(output.cpu())
                loss = F.cross_entropy(output, target, reduction='none').cpu()
                std_loss.update(loss)
                corr = correct(output, target)
                corr = corr.view(corr.size()[0]).cpu()
                std_corr.update(corr)
        
            run_output = {'std_loss': std_loss.avg,
                          'std_acc': std_corr.avg}
            print('Standard Batch', batch_idx)
            print(run_output)

        for batch_idx, (data, target) in enumerate(self.val_loader[1]):

            # data is normalized at this point

            if self.cuda:
                data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)

            rand_target = torch.randint(
                0, self.nb_classes - 1, target.size(),
                dtype=target.dtype, device=target.device)
            rand_target = torch.remainder(target + rand_target + 1, self.nb_classes)

            data_adv = self.attack(self.model, data, rand_target,
                                   avoid_target=False, scale_eps=False)

            with torch.no_grad():
                output_adv = self.model(data_adv)
                adv_logits.update(output_adv.cpu())
                loss = F.cross_entropy(output_adv, target, reduction='none').cpu()
                adv_loss.update(loss)
                corr = correct(output_adv, target)
                corr = corr.view(corr.size()[0]).cpu()
                adv_corr.update(corr)

            run_output = {'adv_loss': adv_loss.avg,
                          'adv_acc': adv_corr.avg}
            print('Adv Batch', batch_idx)
            print(run_output)

        summary_dict = {'std_acc': std_corr.avg.item(),
                        'adv_acc': adv_corr.avg.item()}
        print(std_loss.avg, std_corr.avg, adv_loss.avg, adv_corr.avg)
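
All four variants delegate perturbation to self.attack(model, data, target, avoid_target=..., scale_eps=...), which is defined elsewhere in the project. Purely as an illustration of a callable with a compatible signature, a targeted PGD-style sketch follows; the step sizes, iteration count, and the choice to clamp in normalized input space are assumptions, not the project's settings.

import torch
import torch.nn.functional as F

class PGDAttackSketch:
    """Illustrative targeted PGD-style attack; not the project's implementation."""

    def __init__(self, eps=8 / 255, step=2 / 255, nb_iters=10):
        self.eps = eps
        self.step = step
        self.nb_iters = nb_iters

    def __call__(self, model, data, target, avoid_target=False, scale_eps=False):
        # scale_eps is accepted only for signature compatibility and ignored here.
        data_adv = data.clone().detach()
        for _ in range(self.nb_iters):
            data_adv.requires_grad_(True)
            loss = F.cross_entropy(model(data_adv), target)
            grad, = torch.autograd.grad(loss, data_adv)
            with torch.no_grad():
                if avoid_target:
                    # Untargeted: increase the loss w.r.t. the given labels.
                    data_adv = data_adv + self.step * grad.sign()
                else:
                    # Targeted: decrease the loss w.r.t. the (random) target labels.
                    data_adv = data_adv - self.step * grad.sign()
                # Project back into an L-infinity ball around the clean input
                # (done here in normalized space for simplicity).
                data_adv = data + (data_adv - data).clamp(-self.eps, self.eps)
            data_adv = data_adv.detach()
        return data_adv

# An evaluator like the ones above would then set, e.g., self.attack = PGDAttackSketch().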