from collections import defaultdict

import numpy as np
import torch
from captum.attr import IntegratedGradients, Saliency


def explain(method, device, model, data):

    def model_forward(edge_mask, data):
        # model and device are closed over from the enclosing scope, so the
        # same wrapper serves both attribution methods below
        batch = torch.zeros(data.x.shape[0], dtype=torch.long, device=device)
        out = model(data.x, data.edge_index, batch, edge_mask)
        return out

    data = data.to(device)
    target = data.y.to(device)
    input_mask = torch.ones(data.edge_index.shape[1], device=device).requires_grad_(True)
    if method == 'ig':
        ig = IntegratedGradients(model_forward)
        mask = ig.attribute(input_mask, target=target,
                            additional_forward_args=(data,),
                            internal_batch_size=data.edge_index.shape[1])
    elif method == 'saliency':
        saliency = Saliency(model_forward)
        mask = saliency.attribute(input_mask, target=target,
                                  additional_forward_args=(data,))
    else:
        raise ValueError(f'Unknown explanation method: {method}')

    edge_mask = np.abs(mask.cpu().detach().numpy())
    if edge_mask.max() > 0:  # avoid division by zero
        edge_mask = edge_mask / edge_mask.max()

    edge_mask_dict = defaultdict(float)
    for val, u, v in list(zip(edge_mask, *data.edge_index)):
        u, v = u.item(), v.item()
        edge_mask_dict[(u, v)] += val

    return edge_mask_dict
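A hedged usage sketch for the function above, assuming a PyTorch Geometric Data object and a GNN whose forward accepts (x, edge_index, batch, edge_weight); the variable names are illustrative, not from the original source:

# model and data are placeholders for a trained GNN and a PyG Data object
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
edge_mask_dict = explain('saliency', device, model.to(device), data)
# values are normalized gradient magnitudes summed per directed edge (u, v)
top_edges = sorted(edge_mask_dict.items(), key=lambda kv: -kv[1])[:10]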
Example #2
    def test_classification_infidelity_tpl_target(self) -> None:
        model = BasicModel_MultiLayer()
        input = torch.arange(1.0, 13.0).view(4, 3)
        additional_forward_args = (torch.arange(1, 13).view(4, 3).float(), True)
        targets: List = [(0, 1, 1), (0, 1, 1), (1, 1, 1), (0, 1, 1)]
        sa = Saliency(model)

        infid1 = self.infidelity_assert(
            model,
            sa.attribute(input,
                         target=targets,
                         additional_forward_args=additional_forward_args),
            input,
            torch.zeros(4),
            additional_args=additional_forward_args,
            target=targets,
            multi_input=False,
        )

        infid2 = self.infidelity_assert(
            model,
            sa.attribute(input,
                         target=targets,
                         additional_forward_args=additional_forward_args),
            input,
            torch.zeros(4),
            additional_args=additional_forward_args,
            target=targets,
            max_batch_size=2,
            multi_input=False,
        )
        assertArraysAlmostEqual(infid1, infid2, 1e-05)
Example #3
def interpret_model(originalPath='', reconPath='', origOutput='', reconOutput=''):
    # read the images
    print("reading input image")
    original_image = cv.imread(originalPath)
    original_image = cv.cvtColor(original_image, cv.COLOR_BGR2RGB)
    recon_image = cv.imread(reconPath)
    recon_image = cv.cvtColor(recon_image, cv.COLOR_BGR2RGB)
    
    # create torch tensors
    input = Image.open(originalPath)
    input = data_transform(input)
    input = torch.unsqueeze(input, 0)
    input.requires_grad = True
    recon = Image.open(reconPath)
    recon = data_transform(recon)
    recon = torch.unsqueeze(recon, 0)
    recon.requires_grad = True

    # do the classification on the original image
    original_label_float = model(input.cuda(0))
    _, target_label = torch.max(original_label_float, 1)
    recon_label_float = model(recon.cuda(0))
    _, recon_label = torch.max(recon_label_float, 1)
    saliency = Saliency(model)
    grads = saliency.attribute(input.cuda(0), target=target_label)
    grads = np.transpose(grads.squeeze().cpu().detach().numpy(), (1, 2, 0))
    saliencyMap = viz.visualize_image_attr(grads, original_image, method="blended_heat_map", sign="all",
                            show_colorbar=True, title="Overlayed Saliency Map - Original")
    plt.savefig(origOutput + '/saliency_' + ntpath.basename(originalPath))

    grads = saliency.attribute(recon.cuda(0), target=recon_label)
    grads = np.transpose(grads.squeeze().cpu().detach().numpy(), (1, 2, 0))
    saliencyMap = viz.visualize_image_attr(grads, recon_image, method="blended_heat_map", sign="all",
                            show_colorbar=True, title="Overlayed Saliency Map - Recon")
    plt.savefig(reconOutput + '/saliency_' + ntpath.basename(reconPath))
Example #4
def explain_sa(model, x, edge_index, target, include_edges=None):
    saliency = Saliency(model_forward)
    input_mask = torch.ones(edge_index.shape[1], device=device).requires_grad_(True)
    saliency_mask = saliency.attribute(input_mask, target=target,
                                       additional_forward_args=(model, x, edge_index), abs=False)

    edge_mask = saliency_mask.cpu().numpy()
    return edge_mask
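explain_sa depends on a module-level model_forward wrapper and a device variable that are not shown here. A minimal sketch consistent with the call above (the wrapper body is an assumption inferred from the additional_forward_args ordering, not the original project's code):

# Hypothetical wrapper: Captum invokes it as
# model_forward(input_mask, model, x, edge_index).
def model_forward(edge_mask, model, x, edge_index):
    batch = torch.zeros(x.shape[0], dtype=torch.long, device=x.device)
    return model(x, edge_index, batch, edge_mask)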
Example #5
def saliency(model, image, results_dir, file_name):
    sal = Saliency(model)
    # no target: assumes the model outputs a single scalar per sample
    attributions = sal.attribute(image)

    attributions = attributions.squeeze()
    attributions = attributions.detach().numpy()
    attributions = nib.Nifti1Image(attributions, affine=np.eye(4))
    nib.save(attributions, results_dir + file_name + "-HM.nii")
Example #6
def explain_sa_node(model, x, edge_index, target, include_edges=None):
    saliency = Saliency(model_forward_node)
    input_mask = x.clone().to(device).requires_grad_(True)
    saliency_mask = saliency.attribute(input_mask, target=target, additional_forward_args=(model, edge_index),
                                       abs=False)

    node_attr = saliency_mask.cpu().numpy().sum(axis=1)
    edge_mask = node_attr_to_edge(edge_index, node_attr)
    return edge_mask
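explain_sa_node likewise relies on module-level helpers model_forward_node and node_attr_to_edge. Hedged sketches of both (signatures follow the calls above; the bodies are assumptions):

# Hypothetical wrapper: Captum invokes it as
# model_forward_node(input_mask, model, edge_index).
def model_forward_node(x, model, edge_index):
    return model(x, edge_index)

# Hypothetical helper: score each edge by the attributions of its endpoints.
def node_attr_to_edge(edge_index, node_attr):
    edge_mask = np.zeros(edge_index.shape[1])
    edge_mask += node_attr[edge_index[0].cpu().numpy()]
    edge_mask += node_attr[edge_index[1].cpu().numpy()]
    return edge_mask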
Example #7
    def compute_saliency(self, img_path, target):

        # open image
        img, transformed_img, input = self.open_image(img_path)

        gradient_saliency = Saliency(self.model)
        attributions_sa = gradient_saliency.attribute(input, target=target)
        attributions_sa = np.transpose(
            attributions_sa.squeeze().cpu().detach().numpy(), (1, 2, 0))
        return attributions_sa
Example #8
def get_model_attribution(forward_func, test_dataset):
    # probe the model once to infer the number of output classes
    dim_out = forward_func(torch.zeros(size=(1, 771))).shape[1]
    saliency = Saliency(forward_func)

    test_input_tensor = test_dataset.matrix
    test_input_tensor.requires_grad_()

    # one saliency map per output class
    attrs = [saliency.attribute(test_input_tensor, target=i) for i in range(dim_out)]
    return attrs
Example #9
def PT_SaliencyGradient(model,
                        x,
                        y_onehot,
                        multiply_with_input=False,
                        device='cuda:0',
                        **kwargs):
    input = torch.tensor(x).to(device)
    model = model.to(device)
    model.eval()
    saliency = Saliency(model)
    target = torch.tensor(np.argmax(y_onehot, -1)).to(device)
    attribution_map = saliency.attribute(input, target=target, abs=False)
    if multiply_with_input:
        attribution_map *= input
    return attribution_map.detach().cpu().numpy()
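A hedged usage sketch (model, x and y_onehot are placeholders; x is a float input batch and y_onehot the one-hot labels the function argmaxes into class indices):

# returns a numpy array shaped like x; multiply_with_input=True yields
# gradient * input rather than the raw gradient
attribution = PT_SaliencyGradient(model, x, y_onehot,
                                  multiply_with_input=True, device='cuda:0')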
Example #10
    def test_sensitivity_max_multi_dim_batching(self) -> None:
        model = BasicModel_MultiLayer()

        input = torch.arange(1.0, 16.0).view(5, 3)

        additional_forward_args = (torch.ones(5, 3).float(), False)
        targets: List = [0, 0, 0, 0, 0]

        sa = Saliency(model)

        sensitivity1 = self.sensitivity_max_assert(
            sa.attribute,
            input,
            torch.zeros(5),
            n_perturb_samples=1,
            max_examples_per_batch=None,
            perturb_func=_perturb_func,
            target=targets,
            additional_forward_args=additional_forward_args,
        )
        sensitivity2 = self.sensitivity_max_assert(
            sa.attribute,
            input,
            torch.zeros(5),
            n_perturb_samples=10,
            max_examples_per_batch=10,
            perturb_func=_perturb_func,
            target=targets,
            additional_forward_args=additional_forward_args,
        )
        assertTensorAlmostEqual(self, sensitivity1, sensitivity2, 0.0)
Example #11
class saliency_explainer:
    def __init__(self, model, train_data):
        model.eval()
        self.explainer = Saliency(model)

    def get_feature_importance(self, data):
        data.requires_grad = True
        return torch.stack([self.explainer.attribute(data, target=i)
                            for i in range(global_vars.get('n_classes'))], dim=0)
Example #12
def saliency_map(
        batch: dict,
        saliency: Saliency,
        sign: str = 'all',
        method: str = 'blended_heat_map',
        use_pyplot: bool = False,
        fig_axis: tuple = None,
        mix_bg: bool = True,
        alpha_overlay: float = 0.7,
) -> Tuple[Any, torch.Tensor]:
    """
    :param batch: batch to visualise
    :param saliency: Saliency object initialised for trainer_module
    :param sign: sign of gradient attributes to visualise
    :param method: method of visualization to be used
    :param use_pyplot: whether to use pyplot
    :param mix_bg: whether to mix semantic/aerial map with vehicles
    :return: pair of figure and corresponding gradients tensor
    """

    batch['image'].requires_grad = True
    grads = saliency.attribute(batch['image'], abs=False, additional_forward_args=(
        batch['target_positions'], None if 'target_availabilities' not in batch else batch['target_availabilities'],
        False))
    batch['image'].requires_grad = False
    gradsm = grads.squeeze().cpu().detach().numpy()
    if len(gradsm.shape) == 3:
        gradsm = gradsm.reshape(1, *gradsm.shape)
    gradsm = np.transpose(gradsm, (0, 2, 3, 1))
    im = batch['image'].detach().cpu().numpy().transpose(0, 2, 3, 1)
    fig, axis = fig_axis if fig_axis is not None else plt.subplots(2 - mix_bg, im.shape[0], dpi=200, figsize=(6, 6))
    for b in range(im.shape[0]):
        if mix_bg:
            grad_norm = float(np.abs(gradsm[b, ...]).sum())
            viz.visualize_image_attr(
                gradsm[b, ...], im[b, ...], method=method,
                sign=sign, use_pyplot=use_pyplot,
                plt_fig_axis=(fig, axis if im.shape[0] == 1 else axis[b]),
                alpha_overlay=alpha_overlay,
                title=f'l1 grad: {grad_norm:.5f}',
            )
            ttl = (axis if im.shape[0] == 1 else axis[b]).title
            ttl.set_position([.5, 0.95])
            (axis if im.shape[0] == 1 else axis[b]).axis('off')
        else:
            for (s_channel, end_channel), row in [((im.shape[-1] - 3, im.shape[-1]), 0), ((0, im.shape[-1] - 3), 1)]:
                grad_norm = float(np.abs(gradsm[b, :, :, s_channel:end_channel]).sum())
                viz.visualize_image_attr(
                    gradsm[b, :, :, s_channel:end_channel], im[b, :, :, s_channel:end_channel], method=method,
                    sign=sign, use_pyplot=use_pyplot,
                    plt_fig_axis=(fig, axis[row] if im.shape[0] == 1 else axis[row][b]),
                    alpha_overlay=alpha_overlay, title=f'l1 grad: {grad_norm:.5f}',
                )
                ttl = (axis[row] if im.shape[0] == 1 else axis[row][b]).title
                ttl.set_position([.5, 0.95])
                (axis[row] if im.shape[0] == 1 else axis[row][b]).axis('off')
    return fig, grads
Example #13
    def test_basic_sensitivity_max_single(self):
        model = BasicModel2()
        sa = Saliency(model)

        input1 = torch.tensor([3.0])
        input2 = torch.tensor([1.0])
        self.sensitivity_max_assert(
            sa.attribute, (input1, input2), [0.0], perturb_func=default_perturb_func
        )
Example #14
class Explainer():
    def __init__(self, model):
        self.model = model
        self.explain = Saliency(model)

    def get_attribution_map(self, img, target=None):
        if target is None:
            target = torch.argmax(self.model(img), 1)
        attributions = self.explain.attribute(img, target=target, abs=False)
        return attributions
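A hedged usage sketch (model and img are placeholders; img is a batched image tensor the model accepts):

explainer = Explainer(model)
# signed gradients w.r.t. the predicted class, same shape as img
attr = explainer.get_attribution_map(img)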
Example #15
def explain(method, data, target=0):
    input_mask = torch.ones(data.edge_index.shape[1]).requires_grad_(True)
    if method == 'ig':
        ig = IntegratedGradients(model_forward)
        mask = ig.attribute(input_mask,
                            target=target,
                            additional_forward_args=(data, ),
                            internal_batch_size=data.edge_index.shape[1])
    elif method == 'saliency':
        saliency = Saliency(model_forward)
        mask = saliency.attribute(input_mask,
                                  target=target,
                                  additional_forward_args=(data, ))
    else:
        raise ValueError(f'Unknown explanation method: {method}')

    edge_mask = np.abs(mask.detach().numpy())
    if edge_mask.max() > 0:  # avoid division by zero
        edge_mask = edge_mask / edge_mask.max()
    return edge_mask
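As in Example #1, this CPU-only variant assumes a module-level model_forward; a minimal sketch under the same assumptions (model is presumed to be in scope):

# Hypothetical wrapper: Captum invokes it as model_forward(input_mask, data).
def model_forward(edge_mask, data):
    batch = torch.zeros(data.x.shape[0], dtype=torch.long)
    return model(data.x, data.edge_index, batch, edge_mask)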
Example #16
    def test_basic_sensitivity_max_multiple(self) -> None:
        model = BasicModel2()
        sa = Saliency(model)

        input1 = torch.tensor([3.0] * 20)
        input2 = torch.tensor([1.0] * 20)
        self.sensitivity_max_assert(
            sa.attribute, (input1, input2), torch.zeros(20), max_examples_per_batch=21
        )
        self.sensitivity_max_assert(
            sa.attribute, (input1, input2), torch.zeros(20), max_examples_per_batch=60
        )
Example #17
    def compute_saliency_noise_tunnel(self, img_path, target):

        # open image
        img, transformed_img, input = self.open_image(img_path)

        gradient_saliency = Saliency(self.model)
        noise_tunnel = NoiseTunnel(gradient_saliency)
        attributions_sa_nt = noise_tunnel.attribute(
            input,
            n_samples=10,
            nt_type='smoothgrad',
            # internal_batch_size=8,
            target=target)
        attributions_sa_nt = np.transpose(
            attributions_sa_nt.squeeze().cpu().detach().numpy(), (1, 2, 0))
        return attributions_sa_nt
Example #18
class Explainer():
    def __init__(self, model, num_samples=50):
        self.model = model
        self.num_samples = num_samples
        self.explain = Saliency(model)

    def get_attribution_map(self, img, target=None):
        if target is None:
            target = torch.argmax(self.model(img), 1)
        attributions = torch.zeros_like(img)
        # scale the noise to the dynamic range of the image
        img_max = torch.max(img)
        img_min = torch.min(img)
        for i in range(self.num_samples):
            attributions += self.explain.attribute(
                img + torch.randn_like(img) * 0.1 * (img_max - img_min),
                target=target,
                abs=False)
        # note: this is the sum over noisy samples, i.e. num_samples times
        # the SmoothGrad mean
        return attributions
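The noise loop above is hand-rolled SmoothGrad; Captum's NoiseTunnel (used in Examples #17 and #20) performs the same averaging internally. A rough equivalent, assuming a recent Captum where the sample-count argument is named nt_samples, and with stdevs chosen by hand rather than scaled to the image range as above:

nt = NoiseTunnel(Saliency(model))
attributions = nt.attribute(img, nt_type='smoothgrad', nt_samples=50,
                            stdevs=0.1, target=target, abs=False)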
Example #19
    def test_convnet_multi_target(self) -> None:
        r"""
        Another test with Saliency, local sensitivity and more
        complex model with higher dimensional input.
        """
        model = BasicModel_ConvNet_One_Conv()
        sa = Saliency(model)

        input = torch.stack([torch.arange(1, 17).float()] * 20, dim=0).view(20, 1, 4, 4)

        self.sensitivity_max_assert(
            sa.attribute,
            input,
            torch.zeros(20),
            target=torch.tensor([1] * 20),
            n_perturb_samples=10,
            max_examples_per_batch=40,
        )
Example #20
def PT_SmoothGradient(model,
                      x,
                      y_onehot,
                      multiply_with_input=False,
                      device='cuda:0',
                      n_steps=50,
                      stdevs=0.15,
                      **kwargs):
    input = torch.tensor(x).to(device)
    model = model.to(device)
    model.eval()
    saliency = NoiseTunnel(Saliency(model))
    target = torch.tensor(np.argmax(y_onehot, -1)).to(device)
    attribution_map = saliency.attribute(input,
                                         n_samples=n_steps,
                                         target=target,
                                         stdevs=stdevs,
                                         abs=False)

    if multiply_with_input:
        attribution_map *= input
    return attribution_map.detach().cpu().numpy()
Example #21
    def __init__(
            self, model, rasterizer, root: str = 'preprocess', grad_enabled: bool = False, device: str = 'cpu',
            turn_thresh: float = 3., speed_thresh: float = 0.5, k: int = 500, prog=True,
            output_root: str = 'validation', extreme_k: int = 5, visualize=True, seaborn_style: str = 'darkgrid',
    ):
        sns.set_theme(style=seaborn_style)
        self.root = root
        self.model = model.to(device)
        self.device = device
        self.grad_enabled = grad_enabled
        self.files = [f for f in listdir(root) if isfile(join(root, f)) and f.endswith('.npz')]
        self.splits = defaultdict(dict)
        self.k = k
        self.visualize = visualize
        self.turn_thresh = turn_thresh
        self.speed_thresh = speed_thresh
        self.prog = prog
        self.output_root = output_root
        Path(output_root).mkdir(parents=True, exist_ok=True)
        self.extreme_k = extreme_k
        self.rasterizer = rasterizer
        self.saliency = None if not visualize else Saliency(self.model)
        self.occlusion = None if not visualize else Occlusion(self.model)
Example #22
def get_explanation(generated_data, discriminator, prediction, XAItype="shap", cuda=True, trained_data=None,
                    data_type="mnist") -> None:
    """
    This function calculates the explanation for given generated images using the desired xAI system and
    stores the normalized result via set_values.
    :param generated_data: data created by the generator
    :type generated_data: torch.Tensor
    :param discriminator: the discriminator model
    :type discriminator: torch.nn.Module
    :param prediction: tensor of predictions by the discriminator on the generated data
    :type prediction: torch.Tensor
    :param XAItype: the type of xAI system to use. One of ("shap", "lime", "saliency")
    :type XAItype: str
    :param cuda: whether to use gpu
    :type cuda: bool
    :param trained_data: a batch from the dataset
    :type trained_data: torch.Tensor
    :param data_type: the type of the dataset used. One of ("cifar", "mnist", "fmnist")
    :type data_type: str
    :return:
    :rtype:
    """

    # initialize temp values to all 1s
    temp = values_target(size=generated_data.size(), value=1.0, cuda=cuda)

    # mask values with low prediction
    mask = (prediction < 0.5).view(-1)
    indices = (mask.nonzero(as_tuple=False)).detach().cpu().numpy().flatten().tolist()

    data = generated_data[mask, :]

    if len(indices) > 1:
        if XAItype == "saliency":
            for i in range(len(indices)):
                explainer = Saliency(discriminator)
                temp[indices[i], :] = explainer.attribute(data[i, :].detach().unsqueeze(0))

        elif XAItype == "shap":
            for i in range(len(indices)):
                explainer = DeepLiftShap(discriminator)
                temp[indices[i], :] = explainer.attribute(data[i, :].detach().unsqueeze(0), trained_data, target=0)

        elif XAItype == "lime":
            explainer = lime_image.LimeImageExplainer()
            global discriminatorLime
            discriminatorLime = deepcopy(discriminator)
            discriminatorLime.cpu()
            discriminatorLime.eval()
            for i in range(len(indices)):
                if data_type == "cifar":
                    tmp = data[i, :].detach().cpu().numpy()
                    tmp = np.reshape(tmp, (32, 32, 3)).astype(np.double)
                    exp = explainer.explain_instance(tmp, batch_predict_cifar, num_samples=100)
                else:
                    tmp = data[i, :].squeeze().detach().cpu().numpy().astype(np.double)
                    exp = explainer.explain_instance(tmp, batch_predict, num_samples=100)
                _, mask = exp.get_image_and_mask(exp.top_labels[0], positive_only=False, negative_only=False)
                temp[indices[i], :] = torch.tensor(mask.astype(float))
            del discriminatorLime
        else:
            raise Exception("wrong xAI type given")

    if cuda:
        temp = temp.cuda()
    set_values(normalize_vector(temp))
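get_explanation leans on several project helpers (values_target, normalize_vector, set_values, batch_predict) that are not shown. A plausible sketch of normalize_vector alone (an assumption, not the project's code):

def normalize_vector(t):
    # rescale attributions into [0, 1] so they can act as per-pixel weights
    t = t - t.min()
    return t / (t.max() + 1e-12)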
Example #23
        #     "text.usetex": True,
        #     "font.family": "serif",
        "font.serif": ["Times"],
    })

    # In[11]:

    random.seed(0)
    np.random.seed(0)
    th.manual_seed(0)
    th.backends.cudnn.deterministic = True
    th.backends.cudnn.benchmark = False

    plt.figure(figsize=(5.5, 2.5))

    sal = Saliency(rm.tforward)
    w = 6
    i = 1

    while i <= w:
        obs = env.reset()
        action, _states = model.predict(obs, deterministic=True)
        next_obs, reward, done, info = env.step(action)
        if i == w:
            obs = env.reset()
            goal_pos = env.maze.objects.goal.positions[0]
            if goal_pos == [1, 1]:
                agent_pos = [2, 1]
            else:
                agent_pos = [8, 9]
            env.maze.objects.agent.positions[0] = agent_pos
Example #24
     params={
         "sliding_window_shapes": StrConfig(value=""),
         "strides": StrConfig(value=""),
         "perturbations_per_eval": NumberConfig(value=1, limit=(1, 100)),
     },
     post_process={
         "sliding_window_shapes": _str_to_tuple,
         "strides": _str_to_tuple,
         "perturbations_per_eval": int,
     },
 ),
 GuidedBackprop.get_name():
 ConfigParameters(params={}),
 InputXGradient.get_name():
 ConfigParameters(params={}),
 Saliency.get_name():
 ConfigParameters(
     params={"abs": StrEnumConfig(limit=["True", "False"], value="True")},
     post_process={"abs": _str_to_bool}),
 # Won't work as Relu is being used in multiple places (same layer can't be shared)
 # DeepLift.get_name(): ConfigParameters(
 #     params={}
 # ),
 LayerIntegratedGradients.get_name():
 ConfigParameters(
     params={
         "n_steps":
         NumberConfig(value=25, limit=(2, None)),
         "method":
         StrEnumConfig(limit=SUPPORTED_METHODS, value="gausslegendre"),
     },
def main(args):

    train_loader, test_loader = data_generator(args.data_dir,1)

    for m in range(len(models)):

        model_name = "model_{}_NumFeatures_{}".format(models[m],args.NumFeatures)
        model_filename = args.model_dir + 'm_' + model_name + '.pt'
        pretrained_model = torch.load(open(model_filename, "rb"), map_location=device)
        pretrained_model.to(device)



        if(args.GradFlag):
            Grad = Saliency(pretrained_model)
        if(args.IGFlag):
            IG = IntegratedGradients(pretrained_model)
        if(args.DLFlag):
            DL = DeepLift(pretrained_model)
        if(args.GSFlag):
            GS = GradientShap(pretrained_model)
        if(args.DLSFlag):
            DLS = DeepLiftShap(pretrained_model)                 
        if(args.SGFlag):
            Grad_ = Saliency(pretrained_model)
            SG = NoiseTunnel(Grad_)
        if(args.ShapleySamplingFlag):
            SS = ShapleyValueSampling(pretrained_model)
        if(args.FeaturePermutationFlag):
            FP = FeaturePermutation(pretrained_model)
        if(args.FeatureAblationFlag):
            FA = FeatureAblation(pretrained_model)         
        if(args.OcclusionFlag):
            OS = Occlusion(pretrained_model)

        timeMask=np.zeros((args.NumTimeSteps, args.NumFeatures),dtype=int)
        featureMask=np.zeros((args.NumTimeSteps, args.NumFeatures),dtype=int)
        for i in  range (args.NumTimeSteps):
            timeMask[i,:]=i

        for i in  range (args.NumTimeSteps):
            featureMask[:,i]=i

        indexes = [[] for i in range(5,10)]
        for i ,(data, target) in enumerate(test_loader):
            if(target==5 or target==6 or target==7 or target==8 or target==9):
                index=target-5

                if(len(indexes[index])<1):
                    indexes[index].append(i)
        for j, index in enumerate(indexes):
            print(index)
        # indexes = [[21],[17],[84],[9]]

        for j, index in enumerate(indexes):
            print("Getting Saliency for number", j+1)
            for i, (data, target) in enumerate(test_loader):
                if(i in index):
                        
                    labels =  target.to(device)
             
                    input = data.reshape(-1, args.NumTimeSteps, args.NumFeatures).to(device)
                    # torch.autograd.Variable and volatile are deprecated
                    input.requires_grad_(True)

                    baseline_single=torch.Tensor(np.random.random(input.shape)).to(device)
                    baseline_multiple=torch.Tensor(np.random.random((input.shape[0]*5,input.shape[1],input.shape[2]))).to(device)
                    inputMask= np.zeros((input.shape))
                    inputMask[:,:,:]=timeMask
                    inputMask =torch.Tensor(inputMask).to(device)
                    mask_single= torch.Tensor(timeMask).to(device)
                    mask_single=mask_single.reshape(1,args.NumTimeSteps, args.NumFeatures).to(device)

                    Data=data.reshape(args.NumTimeSteps, args.NumFeatures).data.cpu().numpy()
                    
                    target_=int(target.data.cpu().numpy()[0])

                    plotExampleBox(Data,args.Graph_dir+'Sample_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)




                    if(args.GradFlag):
                        attributions = Grad.attribute(input, \
                                                      target=labels)
                        
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)

                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_Grad_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(Grad,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=None)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_Grad_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)



                    if(args.IGFlag):
                        attributions = IG.attribute(input,  \
                                                    baselines=baseline_single, \
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)

                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_IG_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(IG,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_single)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_IG_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)




                    if(args.DLFlag):
                        attributions = DL.attribute(input,  \
                                                    baselines=baseline_single, \
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_DL_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)


                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(DL,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_single)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_DL_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)




                    if(args.GSFlag):

                        attributions = GS.attribute(input,  \
                                                    baselines=baseline_multiple, \
                                                    stdevs=0.09,\
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_GS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)

 
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(GS,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_multiple)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_GS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)


                    if(args.DLSFlag):

                        attributions = DLS.attribute(input,  \
                                                    baselines=baseline_multiple, \
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_DLS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(DLS,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_multiple)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_DLS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)



                    if(args.SGFlag):
                        attributions = SG.attribute(input, \
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_SG_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(SG,input, args.NumFeatures,args.NumTimeSteps, labels)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_SG_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)


                    if(args.ShapleySamplingFlag):
                        attributions = SS.attribute(input, \
                                        baselines=baseline_single, \
                                        target=labels,\
                                        feature_mask=inputMask)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_SVS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(SS,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_single,hasFeatureMask=inputMask)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_SVS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                    # if(args.FeaturePermutationFlag):
                    #     attributions = FP.attribute(input, \
                    #                     target=labels),
                    #                     # perturbations_per_eval= 1,\
                    #                     # feature_mask=mask_single)
                    #     saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                    #     plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_FP',greyScale=True)


                    if(args.FeatureAblationFlag):
                        attributions = FA.attribute(input, \
                                        target=labels)
                                        # perturbations_per_eval= input.shape[0],\
                                        # feature_mask=mask_single)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_FA_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(FA,input, args.NumFeatures,args.NumTimeSteps, labels)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_FA_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)

                    if(args.OcclusionFlag):
                        attributions = OS.attribute(input, \
                                        sliding_window_shapes=(1,int(args.NumFeatures/10)),
                                        target=labels,
                                        baselines=baseline_single)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)

                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_FO_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(OS,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_single,hasSliding_window_shapes= (1,int(args.NumFeatures/10)))
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_FO_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
Example #26
def measure_model(
    model_version,
    dataset,
    out_folder,
    weights_dir,
    device,
    method=METHODS["gradcam"],
    sample_images=50,
    step=1,
):
    invTrans = get_inverse_normalization_transformation()
    data_dir = os.path.join("data")

    if model_version == "resnet18":
        model = create_resnet18_model(num_of_classes=NUM_OF_CLASSES[dataset])
    elif model_version == "resnet50":
        model = create_resnet50_model(num_of_classes=NUM_OF_CLASSES[dataset])
    elif model_version == "densenet":
        model = create_densenet121_model(
            num_of_classes=NUM_OF_CLASSES[dataset])
    else:
        model = create_efficientnetb0_model(
            num_of_classes=NUM_OF_CLASSES[dataset])

    model.load_state_dict(torch.load(weights_dir))

    # print(model)

    model.eval()
    model.to(device)

    test_dataset = CustomDataset(
        dataset=dataset,
        transformer=get_default_transformation(),
        data_type="test",
        root_dir=data_dir,
        step=step,
    )
    data_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=4)

    try:
        image_ids = random.sample(range(0, test_dataset.__len__()),
                                  sample_images)
    except ValueError:
        raise ValueError(
            f"Image sample number ({sample_images}) exceeded dataset size ({test_dataset.__len__()})."
        )

    classes_map = test_dataset.classes_map

    print(f"Measuring {model_version} on {dataset} dataset, with {method}")
    print("-" * 10)
    pbar = tqdm(total=test_dataset.__len__(), desc="Model test completion")
    multipy_by_inputs = False
    if method == METHODS["ig"]:
        attr_method = IntegratedGradients(model)
        nt_samples = 8
        n_perturb_samples = 3
    if method == METHODS["saliency"]:
        attr_method = Saliency(model)
        nt_samples = 8
        n_perturb_samples = 10
    if method == METHODS["gradcam"]:
        if model_version == "efficientnet":
            attr_method = GuidedGradCam(model, model._conv_stem)
        elif model_version == "densenet":
            attr_method = GuidedGradCam(model, model.features.conv0)
        else:
            attr_method = GuidedGradCam(model, model.conv1)
        nt_samples = 8
        n_perturb_samples = 10
    if method == METHODS["deconv"]:
        attr_method = Deconvolution(model)
        nt_samples = 8
        n_perturb_samples = 10
    if method == METHODS["gradshap"]:
        attr_method = GradientShap(model)
        nt_samples = 8
        if model_version == "efficientnet":
            n_perturb_samples = 3
        elif model_version == "densenet":
            n_perturb_samples = 2
        else:
            n_perturb_samples = 10
    if method == METHODS["gbp"]:
        attr_method = GuidedBackprop(model)
        nt_samples = 8
        n_perturb_samples = 10
    if method == "lime":
        attr_method = Lime(model)
        nt_samples = 8
        n_perturb_samples = 10
        feature_mask = torch.tensor(lime_mask).to(device)
        multipy_by_inputs = True
    if method == METHODS['ig']:
        nt = attr_method
    else:
        nt = NoiseTunnel(attr_method)
    scores = []

    @infidelity_perturb_func_decorator(multipy_by_inputs=multipy_by_inputs)
    def perturb_fn(inputs):
        noise = torch.tensor(np.random.normal(0, 0.003, inputs.shape)).float()
        noise = noise.to(device)
        return inputs - noise

    for input, label in data_loader:
        pbar.update(1)
        inv_input = invTrans(input)
        input = input.to(device)
        input.requires_grad = True
        output = model(input)
        output = F.softmax(output, dim=1)
        prediction_score, pred_label_idx = torch.topk(output, 1)
        prediction_score = prediction_score.cpu().detach().numpy()[0][0]
        pred_label_idx.squeeze_()

        if method == METHODS['gradshap']:
            baseline = torch.randn(input.shape)
            baseline = baseline.to(device)

        if method == "lime":
            attributions = attr_method.attribute(input, target=1, n_samples=50)
        elif method == METHODS['ig']:
            attributions = nt.attribute(
                input,
                target=pred_label_idx,
                n_steps=25,
            )
        elif method == METHODS['gradshap']:
            attributions = nt.attribute(input,
                                        target=pred_label_idx,
                                        baselines=baseline)
        else:
            attributions = nt.attribute(
                input,
                nt_type="smoothgrad",
                nt_samples=nt_samples,
                target=pred_label_idx,
            )

        infid = infidelity(model,
                           perturb_fn,
                           input,
                           attributions,
                           target=pred_label_idx)

        if method == "lime":
            sens = sensitivity_max(
                attr_method.attribute,
                input,
                target=pred_label_idx,
                n_perturb_samples=1,
                n_samples=200,
                feature_mask=feature_mask,
            )
        elif method == METHODS['ig']:
            sens = sensitivity_max(
                nt.attribute,
                input,
                target=pred_label_idx,
                n_perturb_samples=n_perturb_samples,
                n_steps=25,
            )
        elif method == METHODS['gradshap']:
            sens = sensitivity_max(nt.attribute,
                                   input,
                                   target=pred_label_idx,
                                   n_perturb_samples=n_perturb_samples,
                                   baselines=baseline)
        else:
            sens = sensitivity_max(
                nt.attribute,
                input,
                target=pred_label_idx,
                n_perturb_samples=n_perturb_samples,
            )
        inf_value = infid.cpu().detach().numpy()[0]
        sens_value = sens.cpu().detach().numpy()[0]
        if pbar.n in image_ids:
            attr_data = attributions.squeeze().cpu().detach().numpy()
            fig, ax = viz.visualize_image_attr_multiple(
                np.transpose(attr_data, (1, 2, 0)),
                np.transpose(inv_input.squeeze().cpu().detach().numpy(),
                             (1, 2, 0)),
                ["original_image", "heat_map"],
                ["all", "positive"],
                titles=["original_image", "heat_map"],
                cmap=default_cmap,
                show_colorbar=True,
                use_pyplot=False,
                fig_size=(8, 6),
            )
            ax[0].set_xlabel(
                f"Infidelity: {'{0:.6f}'.format(inf_value)}\n Sensitivity: {'{0:.6f}'.format(sens_value)}"
            )
            fig.suptitle(
                f"True: {classes_map[str(label.numpy()[0])][0]}, Pred: {classes_map[str(pred_label_idx.item())][0]}\nScore: {'{0:.4f}'.format(prediction_score)}",
                fontsize=16,
            )
            fig.savefig(
                os.path.join(
                    out_folder,
                    f"{str(pbar.n)}-{classes_map[str(label.numpy()[0])][0]}-{classes_map[str(pred_label_idx.item())][0]}.png",
                ))
            plt.close(fig)
            # if pbar.n > 25:
            #     break

        scores.append([inf_value, sens_value])
    pbar.close()

    np.savetxt(
        os.path.join(out_folder, f"{model_version}-{dataset}-{method}.csv"),
        np.array(scores),
        delimiter=",",
        header="infidelity,sensitivity",
    )

    print(f"Artifacts stored at {out_folder}")
def run(arch=None, img=None, out=None, target=None, **params):

    key = params['extractor']
    #key='ScoreCAM'
    if key == 'GradCAM'.lower():
        cam = GradCAM(arch, conv_layer)
    elif key == 'CAM'.lower():
        cam = CAM(arch, conv_layer, fc_layer)
    elif key == 'XGradCAM'.lower():
        cam = XGradCAM(arch, conv_layer)
    elif key == 'GradCAM++'.lower():
        cam = GradCAMpp(arch, conv_layer)
    elif key == 'SmoothGradCAM++'.lower():
        cam = SmoothGradCAMpp(arch, conv_layer, input_layer)
    elif key == 'ScoreCAM'.lower():
        cam = ScoreCAM(arch, conv_layer, input_layer)
    elif key == 'IntersectionSamCAM'.lower():
        cam = IntersectionSamCAM(arch, conv_layer, input_layer)
    elif key == 'SamCAM'.lower():
        cam = SamCAM(arch, conv_layer)
    elif key == 'SamCAM2'.lower():
        cam = SamCAM2(arch, conv_layer, p=0.25)
    elif key == 'SamCAM3'.lower():
        cam = SamCAM3(arch, conv_layer, p=1.0)
    elif key == 'SamCAM4'.lower():
        cam = SamCAM4(arch, conv_layer, input_layer)
    elif key == 'DropCAM'.lower():
        cam = DropCAM(arch, conv_layer, input_layer)
    elif key == 'SSCAM'.lower():
        cam = SSCAM(arch, conv_layer, input_layer, num_samples=10)
    elif key == 'ISSCAM'.lower():
        cam = ISSCAM(arch, conv_layer, input_layer)
    elif 'IntegratedGradients'.lower() in key or key == 'IGDown'.lower():
        ig = IntegratedGradients(arch.arch)
        cam = ig.attribute
    elif key == 'Saliency'.lower() or key == 'SaliencyDown'.lower():
        saliency = Saliency(arch.arch)
        cam = saliency.attribute
    elif key == "FakeCAM".lower():
        cam = None
    elif key == 'Occlusion'.lower():
        occ = Occlusion(arch.arch)
        cam = occ.attribute

    model = arch

    if isinstance(img, Image.Image):
        inp = apply_transform(img).cuda()
    else:
        inp = img
    out = F.softmax(model.arch(inp), dim=1)

    if cam is not None:
        if 'GradCAM'.lower() in key:
            salmap = cam(inp, target=target, scores=out)
        elif 'Occlusion'.lower() in key:
            salmap = cam(inp,
                         sliding_window_shapes=(3, 45, 45),
                         strides=(3, 9, 9),
                         target=target)
            salmap = torch.abs(salmap.sum(dim=1))
        else:
            salmap = cam(inp, target=target)
    else:
        salmap = torch.ones((inp.shape[-1], inp.shape[-2]))
        salmap[0, 0] = 0

    # remove 50% less important pixel
    #salmap.view(1,-1)[0,(1-salmap).view(1,-1).topk(int((salmap.shape[-1]**2)/2))[1]]=0.

    salmap = salmap.to(torch.float32)
    if 'IntegratedGradients'.lower() in key or key == 'Saliency'.lower():
        salmap = torch.abs(salmap.sum(dim=1))
        salmap = (salmap - salmap.min()) / (salmap.max() - salmap.min())

        salmap_previous = salmap
        if '20' in key:
            sigma = 20
        elif '5' in key:
            sigma = 5
        else:
            sigma = 3

        # torchvision gaussian
        '''
        trans=transforms.Compose([
            transforms.GaussianBlur(3,sigma=sigma)
        ])
        salmap=trans(salmap)
        '''

        #scipy gaussian
        #'''
        from scipy.ndimage import gaussian_filter as GB
        salmap = torch.from_numpy(GB(salmap.cpu().detach().numpy(), sigma))
        #'''

        salmap = torch.abs(salmap)
        salmap = (salmap - salmap.min()) / (salmap.max() - salmap.min())
        salmap = salmap.squeeze(0)
    elif key == 'IGDown'.lower() or key == 'SaliencyDown'.lower():
        salmap = torch.abs(salmap.sum(dim=1))
        salmap = (salmap - salmap.min()) / (salmap.max() - salmap.min())
        salmap_previous = salmap
        salmap = F.interpolate(salmap.unsqueeze(0), (7, 7),
                               mode='bilinear',
                               align_corners=False)
        salmap = F.interpolate(salmap,
                               salmap_previous.shape[-2:],
                               mode='bilinear',
                               align_corners=False)
        salmap = torch.abs(salmap.sum(dim=1))
        salmap = (salmap - salmap.min()) / (salmap.max() - salmap.min())

    salmap = torch.abs(salmap)
    salmap = (salmap - salmap.min()) / (salmap.max() - salmap.min())
    return salmap
Example #28
def measure_filter_model(
    model_version,
    dataset,
    out_folder,
    weights_dir,
    device,
    method=METHODS["gradcam"],
    sample_images=50,
    step=1,
    use_infidelity=False,
    use_sensitivity=False,
    render=False,
    ids=None,
):
    invTrans = get_inverse_normalization_transformation()
    data_dir = os.path.join("data")

    if model_version == "resnet18":
        model = create_resnet18_model(num_of_classes=NUM_OF_CLASSES[dataset])
    elif model_version == "resnet50":
        model = create_resnet50_model(num_of_classes=NUM_OF_CLASSES[dataset])
    elif model_version == "densenet":
        model = create_densenet121_model(
            num_of_classes=NUM_OF_CLASSES[dataset])
    else:
        model = create_efficientnetb0_model(
            num_of_classes=NUM_OF_CLASSES[dataset])

    model.load_state_dict(torch.load(weights_dir))

    # print(model)

    model.eval()
    model.to(device)

    test_dataset = CustomDataset(
        dataset=dataset,
        transformer=get_default_transformation(),
        data_type="test",
        root_dir=data_dir,
        step=step,
        add_filters=True,
        ids=ids,
    )
    data_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=4)

    try:
        image_ids = random.sample(range(0, test_dataset.__len__()),
                                  test_dataset.__len__())
    except ValueError:
        raise ValueError(
            f"Image sample number ({test_dataset.__len__()}) exceeded dataset size ({test_dataset.__len__()})."
        )

    classes_map = test_dataset.classes_map

    print(f"Measuring {model_version} on {dataset} dataset, with {method}")
    print("-" * 10)
    pbar = tqdm(total=test_dataset.__len__(), desc="Model test completion")
    multipy_by_inputs = False
    if method == METHODS["ig"]:
        attr_method = IntegratedGradients(model)
        nt_samples = 1
        n_perturb_samples = 1
    if method == METHODS["saliency"]:
        attr_method = Saliency(model)
        nt_samples = 8
        n_perturb_samples = 2
    if method == METHODS["gradcam"]:
        if model_version == "efficientnet":
            attr_method = GuidedGradCam(model, model._conv_stem)
        elif model_version == "densenet":
            attr_method = GuidedGradCam(model, model.features.conv0)
        else:
            attr_method = GuidedGradCam(model, model.conv1)
        nt_samples = 8
        n_perturb_samples = 2
    if method == METHODS["deconv"]:
        attr_method = Deconvolution(model)
        nt_samples = 8
        n_perturb_samples = 2
    if method == METHODS["gradshap"]:
        attr_method = GradientShap(model)
        nt_samples = 8
        n_perturb_samples = 2
    if method == METHODS["gbp"]:
        attr_method = GuidedBackprop(model)
        nt_samples = 8
        n_perturb_samples = 2
    if method == "lime":
        attr_method = Lime(model)
        nt_samples = 8
        n_perturb_samples = 2
        feature_mask = torch.tensor(lime_mask).to(device)
        multipy_by_inputs = True
    if method == METHODS["ig"]:
        nt = attr_method
    else:
        nt = NoiseTunnel(attr_method)
    scores = []

    @infidelity_perturb_func_decorator(multipy_by_inputs=multipy_by_inputs)
    def perturb_fn(inputs):
        noise = torch.tensor(np.random.normal(0, 0.003, inputs.shape)).float()
        noise = noise.to(device)
        return inputs - noise

    OUR_FILTERS = [
        "none",
        "fx_freaky_details 2,10,1,11,0,32,0",
        "normalize_local 8,10",
        "fx_boost_chroma 90,0,0",
        "fx_mighty_details 25,1,25,1,11,0",
        "sharpen 300",
    ]
    idx = 0
    filter_count = 0
    filter_attrs = {filter_name: [] for filter_name in OUR_FILTERS}
    predicted_main_class = 0
    for input, label in data_loader:
        pbar.update(1)
        inv_input = invTrans(input)
        input = input.to(device)
        input.requires_grad = True
        output = model(input)
        output = F.softmax(output, dim=1)
        prediction_score, pred_label_idx = torch.topk(output, 1)
        prediction_score = prediction_score.cpu().detach().numpy()[0][0]
        pred_label_idx.squeeze_()
        if OUR_FILTERS[filter_count] == 'none':
            predicted_main_class = pred_label_idx.item()

        if method == METHODS["gradshap"]:
            baseline = torch.randn(input.shape)
            baseline = baseline.to(device)

        if method == "lime":
            attributions = attr_method.attribute(input, target=1, n_samples=50)
        elif method == METHODS["ig"]:
            attributions = nt.attribute(
                input,
                target=predicted_main_class,
                n_steps=25,
            )
        elif method == METHODS["gradshap"]:
            attributions = nt.attribute(input,
                                        target=predicted_main_class,
                                        baselines=baseline)
        else:
            attributions = nt.attribute(
                input,
                nt_type="smoothgrad",
                nt_samples=nt_samples,
                target=predicted_main_class,
            )

        if use_infidelity:
            infid = infidelity(model,
                               perturb_fn,
                               input,
                               attributions,
                               target=predicted_main_class)
            inf_value = infid.cpu().detach().numpy()[0]
        else:
            inf_value = 0

        if use_sensitivity:
            if method == "lime":
                sens = sensitivity_max(
                    attr_method.attribute,
                    input,
                    target=predicted_main_class,
                    n_perturb_samples=1,
                    n_samples=200,
                    feature_mask=feature_mask,
                )
            elif method == METHODS["ig"]:
                sens = sensitivity_max(
                    nt.attribute,
                    input,
                    target=predicted_main_class,
                    n_perturb_samples=n_perturb_samples,
                    n_steps=25,
                )
            elif method == METHODS["gradshap"]:
                sens = sensitivity_max(
                    nt.attribute,
                    input,
                    target=predicted_main_class,
                    n_perturb_samples=n_perturb_samples,
                    baselines=baseline,
                )
            else:
                sens = sensitivity_max(
                    nt.attribute,
                    input,
                    target=predicted_main_class,
                    n_perturb_samples=n_perturb_samples,
                )
            sens_value = sens.cpu().detach().numpy()[0]
        else:
            sens_value = 0

        # filter_name = test_dataset.data.iloc[pbar.n]["filter"].split(" ")[0]
        attr_data = attributions.squeeze().cpu().detach().numpy()
        if render:
            fig, ax = viz.visualize_image_attr_multiple(
                np.transpose(attr_data, (1, 2, 0)),
                np.transpose(inv_input.squeeze().cpu().detach().numpy(),
                             (1, 2, 0)),
                ["original_image", "heat_map"],
                ["all", "positive"],
                titles=["original_image", "heat_map"],
                cmap=default_cmap,
                show_colorbar=True,
                use_pyplot=False,
                fig_size=(8, 6),
            )
            if use_sensitivity or use_infidelity:
                ax[0].set_xlabel(
                    f"Infidelity: {'{0:.6f}'.format(inf_value)}\n Sensitivity: {'{0:.6f}'.format(sens_value)}"
                )
            fig.suptitle(
                f"True: {classes_map[str(label.numpy()[0])][0]}, Pred: {classes_map[str(pred_label_idx.item())][0]}\nScore: {'{0:.4f}'.format(prediction_score)}",
                fontsize=16,
            )
            fig.savefig(
                os.path.join(
                    out_folder,
                    f"{str(idx)}-{str(filter_count)}-{str(label.numpy()[0])}-{str(OUR_FILTERS[filter_count])}-{classes_map[str(label.numpy()[0])][0]}-{classes_map[str(pred_label_idx.item())][0]}.png",
                ))
            plt.close(fig)
        # if pbar.n > 25:
        #     break
        score_for_true_label = output.cpu().detach().numpy()[0][predicted_main_class]

        filter_attrs[OUR_FILTERS[filter_count]] = [
            np.moveaxis(attr_data, 0, -1),
            "{0:.8f}".format(score_for_true_label),
        ]

        data_range_for_current_set = MAX_ATT_VALUES[model_version][method][
            dataset]
        filter_count += 1
        if filter_count >= len(OUR_FILTERS):
            ssims = []
            for rot in OUR_FILTERS:
                ssims.append("{0:.8f}".format(
                    ssim(
                        filter_attrs["none"][0],
                        filter_attrs[rot][0],
                        win_size=11,
                        data_range=data_range_for_current_set,
                        multichannel=True,
                    )))
                ssims.append(filter_attrs[rot][1])

            scores.append(ssims)
            filter_count = 0
            predicted_main_class = 0
            idx += 1

    pbar.close()

    indexes = []

    for filter_name in OUR_FILTERS:
        indexes.append(str(filter_name) + "-ssim")
        indexes.append(str(filter_name) + "-score")
    np.savetxt(
        os.path.join(
            out_folder,
            f"{model_version}-{dataset}-{method}-ssim-with-range.csv"),
        np.array(scores),
        delimiter=";",
        fmt="%s",
        header=";".join([str(rot) for rot in indexes]),
    )

    print(f"Artifacts stored at {out_folder}")
Example #29
grads_sal = list()
grads_igrad = list()
grads_occ = list()
grads_gshap = list()
grads_dlift = list()
signal = list()

for idx in range(36):
    x = signals[idx].float().unsqueeze(0)
    x.requires_grad = True

    model.eval()

    # Saliency
    saliency = Saliency(model)
    grads = saliency.attribute(x, target=labels[idx].item())
    grads_sal.append(grads.squeeze().cpu().detach().numpy())

    # Occlusion
    occlusion = Occlusion(model)
    grads = occlusion.attribute(x, strides=(1, int(FS / 100)),
                                target=labels[idx].item(),
                                sliding_window_shapes=(1, int(FS / 10)),
                                baselines=0)
    grads_occ.append(grads.squeeze().cpu().detach().numpy())

    # Integrated Gradients
    integrated_gradients = IntegratedGradients(model)
    grads = integrated_gradients.attribute(x, target=labels[idx].item(), n_steps=1000)
    grads_igrad.append(grads.squeeze().cpu().detach().numpy())

    # Gradient SHAP
    gshap = GradientShap(model)
Example #30
    def extract_Sa(self, X_test):
        saliency = Saliency(self.net)
        start = time.time()
        # no target: assumes self.net returns a single scalar per sample
        saliency_attr_test = saliency.attribute(X_test.to(self.device))
        print("train time", time.time() - start)
        return saliency_attr_test.detach().cpu().numpy()