# Example 1
def attribution_map(func: Callable[[torch.Tensor], torch.Tensor],
                    attribution_type: Type,
                    with_noise: bool,
                    probe_data: torch.Tensor,
                    norm_square: bool = False,
                    **attribution_kwargs) -> torch.Tensor:
    """
    Calculate attribution map with given attribution type(algorithm).

    Args:
        func: forward function mapping an input tensor to an output tensor
        attribution_type: attribution algorithm class to instantiate,
            e.g. IntegratedGradients, InputXGradient, ...
        with_noise: whether to wrap the attribution method in a NoiseTunnel
        probe_data: input data passed to the attribution method
        norm_square: forwarded to ``with_norm``; presumably squares the norm
            of ``func``'s output before attribution — confirm in ``with_norm``
        attribution_kwargs: other kwargs forwarded to ``attribute()``
    Return: attribution map, detached from the autograd graph
    """
    # Attribution is computed on `with_norm(func, ...)` rather than on `func`
    # directly (`with_norm` is defined elsewhere in this project).
    attribution: Attribution = attribution_type(
        lambda x: with_norm(func, x, norm_square))
    if with_noise:
        attribution = NoiseTunnel(attribution)
    # target=None: attribute w.r.t. the (scalar) normed output, not a class.
    attr_map = attribution.attribute(inputs=probe_data,
                                     target=None,
                                     **attribution_kwargs)
    return attr_map.detach()
# Example 2
 def extract_IGNT(self, X_test):
     """Attribute X_test using IntegratedGradients wrapped in a NoiseTunnel.

     Prints the elapsed attribution time and returns the attributions as a
     CPU numpy array.
     """
     tunnel = NoiseTunnel(IntegratedGradients(self.net))
     t0 = time.time()
     attributions = tunnel.attribute(X_test.to(self.device))
     print("temps train", time.time() - t0)
     return attributions.detach().cpu().numpy()
# Example 3
# File: captum.py — Project: C-Advait/fastai-1
 def _get_attributions(self, enc_data, metric, n_steps, nt_type,
                       baseline_type, strides, sliding_window_shapes):
     """Compute attributions for a batch with the requested metric.

     Args:
         enc_data: (inputs, targets) pair of encoded batch tensors.
         metric: one of 'IG' (Integrated Gradients), 'NT' (Noise Tunnel
             around IG) or 'Occl' (Occlusion); any other value returns None.
         n_steps: NOTE(review): currently ignored — the IG branch hardcodes
             n_steps=200; confirm whether it should be forwarded.
         nt_type: noise-tunnel type, used only when metric == 'NT'.
         baseline_type: forwarded to self.get_baseline_img.
         strides, sliding_window_shapes: used only when metric == 'Occl'.

     Returns:
         The attribution tensor for the selected metric.
     """
     # Get Baseline
     baseline = self.get_baseline_img(enc_data[0], baseline_type)
     # Attribution objects are cached on self so repeated calls reuse them.
     if metric == 'IG':
         if not hasattr(self, '_integrated_gradients'):
             self._integrated_gradients = IntegratedGradients(self.model)
         return self._integrated_gradients.attribute(enc_data[0],
                                                     baseline,
                                                     target=enc_data[1],
                                                     n_steps=200)
     elif metric == 'NT':
         if not hasattr(self, '_integrated_gradients'):
             self._integrated_gradients = IntegratedGradients(self.model)
         if not hasattr(self, '_noise_tunnel'):
             self._noise_tunnel = NoiseTunnel(self._integrated_gradients)
         return self._noise_tunnel.attribute(enc_data[0].to(self.dls.device),
                                             n_samples=1,
                                             nt_type=nt_type,
                                             target=enc_data[1])
     elif metric == 'Occl':
         if not hasattr(self, '_occlusion'):
             self._occlusion = Occlusion(self.model)
         return self._occlusion.attribute(
             enc_data[0].to(self.dls.device),
             strides=strides,
             target=enc_data[1],
             sliding_window_shapes=sliding_window_shapes,
             baselines=baseline)
# Example 4
# File: captum.py — Project: hal-314/fastai2
class NoiseTunnelCallback(Callback):
    "Captum Callback for Resnet Interpretation"
    def __init__(self):
        pass

    def after_fit(self):
        # Build the attribution pipeline once training is done; self.model
        # is provided by the fastai Learner this callback is attached to.
        self.integrated_gradients = IntegratedGradients(self.model)
        self._noise_tunnel= NoiseTunnel(self.integrated_gradients)

    def visualize(self,inp_data,cmap_name='custom blue',colors=None,N=256,methods=['original_image','heat_map'],signs=["all", "positive"],nt_type='smoothgrad'):
        # NOTE(review): mutable defaults (methods, signs) are shared across
        # calls; safe only if never mutated — confirm.
        dl = self.dls.test_dl(L(inp_data),with_labels=True, bs=1)

        self.enc_inp,self.enc_preds= dl.one_batch()
        dec_data=dl.decode((self.enc_inp,self.enc_preds))
        self.dec_img,self.dec_pred=dec_data[0][0],dec_data[1][0]

        self.colors = [(0, '#ffffff'),(0.25, '#000000'),(1, '#000000')] if colors is None else colors
        # NOTE(review): uses self.dl.device although the test loader built
        # above is the local `dl` — presumably `dl.device` or
        # `self.dls.device` was intended; confirm against the fastai API.
        attributions_ig_nt = self._noise_tunnel.attribute(self.enc_inp.to(self.dl.device), n_samples=1, nt_type=nt_type, target=self.enc_preds)
        default_cmap = LinearSegmentedColormap.from_list(cmap_name,
                                                 self.colors, N=N)
        # Show original image and smoothed IG heat map side by side.
        _ = viz.visualize_image_attr_multiple(np.transpose(attributions_ig_nt.squeeze().cpu().detach().numpy(), (1,2,0)),
                                              np.transpose(self.dec_img.numpy(), (1,2,0)),
                                              methods,signs,
                                              cmap=default_cmap,
                                              show_colorbar=True,titles=[f'Original Image - ({self.dec_pred})', 'Noise Tunnel'])
# Example 5
    def compute_saliency_noise_tunnel(self, img_path, target):
        """SmoothGrad saliency attributions for the image at img_path.

        Returns an HxWxC numpy array of attributions for class `target`.
        """
        img, transformed_img, inp = self.open_image(img_path)

        tunnel = NoiseTunnel(Saliency(self.model))
        raw_attr = tunnel.attribute(inp,
                                    n_samples=10,
                                    nt_type='smoothgrad',
                                    target=target)
        # CHW tensor -> HWC numpy for visualization
        return np.transpose(raw_attr.squeeze().cpu().detach().numpy(),
                            (1, 2, 0))
# Example 6
    def _attr_noise_tunnel(self, input, pred):
        """Visualize smoothgrad-squared IG attributions of `input` for class `pred`."""
        tunnel = NoiseTunnel(IntegratedGradients(self.model))
        cmap = LinearSegmentedColormap.from_list(
            'custom blue',
            [(0, '#ffffff'), (0.25, '#000000'), (1, '#000000')],
            N=256)
        attributions = tunnel.attribute(input, n_samples=10,
                                        nt_type='smoothgrad_sq', target=pred)

        # Original image next to the positive-attribution heat map.
        _ = viz.visualize_image_attr_multiple(
            np.transpose(attributions.squeeze().cpu().detach().numpy(), (1, 2, 0)),
            np.transpose(input.squeeze().cpu().detach().numpy(), (1, 2, 0)),
            ["original_image", "heat_map"],
            ["all", "positive"],
            cmap=cmap,
            show_colorbar=True)
# Example 7
    def compute_integrated_gradients_noise_tunnel(self, img_path, target):
        """IG + SmoothGrad attributions for the image at img_path.

        Returns an HxWxC numpy array of attributions for class `target`.
        """
        img, transformed_img, inp = self.open_image(img_path)

        tunnel = NoiseTunnel(IntegratedGradients(self.model))
        raw_attr = tunnel.attribute(inp,
                                    n_samples=10,
                                    nt_type='smoothgrad',
                                    internal_batch_size=8,
                                    n_steps=200,
                                    target=target)
        # CHW tensor -> HWC numpy for visualization
        return np.transpose(raw_attr.squeeze().cpu().detach().numpy(),
                            (1, 2, 0))
# Example 8
def PT_SmoothGradient(model,
                      x,
                      y_onthot,
                      multiply_with_input=False,
                      device='cuda:0',
                      n_steps=50,
                      stdevs=0.15,
                      **kwargs):
    """SmoothGrad saliency attributions for inputs ``x``.

    Args:
        model: pytorch module to explain; moved to ``device`` and set to eval.
        x: input data; converted to a tensor on ``device``.
        y_onthot: one-hot labels; argmax over the last axis yields the target
            class indices. (Parameter name kept for backward compatibility.)
        multiply_with_input: if True, multiply attributions by the input
            (gradient*input variant).
        device: torch device string.
        n_steps: NOTE — despite its name, this is the number of noisy samples
            drawn by the NoiseTunnel (forwarded as ``n_samples``).
        stdevs: std-dev of the Gaussian noise added by the NoiseTunnel.
        kwargs: accepted for interface compatibility; currently unused.

    Returns:
        numpy array of signed attributions (abs=False) shaped like ``x``.
    """
    # Renamed local from `input` so the builtin is not shadowed.
    inputs = torch.tensor(x).to(device)
    model = model.to(device)
    model.eval()
    saliency = NoiseTunnel(Saliency(model))
    target = torch.tensor(np.argmax(y_onthot, -1)).to(device)
    attribution_map = saliency.attribute(inputs,
                                         n_samples=n_steps,
                                         target=target,
                                         stdevs=stdevs,
                                         abs=False)

    if multiply_with_input:
        attribution_map *= inputs
    return attribution_map.detach().cpu().numpy()
def main(args):
    """Compute and plot saliency maps for sampled MNIST-style test digits.

    For each pretrained model, picks one test sample per class 5-9, then
    runs every attribution method enabled by the args.*Flag switches
    (optionally followed by two-step rescaling, TSR) and writes heat-map
    plots into args.Graph_dir.

    NOTE(review): relies on module-level names not visible in this chunk:
    ``models``, ``device``, ``data_generator``, ``Helper``,
    ``plotExampleBox``, ``getTwoStepRescaling`` — confirm they exist.
    """

    train_loader, test_loader = data_generator(args.data_dir,1)

    for m in range(len(models)):

        model_name = "model_{}_NumFeatures_{}".format(models[m],args.NumFeatures)
        model_filename = args.model_dir + 'm_' + model_name + '.pt'
        # NOTE(review): the file handle from open() is never closed, and
        # torch.load on a pickled model trusts the file's contents.
        pretrained_model = torch.load(open(model_filename, "rb"),map_location=device)
        pretrained_model.to(device)



        # Instantiate only the attribution objects whose flag is set; the
        # matching blocks below assume the corresponding flag is still set.
        if(args.GradFlag):
            Grad = Saliency(pretrained_model)
        if(args.IGFlag):
            IG = IntegratedGradients(pretrained_model)
        if(args.DLFlag):
            DL = DeepLift(pretrained_model)
        if(args.GSFlag):
            GS = GradientShap(pretrained_model)
        if(args.DLSFlag):
            DLS = DeepLiftShap(pretrained_model)
        if(args.SGFlag):
            Grad_ = Saliency(pretrained_model)
            SG = NoiseTunnel(Grad_)
        if(args.ShapleySamplingFlag):
            SS = ShapleyValueSampling(pretrained_model)
        # NOTE(review): guard reuses GSFlag; FeaturePermutationFlag was
        # probably intended here — confirm. (FP is unused further down.)
        if(args.GSFlag):
            FP = FeaturePermutation(pretrained_model)
        if(args.FeatureAblationFlag):
            FA = FeatureAblation(pretrained_model)
        if(args.OcclusionFlag):
            OS = Occlusion(pretrained_model)

        # timeMask groups features by time step; featureMask by feature.
        timeMask=np.zeros((args.NumTimeSteps, args.NumFeatures),dtype=int)
        featureMask=np.zeros((args.NumTimeSteps, args.NumFeatures),dtype=int)
        for i in  range (args.NumTimeSteps):
            timeMask[i,:]=i

        # NOTE(review): loop bound is NumTimeSteps but it fills columns
        # (NumFeatures); only correct when the two are equal — confirm.
        # featureMask appears unused below.
        for i in  range (args.NumTimeSteps):
            featureMask[:,i]=i

        # Collect the index of the first test sample for each class 5..9.
        indexes = [[] for i in range(5,10)]
        for i ,(data, target) in enumerate(test_loader):
            if(target==5 or target==6 or target==7 or target==8 or target==9):
                index=target-5

                if(len(indexes[index])<1):
                    indexes[index].append(i)
        for j, index in enumerate(indexes):
            print(index)
        # indexes = [[21],[17],[84],[9]]

        for j, index in enumerate(indexes):
            print("Getting Saliency for number", j+1)
            for i, (data, target) in enumerate(test_loader):
                if(i in index):

                    labels =  target.to(device)

                    input = data.reshape(-1, args.NumTimeSteps, args.NumFeatures).to(device)
                    # NOTE(review): Variable and `volatile` are deprecated
                    # in modern PyTorch; requires_grad=True is what matters.
                    input = Variable(input,  volatile=False, requires_grad=True)

                    # Random baselines: one per input, and 5x for the
                    # multi-baseline methods (GradientShap, DeepLiftShap).
                    baseline_single=torch.Tensor(np.random.random(input.shape)).to(device)
                    baseline_multiple=torch.Tensor(np.random.random((input.shape[0]*5,input.shape[1],input.shape[2]))).to(device)
                    inputMask= np.zeros((input.shape))
                    inputMask[:,:,:]=timeMask
                    inputMask =torch.Tensor(inputMask).to(device)
                    # mask_single appears unused below.
                    mask_single= torch.Tensor(timeMask).to(device)
                    mask_single=mask_single.reshape(1,args.NumTimeSteps, args.NumFeatures).to(device)

                    Data=data.reshape(args.NumTimeSteps, args.NumFeatures).data.cpu().numpy()

                    target_=int(target.data.cpu().numpy()[0])

                    # Plot the raw sample once, then one plot per method.
                    plotExampleBox(Data,args.Graph_dir+'Sample_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)




                    if(args.GradFlag):
                        attributions = Grad.attribute(input, \
                                                      target=labels)

                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)

                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_Grad_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(Grad,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=None)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_Grad_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)



                    if(args.IGFlag):
                        attributions = IG.attribute(input,  \
                                                    baselines=baseline_single, \
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)

                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_IG_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(IG,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_single)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_IG_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)




                    if(args.DLFlag):
                        attributions = DL.attribute(input,  \
                                                    baselines=baseline_single, \
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_DL_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)


                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(DL,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_single)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_DL_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)




                    if(args.GSFlag):

                        attributions = GS.attribute(input,  \
                                                    baselines=baseline_multiple, \
                                                    stdevs=0.09,\
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_GS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)


                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(GS,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_multiple)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_GS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)


                    if(args.DLSFlag):

                        attributions = DLS.attribute(input,  \
                                                    baselines=baseline_multiple, \
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_DLS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(DLS,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_multiple)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_DLS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)



                    if(args.SGFlag):
                        attributions = SG.attribute(input, \
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_SG_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(SG,input, args.NumFeatures,args.NumTimeSteps, labels)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_SG_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)


                    if(args.ShapleySamplingFlag):
                        attributions = SS.attribute(input, \
                                        baselines=baseline_single, \
                                        target=labels,\
                                        feature_mask=inputMask)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_SVS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(SS,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_single,hasFeatureMask=inputMask)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_SVS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                    # if(args.FeaturePermutationFlag):
                    #     attributions = FP.attribute(input, \
                    #                     target=labels),
                    #                     # perturbations_per_eval= 1,\
                    #                     # feature_mask=mask_single)
                    #     saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                    #     plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_FP',greyScale=True)


                    if(args.FeatureAblationFlag):
                        attributions = FA.attribute(input, \
                                        target=labels)
                                        # perturbations_per_eval= input.shape[0],\
                                        # feature_mask=mask_single)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_FA_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(FA,input, args.NumFeatures,args.NumTimeSteps, labels)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_FA_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)

                    if(args.OcclusionFlag):
                        attributions = OS.attribute(input, \
                                        sliding_window_shapes=(1,int(args.NumFeatures/10)),
                                        target=labels,
                                        baselines=baseline_single)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)

                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_FO_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(OS,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_single,hasSliding_window_shapes= (1,int(args.NumFeatures/10)))
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_FO_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
# Example 10
def measure_model(
    model_version,
    dataset,
    out_folder,
    weights_dir,
    device,
    method=METHODS["gradcam"],
    sample_images=50,
    step=1,
):
    """Evaluate an attribution method on a model/dataset pair.

    For every test image, computes attributions with ``method``, measures
    infidelity and max-sensitivity, saves side-by-side visualizations for
    a random sample of ``sample_images`` images, and writes all scores to
    a CSV file in ``out_folder``.

    NOTE(review): relies on module-level names not visible in this chunk:
    METHODS, NUM_OF_CLASSES, CustomDataset, default_cmap, lime_mask, the
    create_*_model factories and transformation helpers — confirm they
    exist. If ``method`` matches none of the branches below, attr_method
    is never bound and the code raises NameError.
    """
    invTrans = get_inverse_normalization_transformation()
    data_dir = os.path.join("data")

    # Select the architecture; anything unrecognized falls back to
    # EfficientNet-B0.
    if model_version == "resnet18":
        model = create_resnet18_model(num_of_classes=NUM_OF_CLASSES[dataset])
    elif model_version == "resnet50":
        model = create_resnet50_model(num_of_classes=NUM_OF_CLASSES[dataset])
    elif model_version == "densenet":
        model = create_densenet121_model(
            num_of_classes=NUM_OF_CLASSES[dataset])
    else:
        model = create_efficientnetb0_model(
            num_of_classes=NUM_OF_CLASSES[dataset])

    model.load_state_dict(torch.load(weights_dir))

    # print(model)

    model.eval()
    model.to(device)

    test_dataset = CustomDataset(
        dataset=dataset,
        transformer=get_default_transformation(),
        data_type="test",
        root_dir=data_dir,
        step=step,
    )
    data_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=4)

    # Random subset of image indices that will get a saved visualization.
    try:
        image_ids = random.sample(range(0, test_dataset.__len__()),
                                  sample_images)
    except ValueError:
        raise ValueError(
            f"Image sample number ({sample_images}) exceeded dataset size ({test_dataset.__len__()})."
        )

    classes_map = test_dataset.classes_map

    print(f"Measuring {model_version} on {dataset} dataset, with {method}")
    print("-" * 10)
    pbar = tqdm(total=test_dataset.__len__(), desc="Model test completion")
    # (sic) identifier spelling kept; forwarded to the perturbation decorator.
    multipy_by_inputs = False
    # Independent `if`s (not elif): method values are distinct, so at most
    # one branch binds attr_method.
    if method == METHODS["ig"]:
        attr_method = IntegratedGradients(model)
        nt_samples = 8
        n_perturb_samples = 3
    if method == METHODS["saliency"]:
        attr_method = Saliency(model)
        nt_samples = 8
        n_perturb_samples = 10
    if method == METHODS["gradcam"]:
        # Pick the last conv layer appropriate to each architecture.
        if model_version == "efficientnet":
            attr_method = GuidedGradCam(model, model._conv_stem)
        elif model_version == "densenet":
            attr_method = GuidedGradCam(model, model.features.conv0)
        else:
            attr_method = GuidedGradCam(model, model.conv1)
        nt_samples = 8
        n_perturb_samples = 10
    if method == METHODS["deconv"]:
        attr_method = Deconvolution(model)
        nt_samples = 8
        n_perturb_samples = 10
    if method == METHODS["gradshap"]:
        attr_method = GradientShap(model)
        nt_samples = 8
        # Fewer sensitivity perturbations for the heavier architectures.
        if model_version == "efficientnet":
            n_perturb_samples = 3
        elif model_version == "densenet":
            n_perturb_samples = 2
        else:
            n_perturb_samples = 10
    if method == METHODS["gbp"]:
        attr_method = GuidedBackprop(model)
        nt_samples = 8
        n_perturb_samples = 10
    if method == "lime":
        attr_method = Lime(model)
        nt_samples = 8
        n_perturb_samples = 10
        # NOTE(review): lime_mask is not defined anywhere visible in this
        # chunk — confirm it exists at module level.
        feature_mask = torch.tensor(lime_mask).to(device)
        multipy_by_inputs = True
    # IG is used directly; every other method is wrapped in a NoiseTunnel.
    if method == METHODS['ig']:
        nt = attr_method
    else:
        nt = NoiseTunnel(attr_method)
    scores = []

    @infidelity_perturb_func_decorator(multipy_by_inputs=multipy_by_inputs)
    def perturb_fn(inputs):
        # Small Gaussian perturbation used by the infidelity metric.
        noise = torch.tensor(np.random.normal(0, 0.003, inputs.shape)).float()
        noise = noise.to(device)
        return inputs - noise

    for input, label in data_loader:
        pbar.update(1)
        inv_input = invTrans(input)
        input = input.to(device)
        input.requires_grad = True
        output = model(input)
        output = F.softmax(output, dim=1)
        prediction_score, pred_label_idx = torch.topk(output, 1)
        prediction_score = prediction_score.cpu().detach().numpy()[0][0]
        pred_label_idx.squeeze_()

        if method == METHODS['gradshap']:
            # GradientShap needs a baseline distribution; fresh noise per image.
            baseline = torch.randn(input.shape)
            baseline = baseline.to(device)

        # NOTE(review): lime attributes with target=1 (hardcoded), unlike the
        # other branches which use the predicted class — confirm intended.
        if method == "lime":
            attributions = attr_method.attribute(input, target=1, n_samples=50)
        elif method == METHODS['ig']:
            attributions = nt.attribute(
                input,
                target=pred_label_idx,
                n_steps=25,
            )
        elif method == METHODS['gradshap']:
            attributions = nt.attribute(input,
                                        target=pred_label_idx,
                                        baselines=baseline)
        else:
            attributions = nt.attribute(
                input,
                nt_type="smoothgrad",
                nt_samples=nt_samples,
                target=pred_label_idx,
            )

        infid = infidelity(model,
                           perturb_fn,
                           input,
                           attributions,
                           target=pred_label_idx)

        # Sensitivity is computed with the same attribute call / kwargs as above.
        if method == "lime":
            sens = sensitivity_max(
                attr_method.attribute,
                input,
                target=pred_label_idx,
                n_perturb_samples=1,
                n_samples=200,
                feature_mask=feature_mask,
            )
        elif method == METHODS['ig']:
            sens = sensitivity_max(
                nt.attribute,
                input,
                target=pred_label_idx,
                n_perturb_samples=n_perturb_samples,
                n_steps=25,
            )
        elif method == METHODS['gradshap']:
            sens = sensitivity_max(nt.attribute,
                                   input,
                                   target=pred_label_idx,
                                   n_perturb_samples=n_perturb_samples,
                                   baselines=baseline)
        else:
            sens = sensitivity_max(
                nt.attribute,
                input,
                target=pred_label_idx,
                n_perturb_samples=n_perturb_samples,
            )
        inf_value = infid.cpu().detach().numpy()[0]
        sens_value = sens.cpu().detach().numpy()[0]
        # Save a visualization only for the randomly chosen image ids.
        if pbar.n in image_ids:
            attr_data = attributions.squeeze().cpu().detach().numpy()
            fig, ax = viz.visualize_image_attr_multiple(
                np.transpose(attr_data, (1, 2, 0)),
                np.transpose(inv_input.squeeze().cpu().detach().numpy(),
                             (1, 2, 0)),
                ["original_image", "heat_map"],
                ["all", "positive"],
                titles=["original_image", "heat_map"],
                cmap=default_cmap,
                show_colorbar=True,
                use_pyplot=False,
                fig_size=(8, 6),
            )
            ax[0].set_xlabel(
                f"Infidelity: {'{0:.6f}'.format(inf_value)}\n Sensitivity: {'{0:.6f}'.format(sens_value)}"
            )
            fig.suptitle(
                f"True: {classes_map[str(label.numpy()[0])][0]}, Pred: {classes_map[str(pred_label_idx.item())][0]}\nScore: {'{0:.4f}'.format(prediction_score)}",
                fontsize=16,
            )
            fig.savefig(
                os.path.join(
                    out_folder,
                    f"{str(pbar.n)}-{classes_map[str(label.numpy()[0])][0]}-{classes_map[str(pred_label_idx.item())][0]}.png",
                ))
            plt.close(fig)
            # if pbar.n > 25:
            #     break

        scores.append([inf_value, sens_value])
    pbar.close()

    np.savetxt(
        os.path.join(out_folder, f"{model_version}-{dataset}-{method}.csv"),
        np.array(scores),
        delimiter=",",
        header="infidelity,sensitivity",
    )

    print(f"Artifacts stored at {out_folder}")
# Example 11
def save_explanation(inputImage: torch.Tensor, modeladapter: torch.nn.Module,
                     cfg: DictConfig, pred_label_idx: int, pred_label_num: int,
                     gt_label_num: int, filename: str, filepath: str,
                     filename_without_ext: str,
                     prediction_score: torch.Tensor):
    """Save an IG+NoiseTunnel heat-map explanation next to the input image.

    The original image and the explanation PNG are copied into the
    "correct" or "error" explanation folder from cfg depending on whether
    the prediction matches the ground truth.

    Returns a dict with the prediction, ground truth, score and image path.

    Note: prediction_score was annotated `float` but `.squeeze_().item()`
    below requires a tensor; annotation corrected accordingly.
    """

    # Add a batch dimension for the attribution call.
    input_gradients = torch.unsqueeze(inputImage, 0)

    integrated_gradients = IntegratedGradients(modeladapter)
    default_cmap = LinearSegmentedColormap.from_list('custom blue',
                                                     [(0, '#ffffff'),
                                                      (0.25, '#000000'),
                                                      (1, '#000000')],
                                                     N=256)

    # Noise-tunnel parameters come from the Hydra/OmegaConf config.
    noise_tunnel = NoiseTunnel(integrated_gradients)
    attributions_ig = noise_tunnel.attribute(
        input_gradients,
        nt_samples=cfg.inference.captum.noise_tunnel.nt_samples,
        nt_samples_batch_size=cfg.inference.captum.noise_tunnel.
        nt_samples_batch_size,
        nt_type=cfg.inference.captum.noise_tunnel.nt_type,
        target=pred_label_idx)

    # Standard Captum Visualization
    figure, plot = viz.visualize_image_attr(np.transpose(
        attributions_ig.squeeze().cpu().detach().numpy(), (1, 2, 0)),
                                            method="heat_map",
                                            sign="positive",
                                            cmap=default_cmap,
                                            show_colorbar=True)

    dict_col_name = {}

    # NOTE: squeeze_ mutates prediction_score in place.
    dict_col_name.update({
        "pred": pred_label_num,
        "GT": gt_label_num,
        "predict_score": prediction_score.squeeze_().item(),
        "image_path": filename
    })

    save_path_original = "/PREDcls_" + str(pred_label_num) + "_GTcls_" + str(
        gt_label_num) + "_" + filename

    save_path_explanation = "/PREDcls_" + str(
        pred_label_num) + "_GTcls_" + str(
            gt_label_num) + "_" + filename_without_ext + "_explain" + ".png"

    if (pred_label_num == gt_label_num):

        # path to image

        shutil.copy(
            filepath,
            cfg.inference.captum.correct_explanation_path + save_path_original)

        figure.savefig(cfg.inference.captum.correct_explanation_path +
                       save_path_explanation,
                       dpi=figure.dpi)

    else:

        shutil.copy(
            filepath,
            cfg.inference.captum.error_explanation_path + save_path_original)

        figure.savefig(cfg.inference.captum.error_explanation_path +
                       save_path_explanation,
                       dpi=figure.dpi)

    plt.close()

    return dict_col_name
def main(args, DatasetsTypes, DataGenerationTypes, models, device):
    """Compute, plot and save saliency maps for pretrained time-series models.

    For every (model architecture, dataset type, data-generation process)
    combination this loads the simulated data, rescales it with a
    MinMaxScaler fitted on the training split, evaluates the pretrained
    model, and — only when test accuracy is at least 90% — computes the
    attribution maps enabled through the boolean ``args.*Flag`` switches
    (Saliency, IntegratedGradients, DeepLift, GradientShap, DeepLiftShap,
    SmoothGrad via NoiseTunnel, ShapleyValueSampling, FeaturePermutation,
    FeatureAblation, Occlusion).  Maps are optionally plotted for one random
    test sample (``args.plot``) and saved as .npy files (``args.save``).
    Models below the accuracy threshold are logged and appended to
    ``args.ignore_list``.

    Args:
        args: namespace carrying paths (data_dir, Saliency_dir,
            Saliency_Maps_graphs_dir, log_file, ignore_list), the
            NumFeatures / NumTimeSteps / batch_size settings and the
            per-method boolean flags plus ``plot`` / ``save`` switches.
        DatasetsTypes: list of dataset base names.
        DataGenerationTypes: list of generation-process names; ``None``
            selects the "_Box" variant of the dataset.
        models: list of model architecture names used to build file paths.
        device: torch device the model and inputs are moved to.
    """
    for m in range(len(models)):

        for x in range(len(DatasetsTypes)):
            for y in range(len(DataGenerationTypes)):

                if (DataGenerationTypes[y] == None):
                    args.DataName = DatasetsTypes[x] + "_Box"
                else:
                    args.DataName = DatasetsTypes[
                        x] + "_" + DataGenerationTypes[y]

                # Load simulated data; the first metadata column holds the
                # class label.
                Training = np.load(args.data_dir + "SimulatedTrainingData_" +
                                   args.DataName + "_F_" +
                                   str(args.NumFeatures) + "_TS_" +
                                   str(args.NumTimeSteps) + ".npy")
                TrainingMetaDataset = np.load(args.data_dir +
                                              "SimulatedTrainingMetaData_" +
                                              args.DataName + "_F_" +
                                              str(args.NumFeatures) + "_TS_" +
                                              str(args.NumTimeSteps) + ".npy")
                TrainingLabel = TrainingMetaDataset[:, 0]

                Testing = np.load(args.data_dir + "SimulatedTestingData_" +
                                  args.DataName + "_F_" +
                                  str(args.NumFeatures) + "_TS_" +
                                  str(args.NumTimeSteps) + ".npy")
                TestingDataset_MetaData = np.load(args.data_dir +
                                                  "SimulatedTestingMetaData_" +
                                                  args.DataName + "_F_" +
                                                  str(args.NumFeatures) +
                                                  "_TS_" +
                                                  str(args.NumTimeSteps) +
                                                  ".npy")
                TestingLabel = TestingDataset_MetaData[:, 0]

                # Fit the scaler on the flattened training split only, then
                # apply it to both splits (no test-set leakage).
                Training = Training.reshape(
                    Training.shape[0], Training.shape[1] * Training.shape[2])
                Testing = Testing.reshape(Testing.shape[0],
                                          Testing.shape[1] * Testing.shape[2])

                scaler = MinMaxScaler()
                scaler.fit(Training)
                Training = scaler.transform(Training)
                Testing = scaler.transform(Testing)

                # Restore the (batch, time, feature) layout the RNN expects.
                TestingRNN = Testing.reshape(Testing.shape[0],
                                             args.NumTimeSteps,
                                             args.NumFeatures)

                test_dataRNN = data_utils.TensorDataset(
                    torch.from_numpy(TestingRNN),
                    torch.from_numpy(TestingLabel))
                test_loaderRNN = data_utils.DataLoader(
                    test_dataRNN, batch_size=args.batch_size, shuffle=False)

                modelName = "Simulated"
                modelName += args.DataName

                saveModelName = "../Models/" + models[m] + "/" + modelName
                saveModelBestName = saveModelName + "_BEST.pkl"

                pretrained_model = torch.load(saveModelBestName,
                                              map_location=device)
                Test_Acc = checkAccuracy(test_loaderRNN, pretrained_model,
                                         args)
                print('{} {} model BestAcc {:.4f}'.format(
                    args.DataName, models[m], Test_Acc))

                # Only explain models that actually learned the task.
                if (Test_Acc >= 90):

                    if (args.GradFlag):
                        rescaledGrad = np.zeros((TestingRNN.shape))
                        Grad = Saliency(pretrained_model)

                    if (args.IGFlag):
                        rescaledIG = np.zeros((TestingRNN.shape))
                        IG = IntegratedGradients(pretrained_model)
                    if (args.DLFlag):
                        rescaledDL = np.zeros((TestingRNN.shape))
                        DL = DeepLift(pretrained_model)
                    if (args.GSFlag):
                        rescaledGS = np.zeros((TestingRNN.shape))
                        GS = GradientShap(pretrained_model)
                    if (args.DLSFlag):
                        rescaledDLS = np.zeros((TestingRNN.shape))
                        DLS = DeepLiftShap(pretrained_model)

                    if (args.SGFlag):
                        rescaledSG = np.zeros((TestingRNN.shape))
                        Grad_ = Saliency(pretrained_model)
                        SG = NoiseTunnel(Grad_)

                    if (args.ShapleySamplingFlag):
                        rescaledShapleySampling = np.zeros((TestingRNN.shape))
                        SS = ShapleyValueSampling(pretrained_model)
                    # BUG FIX: this guard previously tested args.GSFlag, which
                    # left FP / rescaledFeaturePermutation undefined (NameError
                    # in the loop below) whenever FeaturePermutationFlag was
                    # set without GSFlag.
                    if (args.FeaturePermutationFlag):
                        rescaledFeaturePermutation = np.zeros(
                            (TestingRNN.shape))
                        FP = FeaturePermutation(pretrained_model)
                    if (args.FeatureAblationFlag):
                        rescaledFeatureAblation = np.zeros((TestingRNN.shape))
                        FA = FeatureAblation(pretrained_model)

                    if (args.OcclusionFlag):
                        rescaledOcclusion = np.zeros((TestingRNN.shape))
                        OS = Occlusion(pretrained_model)

                    idx = 0
                    # Feature mask that groups all features of one time step
                    # together: group id == time-step index.
                    mask = np.zeros((args.NumTimeSteps, args.NumFeatures),
                                    dtype=int)
                    for i in range(args.NumTimeSteps):
                        mask[i, :] = i

                    for i, (samples, labels) in enumerate(test_loaderRNN):

                        print('[{}/{}] {} {} model accuracy {:.2f}'\
                                .format(i,len(test_loaderRNN), models[m], args.DataName, Test_Acc))

                        # Gradient-based attributions need a leaf tensor with
                        # requires_grad; replaces the deprecated
                        # torch.autograd.Variable(..., volatile=False, ...).
                        input = samples.reshape(-1, args.NumTimeSteps,
                                                args.NumFeatures).to(device)
                        input = input.requires_grad_(True)

                        batch_size = input.shape[0]
                        # Random baselines: one per sample, and 5 per sample
                        # for the multi-baseline (SHAP-style) methods.
                        baseline_single = torch.from_numpy(
                            np.random.random(input.shape)).to(device)
                        baseline_multiple = torch.from_numpy(
                            np.random.random(
                                (input.shape[0] * 5, input.shape[1],
                                 input.shape[2]))).to(device)
                        inputMask = np.zeros((input.shape))
                        inputMask[:, :, :] = mask
                        inputMask = torch.from_numpy(inputMask).to(device)
                        mask_single = torch.from_numpy(mask).to(device)
                        mask_single = mask_single.reshape(
                            1, args.NumTimeSteps, args.NumFeatures).to(device)
                        labels = torch.tensor(labels.int().tolist()).to(device)

                        if (args.GradFlag):
                            attributions = Grad.attribute(input,
                                                          target=labels)
                            rescaledGrad[idx:idx + batch_size, :, :] = \
                                Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.IGFlag):
                            attributions = IG.attribute(
                                input,
                                baselines=baseline_single,
                                target=labels)
                            rescaledIG[idx:idx + batch_size, :, :] = \
                                Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.DLFlag):
                            attributions = DL.attribute(
                                input,
                                baselines=baseline_single,
                                target=labels)
                            rescaledDL[idx:idx + batch_size, :, :] = \
                                Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.GSFlag):
                            attributions = GS.attribute(
                                input,
                                baselines=baseline_multiple,
                                stdevs=0.09,
                                target=labels)
                            rescaledGS[idx:idx + batch_size, :, :] = \
                                Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.DLSFlag):
                            attributions = DLS.attribute(
                                input,
                                baselines=baseline_multiple,
                                target=labels)
                            rescaledDLS[idx:idx + batch_size, :, :] = \
                                Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.SGFlag):
                            attributions = SG.attribute(input,
                                                        target=labels)
                            rescaledSG[idx:idx + batch_size, :, :] = \
                                Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.ShapleySamplingFlag):
                            attributions = SS.attribute(
                                input,
                                baselines=baseline_single,
                                target=labels,
                                feature_mask=inputMask)
                            rescaledShapleySampling[
                                idx:idx + batch_size, :, :] = \
                                Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.FeaturePermutationFlag):
                            attributions = FP.attribute(
                                input,
                                target=labels,
                                perturbations_per_eval=input.shape[0],
                                feature_mask=mask_single)
                            rescaledFeaturePermutation[
                                idx:idx + batch_size, :, :] = \
                                Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.FeatureAblationFlag):
                            attributions = FA.attribute(input,
                                                        target=labels)
                            rescaledFeatureAblation[
                                idx:idx + batch_size, :, :] = \
                                Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.OcclusionFlag):
                            # Occlude one full time step at a time.
                            attributions = OS.attribute(
                                input,
                                sliding_window_shapes=(1, args.NumFeatures),
                                target=labels,
                                baselines=baseline_single)
                            rescaledOcclusion[
                                idx:idx + batch_size, :, :] = \
                                Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        idx += batch_size

                    if (args.plot):
                        # Plot one random test sample and its maps.
                        index = random.randint(0, TestingRNN.shape[0] - 1)
                        plotExampleBox(TestingRNN[index, :, :],
                                       args.Saliency_Maps_graphs_dir +
                                       args.DataName + "_" + models[m] +
                                       '_sample',
                                       flip=True)

                        print("Plotting sample", index)
                        if (args.GradFlag):
                            plotExampleBox(rescaledGrad[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_Grad',
                                           greyScale=True,
                                           flip=True)

                        if (args.IGFlag):
                            plotExampleBox(rescaledIG[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_IG',
                                           greyScale=True,
                                           flip=True)

                        if (args.DLFlag):
                            plotExampleBox(rescaledDL[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_DL',
                                           greyScale=True,
                                           flip=True)

                        if (args.GSFlag):
                            plotExampleBox(rescaledGS[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_GS',
                                           greyScale=True,
                                           flip=True)

                        if (args.DLSFlag):
                            plotExampleBox(rescaledDLS[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_DLS',
                                           greyScale=True,
                                           flip=True)

                        if (args.SGFlag):
                            plotExampleBox(rescaledSG[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_SG',
                                           greyScale=True,
                                           flip=True)

                        if (args.ShapleySamplingFlag):
                            plotExampleBox(
                                rescaledShapleySampling[index, :, :],
                                args.Saliency_Maps_graphs_dir + args.DataName +
                                "_" + models[m] + '_ShapleySampling',
                                greyScale=True,
                                flip=True)

                        if (args.FeaturePermutationFlag):
                            plotExampleBox(
                                rescaledFeaturePermutation[index, :, :],
                                args.Saliency_Maps_graphs_dir + args.DataName +
                                "_" + models[m] + '_FeaturePermutation',
                                greyScale=True,
                                flip=True)

                        if (args.FeatureAblationFlag):
                            plotExampleBox(
                                rescaledFeatureAblation[index, :, :],
                                args.Saliency_Maps_graphs_dir + args.DataName +
                                "_" + models[m] + '_FeatureAblation',
                                greyScale=True,
                                flip=True)

                        if (args.OcclusionFlag):
                            plotExampleBox(rescaledOcclusion[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_Occlusion',
                                           greyScale=True,
                                           flip=True)

                    if (args.save):
                        # Persist every enabled map as a .npy file.
                        if (args.GradFlag):
                            print("Saving Grad", modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_Grad_rescaled", rescaledGrad)

                        if (args.IGFlag):
                            print("Saving IG", modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_IG_rescaled", rescaledIG)

                        if (args.DLFlag):
                            print("Saving DL", modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_DL_rescaled", rescaledDL)

                        if (args.GSFlag):
                            print("Saving GS", modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_GS_rescaled", rescaledGS)

                        if (args.DLSFlag):
                            print("Saving DLS", modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_DLS_rescaled", rescaledDLS)

                        if (args.SGFlag):
                            print("Saving SG", modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_SG_rescaled", rescaledSG)

                        if (args.ShapleySamplingFlag):
                            print("Saving ShapleySampling",
                                  modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_ShapleySampling_rescaled",
                                rescaledShapleySampling)

                        if (args.FeaturePermutationFlag):
                            print("Saving FeaturePermutation",
                                  modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_FeaturePermutation_rescaled",
                                rescaledFeaturePermutation)

                        if (args.FeatureAblationFlag):
                            print("Saving FeatureAblation",
                                  modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_FeatureAblation_rescaled",
                                rescaledFeatureAblation)

                        if (args.OcclusionFlag):
                            print("Saving Occlusion",
                                  modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_Occlusion_rescaled",
                                rescaledOcclusion)

                else:
                    # Model too weak to explain: log it and remember it in
                    # the ignore list.
                    logging.basicConfig(filename=args.log_file,
                                        level=logging.DEBUG)

                    logging.debug('{} {} model BestAcc {:.4f}'.format(
                        args.DataName, models[m], Test_Acc))

                    # Mode "a" creates the file when missing, so the previous
                    # exists-check with a separate "w" branch was redundant.
                    with open(args.ignore_list, "a") as fp:
                        fp.write(args.DataName + '_' + models[m] + '\n')
Example #13
0
                                                      (0.25, '#000000'),
                                                      (1, '#000000')],
                                                     N=256)

    vis_img = viz.visualize_image_attr(
        np.transpose(attributions_ig.squeeze().cpu().detach().numpy(),
                     (1, 2, 0)),
        np.transpose(transformed_img.squeeze().cpu().detach().numpy(),
                     (1, 2, 0)),
        method='heat_map',
        cmap=default_cmap,
        show_colorbar=True,
        sign='positive',
        outlier_perc=1)

    noise_tunnel = NoiseTunnel(integrated_gradients)

    attributions_ig_nt = noise_tunnel.attribute(input,
                                                nt_samples=10,
                                                nt_type='smoothgrad_sq',
                                                target=pred_label_idx,
                                                internal_batch_size=10)

    _ = viz.visualize_image_attr_multiple(
        np.transpose(attributions_ig_nt.squeeze().cpu().detach().numpy(),
                     (1, 2, 0)),
        np.transpose(transformed_img.squeeze().cpu().detach().numpy(),
                     (1, 2, 0)), ["original_image", "heat_map"],
        ["all", "positive"],
        cmap=default_cmap,
        show_colorbar=True)
Example #14
0
def visualize_maps(
        model: torch.nn.Module,
        inputs: Union[Tuple[torch.Tensor, torch.Tensor]],
        labels: torch.Tensor,
        title: str,
        second_occlusion: Tuple[int, int, int] = (1, 2, 2),
        baselines: Tuple[int, int] = (0, 0),
        closest: bool = False,
) -> None:
    """
    Visualizes the average of the inputs, or the single input, using various different XAI approaches

    Renders a 2x5 figure: row 0 explains the first (image-like) input and
    row 1 the second (auxiliary) input, with columns for the original input,
    Occlusion, Saliency (NoiseTunnel smoothgrad_sq), IntegratedGradients
    (NoiseTunnel) and ShapleyValueSampling.  The figure is saved to a PNG
    whose name combines `title`, single/multi mode, the correctness flag and
    the first baseline value; nothing is returned.

    Args:
        model: network called as ``model(inputs[0], inputs[1])`` returning
            class scores.
        inputs: pair of tensors (primary input, auxiliary input).
        labels: per-sample scores; argmax over the last dim gives the label.
        title: prefix for the figure title and output filename.
        second_occlusion: occlusion sliding-window shape for the aux input.
        baselines: attribution baselines for (first, second) input.
        closest: when False, batches whose labels are all 1 are skipped.
    """
    single = inputs[1].ndim == 2  # 2-D aux input => single-sample mode
    model.zero_grad()
    model.eval()
    occ = Occlusion(model)
    saliency = Saliency(model)
    saliency = NoiseTunnel(saliency)
    igrad = IntegratedGradients(model)
    igrad_2 = NoiseTunnel(igrad)
    # deep_lift = DeepLift(model)
    grad_shap = ShapleyValueSampling(model)
    output = model(inputs[0], inputs[1])
    # Reduce both model output and labels to hard class indices.
    output = F.softmax(output, dim=-1).argmax(dim=1, keepdim=True)
    labels = F.softmax(labels, dim=-1).argmax(dim=1, keepdim=True)
    if np.all(labels.cpu().numpy() == 1) and not closest:
        return
    # NOTE(review): dead branch -- `targets` is always `labels`; the
    # prediction-based alternative below is unreachable.  Confirm intent.
    if True:
        targets = labels
    else:
        targets = output
    print(targets)
    # NOTE(review): since targets is labels here, `correct` is always
    # all-True, so the output filename below always carries 'Failed'
    # (the 'Failed'/'Success' labels also look inverted).  Verify.
    correct = targets.cpu().numpy() == labels.cpu().numpy()
    # if correct:
    #   return
    occ_out = occ.attribute(
        inputs,
        baselines=baselines,
        sliding_window_shapes=((1, 5, 5), second_occlusion),
        target=targets,
    )
    # occ_out2 = occ.attribute(inputs, sliding_window_shapes=((1,20,20), second_occlusion), strides=(8,1), target=targets)
    # NOTE(review): `n_samples` is the legacy NoiseTunnel kwarg; newer
    # captum releases renamed it to `nt_samples` -- confirm pinned version.
    saliency_out = saliency.attribute(inputs,
                                      nt_type="smoothgrad_sq",
                                      n_samples=5,
                                      target=targets,
                                      abs=False)
    # igrad_out = igrad.attribute(inputs, target=targets, internal_batch_size=1)
    igrad_out = igrad_2.attribute(
        inputs,
        baselines=baselines,
        target=targets,
        n_samples=5,
        nt_type="smoothgrad_sq",
        internal_batch_size=1,
    )
    # deep_lift_out = deep_lift.attribute(inputs, target=targets)
    grad_shap_out = grad_shap.attribute(inputs,
                                        baselines=baselines,
                                        target=targets)

    # Convert tensors to plottable images; the multi-sample path averages
    # via convert_to_image_multi (helper defined elsewhere in this project).
    if single:
        inputs = convert_to_image(inputs)
        occ_out = convert_to_image(occ_out)
        saliency_out = convert_to_image(saliency_out)
        igrad_out = convert_to_image(igrad_out)
        # grad_shap_out = convert_to_image(grad_shap_out)
    else:
        inputs = convert_to_image_multi(inputs)
        occ_out = convert_to_image_multi(occ_out)
        saliency_out = convert_to_image_multi(saliency_out)
        igrad_out = convert_to_image_multi(igrad_out)
        grad_shap_out = convert_to_image_multi(grad_shap_out)
    # Row 0: attributions for the first (primary) input.
    fig, axes = plt.subplots(2, 5)
    (fig, axes[0, 0]) = visualization.visualize_image_attr(
        occ_out[0][0],
        inputs[0][0],
        title="Original Image",
        method="original_image",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[0, 0]),
        use_pyplot=False,
    )
    (fig, axes[0, 1]) = visualization.visualize_image_attr(
        occ_out[0][0],
        None,
        sign="all",
        title="Occ (5x5)",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[0, 1]),
        use_pyplot=False,
    )
    (fig, axes[0, 2]) = visualization.visualize_image_attr(
        saliency_out[0][0],
        None,
        sign="all",
        title="Saliency",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[0, 2]),
        use_pyplot=False,
    )
    (fig, axes[0, 3]) = visualization.visualize_image_attr(
        igrad_out[0][0],
        None,
        sign="all",
        title="Integrated Grad",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[0, 3]),
        use_pyplot=False,
    )
    (fig, axes[0, 4]) = visualization.visualize_image_attr(
        grad_shap_out[0],
        None,
        title="GradSHAP",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[0, 4]),
        use_pyplot=False,
    )
    ##### Second Input Labels #########################################################################################
    # Row 1: attributions for the second (auxiliary) input.
    (fig, axes[1, 0]) = visualization.visualize_image_attr(
        occ_out[1],
        inputs[1],
        title="Original Aux",
        method="original_image",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[1, 0]),
        use_pyplot=False,
    )
    (fig, axes[1, 1]) = visualization.visualize_image_attr(
        occ_out[1],
        None,
        sign="all",
        title="Occ (1x1)",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[1, 1]),
        use_pyplot=False,
    )
    (fig, axes[1, 2]) = visualization.visualize_image_attr(
        saliency_out[1],
        None,
        sign="all",
        title="Saliency",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[1, 2]),
        use_pyplot=False,
    )
    (fig, axes[1, 3]) = visualization.visualize_image_attr(
        igrad_out[1],
        None,
        sign="all",
        title="Integrated Grad",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[1, 3]),
        use_pyplot=False,
    )
    (fig, axes[1, 4]) = visualization.visualize_image_attr(
        grad_shap_out[1],
        None,
        title="GradSHAP",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[1, 4]),
        use_pyplot=False,
    )

    fig.suptitle(
        title +
        f" Label: {labels.cpu().numpy()} Pred: {targets.cpu().numpy()}")
    plt.savefig(
        f"{title}_{'single' if single else 'multi'}_{'Failed' if correct else 'Success'}_baseline{baselines[0]}.png",
        dpi=300,
    )
    # Clear figure and axes so repeated calls don't accumulate state.
    plt.clf()
    plt.cla()
Example #15
0
                              pos_neg_file='data/labels_covid19_posi.tsv',
                              splits=[0.7, 0.15, 0.15],
                              replicate_channel=1,
                              batch_size=1,
                              random_seed=123,
                              input_size=224,
                              mode='ct',
                              num_workers=0)

model = DenseNetModel(121, 2)
model.model.load_state_dict(
    torch.load('interpretability/model_best.pth')['state_dict'])
model = model.eval()
model.model = model.model.to('cuda')
gc = GuidedGradCam(model.model, model.model.features.denseblock3)
noise_tunnel = NoiseTunnel(gc)

for (img, label, sublabel, subject_id, ct_id,
     slice_id) in tqdm(covid_loaders.test):
    img = img.to('cuda')
    output = model.model(img)
    prediction_score, predicted_label = torch.max(output, 1)
    attributions_ig_nt = noise_tunnel.attribute(img,
                                                nt_samples=10,
                                                nt_type='smoothgrad_sq',
                                                target=predicted_label,
                                                stdevs=0.01)
    filename = subject_id[0] + '_' + ct_id[0] + '_' + str(
        slice_id.item()) + '.png'
    dest = os.path.join('interp_output', subject_id[0], ct_id[0], filename)
    if not os.path.exists(os.path.dirname(dest)):
Example #16
0
                                                 [(0, '#ffffff'),
                                                  (0.25, '#000000'),
                                                  (1, '#000000')],
                                                 N=256)
baseline = 2.6400  # ~maximum in the image, white place
for batch_idx, (input, target) in enumerate(dataloaders['test']):
    image_input = input.cpu().data[0].numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image_input = std * image_input + mean
    image_input = np.clip(image_input, 0, 1)
    output = model_ft(input)
    prediction_score, pred_label_idx = torch.topk(output, 1)
    pred_clname = class_names[pred_label_idx]
    integrated_gradients = IntegratedGradients(model_ft)
    noise_tunnel = NoiseTunnel(integrated_gradients)
    attributions_ig_nt = noise_tunnel.attribute(input,
                                                n_samples=10,
                                                nt_type='smoothgrad_sq',
                                                target=pred_label_idx,
                                                baselines=baseline)
    pdf = PdfPages(inputdir + "res/image_intepret_" + str(batch_idx) + ".pdf")
    fig, axis = viz.visualize_image_attr_multiple(
        np.transpose(attributions_ig_nt.squeeze().cpu().detach().numpy(),
                     (1, 2, 0)),
        image_input, ["original_image", "heat_map"], ["all", "positive"],
        cmap=default_cmap,
        show_colorbar=True,
        titles=[
            'Truth: ' + class_names[target] + ' Prediction: ' + pred_clname,
            'NoiseTunnel'
Example #17
0
def run_saliency_methods(saliency_methods,
                         pretrained_model,
                         test_shape,
                         train_loader,
                         test_loader,
                         device,
                         model_type,
                         model_name,
                         saliency_dir,
                         tsr_graph_dir=None,
                         tsr_inputs_to_graph=()):
    """Run the requested saliency methods over the test set and save results.

    For each method key present in ``saliency_methods`` the matching Captum
    (or FIT-family) attribution algorithm is evaluated on every batch of
    ``test_loader``; the rescaled attribution maps are accumulated into an
    array of shape ``test_shape`` and written to ``saliency_dir`` as
    ``<model_name>_<model_type>_<METHOD>_rescaled.npy``.

    Args:
        saliency_methods: collection of method keys, e.g. "Grad", "IG",
            "DL", "GS", "DLS", "SG", "ShapleySampling", "FeaturePermutation",
            "FeatureAblation", "Occlusion", "FIT"/"IFIT"/"WFIT"/"IWFIT",
            plus the "*_TSR" variants.
        pretrained_model: trained torch module to attribute.
        test_shape: (num_test_samples, num_timesteps, num_features).
        train_loader: only used to fit the FIT feature generator.
        test_loader: loader yielding (samples, labels) batches to explain.
        device: torch device all tensors are moved to.
        model_type: model identifier used in the output file names.
        model_name: run identifier used in the output file names.
        saliency_dir: output path prefix (assumed to end with a separator).
        tsr_graph_dir: optional directory for TSR debug graphs.
        tsr_inputs_to_graph: input indices to graph for TSR methods.
    """
    _, num_timesteps, num_features = test_shape

    # Which methods were requested.
    run_grad = "Grad" in saliency_methods
    run_grad_tsr = "Grad_TSR" in saliency_methods
    run_ig = "IG" in saliency_methods
    run_ig_tsr = "IG_TSR" in saliency_methods
    run_dl = "DL" in saliency_methods
    run_gs = "GS" in saliency_methods
    run_dls = "DLS" in saliency_methods
    run_dls_tsr = "DLS_TSR" in saliency_methods
    run_sg = "SG" in saliency_methods
    run_shapley_sampling = "ShapleySampling" in saliency_methods
    run_feature_permutation = "FeaturePermutation" in saliency_methods
    run_feature_ablation = "FeatureAblation" in saliency_methods
    run_occlusion = "Occlusion" in saliency_methods
    run_fit = "FIT" in saliency_methods
    run_ifit = "IFIT" in saliency_methods
    run_wfit = "WFIT" in saliency_methods
    run_iwfit = "IWFIT" in saliency_methods

    # Instantiate each attribution object once and allocate one result
    # array (full test-set size) per requested method.
    if run_grad or run_grad_tsr:
        Grad = Saliency(pretrained_model)
    if run_grad:
        rescaledGrad = np.zeros(test_shape)
    if run_grad_tsr:
        rescaledGrad_TSR = np.zeros(test_shape)

    if run_ig or run_ig_tsr:
        IG = IntegratedGradients(pretrained_model)
    if run_ig:
        rescaledIG = np.zeros(test_shape)
    if run_ig_tsr:
        rescaledIG_TSR = np.zeros(test_shape)

    if run_dl:
        rescaledDL = np.zeros(test_shape)
        DL = DeepLift(pretrained_model)

    if run_gs:
        rescaledGS = np.zeros(test_shape)
        GS = GradientShap(pretrained_model)

    if run_dls or run_dls_tsr:
        DLS = DeepLiftShap(pretrained_model)
    if run_dls:
        rescaledDLS = np.zeros(test_shape)
    if run_dls_tsr:
        rescaledDLS_TSR = np.zeros(test_shape)

    if run_sg:
        rescaledSG = np.zeros(test_shape)
        Grad_ = Saliency(pretrained_model)
        SG = NoiseTunnel(Grad_)

    if run_shapley_sampling:
        rescaledShapleySampling = np.zeros(test_shape)
        SS = ShapleyValueSampling(pretrained_model)

    # BUG FIX: this setup was previously guarded by `run_gs`, so requesting
    # "FeaturePermutation" without "GS" crashed with a NameError on `FP`
    # (and `rescaledFeaturePermutation`) inside the batch loop.
    if run_feature_permutation:
        rescaledFeaturePermutation = np.zeros(test_shape)
        FP = FeaturePermutation(pretrained_model)

    if run_feature_ablation:
        rescaledFeatureAblation = np.zeros(test_shape)
        FA = FeatureAblation(pretrained_model)

    if run_occlusion:
        rescaledOcclusion = np.zeros(test_shape)
        OS = Occlusion(pretrained_model)

    if run_fit:
        rescaledFIT = np.zeros(test_shape)
        FIT = FITExplainer(pretrained_model, ft_dim_last=True)
        generator = JointFeatureGenerator(num_features, data='none')
        # TODO: Increase epochs
        FIT.fit_generator(generator, train_loader, test_loader, n_epochs=300)

    if run_ifit:
        rescaledIFIT = np.zeros(test_shape)
    if run_wfit:
        rescaledWFIT = np.zeros(test_shape)
    if run_iwfit:
        rescaledIWFIT = np.zeros(test_shape)

    idx = 0
    # Grouping mask: all features at timestep i share group id i, so the
    # perturbation-based methods treat a whole timestep as one feature.
    mask = np.zeros((num_timesteps, num_features), dtype=int)
    for i in range(num_timesteps):
        mask[i, :] = i

    for i, (samples, labels) in enumerate(test_loader):
        input = samples.reshape(-1, num_timesteps, num_features).to(device)
        # `Variable(..., volatile=...)` has been deprecated since PyTorch
        # 0.4; marking the tensor as requiring grad is the modern equivalent
        # and lets the gradient-based methods backprop to the input.
        input = input.requires_grad_(True)

        batch_size = input.shape[0]
        # Random baselines: one per sample, plus a 5x pool for the
        # multi-baseline (Shap-style) methods.
        baseline_single = torch.from_numpy(np.random.random(
            input.shape)).to(device)
        baseline_multiple = torch.from_numpy(
            np.random.random((input.shape[0] * 5, input.shape[1],
                              input.shape[2]))).to(device)
        # Per-batch copies of the timestep-grouping mask.
        inputMask = np.zeros((input.shape))
        inputMask[:, :, :] = mask
        inputMask = torch.from_numpy(inputMask).to(device)
        mask_single = torch.from_numpy(mask).to(device)
        mask_single = mask_single.reshape(1, num_timesteps,
                                          num_features).to(device)
        labels = torch.tensor(labels.int().tolist()).to(device)

        if run_grad:
            attributions = Grad.attribute(input, target=labels)
            rescaledGrad[
                idx:idx +
                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                    num_timesteps, num_features, attributions)
        if run_grad_tsr:
            rescaledGrad_TSR[idx:idx + batch_size, :, :] = get_tsr_saliency(
                Grad,
                input,
                labels,
                graph_dir=tsr_graph_dir,
                graph_name=f'{model_name}_{model_type}_Grad_TSR',
                inputs_to_graph=tsr_inputs_to_graph,
                cur_batch=i)

        if run_ig:
            attributions = IG.attribute(input,
                                        baselines=baseline_single,
                                        target=labels)
            rescaledIG[idx:idx +
                       batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                           num_timesteps, num_features, attributions)
        if run_ig_tsr:
            rescaledIG_TSR[idx:idx + batch_size, :, :] = get_tsr_saliency(
                IG,
                input,
                labels,
                baseline=baseline_single,
                graph_dir=tsr_graph_dir,
                graph_name=f'{model_name}_{model_type}_IG_TSR',
                inputs_to_graph=tsr_inputs_to_graph,
                cur_batch=i)

        if run_dl:
            attributions = DL.attribute(input,
                                        baselines=baseline_single,
                                        target=labels)
            rescaledDL[idx:idx +
                       batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                           num_timesteps, num_features, attributions)

        if run_gs:
            attributions = GS.attribute(input,
                                        baselines=baseline_multiple,
                                        stdevs=0.09,
                                        target=labels)
            rescaledGS[idx:idx +
                       batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                           num_timesteps, num_features, attributions)

        if run_dls:
            attributions = DLS.attribute(input,
                                         baselines=baseline_multiple,
                                         target=labels)
            rescaledDLS[idx:idx +
                        batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                            num_timesteps, num_features, attributions)
        if run_dls_tsr:
            rescaledDLS_TSR[idx:idx + batch_size, :, :] = get_tsr_saliency(
                DLS,
                input,
                labels,
                baseline=baseline_multiple,
                graph_dir=tsr_graph_dir,
                graph_name=f'{model_name}_{model_type}_DLS_TSR',
                inputs_to_graph=tsr_inputs_to_graph,
                cur_batch=i)

        if run_sg:
            attributions = SG.attribute(input, target=labels)
            rescaledSG[idx:idx +
                       batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                           num_timesteps, num_features, attributions)

        if run_shapley_sampling:
            attributions = SS.attribute(input,
                                        baselines=baseline_single,
                                        target=labels,
                                        feature_mask=inputMask)
            rescaledShapleySampling[
                idx:idx +
                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                    num_timesteps, num_features, attributions)

        if run_feature_permutation:
            attributions = FP.attribute(input,
                                        target=labels,
                                        perturbations_per_eval=input.shape[0],
                                        feature_mask=mask_single)
            rescaledFeaturePermutation[
                idx:idx +
                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                    num_timesteps, num_features, attributions)

        if run_feature_ablation:
            attributions = FA.attribute(input, target=labels)
            # perturbations_per_eval= input.shape[0],\
            # feature_mask=mask_single)
            rescaledFeatureAblation[
                idx:idx +
                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                    num_timesteps, num_features, attributions)

        if run_occlusion:
            # Occlude one full timestep at a time.
            attributions = OS.attribute(input,
                                        sliding_window_shapes=(1,
                                                               num_features),
                                        target=labels,
                                        baselines=baseline_single)
            rescaledOcclusion[
                idx:idx +
                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                    num_timesteps, num_features, attributions)

        if run_fit:
            attributions = torch.from_numpy(FIT.attribute(input, labels))
            rescaledFIT[idx:idx +
                        batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                            num_timesteps, num_features, attributions)

        if run_ifit:
            attributions = torch.from_numpy(
                inverse_fit_attribute(input,
                                      pretrained_model,
                                      ft_dim_last=True))
            rescaledIFIT[idx:idx + batch_size, :, :] = attributions

        if run_wfit:
            attributions = torch.from_numpy(
                wfit_attribute(input,
                               pretrained_model,
                               N=test_shape[1],
                               ft_dim_last=True,
                               single_label=True))
            rescaledWFIT[idx:idx + batch_size, :, :] = attributions

        if run_iwfit:
            attributions = torch.from_numpy(
                wfit_attribute(input,
                               pretrained_model,
                               N=test_shape[1],
                               ft_dim_last=True,
                               single_label=True,
                               inverse=True))
            rescaledIWFIT[idx:idx + batch_size, :, :] = attributions

        idx += batch_size

    def _save(method_key, result):
        # Shared save pattern:
        #   <saliency_dir><model_name>_<model_type>_<method_key>_rescaled.npy
        print("Saving " + method_key, model_name + "_" + model_type)
        np.save(
            saliency_dir + model_name + "_" + model_type + "_" + method_key +
            "_rescaled", result)

    if run_grad:
        _save("Grad", rescaledGrad)
    if run_grad_tsr:
        _save("Grad_TSR", rescaledGrad_TSR)
    if run_ig:
        _save("IG", rescaledIG)
    if run_ig_tsr:
        _save("IG_TSR", rescaledIG_TSR)
    if run_dl:
        _save("DL", rescaledDL)
    if run_gs:
        _save("GS", rescaledGS)
    if run_dls:
        _save("DLS", rescaledDLS)
    if run_dls_tsr:
        _save("DLS_TSR", rescaledDLS_TSR)
    if run_sg:
        _save("SG", rescaledSG)
    if run_shapley_sampling:
        _save("ShapleySampling", rescaledShapleySampling)
    if run_feature_permutation:
        _save("FeaturePermutation", rescaledFeaturePermutation)
    if run_feature_ablation:
        _save("FeatureAblation", rescaledFeatureAblation)
    if run_occlusion:
        _save("Occlusion", rescaledOcclusion)
    if run_fit:
        _save("FIT", rescaledFIT)
    if run_ifit:
        _save("IFIT", rescaledIFIT)
    if run_wfit:
        _save("WFIT", rescaledWFIT)
    if run_iwfit:
        _save("IWFIT", rescaledIWFIT)
예제 #18
0
파일: captum.py 프로젝트: hal-314/fastai2
 def after_fit(self):
     """After training, build the Captum attribution objects for later use."""
     ig = IntegratedGradients(self.model)
     self.integrated_gradients = ig
     self._noise_tunnel = NoiseTunnel(ig)
예제 #19
0
 def __init__(self, model, train_data):
     """Wrap *model* in a NoiseTunnel-over-IntegratedGradients explainer.

     Note: *train_data* is accepted for interface compatibility but unused.
     """
     model.eval()  # attribution should run with the model in inference mode
     ig = IntegratedGradients(model)
     self.explainer = NoiseTunnel(ig)
     self.model = model
     global_vars.set('explainer_sampling_rate', 0.1)
예제 #20
0
def measure_filter_model(
    model_version,
    dataset,
    out_folder,
    weights_dir,
    device,
    method=METHODS["gradcam"],
    sample_images=50,
    step=1,
    use_infidelity=False,
    use_sensitivity=False,
    render=False,
    ids=None,
):
    """Measure attribution stability of a model under image filters.

    For every test image, runs the chosen attribution ``method`` on the
    unfiltered image and each filtered variant (see ``OUR_FILTERS``), then
    records the SSIM between the "none" attribution map and each filtered
    one, plus the model score, into a per-model CSV in ``out_folder``.
    Optionally computes infidelity/sensitivity metrics and renders heatmap
    figures.

    NOTE(review): ``sample_images`` is never used, and ``image_ids`` below
    is computed but unused — presumably leftovers from an earlier sampling
    scheme; confirm before relying on them.
    """
    invTrans = get_inverse_normalization_transformation()
    data_dir = os.path.join("data")

    # Build the architecture matching the requested version; any value other
    # than the three named ones falls through to EfficientNet-B0.
    if model_version == "resnet18":
        model = create_resnet18_model(num_of_classes=NUM_OF_CLASSES[dataset])
    elif model_version == "resnet50":
        model = create_resnet50_model(num_of_classes=NUM_OF_CLASSES[dataset])
    elif model_version == "densenet":
        model = create_densenet121_model(
            num_of_classes=NUM_OF_CLASSES[dataset])
    else:
        model = create_efficientnetb0_model(
            num_of_classes=NUM_OF_CLASSES[dataset])

    model.load_state_dict(torch.load(weights_dir))

    # print(model)

    model.eval()
    model.to(device)

    # add_filters=True makes the dataset yield each image once per filter,
    # in OUR_FILTERS order (inferred from the filter_count cycling below —
    # TODO confirm against CustomDataset).
    test_dataset = CustomDataset(
        dataset=dataset,
        transformer=get_default_transformation(),
        data_type="test",
        root_dir=data_dir,
        step=step,
        add_filters=True,
        ids=ids,
    )
    data_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=4)

    # NOTE(review): sampling k == population size can never raise ValueError
    # here, and image_ids is never used afterwards — dead code.
    try:
        image_ids = random.sample(range(0, test_dataset.__len__()),
                                  test_dataset.__len__())
    except ValueError:
        raise ValueError(
            f"Image sample number ({test_dataset.__len__()}) exceeded dataset size ({test_dataset.__len__()})."
        )

    classes_map = test_dataset.classes_map

    print(f"Measuring {model_version} on {dataset} dataset, with {method}")
    print("-" * 10)
    pbar = tqdm(total=test_dataset.__len__(), desc="Model test completion")
    # Select the attribution method plus per-method noise-tunnel /
    # sensitivity sample counts. These are independent `if`s (not elif), so
    # an unknown `method` would leave attr_method unbound.
    multipy_by_inputs = False
    if method == METHODS["ig"]:
        attr_method = IntegratedGradients(model)
        nt_samples = 1
        n_perturb_samples = 1
    if method == METHODS["saliency"]:
        attr_method = Saliency(model)
        nt_samples = 8
        n_perturb_samples = 2
    if method == METHODS["gradcam"]:
        # GuidedGradCam needs the first conv layer, which differs per arch.
        if model_version == "efficientnet":
            attr_method = GuidedGradCam(model, model._conv_stem)
        elif model_version == "densenet":
            attr_method = GuidedGradCam(model, model.features.conv0)
        else:
            attr_method = GuidedGradCam(model, model.conv1)
        nt_samples = 8
        n_perturb_samples = 2
    if method == METHODS["deconv"]:
        attr_method = Deconvolution(model)
        nt_samples = 8
        n_perturb_samples = 2
    if method == METHODS["gradshap"]:
        attr_method = GradientShap(model)
        nt_samples = 8
        n_perturb_samples = 2
    if method == METHODS["gbp"]:
        attr_method = GuidedBackprop(model)
        nt_samples = 8
        n_perturb_samples = 2
    if method == "lime":
        attr_method = Lime(model)
        nt_samples = 8
        n_perturb_samples = 2
        # NOTE(review): lime_mask is not defined in this function —
        # presumably a module-level constant; verify it exists.
        feature_mask = torch.tensor(lime_mask).to(device)
        multipy_by_inputs = True
    # IG is used directly; all other methods are wrapped in a NoiseTunnel.
    if method == METHODS["ig"]:
        nt = attr_method
    else:
        nt = NoiseTunnel(attr_method)
    scores = []

    # Perturbation used by the infidelity metric: small Gaussian noise.
    @infidelity_perturb_func_decorator(multipy_by_inputs=multipy_by_inputs)
    def perturb_fn(inputs):
        noise = torch.tensor(np.random.normal(0, 0.003, inputs.shape)).float()
        noise = noise.to(device)
        return inputs - noise

    # Filters applied to each image; "none" is the reference for SSIM.
    OUR_FILTERS = [
        "none",
        "fx_freaky_details 2,10,1,11,0,32,0",
        "normalize_local 8,10",
        "fx_boost_chroma 90,0,0",
        "fx_mighty_details 25,1,25,1,11,0",
        "sharpen 300",
    ]
    idx = 0
    filter_count = 0
    filter_attrs = {filter_name: [] for filter_name in OUR_FILTERS}
    predicted_main_class = 0
    for input, label in data_loader:
        pbar.update(1)
        inv_input = invTrans(input)
        input = input.to(device)
        input.requires_grad = True
        output = model(input)
        output = F.softmax(output, dim=1)
        prediction_score, pred_label_idx = torch.topk(output, 1)
        prediction_score = prediction_score.cpu().detach().numpy()[0][0]
        pred_label_idx.squeeze_()
        # The prediction on the unfiltered image becomes the attribution
        # target for the whole filter group.
        if OUR_FILTERS[filter_count] == 'none':
            predicted_main_class = pred_label_idx.item()

        if method == METHODS["gradshap"]:
            baseline = torch.randn(input.shape)
            baseline = baseline.to(device)

        # Each method needs slightly different attribute() arguments.
        if method == "lime":
            attributions = attr_method.attribute(input, target=1, n_samples=50)
        elif method == METHODS["ig"]:
            attributions = nt.attribute(
                input,
                target=predicted_main_class,
                n_steps=25,
            )
        elif method == METHODS["gradshap"]:
            attributions = nt.attribute(input,
                                        target=predicted_main_class,
                                        baselines=baseline)
        else:
            attributions = nt.attribute(
                input,
                nt_type="smoothgrad",
                nt_samples=nt_samples,
                target=predicted_main_class,
            )

        if use_infidelity:
            infid = infidelity(model,
                               perturb_fn,
                               input,
                               attributions,
                               target=predicted_main_class)
            inf_value = infid.cpu().detach().numpy()[0]
        else:
            inf_value = 0

        if use_sensitivity:
            if method == "lime":
                sens = sensitivity_max(
                    attr_method.attribute,
                    input,
                    target=predicted_main_class,
                    n_perturb_samples=1,
                    n_samples=200,
                    feature_mask=feature_mask,
                )
            elif method == METHODS["ig"]:
                sens = sensitivity_max(
                    nt.attribute,
                    input,
                    target=predicted_main_class,
                    n_perturb_samples=n_perturb_samples,
                    n_steps=25,
                )
            elif method == METHODS["gradshap"]:
                sens = sensitivity_max(
                    nt.attribute,
                    input,
                    target=predicted_main_class,
                    n_perturb_samples=n_perturb_samples,
                    baselines=baseline,
                )
            else:
                sens = sensitivity_max(
                    nt.attribute,
                    input,
                    target=predicted_main_class,
                    n_perturb_samples=n_perturb_samples,
                )
            sens_value = sens.cpu().detach().numpy()[0]
        else:
            sens_value = 0

        # filter_name = test_dataset.data.iloc[pbar.n]["filter"].split(" ")[0]
        attr_data = attributions.squeeze().cpu().detach().numpy()
        if render:
            # Side-by-side original image + attribution heatmap figure.
            fig, ax = viz.visualize_image_attr_multiple(
                np.transpose(attr_data, (1, 2, 0)),
                np.transpose(inv_input.squeeze().cpu().detach().numpy(),
                             (1, 2, 0)),
                ["original_image", "heat_map"],
                ["all", "positive"],
                titles=["original_image", "heat_map"],
                cmap=default_cmap,
                show_colorbar=True,
                use_pyplot=False,
                fig_size=(8, 6),
            )
            if use_sensitivity or use_infidelity:
                ax[0].set_xlabel(
                    f"Infidelity: {'{0:.6f}'.format(inf_value)}\n Sensitivity: {'{0:.6f}'.format(sens_value)}"
                )
            fig.suptitle(
                f"True: {classes_map[str(label.numpy()[0])][0]}, Pred: {classes_map[str(pred_label_idx.item())][0]}\nScore: {'{0:.4f}'.format(prediction_score)}",
                fontsize=16,
            )
            fig.savefig(
                os.path.join(
                    out_folder,
                    f"{str(idx)}-{str(filter_count)}-{str(label.numpy()[0])}-{str(OUR_FILTERS[filter_count])}-{classes_map[str(label.numpy()[0])][0]}-{classes_map[str(pred_label_idx.item())][0]}.png",
                ))
            plt.close(fig)
        # if pbar.n > 25:
        #     break
        score_for_true_label = output.cpu().detach().numpy(
        )[0][predicted_main_class]

        # Stash this filter's attribution map (HWC) and model score until
        # the whole filter group for the image has been seen.
        filter_attrs[OUR_FILTERS[filter_count]] = [
            np.moveaxis(attr_data, 0, -1),
            "{0:.8f}".format(score_for_true_label),
        ]

        data_range_for_current_set = MAX_ATT_VALUES[model_version][method][
            dataset]
        filter_count += 1
        # Once every filter for this image is processed, compute SSIM of
        # each filtered attribution against the unfiltered ("none") one.
        if filter_count >= len(OUR_FILTERS):
            ssims = []
            for rot in OUR_FILTERS:
                ssims.append("{0:.8f}".format(
                    ssim(
                        filter_attrs["none"][0],
                        filter_attrs[rot][0],
                        win_size=11,
                        data_range=data_range_for_current_set,
                        multichannel=True,
                    )))
                ssims.append(filter_attrs[rot][1])

            scores.append(ssims)
            filter_count = 0
            predicted_main_class = 0
            idx += 1

    pbar.close()

    # CSV header: alternating "<filter>-ssim;<filter>-score" columns.
    indexes = []

    for filter_name in OUR_FILTERS:
        indexes.append(str(filter_name) + "-ssim")
        indexes.append(str(filter_name) + "-score")
    np.savetxt(
        os.path.join(
            out_folder,
            f"{model_version}-{dataset}-{method}-ssim-with-range.csv"),
        np.array(scores),
        delimiter=";",
        fmt="%s",
        header=";".join([str(rot) for rot in indexes]),
    )

    print(f"Artifacts stored at {out_folder}")
예제 #21
0
파일: test.py 프로젝트: vinnamkim/captum
# Integrated Gradients against a Gaussian-blurred baseline of the input
# itself, also returning the convergence delta (approximation error).
gf = GaussianFilter(2.0)
ig = IntegratedGradients(net)
attr_ig, delta = attribute_image_features(ig,
                                          input,
                                          n_steps=50,
                                          baselines=gf(input),
                                          return_convergence_delta=True)
# CHW tensor -> HWC numpy array for visualization.
attr_ig = np.transpose(attr_ig.squeeze().cpu().detach().numpy(), (1, 2, 0))
print('Approximation delta: ', abs(delta))

# %% [markdown]
# Below we demonstrate how to use integrated gradients and noise tunnel with smoothgrad square option on the test image. Noise tunnel with `smoothgrad square` option adds gaussian noise with a standard deviation of `stdevs=0.2` to the input image `n_samples` times, computes the attributions for `n_samples` images and returns the mean of the squared attributions across `n_samples` images.

# %%
ig = IntegratedGradients(net)
nt = NoiseTunnel(ig)
# Zero-image baseline; smoothgrad_sq averages squared attributions over
# 100 noisy copies of the input.
attr_ig_nt = attribute_image_features(nt,
                                      input,
                                      baselines=input * 0,
                                      nt_type='smoothgrad_sq',
                                      n_samples=100,
                                      stdevs=0.2)
attr_ig_nt = np.transpose(
    attr_ig_nt.squeeze(0).cpu().detach().numpy(), (1, 2, 0))

# %% [markdown]
# Applies DeepLift on test image. Deeplift assigns attributions to each input pixel by looking at the differences of output and its reference in terms of the differences of the input from the reference.

# %%
dl = DeepLift(net)
attr_dl = attribute_image_features(dl, input, baselines=input * 0)