def extract_Oc(self, X_test, sliding_window=(4,)):
     oc = Occlusion(self.net)
     start = time.time()
     oc_attr_test = oc.attribute(X_test.to(self.device),
                                 sliding_window_shapes=sliding_window)
     print("temps train", time.time() - start)
     return oc_attr_test.detach().cpu().numpy()
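
A minimal, self-contained sketch of how a helper like extract_Oc can be exercised. The TinyNet model, shapes, and names here are assumptions, not part of the source; a scalar-per-sample output lets Occlusion run without a target, matching the call above.

import torch
import torch.nn as nn
from captum.attr import Occlusion

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(8, 1)

    def forward(self, x):
        # Shape (N,) output means Occlusion needs no `target`, as in extract_Oc.
        return self.fc(x).squeeze(-1)

net = TinyNet()
oc = Occlusion(net)
X_test = torch.randn(5, 8)
attr = oc.attribute(X_test, sliding_window_shapes=(4,))  # 1-D window over the 8 features
print(attr.shape)  # torch.Size([5, 8])
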
Example #2
    def _attr_occlusion(self, input, pred_label_idx, w_size=15):

        occlusion = Occlusion(self.model)
        attributions_occ = occlusion.attribute(input,
                                               strides=(3, int(w_size / 2), int(w_size / 2)),
                                               target=pred_label_idx,
                                               sliding_window_shapes=(3, w_size, w_size),
                                               baselines=0)
        _ = viz.visualize_image_attr_multiple(
            np.transpose(attributions_occ.squeeze().cpu().detach().numpy(), (1, 2, 0)),
            np.transpose(input.squeeze().cpu().detach().numpy(), (1, 2, 0)),
            ["original_image", "heat_map"],
            ["all", "positive"],
            show_colorbar=True,
            outlier_perc=2,
            )
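
As a rough guide to the cost of the call above, a back-of-the-envelope sketch of how the window and stride choices determine the number of occluded forward passes (Captum's exact edge handling differs slightly; the input size is an assumption):

# Approximate number of window positions per dimension: one per stride step.
def occlusion_positions(shape, window, stride):
    return tuple((dim - w) // s + 1 for dim, w, s in zip(shape, window, stride))

w_size = 15
print(occlusion_positions((3, 224, 224), (3, w_size, w_size), (3, w_size // 2, w_size // 2)))
# -> (1, 30, 30): roughly 900 perturbed forward passes for a 224x224 RGB input
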
Example #3
    def compute_occlusion(self, img_path, target):

        # open image
        img, transformed_img, input = self.open_image(img_path)

        # run occlusion
        occlusion = Occlusion(self.model)
        attributions_occ = occlusion.attribute(input,
                                               strides=(3, 8, 8),
                                               target=target,
                                               sliding_window_shapes=(3, 20,
                                                                      20),
                                               baselines=0)
        attributions_occ = np.transpose(
            attributions_occ.squeeze().cpu().detach().numpy(), (1, 2, 0))
        return attributions_occ
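
The method above returns an HWC numpy array; a minimal rendering sketch, with a random stand-in for the attribution map and matplotlib assumed:

import matplotlib.pyplot as plt
import numpy as np

attr = np.random.randn(224, 224, 3)           # stand-in for compute_occlusion(...) output
plt.imshow(attr.sum(axis=-1), cmap='RdBu_r')  # collapse channels into a single heat map
plt.colorbar()
plt.axis('off')
plt.show()
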
Example #4
 def _get_attributions(self, enc_data, metric, n_steps, nt_type,
                       baseline_type, strides, sliding_window_shapes):
     # Get Baseline
     baseline = self.get_baseline_img(enc_data[0], baseline_type)
     supported_metrics = {}
     if metric == 'IG':
         self._integrated_gradients = self._integrated_gradients if hasattr(
             self, '_integrated_gradients') else IntegratedGradients(
                 self.model)
         return self._integrated_gradients.attribute(enc_data[0],
                                                     baseline,
                                                     target=enc_data[1],
                                                     n_steps=n_steps)
     elif metric == 'NT':
         self._integrated_gradients = self._integrated_gradients if hasattr(
             self, '_integrated_gradients') else IntegratedGradients(
                 self.model)
         self._noise_tunnel = self._noise_tunnel if hasattr(
             self, '_noise_tunnel') else NoiseTunnel(
                 self._integrated_gradients)
         return self._noise_tunnel.attribute(enc_data[0].to(
             self.dls.device),
                                             n_samples=1,
                                             nt_type=nt_type,
                                             target=enc_data[1])
     elif metric == 'Occl':
         self._occlusion = self._occlusion if hasattr(
             self, '_occlusion') else Occlusion(self.model)
         return self._occlusion.attribute(
             enc_data[0].to(self.dls.device),
             strides=strides,
             target=enc_data[1],
             sliding_window_shapes=sliding_window_shapes,
             baselines=baseline)
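
The hasattr-based memoization above can be written more directly; a sketch of the same create-once-and-reuse idea (class and property names are illustrative):

from captum.attr import Occlusion

class AttributionCache:
    """Create each Captum attributor once and reuse it across calls."""

    def __init__(self, model):
        self.model = model
        self._occlusion = None

    @property
    def occlusion(self):
        if self._occlusion is None:
            self._occlusion = Occlusion(self.model)
        return self._occlusion
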
Example #5
class OcclusionCallback(Callback):
    "Captum Callback for Resnet Interpretation"
    def __init__(self):
        pass

    def after_fit(self):
        self._occlusion = Occlusion(self.model)

    def _formatted_data_iter(self, dl):
        # Fetch one batch and decode normalization so images display correctly.
        normalize_func = next((func for func in dl.after_batch if type(func) == Normalize), noop)
        images, labels = next(iter(dl))
        images = normalize_func.decode(images).to(dl.device)
        return images, labels

    def visualize(self, inp_data, cmap_name='custom blue', colors=None, N=256,
                  methods=['original_image', 'heat_map'], signs=["all", "positive"],
                  strides=(3, 4, 4), sliding_window_shapes=(3, 15, 15), outlier_perc=2):
        dl = self.dls.test_dl(L(inp_data), with_labels=True, bs=1)
        self.dec_img, self.dec_pred = self._formatted_data_iter(dl)

        attributions_occ = self._occlusion.attribute(self.dec_img,
                                                     strides=strides,
                                                     target=self.dec_pred,
                                                     sliding_window_shapes=sliding_window_shapes,
                                                     baselines=0)

        self.colors = [(0, '#ffffff'), (0.25, '#000000'), (1, '#000000')] if colors is None else colors
        default_cmap = LinearSegmentedColormap.from_list(cmap_name, self.colors, N=N)
        _ = viz.visualize_image_attr_multiple(
            np.transpose(attributions_occ.squeeze().cpu().detach().numpy(), (1, 2, 0)),
            np.transpose(self.dec_img.squeeze().cpu().numpy(), (1, 2, 0)),
            methods, signs,
            cmap=default_cmap,
            show_colorbar=True,
            outlier_perc=outlier_perc,
            titles=[f'Original Image - ({self.dec_pred.cpu().item()})', 'Occlusion'])
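
A standalone sketch of the colormap this callback builds, shown on dummy data; the ramp values are copied from above, everything else is illustrative:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap

colors = [(0, '#ffffff'), (0.25, '#000000'), (1, '#000000')]  # white-to-black ramp
cmap = LinearSegmentedColormap.from_list('custom blue', colors, N=256)
plt.imshow(np.random.rand(8, 8), cmap=cmap)
plt.colorbar()
plt.show()
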
Example #6
    def initialize(self, ctx):  # pylint: disable=arguments-differ
        """In this initialize function, the CIFAR10 trained model is loaded and
        the Integrated Gradients,occlusion and layer_gradcam Algorithm for
        Captum Explanations is initialized here.
        Args:
            ctx (context): It is a JSON Object containing information
            pertaining to the model artifacts parameters.
        """
        self.manifest = ctx.manifest
        properties = ctx.system_properties
        model_dir = properties.get("model_dir")
        print("Model dir is {}".format(model_dir))
        serialized_file = self.manifest["model"]["serializedFile"]
        mapping_file_path = os.path.join(model_dir, "index_to_name.json")
        if os.path.exists(mapping_file_path):
            with open(mapping_file_path) as fp:
                self.mapping = json.load(fp)
        model_pt_path = os.path.join(model_dir, serialized_file)
        self.device = torch.device(
            "cuda:" + str(properties.get("gpu_id")) if torch.cuda.is_available() else "cpu"
        )
        from cifar10_train import CIFAR10Classifier

        self.model = CIFAR10Classifier()
        self.model.load_state_dict(torch.load(model_pt_path))
        self.model.to(self.device)
        self.model.eval()
        self.model.zero_grad()
        logger.info("CIFAR10 model from path %s loaded successfully", model_dir)

        # Read the mapping file, index to object name
        mapping_file_path = os.path.join(model_dir, "class_mapping.json")
        if os.path.isfile(mapping_file_path):
            print("Mapping file present")
            with open(mapping_file_path) as pointer:
                self.mapping = json.load(pointer)
        else:
            print("Mapping file missing")
            logger.warning("Missing the class_mapping.json file.")

        self.ig = IntegratedGradients(self.model)
        self.layer_gradcam = LayerGradCam(self.model, self.model.model_conv.layer4[2].conv3)
        self.occlusion = Occlusion(self.model)
        self.initialized = True
        self.image_processing = transforms.Compose(
            [
                transforms.Resize(224),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            ]
        )
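
A self-contained sketch of the preprocess-predict-explain path this handler wires up. The resnet18 and the blank image are stand-ins for CIFAR10Classifier and a real request payload (torchvision >= 0.13 assumed for the weights argument):

import torch
from PIL import Image
from torchvision import transforms
from torchvision.models import resnet18
from captum.attr import Occlusion

preprocess = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

model = resnet18(weights=None).eval()   # stand-in for the loaded classifier
img = Image.new('RGB', (256, 256))      # stand-in for the request image
x = preprocess(img).unsqueeze(0)
pred = model(x).argmax(dim=1)
attr = Occlusion(model).attribute(x, target=pred,
                                  sliding_window_shapes=(3, 20, 20),
                                  strides=(3, 10, 10))
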
Example #7
class Explainer():
    def __init__(self, model):
        self.model = model
        self.explain = Occlusion(model)

    def get_attribution_map(self,
                            img,
                            target=None,
                            sliding_window_shapes=(3, 3, 3)):
        if target is None:
            target = torch.argmax(self.model(img), 1)
        attributions = self.explain.attribute(
            img, target=target, sliding_window_shapes=sliding_window_shapes)
        return attributions
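
A hypothetical usage of the Explainer above; the model and input shape are assumptions (torchvision >= 0.13 for the weights argument):

import torch
from torchvision.models import resnet18

model = resnet18(weights=None).eval()
explainer = Explainer(model)
img = torch.randn(1, 3, 32, 32)
attr = explainer.get_attribution_map(img, sliding_window_shapes=(3, 8, 8))
print(attr.shape)  # matches img: torch.Size([1, 3, 32, 32])
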
Example #8
    def initialize(self, ctx):  #pylint: disable=arguments-differ
        """In this initialize function, the Titanic trained model is loaded and
        the Integrated Gradients Algorithm for Captum Explanations
        is initialized here.
        Args:
            ctx (context): It is a JSON Object containing information
            pertaining to the model artifacts parameters.
        """
        self.manifest = ctx.manifest
        properties = ctx.system_properties
        model_dir = properties.get("model_dir")
        print("Model dir is {}".format(model_dir))
        serialized_file = self.manifest["model"]["serializedFile"]
        model_pt_path = os.path.join(model_dir, serialized_file)
        self.device = torch.device(  #pylint: disable=no-member
            "cuda:" + str(properties.get("gpu_id"))
            if torch.cuda.is_available() else "cpu")

        self.model = CIFAR10CLASSIFIER()
        self.model.load_state_dict(torch.load(model_pt_path))
        self.model.to(self.device)
        self.model.eval()
        self.model.zero_grad()
        logger.info("CIFAR10 model from path %s loaded successfully",
                    model_dir)

        # Read the mapping file, index to object name
        mapping_file_path = os.path.join(model_dir, "class_mapping.json")
        if os.path.isfile(mapping_file_path):
            print("Mapping file present")
            with open(mapping_file_path) as file_pointer:
                self.mapping = json.load(file_pointer)
        else:
            print("Mapping file missing")
            logger.warning("Missing the class_mapping.json file.")

        self.ig = IntegratedGradients(self.model)
        self.layer_gradcam = LayerGradCam(
            self.model, self.model.model_conv.layer4[2].conv3)
        self.occlusion = Occlusion(self.model)
        self.initialized = True
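
This handler also initializes LayerGradCam; a standalone sketch of how such a layer attribution is typically computed and upsampled. The resnet18 layer is a stand-in for self.model.model_conv.layer4[2].conv3:

import torch
from torchvision.models import resnet18
from captum.attr import LayerGradCam, LayerAttribution

model = resnet18(weights=None).eval()
gradcam = LayerGradCam(model, model.layer4[-1].conv2)   # stand-in layer
x = torch.randn(1, 3, 224, 224)
attr = gradcam.attribute(x, target=model(x).argmax(dim=1))
heat = LayerAttribution.interpolate(attr, (224, 224))   # upsample to input size
print(heat.shape)  # torch.Size([1, 1, 224, 224])
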
Example #9
 def __init__(
         self, model, rasterizer, root: str = 'preprocess', grad_enabled: bool = False, device: str = 'cpu',
         turn_thresh: float = 3., speed_thresh: float = 0.5, k: int = 500, prog=True,
         output_root: str = 'validation', extreme_k: int = 5, visualize=True, seaborn_style: str = 'darkgrid',
 ):
     sns.set_theme(style=seaborn_style)
     self.root = root
     self.model = model.to(device)
     self.device = device
     self.grad_enabled = grad_enabled
     self.files = [f for f in listdir(root) if isfile(join(root, f)) and f.endswith('.npz')]
     self.splits = defaultdict(dict)
     self.k = k
     self.visualize = visualize
     self.turn_thresh = turn_thresh
     self.speed_thresh = speed_thresh
     self.prog = prog
     self.output_root = output_root
     Path(output_root).mkdir(parents=True, exist_ok=True)
     self.extreme_k = extreme_k
     self.rasterizer = rasterizer
     self.saliency = None if not visualize else Saliency(self.model)
     self.occlusion = None if not visualize else Occlusion(self.model)
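
Since this validator keeps both a Saliency and an Occlusion attributor, a toy side-by-side sketch (model and shapes assumed): gradient saliency costs one backward pass, occlusion costs one forward pass per window position.

import torch
import torch.nn as nn
from captum.attr import Saliency, Occlusion

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 16 * 16, 10))
x = torch.randn(1, 3, 16, 16)
grad_attr = Saliency(model).attribute(x, target=0)
occ_attr = Occlusion(model).attribute(x, target=0,
                                      sliding_window_shapes=(3, 4, 4))
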
Example #10
 def after_fit(self):
     self._occlusion = Occlusion(self.model)
Example #11
def generate_saliency(model_path, saliency_path, saliency, aggregation):
    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)
    model_args = Namespace(**checkpoint['args'])
    if args.model == 'lstm':
        model = LSTM_MODEL(tokenizer,
                           model_args,
                           n_labels=checkpoint['args']['labels']).to(device)
        model.load_state_dict(checkpoint['model'])
    elif args.model == 'trans':
        transformer_config = BertConfig.from_pretrained(
            'bert-base-uncased', num_labels=model_args.labels)
        model_cp = BertForSequenceClassification.from_pretrained(
            'bert-base-uncased', config=transformer_config).to(device)
        checkpoint = torch.load(model_path,
                                map_location=lambda storage, loc: storage)
        model_cp.load_state_dict(checkpoint['model'])
        model = BertModelWrapper(model_cp)
    else:
        model = CNN_MODEL(tokenizer,
                          model_args,
                          n_labels=checkpoint['args']['labels']).to(device)
        model.load_state_dict(checkpoint['model'])

    model.train()

    pad_to_max = False
    if saliency == 'deeplift':
        ablator = DeepLift(model)
    elif saliency == 'guided':
        ablator = GuidedBackprop(model)
    elif saliency == 'sal':
        ablator = Saliency(model)
    elif saliency == 'inputx':
        ablator = InputXGradient(model)
    elif saliency == 'occlusion':
        ablator = Occlusion(model)
    else:
        raise ValueError(f'Unsupported saliency method: {saliency}')

    coll_call = get_collate_fn(dataset=args.dataset, model=args.model)

    return_attention_masks = args.model == 'trans'

    collate_fn = partial(coll_call,
                         tokenizer=tokenizer,
                         device=device,
                         return_attention_masks=return_attention_masks,
                         pad_to_max_length=pad_to_max)
    test = get_dataset(path=args.dataset_dir,
                       mode=args.split,
                       dataset=args.dataset)
    batch_size = args.batch_size if args.batch_size is not None \
        else model_args.batch_size
    test_dl = DataLoader(batch_size=batch_size,
                         dataset=test,
                         shuffle=False,
                         collate_fn=collate_fn)

    # PREDICTIONS
    predictions_path = model_path + '.predictions'
    if not os.path.exists(predictions_path):
        predictions = defaultdict(lambda: [])
        for batch in tqdm(test_dl, desc='Running test prediction... '):
            if args.model == 'trans':
                logits = model(batch[0],
                               attention_mask=batch[1],
                               labels=batch[2].long())
            else:
                logits = model(batch[0])
            logits = logits.detach().cpu().numpy().tolist()
            predicted = np.argmax(np.array(logits), axis=-1)
            predictions['class'] += predicted.tolist()
            predictions['logits'] += logits

        with open(predictions_path, 'w') as out:
            json.dump(predictions, out)

    # COMPUTE SALIENCY
    if saliency != 'occlusion':
        embedding_layer_name = ('model.bert.embeddings'
                                if args.model == 'trans' else 'embedding')
        interpretable_embedding = configure_interpretable_embedding_layer(
            model, embedding_layer_name)

    class_attr_list = defaultdict(lambda: [])
    token_ids = []
    saliency_flops = []

    for batch in tqdm(test_dl, desc='Running Saliency Generation...'):
        if args.model == 'cnn':
            additional = None
        elif args.model == 'trans':
            additional = (batch[1], batch[2])
        else:
            additional = batch[-1]

        token_ids += batch[0].detach().cpu().numpy().tolist()
        if saliency != 'occlusion':
            input_embeddings = interpretable_embedding.indices_to_embeddings(
                batch[0])

        if not args.no_time:
            high.start_counters([
                events.PAPI_FP_OPS,
            ])
        for cls_ in range(checkpoint['args']['labels']):
            if saliency == 'occlusion':
                attributions = ablator.attribute(
                    batch[0],
                    sliding_window_shapes=(args.sw, ),
                    target=cls_,
                    additional_forward_args=additional)
            else:
                attributions = ablator.attribute(
                    input_embeddings,
                    target=cls_,
                    additional_forward_args=additional)

            attributions = summarize_attributions(
                attributions, type=aggregation, model=model,
                tokens=batch[0]).detach().cpu().numpy().tolist()
            class_attr_list[cls_] += [[_li for _li in _l]
                                      for _l in attributions]

        if not args.no_time:
            saliency_flops.append(
                sum(high.stop_counters()) / batch[0].shape[0])

    if saliency != 'occlusion':
        remove_interpretable_embedding_layer(model, interpretable_embedding)

    # SERIALIZE
    print('Serializing...', flush=True)
    with open(saliency_path, 'w') as out:
        for instance_i, _ in enumerate(test):
            saliencies = []
            for token_i, token_id in enumerate(token_ids[instance_i]):
                token_sal = {'token': tokenizer.ids_to_tokens[token_id]}
                for cls_ in range(checkpoint['args']['labels']):
                    token_sal[int(
                        cls_)] = class_attr_list[cls_][instance_i][token_i]
                saliencies.append(token_sal)

            out.write(json.dumps({'tokens': saliencies}) + '\n')
            out.flush()

    return saliency_flops
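
For the 'occlusion' branch above, which slides a 1-D window over raw token IDs, a toy self-contained sketch; the embedding classifier and sizes are assumptions, and baseline 0 stands in for a PAD token:

import torch
import torch.nn as nn
from captum.attr import Occlusion

class ToyTextClassifier(nn.Module):
    def __init__(self, vocab_size=100, dim=16, n_labels=2):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, dim)
        self.fc = nn.Linear(dim, n_labels)

    def forward(self, ids):
        return self.fc(self.emb(ids).mean(dim=1))

model = ToyTextClassifier()
ids = torch.randint(1, 100, (2, 12))   # batch of token-ID sequences
ablator = Occlusion(model)
attr = ablator.attribute(ids, sliding_window_shapes=(3,), target=0)
print(attr.shape)  # torch.Size([2, 12])
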
Example #12
        ap = average_precision_score(y_true, y_scores)
        running_auc.append(auc)
        print('ALL AUCs', running_auc)
        print('Best AUC at epoch', np.argmax(running_auc))
        if epoch == (EPOCHS - 1):
            val_auc.append(auc)

        writer.add_scalar('Loss/val', loss.item(), epoch)
        writer.add_scalar('AUC/val', auc, epoch)
        writer.add_scalar('AP/val', ap, epoch)
        print("Epoch: {}, Loss: {}, Test Accuracy: {}, AUC: {}".format(
            epoch, running_loss, round(acc, 4), auc))

        ## Occlusion
        if OCCLUSION:
            oc = Occlusion(model)
            x_shape = 16
            x_stride = 8
            #random_index = np.random.randint(images.size(0))
            #images = images[random_index, ...][None, ...].cuda()
            #labels = labels[random_index, ...][None, ...].cuda()
            # Assumption: occlusion is run on the positive samples only; these two
            # definitions are needed by the prints and the attribute call below.
            oc_images = images[(labels == 1).squeeze()].cuda()
            oc_labels = labels[(labels == 1).squeeze()].cuda()
            print('oc_images', oc_images.shape)
            print('oc_labels', oc_labels.shape)
            baseline = torch.zeros_like(oc_images).cuda()
            oc_attributions = oc.attribute(oc_images,
                                           sliding_window_shapes=(3, x_shape, x_shape),
                                           strides=(3, int(x_stride / 2), int(x_stride / 2)),
                                           target=0,            # assumption: the call is truncated in the source
                                           baselines=baseline)
Example #13

                    # labels = labels[random_index, ...][None, ...].cuda()
                    # print(label.shape, label)
                    # print(image.shape, image)
                    labels = labels.cuda()
                    labels = labels.unsqueeze(1).float()
                    # bloods = bloods[random_index, ...][None, ...]
                    bloods = bloods.cuda().float()

                    # Account for tta: Take first image (non-augmented)
                    # Label does not need to be touched because it is obv. the same for this image regardless of tta
                    # Set a baseline
                    baseline_bloods = torch.zeros_like(bloods).cuda().float()

                    # Calculate attribution scores + delta
                    # ig = IntegratedGradients(model)
                    oc = Occlusion(model)
                    # nt = NoiseTunnel(ig)
                    # attributions, delta = nt.attribute(image, nt_type='smoothgrad', stdevs=0.02, n_samples=2,
                    #                                    baselines=baseline, target=0, return_convergence_delta=True)
                    _, target_ID = torch.max(labels, 1)
                    print(target_ID)
                    print(baseline_bloods.shape)
                    # attributions = ig.attribute(image, baseline, target=target_ID, return_convergence_delta=False)
                    blud0 = oc.attribute(bloods, sliding_window_shapes=(1,),
                                         strides=(1,), target=None,
                                         baselines=baseline_bloods)
                    # print('IG + SmoothGrad Attributions:', attributions)
                    # print('Convergence Delta:', delta)

                    # Print
                    for single_feature in range(blud0.shape[0]):
Example #14
                    # print(label.shape, label)
                    # print(image.shape, image)
                    print(images.shape, labels.shape)
                    images = images.cuda()
                    labels = labels.cuda()

                    # Account for tta: Take first image (non-augmented)
                    # Label does not need to be touched because it is obv. the same for this image regardless of tta
                    images = images[:, 0, ...]

                    # Set a baseline
                    baseline = torch.zeros_like(images).cuda()

                    # Calculate attribution scores + delta
                    # ig = IntegratedGradients(model)
                    oc = Occlusion(model)
                    # nt = NoiseTunnel(ig)
                    # attributions, delta = nt.attribute(image, nt_type='smoothgrad', stdevs=0.02, n_samples=2,
                    #                                    baselines=baseline, target=0, return_convergence_delta=True)
                    _, target_ID = torch.max(labels, 1)
                    print(target_ID)
                    # attributions = ig.attribute(image, baseline, target=target_ID, return_convergence_delta=False)
                    x_shape = 16
                    x_stride = 8

                    oc_attributions0 = oc.attribute(
                        images,
                        sliding_window_shapes=(3, x_shape, x_shape),
                        strides=(3, x_stride, x_stride),
                        target=0,
                        baselines=baseline)
Example #15
input_img = normalize(center_crop(img)).unsqueeze(0)


######################################################################
# Computing attributions
# ----------------------


######################################################################
# Among the model's top-3 predictions are classes 208 and 283, which
# correspond to a dog and a cat.
#
# We use Captum's ``Occlusion`` algorithm to attribute each prediction
# to the corresponding parts of the input.

from captum.attr import Occlusion

occlusion = Occlusion(model)

strides = (3, 9, 9)                  # smaller = finer-grained attribution, but slower
target = 208                         # index of "Labrador retriever" in ImageNet
sliding_window_shapes = (3, 45, 45)  # window large enough to alter the object's appearance
baselines = 0                        # value used to occlude; 0 corresponds to gray

attribution_dog = occlusion.attribute(input_img,
                                       strides = strides,
                                       target=target,
                                       sliding_window_shapes=sliding_window_shapes,
                                       baselines=baselines)


target = 283                      # index of "Persian cat" in ImageNet
attribution_cat = occlusion.attribute(input_img,
                                      strides=strides,
                                      target=target,
                                      sliding_window_shapes=sliding_window_shapes,
                                      baselines=baselines)  # assumption: call truncated in the source
Example #16

def main(args):

    train_loader, test_loader = data_generator(args.data_dir,1)

    for m in range(len(models)):

        model_name = "model_{}_NumFeatures_{}".format(models[m],args.NumFeatures)
        model_filename = args.model_dir + 'm_' + model_name + '.pt'
        pretrained_model = torch.load(open(model_filename, "rb"),map_location=device) 
        pretrained_model.to(device)



        if(args.GradFlag):
            Grad = Saliency(pretrained_model)
        if(args.IGFlag):
            IG = IntegratedGradients(pretrained_model)
        if(args.DLFlag):
            DL = DeepLift(pretrained_model)
        if(args.GSFlag):
            GS = GradientShap(pretrained_model)
        if(args.DLSFlag):
            DLS = DeepLiftShap(pretrained_model)                 
        if(args.SGFlag):
            Grad_ = Saliency(pretrained_model)
            SG = NoiseTunnel(Grad_)
        if(args.ShapleySamplingFlag):
            SS = ShapleyValueSampling(pretrained_model)
        if(args.FeaturePermutationFlag):
            FP = FeaturePermutation(pretrained_model)
        if(args.FeatureAblationFlag):
            FA = FeatureAblation(pretrained_model)         
        if(args.OcclusionFlag):
            OS = Occlusion(pretrained_model)

        timeMask = np.zeros((args.NumTimeSteps, args.NumFeatures), dtype=int)
        featureMask = np.zeros((args.NumTimeSteps, args.NumFeatures), dtype=int)
        for i in range(args.NumTimeSteps):
            timeMask[i, :] = i

        for i in range(args.NumTimeSteps):
            featureMask[:, i] = i

        indexes = [[] for _ in range(5)]  # one slot for each digit 5..9
        for i, (data, target) in enumerate(test_loader):
            if(target == 5 or target == 6 or target == 7 or target == 8 or target == 9):
                index = target - 5

                if(len(indexes[index]) < 1):
                    indexes[index].append(i)
        for j, index in enumerate(indexes):
            print(index)
        # indexes = [[21],[17],[84],[9]]

        for j, index in enumerate(indexes):
            print("Getting Saliency for number", j+1)
            for i, (data, target) in enumerate(test_loader):
                if(i in index):
                        
                    labels =  target.to(device)
             
                    input = data.reshape(-1, args.NumTimeSteps, args.NumFeatures).to(device)
                    input = input.requires_grad_(True)  # torch.autograd.Variable is deprecated

                    baseline_single=torch.Tensor(np.random.random(input.shape)).to(device)
                    baseline_multiple=torch.Tensor(np.random.random((input.shape[0]*5,input.shape[1],input.shape[2]))).to(device)
                    inputMask= np.zeros((input.shape))
                    inputMask[:,:,:]=timeMask
                    inputMask =torch.Tensor(inputMask).to(device)
                    mask_single= torch.Tensor(timeMask).to(device)
                    mask_single=mask_single.reshape(1,args.NumTimeSteps, args.NumFeatures).to(device)

                    Data=data.reshape(args.NumTimeSteps, args.NumFeatures).data.cpu().numpy()
                    
                    target_=int(target.data.cpu().numpy()[0])

                    plotExampleBox(Data,args.Graph_dir+'Sample_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)




                    if(args.GradFlag):
                        attributions = Grad.attribute(input, \
                                                      target=labels)
                        
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)

                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_Grad_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(Grad,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=None)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_Grad_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)



                    if(args.IGFlag):
                        attributions = IG.attribute(input,  \
                                                    baselines=baseline_single, \
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)

                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_IG_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(IG,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_single)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_IG_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)




                    if(args.DLFlag):
                        attributions = DL.attribute(input,  \
                                                    baselines=baseline_single, \
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_DL_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)


                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(DL,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_single)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_DL_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)




                    if(args.GSFlag):

                        attributions = GS.attribute(input,  \
                                                    baselines=baseline_multiple, \
                                                    stdevs=0.09,\
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_GS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)

 
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(GS,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_multiple)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_GS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)


                    if(args.DLSFlag):

                        attributions = DLS.attribute(input,  \
                                                    baselines=baseline_multiple, \
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_DLS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(DLS,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_multiple)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_DLS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)



                    if(args.SGFlag):
                        attributions = SG.attribute(input, \
                                                    target=labels)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_SG_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(SG,input, args.NumFeatures,args.NumTimeSteps, labels)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_SG_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)


                    if(args.ShapleySamplingFlag):
                        attributions = SS.attribute(input, \
                                        baselines=baseline_single, \
                                        target=labels,\
                                        feature_mask=inputMask)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_SVS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(SS,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_single,hasFeatureMask=inputMask)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_SVS_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                    # if(args.FeaturePermutationFlag):
                    #     attributions = FP.attribute(input, \
                    #                     target=labels),
                    #                     # perturbations_per_eval= 1,\
                    #                     # feature_mask=mask_single)
                    #     saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                    #     plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_FP',greyScale=True)


                    if(args.FeatureAblationFlag):
                        attributions = FA.attribute(input, \
                                        target=labels)
                                        # perturbations_per_eval= input.shape[0],\
                                        # feature_mask=mask_single)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)
                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_FA_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(FA,input, args.NumFeatures,args.NumTimeSteps, labels)
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_FA_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)

                    if(args.OcclusionFlag):
                        attributions = OS.attribute(input, \
                                        sliding_window_shapes=(1,int(args.NumFeatures/10)),
                                        target=labels,
                                        baselines=baseline_single)
                        saliency_=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,attributions)

                        plotExampleBox(saliency_[0],args.Graph_dir+models[m]+'_FO_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
                        if(args.TSRFlag):
                            TSR_attributions =  getTwoStepRescaling(OS,input, args.NumFeatures,args.NumTimeSteps, labels,hasBaseline=baseline_single,hasSliding_window_shapes= (1,int(args.NumFeatures/10)))
                            TSR_saliency=Helper.givenAttGetRescaledSaliency(args.NumTimeSteps, args.NumFeatures,TSR_attributions,isTensor=False)
                            plotExampleBox(TSR_saliency,args.Graph_dir+models[m]+'_TSR_FO_MNIST_'+str(target_)+'_index_'+str(i+1),greyScale=True)
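
A compact, runnable sketch of the time-series occlusion pattern used above, with toy sizes standing in for args.NumTimeSteps and args.NumFeatures:

import torch
import torch.nn as nn
from captum.attr import Occlusion

T, F = 20, 10                          # stand-ins for NumTimeSteps / NumFeatures
model = nn.Sequential(nn.Flatten(), nn.Linear(T * F, 5))
x = torch.randn(4, T, F)
OS = Occlusion(model)
attr = OS.attribute(x, sliding_window_shapes=(1, F // 10),  # one time step at a time
                    target=2, baselines=0.0)
print(attr.shape)  # torch.Size([4, 20, 10])
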
Example #17
     params={
         "n_steps":
         NumberConfig(value=25, limit=(2, None)),
         "method":
         StrEnumConfig(limit=SUPPORTED_METHODS, value="gausslegendre"),
     },
     post_process={"n_steps": int},
 ),
 FeatureAblation.get_name():
 ConfigParameters(params={
     "perturbations_per_eval":
     NumberConfig(value=1, limit=(1, 100))
 }, ),
 Deconvolution.get_name():
 ConfigParameters(params={}),
 Occlusion.get_name():
 ConfigParameters(
     params={
         "sliding_window_shapes": StrConfig(value=""),
         "strides": StrConfig(value=""),
         "perturbations_per_eval": NumberConfig(value=1, limit=(1, 100)),
     },
     post_process={
         "sliding_window_shapes": _str_to_tuple,
         "strides": _str_to_tuple,
         "perturbations_per_eval": int,
     },
 ),
 GuidedBackprop.get_name():
 ConfigParameters(params={}),
 InputXGradient.get_name():
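
_str_to_tuple and the Config classes are referenced but not shown; a plausible sketch of such a post-processing parser (hypothetical, not necessarily the library's actual helper):

def _str_to_tuple(value):
    # Parse a user-entered string like "3,15,15" or "(3, 15, 15)" into ints.
    if isinstance(value, tuple):
        return value
    return tuple(int(part) for part in value.strip('()').split(',') if part.strip())

print(_str_to_tuple('(3, 15, 15)'))  # (3, 15, 15)
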
Example #18
vgg16VB = vgg16VB.eval()


def prediction(inp):
    outputs = vgg16PN(inp)
    probN = F.softmax(outputs, 1)[0][0].detach().cpu().numpy()
    if int(np.round(probN)) == 0:
        outputs = vgg16VB(inp)
        probsVB = F.softmax(outputs, 1)[0].detach().cpu().numpy()
    else:
        probsVB = (1 - probN) / 2 * np.ones(2)
    probs = np.append(probN, probsVB)
    return probs / np.sum(probs)


occlusion = Occlusion(vgg16PN)
for inp, filename in tqdm(zip(dataloaders, image_datasets.total_imgs)):
    inp = inp.to(device)
    probs = prediction(inp)
    attributions_ig = occlusion.attribute(inp,
                                          strides=(1, 10, 10),
                                          target=0,
                                          sliding_window_shapes=(1, 15, 15),
                                          baselines=0)

    fig = plt.figure(figsize=(18, 6), dpi=200)
    ax1 = fig.add_subplot(131)
    ax1.imshow((inp.squeeze().cpu().detach().numpy()), cmap='viridis')
    ax1.axis('off')

    ax2 = fig.add_subplot(132)
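
Occlusion only needs a callable, so a composite scorer like prediction() above can be attributed directly as long as it returns tensors; a toy sketch with assumed single-channel shapes:

import torch
import torch.nn as nn
from captum.attr import Occlusion

net = nn.Sequential(nn.Flatten(), nn.Linear(1 * 32 * 32, 3), nn.Softmax(dim=1))

def forward_probs(inp):
    return net(inp)   # any tensor-returning callable can be the forward function

occ = Occlusion(forward_probs)
x = torch.randn(1, 1, 32, 32)
attr = occ.attribute(x, strides=(1, 10, 10), target=0,
                     sliding_window_shapes=(1, 15, 15), baselines=0)
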
Example #19

def main(args, DatasetsTypes, DataGenerationTypes, models, device):
    for m in range(len(models)):

        for x in range(len(DatasetsTypes)):
            for y in range(len(DataGenerationTypes)):

                if (DataGenerationTypes[y] == None):
                    args.DataName = DatasetsTypes[x] + "_Box"
                else:
                    args.DataName = DatasetsTypes[
                        x] + "_" + DataGenerationTypes[y]

                Training = np.load(args.data_dir + "SimulatedTrainingData_" +
                                   args.DataName + "_F_" +
                                   str(args.NumFeatures) + "_TS_" +
                                   str(args.NumTimeSteps) + ".npy")
                TrainingMetaDataset = np.load(args.data_dir +
                                              "SimulatedTrainingMetaData_" +
                                              args.DataName + "_F_" +
                                              str(args.NumFeatures) + "_TS_" +
                                              str(args.NumTimeSteps) + ".npy")
                TrainingLabel = TrainingMetaDataset[:, 0]

                Testing = np.load(args.data_dir + "SimulatedTestingData_" +
                                  args.DataName + "_F_" +
                                  str(args.NumFeatures) + "_TS_" +
                                  str(args.NumTimeSteps) + ".npy")
                TestingDataset_MetaData = np.load(args.data_dir +
                                                  "SimulatedTestingMetaData_" +
                                                  args.DataName + "_F_" +
                                                  str(args.NumFeatures) +
                                                  "_TS_" +
                                                  str(args.NumTimeSteps) +
                                                  ".npy")
                TestingLabel = TestingDataset_MetaData[:, 0]

                Training = Training.reshape(
                    Training.shape[0], Training.shape[1] * Training.shape[2])
                Testing = Testing.reshape(Testing.shape[0],
                                          Testing.shape[1] * Testing.shape[2])

                scaler = MinMaxScaler()
                scaler.fit(Training)
                Training = scaler.transform(Training)
                Testing = scaler.transform(Testing)

                TrainingRNN = Training.reshape(Training.shape[0],
                                               args.NumTimeSteps,
                                               args.NumFeatures)
                TestingRNN = Testing.reshape(Testing.shape[0],
                                             args.NumTimeSteps,
                                             args.NumFeatures)

                train_dataRNN = data_utils.TensorDataset(
                    torch.from_numpy(TrainingRNN),
                    torch.from_numpy(TrainingLabel))
                train_loaderRNN = data_utils.DataLoader(
                    train_dataRNN, batch_size=args.batch_size, shuffle=True)

                test_dataRNN = data_utils.TensorDataset(
                    torch.from_numpy(TestingRNN),
                    torch.from_numpy(TestingLabel))
                test_loaderRNN = data_utils.DataLoader(
                    test_dataRNN, batch_size=args.batch_size, shuffle=False)

                modelName = "Simulated"
                modelName += args.DataName

                saveModelName = "../Models/" + models[m] + "/" + modelName
                saveModelBestName = saveModelName + "_BEST.pkl"

                pretrained_model = torch.load(saveModelBestName,
                                              map_location=device)
                Test_Acc = checkAccuracy(test_loaderRNN, pretrained_model,
                                         args)
                print('{} {} model BestAcc {:.4f}'.format(
                    args.DataName, models[m], Test_Acc))

                if (Test_Acc >= 90):

                    if (args.GradFlag):
                        rescaledGrad = np.zeros((TestingRNN.shape))
                        Grad = Saliency(pretrained_model)

                    if (args.IGFlag):
                        rescaledIG = np.zeros((TestingRNN.shape))
                        IG = IntegratedGradients(pretrained_model)
                    if (args.DLFlag):
                        rescaledDL = np.zeros((TestingRNN.shape))
                        DL = DeepLift(pretrained_model)
                    if (args.GSFlag):
                        rescaledGS = np.zeros((TestingRNN.shape))
                        GS = GradientShap(pretrained_model)
                    if (args.DLSFlag):
                        rescaledDLS = np.zeros((TestingRNN.shape))
                        DLS = DeepLiftShap(pretrained_model)

                    if (args.SGFlag):
                        rescaledSG = np.zeros((TestingRNN.shape))
                        Grad_ = Saliency(pretrained_model)
                        SG = NoiseTunnel(Grad_)

                    if (args.ShapleySamplingFlag):
                        rescaledShapleySampling = np.zeros((TestingRNN.shape))
                        SS = ShapleyValueSampling(pretrained_model)
                    if (args.FeaturePermutationFlag):
                        rescaledFeaturePermutation = np.zeros(
                            (TestingRNN.shape))
                        FP = FeaturePermutation(pretrained_model)
                    if (args.FeatureAblationFlag):
                        rescaledFeatureAblation = np.zeros((TestingRNN.shape))
                        FA = FeatureAblation(pretrained_model)

                    if (args.OcclusionFlag):
                        rescaledOcclusion = np.zeros((TestingRNN.shape))
                        OS = Occlusion(pretrained_model)

                    idx = 0
                    mask = np.zeros((args.NumTimeSteps, args.NumFeatures),
                                    dtype=int)
                    for i in range(args.NumTimeSteps):
                        mask[i, :] = i

                    for i, (samples, labels) in enumerate(test_loaderRNN):

                        print('[{}/{}] {} {} model accuracy {:.2f}'\
                                .format(i,len(test_loaderRNN), models[m], args.DataName, Test_Acc))

                        input = samples.reshape(-1, args.NumTimeSteps,
                                                args.NumFeatures).to(device)
                        input = input.requires_grad_(True)  # torch.autograd.Variable is deprecated

                        batch_size = input.shape[0]
                        baseline_single = torch.from_numpy(
                            np.random.random(input.shape)).to(device)
                        baseline_multiple = torch.from_numpy(
                            np.random.random(
                                (input.shape[0] * 5, input.shape[1],
                                 input.shape[2]))).to(device)
                        inputMask = np.zeros((input.shape))
                        inputMask[:, :, :] = mask
                        inputMask = torch.from_numpy(inputMask).to(device)
                        mask_single = torch.from_numpy(mask).to(device)
                        mask_single = mask_single.reshape(
                            1, args.NumTimeSteps, args.NumFeatures).to(device)
                        labels = torch.tensor(labels.int().tolist()).to(device)

                        if (args.GradFlag):
                            attributions = Grad.attribute(input, \
                                                          target=labels)
                            rescaledGrad[
                                idx:idx +
                                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.IGFlag):
                            attributions = IG.attribute(input,  \
                                                        baselines=baseline_single, \
                                                        target=labels)
                            rescaledIG[
                                idx:idx +
                                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.DLFlag):
                            attributions = DL.attribute(input,  \
                                                        baselines=baseline_single, \
                                                        target=labels)
                            rescaledDL[
                                idx:idx +
                                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.GSFlag):

                            attributions = GS.attribute(input,  \
                                                        baselines=baseline_multiple, \
                                                        stdevs=0.09,\
                                                        target=labels)
                            rescaledGS[
                                idx:idx +
                                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.DLSFlag):

                            attributions = DLS.attribute(input,  \
                                                        baselines=baseline_multiple, \
                                                        target=labels)
                            rescaledDLS[
                                idx:idx +
                                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.SGFlag):
                            attributions = SG.attribute(input, \
                                                        target=labels)
                            rescaledSG[
                                idx:idx +
                                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.ShapleySamplingFlag):
                            attributions = SS.attribute(input, \
                                            baselines=baseline_single, \
                                            target=labels,\
                                            feature_mask=inputMask)
                            rescaledShapleySampling[
                                idx:idx +
                                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.FeaturePermutationFlag):
                            attributions = FP.attribute(input, \
                                            target=labels,
                                            perturbations_per_eval= input.shape[0],\
                                            feature_mask=mask_single)
                            rescaledFeaturePermutation[
                                idx:idx +
                                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.FeatureAblationFlag):
                            attributions = FA.attribute(input, \
                                            target=labels)
                            # perturbations_per_eval= input.shape[0],\
                            # feature_mask=mask_single)
                            rescaledFeatureAblation[
                                idx:idx +
                                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        if (args.OcclusionFlag):
                            attributions = OS.attribute(input, \
                                            sliding_window_shapes=(1,args.NumFeatures),
                                            target=labels,
                                            baselines=baseline_single)
                            rescaledOcclusion[
                                idx:idx +
                                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                                    args, attributions)

                        idx += batch_size

                    if (args.plot):
                        index = random.randint(0, TestingRNN.shape[0] - 1)
                        plotExampleBox(TestingRNN[index, :, :],
                                       args.Saliency_Maps_graphs_dir +
                                       args.DataName + "_" + models[m] +
                                       '_sample',
                                       flip=True)

                        print("Plotting sample", index)
                        if (args.GradFlag):
                            plotExampleBox(rescaledGrad[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_Grad',
                                           greyScale=True,
                                           flip=True)

                        if (args.IGFlag):
                            plotExampleBox(rescaledIG[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_IG',
                                           greyScale=True,
                                           flip=True)

                        if (args.DLFlag):
                            plotExampleBox(rescaledDL[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_DL',
                                           greyScale=True,
                                           flip=True)

                        if (args.GSFlag):
                            plotExampleBox(rescaledGS[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_GS',
                                           greyScale=True,
                                           flip=True)

                        if (args.DLSFlag):
                            plotExampleBox(rescaledDLS[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_DLS',
                                           greyScale=True,
                                           flip=True)

                        if (args.SGFlag):
                            plotExampleBox(rescaledSG[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_SG',
                                           greyScale=True,
                                           flip=True)

                        if (args.ShapleySamplingFlag):
                            plotExampleBox(
                                rescaledShapleySampling[index, :, :],
                                args.Saliency_Maps_graphs_dir + args.DataName +
                                "_" + models[m] + '_ShapleySampling',
                                greyScale=True,
                                flip=True)

                        if (args.FeaturePermutationFlag):
                            plotExampleBox(
                                rescaledFeaturePermutation[index, :, :],
                                args.Saliency_Maps_graphs_dir + args.DataName +
                                "_" + models[m] + '_FeaturePermutation',
                                greyScale=True,
                                flip=True)

                        if (args.FeatureAblationFlag):
                            plotExampleBox(
                                rescaledFeatureAblation[index, :, :],
                                args.Saliency_Maps_graphs_dir + args.DataName +
                                "_" + models[m] + '_FeatureAblation',
                                greyScale=True,
                                flip=True)

                        if (args.OcclusionFlag):
                            plotExampleBox(rescaledOcclusion[index, :, :],
                                           args.Saliency_Maps_graphs_dir +
                                           args.DataName + "_" + models[m] +
                                           '_Occlusion',
                                           greyScale=True,
                                           flip=True)

                    if (args.save):
                        if (args.GradFlag):
                            print("Saving Grad", modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_Grad_rescaled", rescaledGrad)

                        if (args.IGFlag):
                            print("Saving IG", modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_IG_rescaled", rescaledIG)

                        if (args.DLFlag):
                            print("Saving DL", modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_DL_rescaled", rescaledDL)

                        if (args.GSFlag):
                            print("Saving GS", modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_GS_rescaled", rescaledGS)

                        if (args.DLSFlag):
                            print("Saving DLS", modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_DLS_rescaled", rescaledDLS)

                        if (args.SGFlag):
                            print("Saving SG", modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_SG_rescaled", rescaledSG)

                        if (args.ShapleySamplingFlag):
                            print("Saving ShapleySampling",
                                  modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_ShapleySampling_rescaled",
                                rescaledShapleySampling)

                        if (args.FeaturePermutationFlag):
                            print("Saving FeaturePermutation",
                                  modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_FeaturePermutation_rescaled",
                                rescaledFeaturePermutation)

                        if (args.FeatureAblationFlag):
                            print("Saving FeatureAblation",
                                  modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_FeatureAblation_rescaled",
                                rescaledFeatureAblation)

                        if (args.OcclusionFlag):
                            print("Saving Occlusion",
                                  modelName + "_" + models[m])
                            np.save(
                                args.Saliency_dir + modelName + "_" +
                                models[m] + "_Occlusion_rescaled",
                                rescaledOcclusion)

                else:
                    logging.basicConfig(filename=args.log_file,
                                        level=logging.DEBUG)

                    logging.debug('{} {} model BestAcc {:.4f}'.format(
                        args.DataName, models[m], Test_Acc))

                    # mode 'a' creates the file if it does not exist, so one branch suffices
                    with open(args.ignore_list, 'a') as fp:
                        fp.write(args.DataName + '_' + models[m] + '\n')
Example #20
def run(arch=None, img=None, out=None, target=None, **params):

    key = params['extractor']
    #key='ScoreCAM'
    if key == 'GradCAM'.lower():
        cam = GradCAM(arch, conv_layer)
    elif key == 'CAM'.lower():
        cam = CAM(arch, conv_layer, fc_layer)
    elif key == 'XGradCAM'.lower():
        cam = XGradCAM(arch, conv_layer)
    elif key == 'GradCAM++'.lower():
        cam = GradCAMpp(arch, conv_layer)
    elif key == 'SmoothGradCAM++'.lower():
        cam = SmoothGradCAMpp(arch, conv_layer, input_layer)
    elif key == 'ScoreCAM'.lower():
        cam = ScoreCAM(arch, conv_layer, input_layer)
    elif key == 'IntersectionSamCAM'.lower():
        cam = IntersectionSamCAM(arch, conv_layer, input_layer)
    elif key == 'SamCAM'.lower():
        cam = SamCAM(arch, conv_layer)
    elif key == 'SamCAM2'.lower():
        cam = SamCAM2(arch, conv_layer, p=0.25)
    elif key == 'SamCAM3'.lower():
        cam = SamCAM3(arch, conv_layer, p=1.0)
    elif key == 'SamCAM4'.lower():
        cam = SamCAM4(arch, conv_layer, input_layer)
    elif key == 'DropCAM'.lower():
        cam = DropCAM(arch, conv_layer, input_layer)
    elif key == 'SSCAM'.lower():
        cam = SSCAM(arch, conv_layer, input_layer, num_samples=10)
    elif key == 'ISSCAM'.lower():
        cam = ISSCAM(arch, conv_layer, input_layer)
    elif 'IntegratedGradients'.lower() in key or key == 'IGDown'.lower():
        ig = IntegratedGradients(arch.arch)
        cam = ig.attribute
    elif key == 'Saliency'.lower() or key == 'SaliencyDown'.lower():
        saliency = Saliency(arch.arch)
        cam = saliency.attribute
    elif key == "FakeCAM".lower():
        cam = None
    elif key == 'Occlusion'.lower():
        occ = Occlusion(arch.arch)
        cam = occ.attribute

    model = arch

    if isinstance(img, Image.Image):
        inp = apply_transform(img).cuda()
    else:
        inp = img
    out = F.softmax(model.arch(inp), dim=1)

    if cam is not None:
        if 'GradCAM'.lower() in key:
            salmap = cam(inp, target=target, scores=out)
        elif 'Occlusion'.lower() in key:
            salmap = cam(inp,
                         sliding_window_shapes=(3, 45, 45),
                         strides=(3, 9, 9),
                         target=target)
            salmap = torch.abs(salmap.sum(dim=1))
        else:
            salmap = cam(inp, target=target)
    else:
        # FakeCAM: a uniform saliency map with a single zeroed pixel, used as a sanity-check baseline
        salmap = torch.ones((inp.shape[-1], inp.shape[-2]))
        salmap[0, 0] = 0

    # remove 50% less important pixel
    #salmap.view(1,-1)[0,(1-salmap).view(1,-1).topk(int((salmap.shape[-1]**2)/2))[1]]=0.

    salmap = salmap.to(torch.float32)
    if 'IntegratedGradients'.lower() in key or key == 'Saliency'.lower():
        salmap = torch.abs(salmap.sum(dim=1))
        salmap = (salmap - salmap.min()) / (salmap.max() - salmap.min())

        salmap_previous = salmap
        if '20' in key:
            sigma = 20
        elif '5' in key:
            sigma = 5
        else:
            sigma = 3

        # Alternative: torchvision's GaussianBlur
        # trans = transforms.Compose([transforms.GaussianBlur(3, sigma=sigma)])
        # salmap = trans(salmap)

        # scipy gaussian smoothing
        from scipy.ndimage import gaussian_filter as GB
        salmap = torch.from_numpy(GB(salmap.cpu().detach().numpy(), sigma))

        salmap = torch.abs(salmap)
        salmap = (salmap - salmap.min()) / (salmap.max() - salmap.min())
        salmap = salmap.squeeze(0)
    elif key == 'IGDown'.lower() or key == 'SaliencyDown'.lower():
        salmap = torch.abs(salmap.sum(dim=1))
        salmap = (salmap - salmap.min()) / (salmap.max() - salmap.min())
        salmap_previous = salmap
        salmap = F.interpolate(salmap.unsqueeze(0), (7, 7),
                               mode='bilinear',
                               align_corners=False)
        salmap = F.interpolate(salmap,
                               salmap_previous.shape[-2:],
                               mode='bilinear',
                               align_corners=False)
        salmap = torch.abs(salmap.sum(dim=1))
        salmap = (salmap - salmap.min()) / (salmap.max() - salmap.min())

    salmap = torch.abs(salmap)
    salmap = (salmap - salmap.min()) / (salmap.max() - salmap.min())
    return salmap
Example #21
        res_prob = []
        res_label = []

        if epoch % 1 == 0:  # every epoch; raise the modulus to attribute less often
            for images, names, labels in val_loader:
                # Pick one image
                random_index = np.random.randint(images.size(0))
                image = images[random_index, ...][None, ...].cuda()
                label = labels[random_index, ...][None, ...].cuda()

                # Set a baseline
                baseline = torch.zeros_like(image).cuda()

                # Calculate attribution scores + delta
                # ig = IntegratedGradients(model)
                oc = Occlusion(model)
                # nt = NoiseTunnel(ig)
                # attributions, delta = nt.attribute(image, nt_type='smoothgrad', stdevs=0.02, n_samples=2,
                #                                    baselines=baseline, target=0, return_convergence_delta=True)
                _, target_ID = torch.max(label[0], 0)
                x_shape = 16
                x_stride = 8
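                # 16x16-pixel occlusion windows shifted by 8 px, i.e. 50% overlap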
                attributions = oc.attribute(image,
                                            sliding_window_shapes=(3, x_shape,
                                                                   x_shape),
                                            strides=(3, x_stride, x_stride),
                                            target=0,  # attributes class 0; note target_ID above is unused
                                            baselines=baseline)
                # attributions = ig.attribute(image, baseline, target=0, return_convergence_delta=False)
                # print('IG + SmoothGrad Attributions:', attributions)
                # print('Convergence Delta:', delta)
Example #22
def draw_occlusion(
        batch: dict,
        occlusion: Occlusion,
        window: tuple = (4, 4),
        stride: tuple = (8, 8),
        sign: str = 'positive',
        method: str = 'blended_heat_map',
        use_pyplot: bool = False,
        outlier_perc: float = 2.,
        fig_axis: tuple = None,
        mix_bg: bool = True,
        alpha_overlay: float = 0.7
):
    """
    :param batch: batch to visualise
    :param occlusion: Occlusion object initialised for trainer_module
    :param window: Shape of patch (hyperrectangle) to occlude each input
    :param stride: step by which the occlusion hyperrectangle should be shifted by in each direction
    :param sign: sign of gradient attributes to visualise
    :param method: method of visualization to be used
    :param use_pyplot: whether to use pyplot
    :param mix_bg: whether to mix semantic/aerial map with vehicles
    :return: pair of figure and corresponding gradients tensor
    """

    strides = (batch['image'].shape[2] // stride[0], batch['image'].shape[3] // stride[1])
    window_size = (batch['image'].shape[2] // window[0], batch['image'].shape[3] // window[1])
    channels = batch['image'].shape[1]

    grads = occlusion.attribute(
        batch['image'], strides=(channels if mix_bg else channels - 3, *strides),
        sliding_window_shapes=(channels if mix_bg else channels - 3, *window_size),
        baselines=0,
        additional_forward_args=(
            batch['target_positions'],
            None if 'target_availabilities' not in batch else batch['target_availabilities'],
            False))

    gradsm = grads.squeeze().cpu().detach().numpy()
    if len(gradsm.shape) == 3:
        gradsm = gradsm.reshape(1, *gradsm.shape)
    gradsm = np.transpose(gradsm, (0, 2, 3, 1))
    im = batch['image'].detach().cpu().numpy().transpose(0, 2, 3, 1)
    fig, axis = fig_axis if fig_axis is not None else plt.subplots(2 - mix_bg, im.shape[0], dpi=200, figsize=(6, 6))
    for b in range(im.shape[0]):
        if mix_bg:
            viz.visualize_image_attr(
                gradsm[b, ...], im[b, ...], method=method,
                sign=sign, use_pyplot=use_pyplot,
                plt_fig_axis=(fig, axis if im.shape[0] == 1 else axis[b]),
                alpha_overlay=alpha_overlay, outlier_perc=outlier_perc,
            )
            ttl = (axis if im.shape[0] == 1 else axis[b]).title
            ttl.set_position([.5, 0.95])
            (axis if im.shape[0] == 1 else axis[b]).axis('off')
        else:
            for (s_channel, end_channel), row in [((im.shape[-1] - 3, im.shape[-1]), 0), ((0, im.shape[-1] - 3), 1)]:
                viz.visualize_image_attr(
                    gradsm[b, :, :, s_channel:end_channel], im[b, :, :, s_channel:end_channel], method=method,
                    sign=sign, use_pyplot=use_pyplot,
                    plt_fig_axis=(fig, axis[row] if im.shape[0] == 1 else axis[row][b]),
                    alpha_overlay=alpha_overlay, outlier_perc=outlier_perc,
                )
                ttl = (axis[row] if im.shape[0] == 1 else axis[row][b]).title
                ttl.set_position([.5, 0.95])
                (axis[row] if im.shape[0] == 1 else axis[row][b]).axis('off')
    return fig, grads
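
A hedged usage sketch (the names `model` and `batch` are assumptions; any model wrapped in captum's Occlusion and a batch dict with the keys draw_occlusion reads would do):

occlusion = Occlusion(model)
fig, grads = draw_occlusion(batch, occlusion, window=(4, 4), stride=(8, 8))
fig.savefig('occlusion_attributions.png', dpi=200)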
Example #23
 def __init__(self, model):
     self.model = model
     self.explain = Occlusion(model)
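
A minimal companion method, sketched as an assumption (not part of the original snippet): it delegates to captum's Occlusion.attribute, with window/stride shapes chosen for a 3-channel image input.

 def attribute(self, input, target, window=(3, 15, 15), stride=(3, 8, 8)):
     # Delegate to captum's Occlusion; the window/stride tuples must match
     # the channel and spatial layout of `input`.
     return self.explain.attribute(input,
                                   target=target,
                                   sliding_window_shapes=window,
                                   strides=stride,
                                   baselines=0)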
Example #24
def visualize_maps(
        model: torch.nn.Module,
        inputs: Tuple[torch.Tensor, torch.Tensor],
        labels: torch.Tensor,
        title: str,
        second_occlusion: Tuple[int, int, int] = (1, 2, 2),
        baselines: Tuple[int, int] = (0, 0),
        closest: bool = False,
) -> None:
    """
    Visualizes the average of the inputs, or the single input, using various different XAI approaches
    """
    single = inputs[1].ndim == 2
    model.zero_grad()
    model.eval()
    occ = Occlusion(model)
    saliency = Saliency(model)
    saliency = NoiseTunnel(saliency)
    igrad = IntegratedGradients(model)
    igrad_2 = NoiseTunnel(igrad)
    # deep_lift = DeepLift(model)
    grad_shap = ShapleyValueSampling(model)
    output = model(inputs[0], inputs[1])
    output = F.softmax(output, dim=-1).argmax(dim=1, keepdim=True)
    labels = F.softmax(labels, dim=-1).argmax(dim=1, keepdim=True)
    if np.all(labels.cpu().numpy() == 1) and not closest:
        return
    # Explain the ground-truth labels; swap in `output` to explain the model's predictions instead
    targets = labels
    print(targets)
    correct = targets.cpu().numpy() == labels.cpu().numpy()
    # if correct:
    #   return
    occ_out = occ.attribute(
        inputs,
        baselines=baselines,
        sliding_window_shapes=((1, 5, 5), second_occlusion),
        target=targets,
    )
    # occ_out2 = occ.attribute(inputs, sliding_window_shapes=((1,20,20), second_occlusion), strides=(8,1), target=targets)
    saliency_out = saliency.attribute(inputs,
                                      nt_type="smoothgrad_sq",
                                      n_samples=5,
                                      target=targets,
                                      abs=False)
    # igrad_out = igrad.attribute(inputs, target=targets, internal_batch_size=1)
    igrad_out = igrad_2.attribute(
        inputs,
        baselines=baselines,
        target=targets,
        n_samples=5,
        nt_type="smoothgrad_sq",
        internal_batch_size=1,
    )
    # deep_lift_out = deep_lift.attribute(inputs, target=targets)
    grad_shap_out = grad_shap.attribute(inputs,
                                        baselines=baselines,
                                        target=targets)

    if single:
        inputs = convert_to_image(inputs)
        occ_out = convert_to_image(occ_out)
        saliency_out = convert_to_image(saliency_out)
        igrad_out = convert_to_image(igrad_out)
        # grad_shap_out = convert_to_image(grad_shap_out)
    else:
        inputs = convert_to_image_multi(inputs)
        occ_out = convert_to_image_multi(occ_out)
        saliency_out = convert_to_image_multi(saliency_out)
        igrad_out = convert_to_image_multi(igrad_out)
        grad_shap_out = convert_to_image_multi(grad_shap_out)
    fig, axes = plt.subplots(2, 5)
    (fig, axes[0, 0]) = visualization.visualize_image_attr(
        occ_out[0][0],
        inputs[0][0],
        title="Original Image",
        method="original_image",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[0, 0]),
        use_pyplot=False,
    )
    (fig, axes[0, 1]) = visualization.visualize_image_attr(
        occ_out[0][0],
        None,
        sign="all",
        title="Occ (5x5)",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[0, 1]),
        use_pyplot=False,
    )
    (fig, axes[0, 2]) = visualization.visualize_image_attr(
        saliency_out[0][0],
        None,
        sign="all",
        title="Saliency",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[0, 2]),
        use_pyplot=False,
    )
    (fig, axes[0, 3]) = visualization.visualize_image_attr(
        igrad_out[0][0],
        None,
        sign="all",
        title="Integrated Grad",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[0, 3]),
        use_pyplot=False,
    )
    (fig, axes[0, 4]) = visualization.visualize_image_attr(
        grad_shap_out[0],
        None,
        title="GradSHAP",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[0, 4]),
        use_pyplot=False,
    )
    ##### Second Input Labels #########################################################################################
    (fig, axes[1, 0]) = visualization.visualize_image_attr(
        occ_out[1],
        inputs[1],
        title="Original Aux",
        method="original_image",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[1, 0]),
        use_pyplot=False,
    )
    (fig, axes[1, 1]) = visualization.visualize_image_attr(
        occ_out[1],
        None,
        sign="all",
        title="Occ (1x1)",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[1, 1]),
        use_pyplot=False,
    )
    (fig, axes[1, 2]) = visualization.visualize_image_attr(
        saliency_out[1],
        None,
        sign="all",
        title="Saliency",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[1, 2]),
        use_pyplot=False,
    )
    (fig, axes[1, 3]) = visualization.visualize_image_attr(
        igrad_out[1],
        None,
        sign="all",
        title="Integrated Grad",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[1, 3]),
        use_pyplot=False,
    )
    (fig, axes[1, 4]) = visualization.visualize_image_attr(
        grad_shap_out[1],
        None,
        title="GradSHAP",
        show_colorbar=True,
        plt_fig_axis=(fig, axes[1, 4]),
        use_pyplot=False,
    )

    fig.suptitle(
        title +
        f" Label: {labels.cpu().numpy()} Pred: {targets.cpu().numpy()}")
    plt.savefig(
        f"{title}_{'single' if single else 'multi'}_{'Failed' if correct else 'Success'}_baseline{baselines[0]}.png",
        dpi=300,
    )
    plt.clf()
    plt.cla()
Example #25
# `Occlusion <https://captum.ai/api/occlusion.html>`__ is one such method.
# It involves replacing sections of the input image, and examining the
# effect on the output signal.
#
# Below, we set up Occlusion attribution. Similarly to configuring a
# convolutional neural network, you can specify the size of the target
# region, and a stride length to determine the spacing of individual
# measurements. We’ll visualize the output of our Occlusion attribution
# with ``visualize_image_attr_multiple()``, showing heat maps of both
# positive and negative attribution by region, and by masking the original
# image with the positive attribution regions. The masking gives a very
# instructive view of what regions of our cat photo the model found to be
# most “cat-like”.
#

occlusion = Occlusion(model)

attributions_occ = occlusion.attribute(input_img,
                                       target=pred_label_idx,
                                       strides=(3, 8, 8),
                                       sliding_window_shapes=(3, 15, 15),
                                       baselines=0)

_ = viz.visualize_image_attr_multiple(
    np.transpose(attributions_occ.squeeze().cpu().detach().numpy(), (1, 2, 0)),
    np.transpose(transformed_img.squeeze().cpu().detach().numpy(), (1, 2, 0)),
    ["original_image", "heat_map", "heat_map", "masked_image"],
    ["all", "positive", "negative", "positive"],
    show_colorbar=True,
    titles=[
        "Original", "Positive Attribution", "Negative Attribution", "Masked"
Example #26
def run_saliency_methods(saliency_methods,
                         pretrained_model,
                         test_shape,
                         train_loader,
                         test_loader,
                         device,
                         model_type,
                         model_name,
                         saliency_dir,
                         tsr_graph_dir=None,
                         tsr_inputs_to_graph=()):
    _, num_timesteps, num_features = test_shape

    run_grad = "Grad" in saliency_methods
    run_grad_tsr = "Grad_TSR" in saliency_methods
    run_ig = "IG" in saliency_methods
    run_ig_tsr = "IG_TSR" in saliency_methods
    run_dl = "DL" in saliency_methods
    run_gs = "GS" in saliency_methods
    run_dls = "DLS" in saliency_methods
    run_dls_tsr = "DLS_TSR" in saliency_methods
    run_sg = "SG" in saliency_methods
    run_shapley_sampling = "ShapleySampling" in saliency_methods
    run_feature_permutation = "FeaturePermutation" in saliency_methods
    run_feature_ablation = "FeatureAblation" in saliency_methods
    run_occlusion = "Occlusion" in saliency_methods
    run_fit = "FIT" in saliency_methods
    run_ifit = "IFIT" in saliency_methods
    run_wfit = "WFIT" in saliency_methods
    run_iwfit = "IWFIT" in saliency_methods

    if run_grad or run_grad_tsr:
        Grad = Saliency(pretrained_model)
    if run_grad:
        rescaledGrad = np.zeros(test_shape)
    if run_grad_tsr:
        rescaledGrad_TSR = np.zeros(test_shape)

    if run_ig or run_ig_tsr:
        IG = IntegratedGradients(pretrained_model)
    if run_ig:
        rescaledIG = np.zeros(test_shape)
    if run_ig_tsr:
        rescaledIG_TSR = np.zeros(test_shape)

    if run_dl:
        rescaledDL = np.zeros(test_shape)
        DL = DeepLift(pretrained_model)

    if run_gs:
        rescaledGS = np.zeros(test_shape)
        GS = GradientShap(pretrained_model)

    if run_dls or run_dls_tsr:
        DLS = DeepLiftShap(pretrained_model)
    if run_dls:
        rescaledDLS = np.zeros(test_shape)
    if run_dls_tsr:
        rescaledDLS_TSR = np.zeros(test_shape)

    if run_sg:
        rescaledSG = np.zeros(test_shape)
        Grad_ = Saliency(pretrained_model)
        SG = NoiseTunnel(Grad_)

    if run_shapley_sampling:
        rescaledShapleySampling = np.zeros(test_shape)
        SS = ShapleyValueSampling(pretrained_model)

    if run_feature_permutation:
        rescaledFeaturePermutation = np.zeros(test_shape)
        FP = FeaturePermutation(pretrained_model)

    if run_feature_ablation:
        rescaledFeatureAblation = np.zeros(test_shape)
        FA = FeatureAblation(pretrained_model)

    if run_occlusion:
        rescaledOcclusion = np.zeros(test_shape)
        OS = Occlusion(pretrained_model)

    if run_fit:
        rescaledFIT = np.zeros(test_shape)
        FIT = FITExplainer(pretrained_model, ft_dim_last=True)
        generator = JointFeatureGenerator(num_features, data='none')
        # TODO: Increase epochs
        FIT.fit_generator(generator, train_loader, test_loader, n_epochs=300)

    if run_ifit:
        rescaledIFIT = np.zeros(test_shape)
    if run_wfit:
        rescaledWFIT = np.zeros(test_shape)
    if run_iwfit:
        rescaledIWFIT = np.zeros(test_shape)

    idx = 0
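    # Feature mask assigning one group id per timestep, so Shapley sampling
    # (inputMask) and feature permutation (mask_single) perturb all features
    # of a timestep together.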
    mask = np.zeros((num_timesteps, num_features), dtype=int)
    for i in range(num_timesteps):
        mask[i, :] = i

    for i, (samples, labels) in enumerate(test_loader):
        input = samples.reshape(-1, num_timesteps, num_features).to(device)
        # torch.autograd.Variable is deprecated; enable gradients on the tensor directly
        input = input.clone().detach().requires_grad_(True)

        batch_size = input.shape[0]
        baseline_single = torch.from_numpy(np.random.random(
            input.shape)).to(device)
        baseline_multiple = torch.from_numpy(
            np.random.random((input.shape[0] * 5, input.shape[1],
                              input.shape[2]))).to(device)
        inputMask = np.zeros((input.shape))
        inputMask[:, :, :] = mask
        inputMask = torch.from_numpy(inputMask).to(device)
        mask_single = torch.from_numpy(mask).to(device)
        mask_single = mask_single.reshape(1, num_timesteps,
                                          num_features).to(device)
        labels = torch.tensor(labels.int().tolist()).to(device)

        if run_grad:
            attributions = Grad.attribute(input, target=labels)
            rescaledGrad[
                idx:idx +
                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                    num_timesteps, num_features, attributions)
        if run_grad_tsr:
            rescaledGrad_TSR[idx:idx + batch_size, :, :] = get_tsr_saliency(
                Grad,
                input,
                labels,
                graph_dir=tsr_graph_dir,
                graph_name=f'{model_name}_{model_type}_Grad_TSR',
                inputs_to_graph=tsr_inputs_to_graph,
                cur_batch=i)

        if run_ig:
            attributions = IG.attribute(input,
                                        baselines=baseline_single,
                                        target=labels)
            rescaledIG[idx:idx +
                       batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                           num_timesteps, num_features, attributions)
        if run_ig_tsr:
            rescaledIG_TSR[idx:idx + batch_size, :, :] = get_tsr_saliency(
                IG,
                input,
                labels,
                baseline=baseline_single,
                graph_dir=tsr_graph_dir,
                graph_name=f'{model_name}_{model_type}_IG_TSR',
                inputs_to_graph=tsr_inputs_to_graph,
                cur_batch=i)

        if run_dl:
            attributions = DL.attribute(input,
                                        baselines=baseline_single,
                                        target=labels)
            rescaledDL[idx:idx +
                       batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                           num_timesteps, num_features, attributions)

        if run_gs:
            attributions = GS.attribute(input,
                                        baselines=baseline_multiple,
                                        stdevs=0.09,
                                        target=labels)
            rescaledGS[idx:idx +
                       batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                           num_timesteps, num_features, attributions)

        if run_dls:
            attributions = DLS.attribute(input,
                                         baselines=baseline_multiple,
                                         target=labels)
            rescaledDLS[idx:idx +
                        batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                            num_timesteps, num_features, attributions)
        if run_dls_tsr:
            rescaledDLS_TSR[idx:idx + batch_size, :, :] = get_tsr_saliency(
                DLS,
                input,
                labels,
                baseline=baseline_multiple,
                graph_dir=tsr_graph_dir,
                graph_name=f'{model_name}_{model_type}_DLS_TSR',
                inputs_to_graph=tsr_inputs_to_graph,
                cur_batch=i)

        if run_sg:
            attributions = SG.attribute(input, target=labels)
            rescaledSG[idx:idx +
                       batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                           num_timesteps, num_features, attributions)

        if run_shapley_sampling:
            attributions = SS.attribute(input,
                                        baselines=baseline_single,
                                        target=labels,
                                        feature_mask=inputMask)
            rescaledShapleySampling[
                idx:idx +
                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                    num_timesteps, num_features, attributions)

        if run_feature_permutation:
            attributions = FP.attribute(input,
                                        target=labels,
                                        perturbations_per_eval=input.shape[0],
                                        feature_mask=mask_single)
            rescaledFeaturePermutation[
                idx:idx +
                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                    num_timesteps, num_features, attributions)

        if run_feature_ablation:
            attributions = FA.attribute(input, target=labels)
            # Optionally pass perturbations_per_eval=input.shape[0] and
            # feature_mask=mask_single to ablate whole timesteps per evaluation.
            rescaledFeatureAblation[
                idx:idx +
                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                    num_timesteps, num_features, attributions)

        if run_occlusion:
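            # Occlude one timestep at a time across all features.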
            attributions = OS.attribute(input,
                                        sliding_window_shapes=(1,
                                                               num_features),
                                        target=labels,
                                        baselines=baseline_single)
            rescaledOcclusion[
                idx:idx +
                batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                    num_timesteps, num_features, attributions)

        if run_fit:
            attributions = torch.from_numpy(FIT.attribute(input, labels))
            rescaledFIT[idx:idx +
                        batch_size, :, :] = Helper.givenAttGetRescaledSaliency(
                            num_timesteps, num_features, attributions)

        if run_ifit:
            attributions = torch.from_numpy(
                inverse_fit_attribute(input,
                                      pretrained_model,
                                      ft_dim_last=True))
            rescaledIFIT[idx:idx + batch_size, :, :] = attributions

        if run_wfit:
            attributions = torch.from_numpy(
                wfit_attribute(input,
                               pretrained_model,
                               N=test_shape[1],
                               ft_dim_last=True,
                               single_label=True))
            rescaledWFIT[idx:idx + batch_size, :, :] = attributions

        if run_iwfit:
            attributions = torch.from_numpy(
                wfit_attribute(input,
                               pretrained_model,
                               N=test_shape[1],
                               ft_dim_last=True,
                               single_label=True,
                               inverse=True))
            rescaledIWFIT[idx:idx + batch_size, :, :] = attributions

        idx += batch_size

    if run_grad:
        print("Saving Grad", model_name + "_" + model_type)
        np.save(
            saliency_dir + model_name + "_" + model_type + "_Grad_rescaled",
            rescaledGrad)
    if run_grad_tsr:
        print("Saving Grad_TSR", model_name + "_" + model_type)
        np.save(
            saliency_dir + model_name + "_" + model_type +
            "_Grad_TSR_rescaled", rescaledGrad_TSR)

    if run_ig:
        print("Saving IG", model_name + "_" + model_type)
        np.save(saliency_dir + model_name + "_" + model_type + "_IG_rescaled",
                rescaledIG)
    if run_ig_tsr:
        print("Saving IG_TSR", model_name + "_" + model_type)
        np.save(
            saliency_dir + model_name + "_" + model_type + "_IG_TSR_rescaled",
            rescaledIG_TSR)

    if run_dl:
        print("Saving DL", model_name + "_" + model_type)
        np.save(saliency_dir + model_name + "_" + model_type + "_DL_rescaled",
                rescaledDL)

    if run_gs:
        print("Saving GS", model_name + "_" + model_type)
        np.save(saliency_dir + model_name + "_" + model_type + "_GS_rescaled",
                rescaledGS)

    if run_dls:
        print("Saving DLS", model_name + "_" + model_type)
        np.save(saliency_dir + model_name + "_" + model_type + "_DLS_rescaled",
                rescaledDLS)
    if run_dls_tsr:
        print("Saving DLS_TSR", model_name + "_" + model_type)
        np.save(
            saliency_dir + model_name + "_" + model_type + "_DLS_TSR_rescaled",
            rescaledDLS_TSR)

    if run_sg:
        print("Saving SG", model_name + "_" + model_type)
        np.save(saliency_dir + model_name + "_" + model_type + "_SG_rescaled",
                rescaledSG)

    if run_shapley_sampling:
        print("Saving ShapleySampling", model_name + "_" + model_type)
        np.save(
            saliency_dir + model_name + "_" + model_type +
            "_ShapleySampling_rescaled", rescaledShapleySampling)

    if run_feature_permutation:
        print("Saving FeaturePermutation", model_name + "_" + model_type)
        np.save(
            saliency_dir + model_name + "_" + model_type +
            "_FeaturePermutation_rescaled", rescaledFeaturePermutation)

    if run_feature_ablation:
        print("Saving FeatureAblation", model_name + "_" + model_type)
        np.save(
            saliency_dir + model_name + "_" + model_type +
            "_FeatureAblation_rescaled", rescaledFeatureAblation)

    if run_occlusion:
        print("Saving Occlusion", model_name + "_" + model_type)
        np.save(
            saliency_dir + model_name + "_" + model_type +
            "_Occlusion_rescaled", rescaledOcclusion)

    if run_fit:
        print("Saving FIT", model_name + "_" + model_type)
        np.save(saliency_dir + model_name + "_" + model_type + "_FIT_rescaled",
                rescaledFIT)

    if run_ifit:
        print("Saving IFIT", model_name + "_" + model_type)
        np.save(
            saliency_dir + model_name + "_" + model_type + "_IFIT_rescaled",
            rescaledIFIT)

    if run_wfit:
        print("Saving WFIT", model_name + "_" + model_type)
        np.save(
            saliency_dir + model_name + "_" + model_type + "_WFIT_rescaled",
            rescaledWFIT)

    if run_iwfit:
        print("Saving IWFIT", model_name + "_" + model_type)
        np.save(
            saliency_dir + model_name + "_" + model_type + "_IWFIT_rescaled",
            rescaledIWFIT)
Example #27
import torch
from captum.attr import (DeepLift, GradientShap, IntegratedGradients,
                         Occlusion, Saliency)

grads_sal = list()
grads_occ = list()
grads_igrad = list()
grads_gshap = list()
grads_dlift = list()
signal = list()

for idx in range(36):
    x = signals[idx].float().unsqueeze(0)
    x.requires_grad = True

    model.eval()

    # Saliency
    saliency = Saliency(model)
    grads = saliency.attribute(x, target=labels[idx].item())
    grads_sal.append(grads.squeeze().cpu().detach().numpy())

    # Occlusion
    occlusion = Occlusion(model)
    # Assuming FS is the sampling rate, this occludes ~100 ms windows shifted by ~10 ms
    grads = occlusion.attribute(x,
                                strides=(1, int(FS / 100)),
                                target=labels[idx].item(),
                                sliding_window_shapes=(1, int(FS / 10)),
                                baselines=0)
    grads_occ.append(grads.squeeze().cpu().detach().numpy())

    # Integrated Gradients
    integrated_gradients = IntegratedGradients(model)
    grads = integrated_gradients.attribute(x, target=labels[idx].item(), n_steps=1000)
    grads_igrad.append(grads.squeeze().cpu().detach().numpy())

    # Gradient SHAP
    gshap = GradientShap(model)
    baseline_dist = torch.cat([x*0, x*1])
    grads = gshap.attribute(x, n_samples=10, stdevs=0.1, baselines=baseline_dist, target=labels[idx].item())
    grads_gshap.append(grads.squeeze().cpu().detach().numpy())

    # DeepLIFT (reconstructed: the original lines are missing; baseline and
    # target follow the pattern of the blocks above)
    dlift = DeepLift(model)
    grads = dlift.attribute(x, baselines=x * 0, target=labels[idx].item())
    grads_dlift.append(grads.squeeze().cpu().detach().numpy())

                    # bloods = bloods[random_index, ...][None, ...].cuda()
                    # # print(label.shape, label)
                    # # print(image.shape, image)
                    # print(images.shape, labels.shape)

                    # Account for tta: Take first image (non-augmented)
                    # Label does not need to be touched because it is obv. the same for this image regardless of tta
                    images = images[:, 0, ...].cuda()

                    # Set a baseline
                    baseline = torch.zeros_like(images).cuda()
                    baseline_bloods = torch.zeros_like(bloods).cuda()

                    # Calculate attribution scores + delta
                    # ig = IntegratedGradients(model)
                    oc = Occlusion(model)
                    # nt = NoiseTunnel(ig)
                    # attributions, delta = nt.attribute(image, nt_type='smoothgrad', stdevs=0.02, n_samples=2,
                    #                                    baselines=baseline, target=0, return_convergence_delta=True)
                    _, target_ID = torch.max(labels, 1)
                    print(target_ID)
                    # attributions = ig.attribute(image, baseline, target=target_ID, return_convergence_delta=False)
                    if occlusion_count == 0:
                        x_shape = 16
                        x_stride = 8
                    else:
                        x_shape = input_size[0]
                        x_stride = input_size[0]

                    oc_attributions0, blud0 = oc.attribute(
                        (images, bloods),