Code Example #1
File: randomization_tests.py  Project: rbtsbg/pgig
import numpy as np
import torch
from scipy.stats import spearmanr

# utils, get_explainer and normalize_range are helpers from the rbtsbg/pgig project.
def cascading_parameter_randomization(method_name, pattern_augmented, input, target):
    model = utils.load_model('vgg16')
    init_out = None
    state_dict = model.state_dict()
    print(method_name)
    # Walk the weights from the output layer back toward the input, randomizing
    # one tensor at a time (cascading randomization sanity check).
    for idx, k in enumerate(reversed(state_dict.keys())):
        if 'weight' in k:
            explainer = get_explainer(model, method_name)
            explainer.set_weights_and_patterns()
            if method_name in ('pattern_net', 'pattern_attribution'):
                # pattern explainers take the index of the randomized layer
                saliency = explainer.explain(input, target, idx)
            else:
                saliency = explainer.explain(input, target)
            out = saliency.cpu().flatten()
            out = normalize_range(out, -1.0, 1.0)
            print(out)

            if init_out is None:
                init_out = out
                continue

            corr = spearmanr(init_out, out)
            print(corr)
            corr = spearmanr(np.abs(init_out), np.abs(out))
            print(corr)

            state_dict[k] = torch.rand_like(state_dict[k])
            # shuffle randomization method
            # idx = torch.randperm(layer.nelement())
            # layer = layer.view(-1)[idx].view(layer.size())

            # reset randomization method
            model.load_state_dict(state_dict)
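
A minimal usage sketch (not from the project): load_image_tensor and the class index 281 are hypothetical stand-ins for the caller's actual image loading and label.

img = load_image_tensor('cat.jpg').cuda()  # hypothetical loader, assumed shape (1, 3, 224, 224)
target = torch.LongTensor([281]).cuda()    # assumed ImageNet class index
cascading_parameter_randomization('pattern_attribution', True, img, target)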
Code Example #2
                                  batch_size]
        raw_imgs = [viz.pil_loader(image_path) for image_path in image_batch]
        # make sure preprocessing is correct
        inputs = [
            get_preprocess('vgg16', 'pattern_vanilla_grad')(raw_img)
            for raw_img in raw_imgs
        ]
        inputs = torch.stack(inputs).cuda()
        inputs = utils.cuda_var(inputs, requires_grad=True)

        diff_sum = 0

        with torch.cuda.device(0):
            torch.cuda.empty_cache()
            model = utils.load_model('vgg16').cuda()
            explainer = get_explainer(model, 'vanilla_grad')

            out = torch.softmax(model(inputs.clone()), dim=-1)
            classes = torch.max(out, dim=1)[1]
            out = out.detach().cpu().numpy()

            # prediction for an all-zeros baseline input
            baseline_inp = torch.zeros_like(inputs)
            baseline_out = torch.softmax(model(baseline_inp),
                                         dim=-1).detach().cpu().numpy()

            # per-image drop in predicted-class probability vs. the baseline
            score_diff = np.array(
                [s[c] for s, c in zip((out - baseline_out), classes)])

            model = utils.load_model('vgg16').cuda()
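
One plausible continuation, flagged as an assumption because the snippet is cut off here: diff_sum and score_diff suggest a completeness check in which per-image attribution sums are compared against the input-vs-baseline score difference.

            # hedged sketch; the explain() call mirrors the other snippets on
            # this page, not necessarily this exact file
            saliency = explainer.explain(inputs, classes)
            attr_sum = saliency.view(saliency.size(0), -1).sum(dim=1)
            diff_sum += np.abs(attr_sum.detach().cpu().numpy() - score_diff).sum()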
Code Example #3
    def __init__(self):

        self.model_methods = [['resnext', 'gradcam', 'camshow']]

        # The 51 action classes of the HMDB51 dataset
        self.classes = [
            "brush_hair", "cartwheel", "catch", "chew", "clap", "climb",
            "climb_stairs", "dive", "draw_sword", "dribble", "drink", "eat",
            "fall_floor", "fencing", "flic_flac", "golf", "handstand", "hit",
            "hug", "jump", "kick", "kick_ball", "kiss", "laugh", "pick",
            "pour", "pullup", "punch", "push", "pushup", "ride_bike",
            "ride_horse", "run", "shake_hands", "shoot_ball", "shoot_bow",
            "shoot_gun", "sit", "situp", "smile", "smoke", "somersault",
            "stand", "swing_baseball", "sword", "sword_exercise", "talk",
            "throw", "turn", "walk", "wave"
        ]

        scales = [1.0]

        # model input: crop, tensor conversion, dataset normalization
        self.spatial_transform = Compose([
            MultiScaleCornerCrop(scales, 112),
            ToTensor(1.0),
            Normalize(get_mean(1.0, dataset='activitynet'), get_std(1.0))
        ])

        # crop only (no tensor conversion or normalization), for visualization
        self.spatial_transform2 = Compose([MultiScaleCornerCrop(scales, 112)])

        # crop + tensor conversion without dataset statistics
        self.spatial_transform3 = Compose([
            MultiScaleCornerCrop(scales, 112),
            ToTensor(1),
            Normalize([0, 0, 0], [1, 1, 1])
        ])

        self.model = utils.load_model(self.model_methods[0][0])
        self.model.cuda()
        #self.video=[]
        #self.flows=[]
        self.bb_frames = []
        #self.explainer= get_explainer
        # one Grad-CAM explainer per network stage, stem through avgpool
        method_name = 'gradcam'
        self.explainer = get_explainer(self.model, method_name, "conv1")
        self.explainer2 = get_explainer(self.model, method_name, "layer1")
        self.explainer3 = get_explainer(self.model, method_name, "layer2")
        self.explainer4 = get_explainer(self.model, method_name, "layer3")
        self.explainer5 = get_explainer(self.model, method_name, "layer4")
        self.explainer6 = get_explainer(self.model, method_name, "avgpool")
        path = "images/frames4"
        #print path
        self.path = path + "/"
        #dirc = os.listdir(path)
        #self.files = [ fname for fname in dirc if fname.startswith('img')]
        #self.files2 = [ fname for fname in dirc if fname.startswith('flow_x')]
        self.seq = []
        self.kls = []
        self.scr = []
        self.totalhit = 0
        self.totalhit2 = 0
        self.totalhit3 = 0
        self.totalhit4 = 0
        self.totalhit5 = 0
        self.totalhit6 = 0
        self.totalhit7 = 0
        self.totalframes = 0
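
A hypothetical usage sketch: VideoExplainer is an invented name (the snippet does not show the real class name), and the clip shape follows the (N, C, T, H, W) convention of 3D ResNeXt models on 112x112 crops.

runner = VideoExplainer()  # hypothetical class name
clip = torch.randn(1, 3, 16, 112, 112).cuda()  # assumed clip shape
target = torch.LongTensor([runner.classes.index('run')]).cuda()
for exp in (runner.explainer, runner.explainer2, runner.explainer3,
            runner.explainer4, runner.explainer5, runner.explainer6):
    cam = exp.explain(clip, target)  # one Grad-CAM map per network stage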
Code Example #4
for model_name, method_name, _ in model_methods:
    # standard ImageNet preprocessing, consumed by transf(raw_img) below
    transf = transforms.Compose([
        transforms.Resize((225, 225)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    for pattern_attribution in [False, True]:
        torch.cuda.empty_cache()

        img_input = transf(raw_img).cuda()
        img_input = utils.cuda_var(img_input.unsqueeze(0), requires_grad=True)

        model = utils.load_model(model_name).cuda()

        explainer = get_explainer(model, method_name)
        if pattern_attribution:
            explainer = pattern_augment(explainer,
                                        method='pattern_attribution')
        pred = model(img_input)

        pred_class = pred.data.max(1)[1]  # class predicted by the model
        print(
            f'Processing {method_name}, predicted class is {lrp_utils.imgclasses[pred_class.item()]}'
        )

        # explain with respect to the ground-truth class
        target = torch.LongTensor([image_class]).cuda()

        saliency = explainer.explain(img_input, target)
        saliencies[int(pattern_attribution)].append(saliency.cpu().numpy())
Code Example #5
File: convergence_test.py  Project: rbtsbg/pgig
    methods = ['saliency', 'vanilla_grad', 'deconv', 'guided_backprop']
    pattern_methods = ['pattern_' + m for m in methods]
    methods.extend(pattern_methods)

    results = []

    layer_num = 34

    x = range(layer_num + 1)
    for m in methods:
        print(m)
        torch.cuda.empty_cache()
        model = utils.load_model('vgg16').cuda()
        exp = get_explainer(model, m)
        c = ConvergenceTester(exp)
        conv = c.get_convergence(layer_num, pattern='pattern_' in m)
        plt.plot(x, list(conv), label=m)

    torch.cuda.empty_cache()
    model = utils.load_model('vgg16').cuda()
    exp = get_explainer(model, 'vanilla_grad')
    c = ConvergenceTester(exp)
    conv = c.get_baseline(layer_num)
    plt.plot(x, conv, label='baseline')
    plt.xticks(x, c.get_names(), rotation='vertical')
    plt.legend()
    plt.savefig('convergence_output.png')  # save before show() clears the figure
    plt.show()
Code Example #6
import os

import matplotlib.pyplot as plt
import numpy as np
import torch

# viz, utils, get_preprocess and get_explainer are project helpers.
def compute_saliency_map(model_name, displayed_class, number_image):
    model_methods = [
        [model_name, 'vanilla_grad', 'imshow'],
        [model_name, 'grad_x_input', 'imshow'],
        [model_name, 'saliency', 'imshow'],
        [model_name, 'integrate_grad', 'imshow'],
        [model_name, 'deconv', 'imshow'],
        [model_name, 'guided_backprop', 'imshow'],
        #[model_name, 'gradcam', 'camshow'],
        #[model_name, 'excitation_backprop', 'camshow'],
        #[model_name, 'contrastive_excitation_backprop', 'camshow']
    ]
    # Map the displayed class name to its label index (0 = dog, 1 = cat)
    if displayed_class == "dog":
        image_class = 0
    elif displayed_class == "cat":
        image_class = 1
    else:
        raise ValueError("wrong displayed class: " + displayed_class)

    # Take the sample image, and display it (original form)
    image_path = "models/test_" + displayed_class + "_images/" + str(
        number_image)

    raw_img = viz.pil_loader(image_path)
    plt.figure(figsize=(5, 5))
    plt.imshow(raw_img)
    plt.axis('off')
    plt.title(displayed_class)

    # Now, we want to display the saliency maps of this image, for every model_method element
    all_saliency_maps = []

    for model_name, method_name, _ in model_methods:
        # Get a specific picture transformation (see torchvision.transforms documentation)
        transf = get_preprocess(model_name, method_name)
        # Load the pretrained model
        model = utils.load_model(model_name)
        model.cuda()
        # Get the explainer
        explainer = get_explainer(model, method_name)

        # Transform the image
        inp = transf(raw_img)
        if model_name == 'googlenet':  # swap channels due to caffe (BGR) weights
            inp_copy = inp.clone()
            inp[0] = inp_copy[2]
            inp[2] = inp_copy[0]
        inp = utils.cuda_var(inp.unsqueeze(0), requires_grad=True)

        target = torch.LongTensor([image_class]).cuda()
        saliency = explainer.explain(inp, target)
        saliency = utils.upsample(saliency, (raw_img.height, raw_img.width))
        all_saliency_maps.append(saliency.cpu().numpy())

    # Display all the results
    plt.figure(figsize=(25, 15))
    plt.subplot(3, 5, 1)
    plt.imshow(raw_img)
    plt.axis('off')
    plt.title(displayed_class)
    for i, (saliency,
            (model_name, method_name,
             show_style)) in enumerate(zip(all_saliency_maps, model_methods)):
        plt.subplot(3, 5, i + 2 + i // 4)
        if show_style == 'camshow':
            viz.plot_cam(np.abs(saliency).max(axis=1).squeeze(),
                         raw_img,
                         'jet',
                         alpha=0.5)
        else:
            if model_name == 'googlenet' or method_name == 'pattern_net':
                saliency = saliency.squeeze()[::-1].transpose(1, 2, 0)
            else:
                saliency = saliency.squeeze().transpose(1, 2, 0)
            saliency -= saliency.min()
            saliency /= (saliency.max() + 1e-20)
            plt.imshow(saliency, cmap='gray')

        plt.axis('off')
        if method_name == 'excitation_backprop':
            plt.title('Exc_bp')
        elif method_name == 'contrastive_excitation_backprop':
            plt.title('CExc_bp')
        else:
            plt.title('%s' % (method_name))

    plt.tight_layout()

    if not os.path.exists('images/' + model_name + '/'):
        os.makedirs('images/' + model_name + '/')
    save_destination = 'images/' + model_name + '/' + str(
        number_image[:-4]) + '_saliency.png'

    plt.savefig(save_destination)
    plt.clf()
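
A minimal usage sketch, assuming a hypothetical test image 10.jpg exists under models/test_cat_images/:

compute_saliency_map('vgg16', 'cat', '10.jpg')  # writes images/vgg16/10_saliency.png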
Code Example #7
File: degradation_test.py  Project: rbtsbg/pgig
    imagenet_data = get_imagenet_dataloader(data_dir, input_transform, val_size, batch_size, imagenet_download_key)

    num_batches = len(imagenet_data)
    print("Evaluating on {} images.".format(val_size))

    model = utils.load_model('vgg16').cuda()

    results = {}

    for batch_idx, batch in enumerate(imagenet_data):
        t = time.time()
        print("Batch {} of {}".format(batch_idx+1, num_batches))

        for m in methods:
            print("Method: {}".format(m))
            explainer = get_explainer(model, m)

            with torch.cuda.device(0):

                torch.cuda.empty_cache()
                confidences = degradation_test(explainer, batch[0].cuda(), n_patches)
                if m not in results:
                    results[m] = confidences
                else:
                    results[m] += confidences
        batch_time = time.time() - t
        print("Batch time: {} seconds".format(batch_time))
        remaining_time = batch_time * (num_batches - batch_idx - 1)
        remaining = datetime.timedelta(seconds=remaining_time)
        print("Approximate time remaining: {}".format(remaining))