Example #1
    def heatmap(self, input_t: torch.Tensor, target_t: torch.Tensor):
        self.model.eval()
        self._inject_bottleneck()
        with torch.no_grad():
            self.model(input_t)
        self._remove_bottleneck()

        if input_t.shape[0] > 1:
            # one heatmap per sample in the batch
            hmaps = np.zeros(
                (input_t.shape[0], input_t.shape[2], input_t.shape[3]))
            for i in range(input_t.shape[0]):
                htensor = to_np(self.bn_layer.buffer_capacity[i])
                hmap = htensor.mean(0)  # average over the channel dim
                hmap = resize(hmap, input_t.shape[2:])

                # min-max normalise to [0, 1]; the epsilon guards
                # against division by zero for an all-zero map
                hmap = hmap - hmap.min()
                hmap = hmap / max(hmap.max(), 1e-5)
                hmaps[i] = hmap
            hmap = hmaps
        else:
            htensor = to_np(self.bn_layer.buffer_capacity)
            hmap = htensor.mean(axis=(0, 1))  # average over batch and channels
            hmap = resize(hmap, input_t.shape[2:])

            hmap = hmap - hmap.min()
            hmap = hmap / max(hmap.max(), 1e-5)

        return hmap
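
A minimal usage sketch for context (the wrapper class name `IBAHeatmap` and the `model` variable are assumptions, not part of the original API):

    import matplotlib.pyplot as plt

    explainer = IBAHeatmap(model)  # hypothetical wrapper exposing heatmap() above
    hmap = explainer.heatmap(input_t, target_t)
    # a batched input yields (B, H, W); a single input yields (H, W)
    plt.imshow(hmap if hmap.ndim == 2 else hmap[0])
    plt.colorbar()
    plt.show()
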
Example #2
    def _current_heatmap(self, shape=None):
        # Read the capacity stored in the bottleneck
        heatmap = self.bottleneck.buffer_capacity
        heatmap = to_np(heatmap[0])
        heatmap = heatmap.sum(axis=0)  # sum over the channel dim
        heatmap = heatmap - heatmap.min()  # shift so min is 0
        heatmap = heatmap / max(heatmap.max(), 1e-5)  # scale so max is 1

        if shape is not None:
            heatmap = resize(heatmap, shape)

        return heatmap
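
Several of these examples repeat the same min-max normalisation; it could be factored into a shared helper. A sketch (the epsilon guard mirrors the one used in the other examples):

    import numpy as np

    def normalize_heatmap(hmap: np.ndarray, eps: float = 1e-5) -> np.ndarray:
        # shift so the minimum is 0, then scale so the maximum is (at most) 1
        hmap = hmap - hmap.min()
        return hmap / max(hmap.max(), eps)
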
Example #3
    def heatmap(self, input_t: torch.Tensor, target_t: torch.Tensor):
        shape = tuple(input_t[0, 0].shape)

        # forward pass in eval mode
        self.model.eval()

        fmaps, grads = None, None

        def hook_forward(module, input, output):
            nonlocal fmaps
            fmaps = output.detach()

        def hook_backward(module, grad_in, grad_out):
            nonlocal grads
            grads = grad_out[0].detach()

        # forward pass, collecting the target layer's activations and,
        # during backward, the gradient of its feature map
        forward_handle = self.layer.register_forward_hook(hook_forward)
        backward_handle = self.layer.register_full_backward_hook(hook_backward)
        self.model.zero_grad()
        preds = self.model(input_t)

        # backprop a one-hot vector at the predicted class
        grad_eval_point = torch.zeros(1, preds.size(-1), device=input_t.device)
        grad_eval_point[0, preds.argmax().item()] = 1.0
        preds.backward(gradient=grad_eval_point)

        # remove the hooks only after backward has run; removing the
        # backward hook before calling backward() would leave grads unset
        forward_handle.remove()
        backward_handle.remove()

        # weight each feature map by the spatial mean of its gradient
        maps = fmaps.detach().cpu().numpy()[0]
        weights = grads.detach().cpu().numpy().mean(axis=(2, 3))[0, :]

        # weighted sum of the feature maps over the channel dimension
        gcam = np.zeros(maps.shape[1:], dtype=np.float32)
        for i, w in enumerate(weights):
            gcam += w * maps[i, :, :]
        # relu
        gcam = np.maximum(gcam, 0)
        # to input shape
        gcam = resize(gcam, shape, interp=self.interp)
        # rescale to max 1
        gcam = gcam / (gcam.max() + self.eps)

        return gcam
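
The per-channel weighting loop can also be written as a single tensor contraction; a small equivalent sketch (same `weights` of shape (C,) and `maps` of shape (C, H, W) as above):

    import numpy as np

    # gcam[h, w] = sum_c weights[c] * maps[c, h, w]
    gcam = np.einsum("c,chw->hw", weights, maps)
    gcam = np.maximum(gcam, 0)  # ReLU, as in Grad-CAM
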
Example #4
    def heatmap(self, input_t: torch.Tensor, target_t: torch.Tensor):

        self.model.eval()
        self._inject_bottleneck()
        with torch.no_grad():
            self.model(input_t)
        self._remove_bottleneck()

        # capacity recorded by the bottleneck during the forward pass
        htensor = to_np(self.bn_layer.buffer_capacity)

        hmap = htensor.mean(axis=(0, 1))  # average over batch and channels
        hmap = resize(hmap, input_t.shape[2:])

        # min-max normalise to [0, 1]
        hmap = hmap - hmap.min()
        hmap = hmap / max(hmap.max(), 1e-5)

        return hmap
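
The inject/forward/remove pattern in examples #1 and #4 leaves the bottleneck in place if the forward pass raises. A sketch of a safer variant as a context manager (the `_inject_bottleneck`/`_remove_bottleneck` methods are the ones shown above; the free-function form is an assumption):

    from contextlib import contextmanager

    @contextmanager
    def injected_bottleneck(explainer):
        explainer._inject_bottleneck()
        try:
            yield
        finally:
            # always restore the model, even on error
            explainer._remove_bottleneck()

    # usage inside heatmap():
    # with injected_bottleneck(self), torch.no_grad():
    #     self.model(input_t)
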
Example #5
    def heatmap(self, input_t: torch.Tensor, target_t: torch.Tensor):
        self.model.eval()

        # create baseline to get patches from
        baseline_t = to_img_tensor(self.baseline.apply(to_np_img(input_t)))

        # img: 1xCxNxN
        rows = input_t.shape[2]
        cols = input_t.shape[3]
        rsteps = 1 + int(np.floor((rows-self.size) / self.stride))
        csteps = 1 + int(np.floor((cols-self.size) / self.stride))
        steps = csteps * rsteps
        target = target_t.cpu().numpy()

        with torch.no_grad():
            initial_score = self.eval_np(input_t, target)
            hmap = np.zeros((rsteps, csteps))

            rstep_list = []
            cstep_list = []
            occluded = input_t.repeat(steps, 1, 1, 1)  # repeat() already copies
            for step in tqdm(range(steps), ncols=100, desc="calc score",
                             disable=not self.progbar):
                # calc patch position
                cstep = step % csteps
                rstep = step // csteps
                r = rstep * self.stride
                c = cstep * self.stride
                assert r + self.size <= rows
                assert c + self.size <= cols

                # occlude: paste the baseline patch into this batch entry
                occluded[step, :, r:r + self.size, c:c + self.size] = \
                    baseline_t[0, :, r:r + self.size, c:c + self.size]
                rstep_list.append(rstep)
                cstep_list.append(cstep)

            # measure the score drop for every occluded position in one
            # batched pass
            score = call_batched(self.model, occluded, batch_size=100)[:, target]
            for i, (rstep, cstep) in enumerate(zip(rstep_list, cstep_list)):
                hmap[rstep, cstep] += initial_score - score[i]

        hmap = resize(hmap, (rows, cols), interp=self.interp)
        return hmap
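
To sanity-check the step arithmetic: for a 224x224 input with a patch size of 32 and a stride of 16, rsteps = csteps = 1 + (224 - 32) // 16 = 13, i.e. 169 occluded copies. A small worked sketch of the index math (values are illustrative):

    rows = cols = 224
    size, stride = 32, 16
    rsteps = 1 + (rows - size) // stride  # 13
    csteps = 1 + (cols - size) // stride  # 13
    steps = rsteps * csteps               # 169 forward passes, batched
    for step in range(steps):
        cstep = step % csteps             # column index of the patch
        rstep = step // csteps            # row index of the patch
        r, c = rstep * stride, cstep * stride
        assert r + size <= rows and c + size <= cols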