Example #1
    def generate(
            self,
            image_conf: np.ndarray,
            perturbed_conf: np.ndarray,
            perturbed_masks: np.ndarray
    ) -> np.ndarray:

        if len(image_conf) != len(perturbed_conf[0]):
            raise ValueError("Number of classses in original image and",
                             " perturbed image do not match.")

        if len(perturbed_conf) != len(perturbed_masks):
            raise ValueError("Number of perturbation masks and respective",
                             "confidence lengths do not match.")

        # Compare each class confidence with its perturbed counterpart
        diff = image_conf - perturbed_conf

        # Weighting perturbed regions with respective difference in confidence
        sal = weight_regions_by_scalar(diff, perturbed_masks)

        # Converting nan values to zero.
        sal = np.nan_to_num(sal)
        # Normalize final saliency map in range [0, 1]
        sal = minmax_scale(sal.ravel(), feature_range=(0, 1)).reshape(sal.shape)

        return sal
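
A minimal usage sketch for this scorer. The `scorer` instance below is hypothetical (the snippet matches an occlusion-style confidence scorer, e.g. xaitk-saliency's OcclusionScoring); the shapes follow the checks in the method above.

    import numpy as np

    n_classes, n_masks, h, w = 3, 16, 224, 224
    image_conf = np.random.rand(n_classes)               # reference confidences, (n_classes,)
    perturbed_conf = np.random.rand(n_masks, n_classes)  # one confidence vector per mask
    perturbed_masks = np.random.randint(0, 2, (n_masks, h, w)).astype(np.float64)

    # `scorer` is an assumed instance of the class defining generate() above.
    sal = scorer.generate(image_conf, perturbed_conf, perturbed_masks)
    assert sal.shape == (n_classes, h, w)  # one saliency map per class, values in [0, 1]
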
Example #2
    def generate(
        self,
        image_conf: np.ndarray,
        perturbed_conf: np.ndarray,
        perturbed_masks: np.ndarray,
    ) -> np.ndarray:
        if len(perturbed_conf) != len(perturbed_masks):
            raise ValueError("Number of perturbation masks and respective "
                             "confidence lengths do not match.")

        # RISE uses the perturbed-image confidences directly rather than the
        # difference from the reference confidences, so image_conf is not
        # used here.

        # Weight perturbed regions by their perturbed-image confidences
        sal = weight_regions_by_scalar(perturbed_conf,
                                       perturbed_masks - self.p1,
                                       inv_masks=False,
                                       normalize=False)

        # Normalize final saliency map
        sal = maxabs_scale(sal.reshape(sal.shape[0], -1),
                           axis=1).reshape(sal.shape)

        # Ensure saliency map in range [-1, 1]
        sal = np.clip(sal, -1, 1)

        return sal
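
A hedged usage sketch, assuming this is a RISE-style scorer (e.g. xaitk-saliency's RISEScoring) whose p1 is the probability used during mask generation; the `rise_scorer` instance is hypothetical.

    import numpy as np

    n_classes, n_masks, h, w = 3, 16, 224, 224
    image_conf = np.random.rand(n_classes)             # accepted but unused by RISE scoring
    perturbed_conf = np.random.rand(n_masks, n_classes)
    perturbed_masks = np.random.rand(n_masks, h, w)    # RISE masks are float-valued in [0, 1]

    sal = rise_scorer.generate(image_conf, perturbed_conf, perturbed_masks)
    assert sal.shape == (n_classes, h, w)  # per-class maps scaled into [-1, 1]
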
Example #3
    def generate(self, image_conf: np.ndarray, perturbed_conf: np.ndarray,
                 perturbed_masks: np.ndarray) -> np.ndarray:

        if len(image_conf) != len(perturbed_conf[0]):
            raise ValueError("Number of classes in original image and"
                             " perturbed image do not match.")

        if len(perturbed_conf) != len(perturbed_masks):
            raise ValueError("Number of perturbation masks and respective "
                             "confidence lengths do not match.")

        # Compare each class confidence with its perturbed counterpart
        diff = image_conf - perturbed_conf

        # Weighting perturbed regions with respective difference in confidence
        sal = weight_regions_by_scalar(diff, perturbed_masks)

        # Normalize final saliency map
        sal = maxabs_scale(sal.reshape(sal.shape[0], -1),
                           axis=1).reshape(sal.shape)

        # Ensure saliency map in range [-1, 1]
        sal = np.clip(sal, -1, 1)

        return sal
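
This variant differs from Example #1 only in its normalization. A small runnable sketch (assuming maxabs_scale is sklearn.preprocessing.maxabs_scale) showing why the final clip is only a safeguard: each flattened map is divided by its maximum absolute value, so the results already lie in [-1, 1].

    import numpy as np
    from sklearn.preprocessing import maxabs_scale

    flat_sal = np.array([[2.0, -4.0, 1.00],
                         [0.5, 0.25, -0.5]])
    print(maxabs_scale(flat_sal, axis=1))
    # [[ 0.5  -1.    0.25]
    #  [ 1.    0.5  -1.  ]]
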
Example #4
    def generate(
        self,
        ref_dets: np.ndarray,
        perturbed_dets: np.ndarray,
        perturbed_masks: np.ndarray,
    ) -> np.ndarray:

        if len(perturbed_dets) != len(perturbed_masks):
            raise ValueError("Number of perturbation masks and respective "
                             "detections vector do not match.")

        if ref_dets.shape[1] != perturbed_dets.shape[2]:
            raise ValueError("Dimensions of reference detections and "
                             "perturbed detections do not match. Both "
                             "should be of dimension (n_classes + 4 + 1).")

        n_masks = len(perturbed_masks)
        n_props = perturbed_dets.shape[1]
        n_dets = len(ref_dets)

        # Compute IoU of bounding boxes
        s1 = self.iou(perturbed_dets[:, :, :4].reshape(-1, 4),
                      ref_dets[:, :4]).reshape(n_masks, n_props, n_dets)

        # Compute proximity between class probability vectors
        s2 = cdist(perturbed_dets[:, :, 5:].reshape(n_masks * n_props, -1),
                   ref_dets[:, 5:],
                   metric=self.proximity_metric).reshape(
                       n_masks, n_props, n_dets)

        # Use objectness score if available
        s3 = perturbed_dets[:, :, 4:5]

        # Compute overall similarity s
        # Shape: n_masks x n_props x n_dets
        s = s1 * s2 * s3

        # Take max similarity over all proposals
        # Shape: n_masks x n_dets
        s = s.max(axis=1)

        # Weighting perturbed regions by similarity
        sal = weight_regions_by_scalar(s, perturbed_masks)

        # Normalize final saliency map
        sal = maxabs_scale(sal.reshape(sal.shape[0], -1),
                           axis=1).reshape(sal.shape)

        # Ensure saliency map in range [-1, 1]
        sal = np.clip(sal, -1, 1)

        return sal
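
A hedged usage sketch for the detection scorer. The detection layout (x1, y1, x2, y2, objectness, per-class scores) is inferred from the slicing above; `drise_scorer` is a hypothetical instance (e.g. xaitk-saliency's DRISEScoring).

    import numpy as np

    n_masks, n_props, n_dets, n_classes, h, w = 8, 5, 2, 3, 224, 224
    dim = 4 + 1 + n_classes  # bbox (4) + objectness (1) + class scores

    ref_dets = np.random.rand(n_dets, dim)                  # reference detections
    perturbed_dets = np.random.rand(n_masks, n_props, dim)  # proposals per mask
    perturbed_masks = np.random.randint(0, 2, (n_masks, h, w)).astype(np.float64)

    sal = drise_scorer.generate(ref_dets, perturbed_dets, perturbed_masks)
    assert sal.shape == (n_dets, h, w)  # one saliency map per reference detection
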
Example #5
    def generate(
        self,
        ref_descr_1: np.ndarray,
        ref_descr_2: np.ndarray,
        perturbed_descrs: np.ndarray,
        perturbed_masks: np.ndarray,
    ) -> np.ndarray:

        if len(perturbed_descrs) != len(perturbed_masks):
            raise ValueError("Number of perturbation masks and respective "
                             "feature vectors do not match.")

        if len(ref_descr_1) != len(ref_descr_2):
            raise ValueError("Lengths of the feature vectors for the "
                             "two reference images do not match.")

        # Computing original proximity between image1 and image2 feature vectors.
        original_proximity = cdist(ref_descr_1.reshape(1, -1),
                                   ref_descr_2.reshape(1, -1),
                                   metric=self.proximity_metric)

        # Computing proximity between original image1 and perturbed image2 feature vectors.
        perturbed_proximity = cdist(ref_descr_1.reshape(1, -1),
                                    perturbed_descrs,
                                    metric=self.proximity_metric)[0]

        # Compare each perturbed proximity with the original proximity
        diff = perturbed_proximity - original_proximity

        # Keep only perturbations that increased the distance between the two
        # descriptors, i.e. that removed similarity-relevant content
        diff = np.transpose(np.clip(diff, 0, None))
        # Weighting perturbed regions with respective difference in confidence
        sal = weight_regions_by_scalar(diff, perturbed_masks)

        # Normalize final saliency map
        sal = maxabs_scale(sal.reshape(sal.shape[0], -1),
                           axis=1).reshape(sal.shape)

        # Ensure saliency map in range [-1, 1]
        sal = np.clip(sal, -1, 1)

        return sal
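
A hedged usage sketch for the similarity scorer; `sim_scorer` is a hypothetical instance (e.g. xaitk-saliency's SimilarityScoring) whose proximity_metric is a scipy cdist distance such as "euclidean".

    import numpy as np

    feat_dim, n_masks, h, w = 128, 16, 224, 224
    ref_descr_1 = np.random.rand(feat_dim)                # descriptor of image 1
    ref_descr_2 = np.random.rand(feat_dim)                # descriptor of image 2
    perturbed_descrs = np.random.rand(n_masks, feat_dim)  # descriptors of perturbed image 2
    perturbed_masks = np.random.randint(0, 2, (n_masks, h, w)).astype(np.float64)

    sal = sim_scorer.generate(ref_descr_1, ref_descr_2, perturbed_descrs, perturbed_masks)
    assert sal.shape == (1, h, w)  # a single saliency map over image 2
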