def __call__(self, inputs, targets):
    """Call function for `GradCAM`.

    Computes a Grad-CAM attribution map: forward-probes the target layer's
    activation, backprops one-hot weights for `targets`, channel-weights the
    activation by the spatially averaged gradients, then aggregates (and
    optionally resizes) the result.
    """
    self._verify_data(inputs, targets)
    # Register the backward hook on the saliency cell before the probed forward pass.
    self._hook_cell()
    with ForwardProbe(self._saliency_cell) as probe:
        inputs = unify_inputs(inputs)
        targets = unify_targets(targets)
        # One-hot (or equivalent) weights selecting the target class for backprop.
        weights = get_bp_weights(self._backward_model, *inputs, targets)
        grad_net = GradNet(self._backward_model)
        gradients = grad_net(*inputs, weights)
        # get intermediate activation
        activation = (probe.value,)
        if self._layer == "":
            # Empty layer name means attribute w.r.t. the raw inputs instead
            # of an intermediate feature map.
            activation = inputs
        # NOTE(review): the hook is expected to have stored the intermediate
        # gradient on self during backprop — confirm against the hook impl.
        self._intermediate_grad = unify_inputs(gradients)
        if self._intermediate_grad is not None:
            # average pooling on gradients
            # Pool over the spatial axes (2, 3) to get per-channel weights.
            intermediate_grad = unify_inputs(
                self._avgpool(self._intermediate_grad[0], (2, 3)))
        else:
            raise ValueError("Gradient for intermediate layer is not "
                             "obtained")
        mul = op.Mul()
        # Weight the activation by pooled gradients, then aggregate channels.
        attribution = self._aggregation_fn(
            mul(*intermediate_grad, *activation))
    # Original line layout is ambiguous here; the resize/cleanup tail is
    # placed after the probe scope, where it no longer needs `probe`.
    if self._resize:
        attribution = self._resize_fn(attribution, *inputs,
                                      mode=self._resize_mode)
    # Drop the cached gradient so stale state cannot leak into the next call.
    self._intermediate_grad = None
    return attribution
def __call__(self, inputs, targets):
    """
    Call function for `ModifiedReLU`, inherited by "Deconvolution" and "GuidedBackprop".

    Verifies the data, normalizes inputs/targets, builds the class-selecting
    backprop weights, runs the gradient network, and aggregates the raw
    gradients into a saliency map.

    Args:
        inputs (Tensor): The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`.
        targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
            If it is a 1D tensor, its length should be the same as `inputs`.

    Returns:
        Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.

    Examples:
        >>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
        >>> label = 5
        >>> # explainer is a "Deconvolution" or "GuidedBackprop" object, parse data and the target label to be
        >>> # explained and get the attribution
        >>> saliency = explainer(inputs, label)
    """
    self._verify_data(inputs, targets)
    unified_inputs = unify_inputs(inputs)
    unified_targets = unify_targets(targets)
    bp_weights = get_bp_weights(self._backward_model, unified_inputs, unified_targets)
    raw_gradients = self._grad_net(*unified_inputs, bp_weights)
    return self._aggregation_fn(raw_gradients)
def __call__(self, inputs, targets):
    """Call function for `Gradient`.

    Backpropagates one-hot weights for `targets` through the model and
    aggregates the resulting input gradient into a saliency map.
    """
    self._verify_data(inputs, targets)
    model_inputs = unify_inputs(inputs)
    labels = unify_targets(targets)
    bp_weights = get_bp_weights(self._backward_model, *model_inputs, labels)
    raw_gradient = self._grad_net(*model_inputs, bp_weights)
    saliency = self._aggregation_fn(raw_gradient)
    return saliency
def __call__(self, inputs, targets):
    """
    Call function for `ModifiedReLU`, inherited by "Deconvolution" and "GuidedBackprop".

    Args:
        inputs (Tensor): The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`.
        targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
            If it is a 1D tensor, its length should be the same as `inputs`.

    Returns:
        Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.
    """
    self._verify_data(inputs, targets)
    # Normalize both arguments into the canonical tuple forms first.
    inputs, targets = unify_inputs(inputs), unify_targets(targets)
    # Class-selecting weights, then one backward pass through the grad net.
    gradients = self._grad_net(
        *inputs, get_bp_weights(self._backward_model, inputs, targets))
    return self._aggregation_fn(gradients)
def get_bp_weights(model, inputs, targets=None, weights=None):
    r"""
    Compute the gradient of output w.r.t input.

    Builds the signal that is back-propagated into the model: either the
    caller-supplied `weights`, or a one-hot tensor over the model's output
    categories with ones at the `targets` positions.

    Args:
        model (Cell): Differentiable black-box model.
        inputs (Tensor): Input to calculate gradient and explanation.
        targets (int, optional): Target label id specifying which category to compute gradient. Default: None.
        weights (Tensor, optional): Custom weights for computing gradients. The shape of weights should match the
            model outputs. If None is provided, an one-hot weights with one in targets positions will be used instead.
            Default: None.

    Returns:
        Tensor, signal to be back-propagated to the input.
    """
    unified_inputs = unify_inputs(inputs)
    if weights is not None:
        # Explicit weights take precedence; targets are ignored in that case.
        return weights
    if targets is None:
        raise ValueError('Must provide one of targets or weights')
    class_targets = unify_targets(targets)
    # A forward pass is needed only to discover the number of categories.
    logits = model(*unified_inputs)
    return generate_one_hot(class_targets, logits.shape[-1])