Example #1
    def attr_backward(self, rel_y: np.ndarray, eps: float = 0.001) -> \
            np.ndarray:
        """

        :param rel_y:
        :param eps:
        :return:
        """
        # Propagate through the affine (gamma) term first, if present
        if self.elementwise_affine:
            rel_y = lrp_linear(self._state["gamma_term"],
                               self._state["output"],
                               rel_y,
                               eps=eps)

        num = self._state["x"] - self._state["mean"]
        rel_x = lrp_linear(self._state["x"], num, rel_y, eps=eps)
        rel_mean = lrp_linear(-self._state["mean"], num, rel_y, eps=eps)

        n = reduce(operator.mul, self.normalized_shape, 1)
        rel_x += lrp_linear(self._state["x"],
                            n * self._state["mean"],
                            rel_mean,
                            eps=eps)

        return rel_x
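
All of these examples delegate to lrp_linear, whose implementation is not shown on this page. It applies the epsilon-LRP rule: each input's contribution to the output is divided by the eps-stabilized output and multiplied by the incoming relevance. The following is a minimal sketch consistent with the call sites above (an assumption about the library's actual function; bias handling, if any, is omitted):

    def lrp_linear(x: np.ndarray,
                   z: np.ndarray,
                   rel: np.ndarray,
                   w: np.ndarray = None,
                   eps: float = 0.001) -> np.ndarray:
        # Stabilize the denominator away from zero
        z_stab = z + eps * np.where(z >= 0, 1.0, -1.0)
        s = rel / z_stab
        if w is None:
            # Element-wise case: x itself is the contribution to z
            return x * s
        # Linear case: x_i contributes x_i * w[i, j] to z_j, so relevance
        # flows back through the weights (w has shape (in, out))
        return x * (s @ w.T)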
Example #2
    def _layer_backward(self,
                        rel_y: np.ndarray,
                        layer: int,
                        direction: int,
                        eps: float = 0.001) -> np.ndarray:
        """
        Performs a backward pass using numpy operations for one layer.

        :param rel_y: The relevance flowing to this layer
        :param layer: The layer to perform the backward pass for
        :param direction: The direction to perform the backward pass for
        :param eps: Stabilizer for the epsilon-LRP rule
        :return: The relevance of the layer inputs
        """
        if direction == 0:
            x = self._input[layer]
            h, c, i, f, g, g_pre, w_ig, w_hg = self._state["ltr"][layer]
        else:
            x = np.flip(self._input[layer], 1)
            h, c, i, f, g, g_pre, w_ig, w_hg = self._state["rtl"][layer]

        batch_size, seq_len, _ = x.shape

        # Initialize
        rel_h = np.zeros((batch_size, seq_len + 1, self.hidden_size))
        rel_c = np.zeros((batch_size, seq_len + 1, self.hidden_size))
        rel_g = np.zeros(g.shape)
        rel_x = np.zeros(x.shape)

        # Backward pass
        rel_h[:, 1:] = rel_y
        for t in reversed(range(seq_len)):
            rel_c[:, t + 1] += rel_h[:, t + 1]

            # At t == 0 the previous cell state is the (zero) initial state,
            # mirroring the h_prev handling below; c[:, t - 1] would
            # otherwise wrap around to the last timestep
            c_prev = np.zeros((batch_size, self.hidden_size)) if t == 0 \
                else c[:, t - 1]
            rel_c[:, t] = lrp_linear(f[:, t] * c_prev,
                                     c[:, t],
                                     rel_c[:, t + 1],
                                     eps=eps)
            rel_g[:, t] = lrp_linear(i[:, t] * g[:, t],
                                     c[:, t],
                                     rel_c[:, t + 1],
                                     eps=eps)
            rel_x[:, t] = lrp_linear(x[:, t],
                                     g_pre[:, t],
                                     rel_g[:, t],
                                     w=w_ig,
                                     eps=eps)

            h_prev = np.zeros((batch_size, self.hidden_size)) if t == 0 \
                else h[:, t - 1]
            rel_h[:, t] += lrp_linear(h_prev,
                                      g_pre[:, t],
                                      rel_g[:, t],
                                      w=w_hg,
                                      eps=eps)

        return rel_x
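
For direction == 1 the inputs were flipped along the time axis, so the returned relevance is in reversed order and has to be flipped back before combining directions. A hypothetical usage sketch (the variable and attribute names here are assumptions, not the library's API):

    rel_ltr = lstm._layer_backward(rel_y_ltr, layer=0, direction=0)
    rel_rtl = lstm._layer_backward(rel_y_rtl, layer=0, direction=1)
    rel_inputs = rel_ltr + np.flip(rel_rtl, 1)  # undo the time flip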
Example #3
    def attr_backward(self, rel_y: HiddenArray, eps: float = 0.001) -> \
            Tuple[HiddenArray, HiddenArray, HiddenArray]:
        """

        :param rel_y:
        :param eps:
        :return:
        """
        rel_y = self.LayerNorm.attr_backward(rel_y, eps=eps)

        inp_embeds, pos_embeds, tok_type_embeds = self._state
        combined_embeds = inp_embeds + pos_embeds + tok_type_embeds
        rel_input = lrp_linear(inp_embeds, combined_embeds, rel_y, eps=eps)
        rel_pos = lrp_linear(pos_embeds, combined_embeds, rel_y, eps=eps)
        rel_tok = lrp_linear(tok_type_embeds, combined_embeds, rel_y, eps=eps)
        return rel_input, rel_pos, rel_tok
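
Since all three calls share the same denominator (combined_embeds), lrp_linear splits the incoming relevance proportionally to each summand's contribution. A small numeric illustration, using the lrp_linear sketch from above:

    x, y = np.array([2.0]), np.array([1.0])
    z = x + y                          # combined output: [3.0]
    rel = np.array([3.0])              # relevance arriving at z
    lrp_linear(x, z, rel)              # ~[2.0]: x contributed 2/3
    lrp_linear(y, z, rel)              # ~[1.0]: y contributed 1/3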
Example #4
    def attr_backward(self,
                      rel_y: np.ndarray,
                      eps: float = 0.001) -> np.ndarray:
        """
        Performs an LRP backward pass through the linear layer.

        :param rel_y: The relevance flowing to this layer
        :param eps: Stabilizer for the epsilon-LRP rule
        :return: The relevance of the layer inputs
        """
        return lrp_linear(self._input[0],
                          self._state["wx"],
                          rel_y,
                          self.weight.detach().numpy().T,
                          eps=eps)
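
The transpose converts torch's (out_features, in_features) weight layout into the (in, out) layout that the lrp_linear sketch above expects. A quick shape check (the Linear layer here is purely illustrative):

    import numpy as np
    import torch

    linear = torch.nn.Linear(8, 4)          # weight shape: (4, 8)
    w = linear.weight.detach().numpy().T    # (8, 4), i.e. (in, out)
    x = np.random.randn(2, 8).astype(np.float32)
    wx = x @ w                              # weight product only (no bias),
                                            # matching the "wx" naming; (2, 4)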
Example #5
    def attr_backward(self, rel_y: HiddenArray, eps: float = 0.001) -> \
            Tuple[HiddenArray, HiddenArray]:
        """
        Performs an LRP backward pass through the residual output block.

        :param rel_y: The relevance flowing to this layer
        :param eps: Stabilizer for the epsilon-LRP rule
        :return: The relevance of the hidden states and of the residual
            input tensor
        """
        input_tensor = self._state["input_tensor"]
        dense_output = self._state["dense_output"]
        pre_layer_norm = input_tensor + dense_output

        # Forward eps so the sub-layers use the same stabilizer
        rel_pre_layer_norm = self.LayerNorm.attr_backward(rel_y, eps=eps)

        # Split relevance between the residual and the dense branch
        rel_input_tensor = lrp_linear(input_tensor,
                                      pre_layer_norm,
                                      rel_pre_layer_norm,
                                      eps=eps)
        rel_dense_output = lrp_linear(dense_output,
                                      pre_layer_norm,
                                      rel_pre_layer_norm,
                                      eps=eps)
        rel_hidden_states = self.dense.attr_backward(rel_dense_output, eps=eps)

        return rel_hidden_states, rel_input_tensor
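
The epsilon rule is only approximately conservative: the stabilizer (and any bias terms inside the dense layer) absorbs a small share of the relevance. A heuristic sanity check, assuming a small eps:

    total_out = rel_hidden_states.sum() + rel_input_tensor.sum()
    # Not an exact identity; expect a small eps-dependent discrepancy
    assert np.isclose(total_out, rel_y.sum(), rtol=1e-2)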