Example #1
    def __init__(self, layer, state, low=-1, high=1, copy_weights=False):
        self._low = low
        self._high = high

        # This rule works with three variants of the layer, all without
        # biases: the original one, and two copies that keep only the
        # positive or negative weights, respectively.
        if copy_weights:
            weights = layer.get_weights()
            if layer.use_bias:
                weights = weights[:-1]
            positive_weights = [x * (x > 0) for x in weights]
            negative_weights = [x * (x < 0) for x in weights]
        else:
            weights = layer.weights
            if layer.use_bias:
                weights = weights[:-1]
            positive_weights = [x * iK.to_floatx(x > 0) for x in weights]
            negative_weights = [x * iK.to_floatx(x < 0) for x in weights]

        self._layer_wo_act = kgraph.copy_layer_wo_activation(
            layer, keep_bias=False, name_template="reversed_kernel_%s")
        self._layer_wo_act_positive = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=False,
            weights=positive_weights,
            name_template="reversed_kernel_positive_%s")
        self._layer_wo_act_negative = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=False,
            weights=negative_weights,
            name_template="reversed_kernel_negative_%s")
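
In the `copy_weights` branch the weights are NumPy arrays, so `x * (x > 0)` multiplies by a boolean mask directly; in the symbolic branch the comparison yields a non-float tensor, hence the explicit `iK.to_floatx` cast. A minimal NumPy sketch of the masking idiom (the array values are illustrative):

    import numpy as np

    w = np.array([[1.5, -2.0], [-0.5, 3.0]])
    positive = w * (w > 0)  # keep positive entries, zero the rest
    negative = w * (w < 0)  # keep negative entries, zero the rest
    assert np.allclose(positive + negative, w)  # the two parts split w exactly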
Example #2
    def __init__(self,
                 layer,
                 state,
                 alpha=(0.5, 0.5),
                 beta=(0.5, 0.5),
                 bias=True,
                 copy_weights=False):
        self._alpha = alpha
        self._beta = beta

        # prepare positive and negative weights for computing positive
        # and negative preactivations z in apply_accordingly.
        if copy_weights:
            weights = layer.get_weights()
            if not bias and getattr(layer, "use_bias", False):
                weights = weights[:-1]
            positive_weights = [x * (x > 0) for x in weights]
            negative_weights = [x * (x < 0) for x in weights]
        else:
            weights = layer.weights
            if not bias and getattr(layer, "use_bias", False):
                weights = weights[:-1]
            positive_weights = [x * iK.to_floatx(x > 0) for x in weights]
            negative_weights = [x * iK.to_floatx(x < 0) for x in weights]

        self._layer_wo_act_positive = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=bias,
            weights=positive_weights,
            name_template="reversed_kernel_positive_%s")
        self._layer_wo_act_negative = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=bias,
            weights=negative_weights,
            name_template="reversed_kernel_negative_%s")
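
The two masked layer copies let the alpha-beta rule evaluate the positive and negative preactivations separately; for non-negative inputs (e.g. post-ReLU activations) the two copies split the full preactivation exactly. A small NumPy sketch under that assumption:

    import numpy as np

    x = np.array([1.0, 2.0])                  # non-negative inputs
    w = np.array([[2.0, -1.0], [-3.0, 4.0]])
    w_pos, w_neg = w * (w > 0), w * (w < 0)
    z_pos, z_neg = x @ w_pos, x @ w_neg       # z_pos >= 0, z_neg <= 0
    assert np.allclose(z_pos + z_neg, x @ w)  # together they recover x @ w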
Example #3
    def __init__(self, layer, state, copy_weights=False):
        # The z-plus rule only works with positive weights and
        # no biases.
        # TODO: assert that layer inputs are always >= 0
        if copy_weights:
            weights = [
                x * iK.to_floatx(x > 0) for x in layer.get_weights()[:-1]
            ]
        else:
            weights = [x * iK.to_floatx(x > 0) for x in layer.weights[:-1]]

        self._layer_wo_act_b_positive = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=False,
            weights=weights,
            name_template="reversed_kernel_positive_%s")
Example #4
    def __init__(
        self,
        layer: Layer,
        _state,
        alpha=None,
        beta=None,
        bias: bool = True,
        copy_weights=False,
    ) -> None:
        alpha, beta = rutils.assert_infer_lrp_alpha_beta_param(
            alpha, beta, self)
        self._alpha = alpha
        self._beta = beta

        # prepare positive and negative weights for computing positive
        # and negative preactivations z in apply_accordingly.
        if copy_weights:
            weights = layer.get_weights()
            if not bias and layer.use_bias:
                weights = weights[:-1]
            positive_weights = [x * (x > 0) for x in weights]
            negative_weights = [x * (x < 0) for x in weights]
        else:
            weights = layer.weights
            if not bias and layer.use_bias:
                weights = weights[:-1]
            positive_weights = [x * iK.to_floatx(x > 0) for x in weights]
            negative_weights = [x * iK.to_floatx(x < 0) for x in weights]

        self._layer_wo_act_positive = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=bias,
            weights=positive_weights,
            name_template="reversed_kernel_positive_%s",
        )
        self._layer_wo_act_negative = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=bias,
            weights=negative_weights,
            name_template="reversed_kernel_negative_%s",
        )
Example #5
 def call(self, x: OptionalList[Tensor], **_kwargs) -> List[Tensor]:
     return [
         K.sum(iK.to_floatx(iK.is_not_finite(tmp)))
         for tmp in iutils.to_list(x)
     ]
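
This layer counts the non-finite entries (NaN or Inf) in each input tensor. A sketch of the same computation in plain TensorFlow, assuming `iK.is_not_finite` behaves like the negation of `tf.math.is_finite` (an assumption about the backend helper):

    import tensorflow as tf

    x = tf.constant([1.0, float("nan"), float("inf"), 2.0])
    # Cast the boolean mask to float and sum it to count the bad entries.
    mask = tf.logical_not(tf.math.is_finite(x))
    n_bad = tf.reduce_sum(tf.cast(mask, tf.float32))
    print(n_bad)  # 2.0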
Example #6
 def call(self, x: OptionalList[Tensor], **_kwargs) -> List[Tensor]:
     return [iK.to_floatx(tmp) for tmp in iutils.to_list(x)]
Example #7
 def safe_divide(a, b):
     return a / (b + iK.to_floatx(K.equal(b, K.constant(0))) * 1)
Example #8
 def call(self, x: Tuple[Tensor, Tensor]) -> Tensor:
     a, b = x
     return a / (b + iK.to_floatx(K.equal(b, K.constant(0))) * self._factor)
Example #9
 def safe_divide(A: Tensor, B: Tensor) -> Tensor:
     return A / (B + iK.to_floatx(K.equal(B, K.constant(0))) * 1)
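
Examples #7 to #9 share the same guard: wherever `B` is exactly zero, the cast `iK.to_floatx(K.equal(B, 0))` contributes 1.0, so the factor is added to the denominator and the division returns `A / factor` instead of NaN or Inf. A NumPy sketch of the trick:

    import numpy as np

    def safe_divide(a, b, factor=1.0):
        # Where b == 0, substitute `factor` so the division stays defined.
        return a / (b + (b == 0).astype(a.dtype) * factor)

    print(safe_divide(np.array([1.0, 2.0]), np.array([0.0, 4.0])))  # [1.  0.5]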
Example #10
 def _apply_reduce(self, x: Tensor, axis: Optional[OptionalList[int]],
                   keepdims: bool) -> Tensor:
     return K.sum(iK.to_floatx(K.not_equal(x, K.constant(0))),
                  axis=axis,
                  keepdims=keepdims)
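
The reduction counts the non-zero entries of `x` along the given axis: the inequality mask is cast to float and summed. An equivalent TensorFlow sketch:

    import tensorflow as tf

    x = tf.constant([[0.0, 1.0, 2.0],
                     [0.0, 0.0, 3.0]])
    # Non-zero mask -> float -> sum: per-row counts of non-zero entries.
    count = tf.reduce_sum(tf.cast(tf.not_equal(x, 0.0), tf.float32),
                          axis=1, keepdims=False)
    print(count)  # [2. 1.]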