Example #1
    def _create_analysis(self, *args, **kwargs):

        low, high = self._bounds_low, self._bounds_high

        class BoundedProxyRule(lrp_rules.BoundedRule):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, low=low, high=high, **kwargs)

        self._add_conditional_reverse_mapping(
            lambda l: kchecks.is_input_layer(l) and kchecks.contains_kernel(l),
            BoundedProxyRule,
            name="deep_taylor_first_layer_bounded",
            priority=10,  # do first
        )

        return super()._create_analysis(*args, **kwargs)
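The `BoundedProxyRule` defined inside `_create_analysis` is a small closure trick: the nested class captures `low` and `high` from the enclosing scope, so the framework can later instantiate the rule with only the usual `(layer, state)` arguments. Below is a minimal, self-contained sketch of that pattern; the `BoundedRule` stand-in and the example bounds are illustrative, not the innvestigate API.

# Stand-in base class: all this sketch assumes is a rule constructor that
# accepts a layer, a reverse state, and low/high bounds.
class BoundedRule:
    def __init__(self, layer, state, low=-1.0, high=1.0):
        self.layer, self.state = layer, state
        self.low, self.high = low, high


def make_bounded_proxy(low, high):
    # The nested class freezes low/high via the enclosing scope, so code that
    # only passes (layer, state) still gets a rule with the desired bounds.
    class BoundedProxyRule(BoundedRule):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, low=low, high=high, **kwargs)

    return BoundedProxyRule


InputRule = make_bounded_proxy(low=0.0, high=255.0)
rule = InputRule("input_layer", {})   # bounds are already baked in
print(rule.low, rule.high)            # -> 0.0 255.0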
Example #2
    def __init__(self, model, *args, **kwargs):
        rule = kwargs.pop("rule", None)
        input_layer_rule = kwargs.pop("input_layer_rule", None)

        self._add_model_softmax_check()
        self._add_model_check(
            lambda layer: not kchecks.is_convnet_layer(layer),
            "LRP is only tested for convolutional neural networks.",
            check_type="warning",
        )

        # check if rule was given explicitly.
        # rule can be a string, a list (of strings) or a list of conditions [(Condition, Rule), ... ] for each layer.
        if rule is None:
            raise ValueError("Need LRP rule(s).")

        if isinstance(rule, list):
            # copy references
            self._rule = list(rule)
        else:
            self._rule = rule
        self._input_layer_rule = input_layer_rule

        if isinstance(rule, six.string_types) or (
            inspect.isclass(rule) and issubclass(rule, kgraph.ReverseMappingBase)
        ):  # NOTE: All LRP rules inherit from kgraph.ReverseMappingBase
            # the given rule is a single string or a single rule-implementing class
            use_conditions = True
            rules = [(lambda a, b: True, rule)]

        elif not isinstance(rule[0], tuple):
            # rule list of rule strings or classes
            use_conditions = False
            rules = list(rule)
        else:
            # rule is list of conditioned rules
            use_conditions = True
            rules = rule

        # create a BoundedRule for input layer handling from given tuple
        if self._input_layer_rule is not None:
            input_layer_rule = self._input_layer_rule
            if isinstance(input_layer_rule, tuple):
                low, high = input_layer_rule

                class BoundedProxyRule(rrule.BoundedRule):
                    def __init__(self, *args, **kwargs):
                        super(BoundedProxyRule, self).__init__(*args,
                                                               low=low,
                                                               high=high,
                                                               **kwargs)

                input_layer_rule = BoundedProxyRule

            if use_conditions is True:
                rules.insert(0,
                             (lambda layer, foo: kchecks.is_input_layer(layer),
                              input_layer_rule))

            else:
                rules.insert(0, input_layer_rule)

        self._rules_use_conditions = use_conditions
        self._rules = rules

        # FINALIZED constructor.
        super(LRP, self).__init__(model, *args, **kwargs)
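The constructor above accepts `rule` in three shapes: a single string or rule class, a flat list of strings/classes consumed one per layer, or a list of `(condition, rule)` tuples. A hedged sketch of just that branching, pulled out of the class; the rule names and the `ReverseMappingBase` stand-in are illustrative.

import inspect


class ReverseMappingBase:  # stand-in for kgraph.ReverseMappingBase
    pass


def normalize_rules(rule):
    # Mirrors the branching above and returns (use_conditions, rules).
    if rule is None:
        raise ValueError("Need LRP rule(s).")

    if isinstance(rule, str) or (
        inspect.isclass(rule) and issubclass(rule, ReverseMappingBase)
    ):
        # single rule -> wrap it in one catch-all condition
        return True, [(lambda layer, state: True, rule)]
    if not isinstance(rule[0], tuple):
        # flat list of rule names/classes, applied layer by layer
        return False, list(rule)
    # already a list of (condition, rule) tuples
    return True, list(rule)


print(normalize_rules("Epsilon"))         # single name -> conditioned catch-all
print(normalize_rules(["Z", "Epsilon"]))  # flat list -> positional rules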
Example #3
    def __init__(
        self,
        model,
        *args,
        rule=None,
        input_layer_rule=None,
        until_layer_idx=None,
        until_layer_rule=None,
        bn_layer_rule=None,
        bn_layer_fuse_mode: str = "one_linear",
        **kwargs,
    ):
        super().__init__(model, *args, **kwargs)

        self._input_layer_rule = input_layer_rule
        self._until_layer_rule = until_layer_rule
        self._until_layer_idx = until_layer_idx
        self._bn_layer_rule = bn_layer_rule
        self._bn_layer_fuse_mode = bn_layer_fuse_mode

        # Add model checks
        self._add_model_softmax_check()
        self._add_model_check(
            lambda layer: not kchecks.is_convnet_layer(layer),
            "LRP is only tested for convolutional neural networks.",
            check_type="warning",
        )

        assert bn_layer_fuse_mode in ["one_linear", "two_linear"]

        # TODO: refactor rule type checking into separate function
        # check if rule was given explicitly.
        # rule can be a string, a list (of strings) or
        # a list of conditions [(Condition, Rule), ... ] for each layer.
        if rule is None:
            raise ValueError("Need LRP rule(s).")

        if isinstance(rule, list):
            self._rule = list(rule)
        else:
            self._rule = rule

        if isinstance(rule, str) or (
            inspect.isclass(rule) and issubclass(rule, kgraph.ReverseMappingBase)
        ):  # NOTE: All LRP rules inherit from kgraph.ReverseMappingBase
            # the given rule is a single string or a single rule-implementing class
            use_conditions = True
            rules = [(lambda _: True, rule)]

        elif not isinstance(rule[0], tuple):
            # rule list of rule strings or classes
            use_conditions = False
            rules = list(rule)
        else:
            # rule is list of conditioned rules
            use_conditions = True
            rules = rule

        # apply until_layer_rule to all layers up to and including index self._until_layer_idx
        if self._until_layer_rule is not None and self._until_layer_idx is not None:
            for i in range(self._until_layer_idx + 1):
                # bind the current value of i as a default argument to avoid the
                # late-binding closure pitfall (all lambdas sharing the final i)
                is_at_idx: LayerCheck = lambda layer, i=i: kchecks.is_layer_at_idx(layer, i)
                rules.insert(0, (is_at_idx, self._until_layer_rule))

        # create a BoundedRule for input layer handling from given tuple
        if self._input_layer_rule is not None:
            input_layer_rule = self._input_layer_rule
            if isinstance(input_layer_rule, tuple):
                low, high = input_layer_rule

                class BoundedProxyRule(rrule.BoundedRule):
                    def __init__(self, *args, **kwargs):
                        super().__init__(*args, low=low, high=high, **kwargs)

                input_layer_rule = BoundedProxyRule

            if use_conditions is True:
                is_input: LayerCheck = lambda layer: kchecks.is_input_layer(layer)
                rules.insert(0, (is_input, input_layer_rule))
            else:
                rules.insert(0, input_layer_rule)

        self._rules_use_conditions = use_conditions
        self._rules = rules
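Both the input-layer rule and the `until_layer_rule` conditions are inserted at index 0, so they are evaluated before the more general entries. A small sketch of that first-match resolution order; layer objects are replaced by plain strings and the rule names are illustrative.

def resolve_rule(layer, rules):
    # conditions are checked in list order; the first match wins
    for condition, rule in rules:
        if condition(layer):
            return rule
    raise LookupError(f"no rule applies to layer: {layer}")


rules = [(lambda layer: True, "Epsilon")]                     # catch-all default
rules.insert(0, (lambda layer: layer == "input", "Bounded"))  # checked first

print(resolve_rule("input", rules))    # -> Bounded
print(resolve_rule("dense_1", rules))  # -> Epsilon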
Example #4
    def __init__(self, model, *args, **kwargs):
        rule = kwargs.pop("rule", None)
        input_layer_rule = kwargs.pop("input_layer_rule", None)
        self._model_checks = [
            # TODO: Check for non-linear output in general.
            {
                "check": lambda layer: kchecks.contains_activation(
                    layer, activation="softmax"),
                "type": "exception",
                "message": "Model should not contain a softmax.",
            },
            {
                "check": lambda layer: not kchecks.is_convnet_layer(layer),
                "type": "warning",
                "message": ("LRP is only tested for "
                            "convolutional neural networks."),
            },
        ]


        # check if rule was given explicitly.
        # rule can be a string, a list (of strings) or a list of conditions [(Condition, Rule), ... ] for each layer.
        if rule is None:
            raise ValueError("Need LRP rule(s).")



        if isinstance(rule, list):
            # copy references
            self._rule = list(rule)
        else:
            self._rule = rule
        self._input_layer_rule = input_layer_rule


        if isinstance(rule, six.string_types) or (
            inspect.isclass(rule) and issubclass(rule, kgraph.ReverseMappingBase)
        ):  # NOTE: All LRP rules inherit from kgraph.ReverseMappingBase
            # the given rule is a single string or a single rule-implementing class
            use_conditions = True
            rules = [(lambda a, b: True, rule)]

        elif not isinstance(rule[0], tuple):
            # rule list of rule strings or classes
            use_conditions = False
            rules = list(rule)
        else:
            # rule is list of conditioned rules
            use_conditions = True
            rules = rule


        # create a BoundedRule for input layer handling from given tuple
        if self._input_layer_rule is not None:
            input_layer_rule = self._input_layer_rule
            if isinstance(input_layer_rule, tuple):
                low, high = input_layer_rule

                class BoundedProxyRule(rrule.BoundedRule):
                    def __init__(self, *args, **kwargs):
                        super(BoundedProxyRule, self).__init__(
                            *args, low=low, high=high, **kwargs)
                input_layer_rule = BoundedProxyRule


            if use_conditions is True:
                rules.insert(0,
                             (lambda layer, foo: kchecks.is_input_layer(layer),
                              input_layer_rule))

            else:
                rules.insert(0, input_layer_rule)

        ####################################################################
        ### Functionality responsible for backwards rule selection below ###
        ####################################################################

        def select_rule(layer, reverse_state):
            ##print("in select_rule:", layer.__class__.__name__ , end='->') #debug
            if use_conditions is True:
                for condition, rule in rules:
                    if condition(layer, reverse_state):
                        ##print(str(rule)) #debug
                        return rule
                raise Exception("No rule applies to layer: %s" % layer)
            else:
                ##print(str(rules[0]), '(via pop)') #debug
                return rules.pop()


        # default backward hook
        class ReverseLayer(kgraph.ReverseMappingBase):
            def __init__(self, layer, state):
                rule_class = select_rule(layer, state) #NOTE: this prevents refactoring.
                ##print("in ReverseLayer.init:",layer.__class__.__name__,"->" , rule_class if isinstance(rule_class, six.string_types) else rule_class.__name__) #debug
                if isinstance(rule_class, six.string_types):
                    rule_class = LRP_RULES[rule_class]
                self._rule = rule_class(layer, state)

            def apply(self, Xs, Ys, Rs, reverse_state):
                ##print("    in ReverseLayer.apply:", reverse_state['layer'].__class__.__name__, '(nid: {})'.format(reverse_state['nid']) ,  '-> {}.apply'.format(self._rule.__class__.__name__))
                return self._rule.apply(Xs, Ys, Rs, reverse_state)


        # specialized backward hooks. TODO: add ReverseLayer class handling layers without kernel: Add and AvgPool
        class BatchNormalizationReverseLayer(kgraph.ReverseMappingBase):
            def __init__(self, layer, state):
                ##print("in BatchNormalizationReverseLayer.init:", layer.__class__.__name__,"-> Dedicated ReverseLayer class" ) #debug
                config = layer.get_config()

                self._center = config['center']
                self._scale = config['scale']
                self._axis = config['axis']

                self._mean = layer.moving_mean
                self._std = layer.moving_variance
                if self._center:
                    self._beta = layer.beta

                #TODO: implement rule support. for BatchNormalization -> [BNEpsilon, BNAlphaBeta, BNIgnore]
                #super(BatchNormalizationReverseLayer, self).__init__(layer, state)
                # how to do this:
                # super.__init__ calls select_rule and sets a self._rule class
                # check if isinstance(self._rule, EpsilonRule), then reroute
                # to BatchNormEpsilonRule. Not pretty, but should work.

            def apply(self, Xs, Ys, Rs, reverse_state):
                ##print("    in BatchNormalizationReverseLayer.apply:", reverse_state['layer'].__class__.__name__, '(nid: {})'.format(reverse_state['nid']))

                input_shape = [K.int_shape(x) for x in Xs]
                if len(input_shape) != 1:
                    #extend below lambda layers towards multiple parameters.
                    raise ValueError("BatchNormalizationReverseLayer expects Xs with len(Xs) = 1, but was len(Xs) = {}".format(len(Xs)))
                input_shape = input_shape[0]

                # prepare broadcasting shape for layer parameters
                broadcast_shape = [1] * len(input_shape)
                broadcast_shape[self._axis] = input_shape[self._axis]
                broadcast_shape[0] =  -1

                #reweight relevances as
                #        x * (y - beta)     R
                # Rin = ---------------- * ----
                #           x - mu          y
                # batch norm can be considered as 3 distinct layers of subtraction,
                # multiplication and then addition. The multiplicative scaling layer
                # has no effect on LRP and functions as a linear activation layer

                minus_mu = keras.layers.Lambda(lambda x: x - K.reshape(self._mean, broadcast_shape))
                minus_beta = keras.layers.Lambda(lambda x: x - K.reshape(self._beta, broadcast_shape))
                prepare_div = keras.layers.Lambda(lambda x: x + (K.cast(K.greater_equal(x,0), K.floatx())*2-1)*K.epsilon())


                x_minus_mu = kutils.apply(minus_mu, Xs)
                if self._center:
                    y_minus_beta = kutils.apply(minus_beta, Ys)
                else:
                    y_minus_beta = Ys

                numerator = [keras.layers.Multiply()([x, ymb, r])
                             for x, ymb, r in zip(Xs, y_minus_beta, Rs)]
                denominator = [keras.layers.Multiply()([xmm, y])
                             for xmm, y in zip(x_minus_mu, Ys)]

                return [ilayers.SafeDivide()([n, prepare_div(d)])
                        for n, d in zip(numerator, denominator)]

        class AddReverseLayer(kgraph.ReverseMappingBase):
            def __init__(self, layer, state):
                ##print("in AddReverseLayer.init:", layer.__class__.__name__,"-> Dedicated ReverseLayer class" ) #debug
                self._layer_wo_act = kgraph.copy_layer_wo_activation(layer,
                                                                     name_template="reversed_kernel_%s")

                #TODO: implement rule support.
                #super(AddReverseLayer, self).__init__(layer, state)

            def apply(self, Xs, Ys, Rs, reverse_state):
                # the output of the Add layer is the sum of its inputs; the gradient
                # is 1 for each output-to-input connection, which corresponds to the
                # "weights" of the layer. It is therefore sufficient to reweight the
                # relevances and do a gradient_wrt pass.
                grad = ilayers.GradientWRT(len(Xs))
                # Get activations.
                Zs = kutils.apply(self._layer_wo_act, Xs)
                # Divide incoming relevance by the activations.
                tmp = [ilayers.SafeDivide()([a, b])
                       for a, b in zip(Rs, Zs)]

                # Propagate the relevance to input neurons
                # using the gradient.
                tmp = iutils.to_list(grad(Xs+Zs+tmp))
                # Re-weight relevance with the input values.
                return [keras.layers.Multiply()([a, b])
                        for a, b in zip(Xs, tmp)]



        class AveragePoolingReverseLayer(kgraph.ReverseMappingBase):
            def __init__(self, layer, state):
                ##print("in AveragePoolingReverseLayer.init:", layer.__class__.__name__,"-> Dedicated ReverseLayer class" ) #debug
                self._layer_wo_act = kgraph.copy_layer_wo_activation(layer,
                                                                     name_template="reversed_kernel_%s")

                #TODO: implement rule support.
                #super(AveragePoolingReverseLayer, self).__init__(layer, state)

            def apply(self, Xs, Ys, Rs, reverse_state):
                # the output of the pooling operation at each location is the sum of its inputs.
                # the forward messages must be known in this case, and are the inputs for each pooling window.
                # the gradient is 1 for each output-to-input connection, which corresponds to the "weights"
                # of the layer. It is thus sufficient to reweight the relevances and do a gradient_wrt

                grad = ilayers.GradientWRT(len(Xs))
                # Get activations.
                Zs = kutils.apply(self._layer_wo_act, Xs)
                # Divide incoming relevance by the activations.
                tmp = [ilayers.SafeDivide()([a, b])
                       for a, b in zip(Rs, Zs)]

                # Propagate the relevance to input neurons
                # using the gradient.
                tmp = iutils.to_list(grad(Xs+Zs+tmp))
                # Re-weight relevance with the input values.
                return [keras.layers.Multiply()([a, b])
                        for a, b in zip(Xs, tmp)]

        # conditional mappings layer_criterion -> ReverseLayer on how to handle backward passes through layers.
        self._conditional_mappings = [
            (kchecks.contains_kernel, ReverseLayer),
            (kchecks.is_batch_normalization_layer, BatchNormalizationReverseLayer),
            (kchecks.is_average_pooling, AveragePoolingReverseLayer),
            (kchecks.is_add_layer, AddReverseLayer),
        ]

        # FINALIZED constructor.
        super(LRP, self).__init__(model, *args, **kwargs)
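Two of the dedicated hooks above carry their reasoning in comments; the sketches below restate them numerically. First, the Add/average-pooling hooks: the output is the sum of its inputs and the gradient of every output-to-input connection is 1, so dividing the relevance by the output, propagating it through the gradient, and multiplying with the input shares relevance in proportion to the inputs. A minimal NumPy sketch of one such window with made-up values:

import numpy as np

x = np.array([2.0, 1.0, 1.0])   # inputs of one pooling window / Add node
z = x.sum()                      # forward output: sum of the inputs
R_out = 1.0                      # relevance arriving at the output

grad = np.ones_like(x)           # d z / d x_i = 1 for every connection
R_in = x * grad * (R_out / z)    # SafeDivide -> GradientWRT -> Multiply, in spirit
print(R_in, R_in.sum())          # -> [0.5 0.25 0.25] 1.0

Second, BatchNormalizationReverseLayer reweights relevance as Rin = x * (y - beta) / (x - mu) * R / y, because the multiplicative scaling of batch norm has no effect on LRP while the shift by beta is handled like a bias. A numeric sketch with made-up parameters (the real code guards the divisions with SafeDivide and an epsilon):

import numpy as np

x = np.array([1.5, -0.3, 2.0])          # layer input
mu = np.array([0.5, 0.1, 1.0])          # moving mean
var = np.array([1.0, 4.0, 0.25])        # moving variance
gamma = np.array([2.0, 1.0, 0.5])       # scale
beta = np.array([0.1, -0.2, 0.0])       # shift
eps = 1e-5

y = gamma * (x - mu) / np.sqrt(var + eps) + beta   # forward batch norm
R_out = np.array([0.6, 0.3, 0.1])                  # relevance arriving at y

R_in = x * (y - beta) / (x - mu) * R_out / y       # reweighting from the comment above
print(R_in)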