    def __init__(self,
                 layer,
                 state,
                 alpha=None,
                 beta=None,
                 bias=True,
                 copy_weights=False):
        alpha, beta = rutils.assert_infer_lrp_alpha_beta_param(
            alpha, beta, self)
        self._alpha = alpha
        self._beta = beta

        # prepare positive and negative weights for computing positive
        # and negative preactivations z in apply_accordingly.
        if copy_weights:
            weights = layer.get_weights()
            if not bias:
                weights = weights[:-1]
            positive_weights = [x * (x > 0) for x in weights]
            negative_weights = [x * (x < 0) for x in weights]
        else:
            weights = layer.weights
            if not bias:
                weights = weights[:-1]
            positive_weights = [x * iK.to_floatx(x > 0) for x in weights]
            negative_weights = [x * iK.to_floatx(x < 0) for x in weights]

        self._layer_wo_act_positive = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=bias,
            weights=positive_weights,
            name_template="reversed_kernel_positive_%s")
        self._layer_wo_act_negative = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=bias,
            weights=negative_weights,
            name_template="reversed_kernel_negative_%s")
Example #2
    def __init__(self, layer, state, copy_weights=False):
        # W-square rule works with squared weights and no biases.
        if copy_weights:
            weights = layer.get_weights()
        else:
            weights = layer.weights
        if layer.use_bias:
            weights = weights[:-1]
        weights = [x**2 for x in weights]

        self._layer_wo_act_b = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=False,
            weights=weights,
            name_template="reversed_kernel_%s")
Example #3
    def __init__(self, layer, state, copy_weights=False):
        # The z-plus rule only works with positive weights and
        # no biases.
        # TODO: assert that layer inputs are always >= 0
        if copy_weights:
            weights = [
                x * iK.to_floatx(x > 0) for x in layer.get_weights()[:-1]
            ]
        else:
            weights = [x * iK.to_floatx(x > 0) for x in layer.weights[:-1]]

        self._layer_wo_act_b_positive = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=False,
            weights=weights,
            name_template="reversed_kernel_positive_%s")
Example #4
    def get_stats_from_batch(self):
        # Get the neuron-wise I/O for this layer.
        layer = kgraph.copy_layer_wo_activation(self.layer,
                                                keep_bias=False,
                                                reuse_symbolic_tensors=False)
        # Readjust the layer nodes.
        for i in range(kgraph.get_layer_inbound_count(self.layer)):
            layer(self.layer.get_input_at(i))
        Xs, Ys = get_active_neuron_io(layer, self._active_node_indices)
        if len(Ys) != 1:
            raise ValueError("Assume that kernel layers have only one output.")
        X, Y = Xs[0], Ys[0]

        # Create layers that keep a running mean for the desired stats.
        self.mean_x = ilayers.RunningMeans()
        self.mean_y = ilayers.RunningMeans()
        self.mean_xy = ilayers.RunningMeans()

        # Compute mask and active neuron counts.
        mask = ilayers.AsFloatX()(self._get_neuron_mask())
        Y_masked = keras.layers.multiply([Y, mask])
        count = ilayers.CountNonZero(axis=0)(mask)
        count_all = ilayers.Sum(axis=0)(ilayers.OnesLike()(mask))

        # Get means ...
        def norm(x, count):
            return ilayers.SafeDivide(factor=1)([x, count])

        # ... along active neurons.
        mean_x = norm(ilayers.Dot()([ilayers.Transpose()(X), mask]), count)
        mean_xy = norm(ilayers.Dot()([ilayers.Transpose()(X), Y_masked]),
                       count)

        _, a = self.mean_x([mean_x, count])
        _, b = self.mean_xy([mean_xy, count])

        # ... along all neurons.
        mean_y = norm(ilayers.Sum(axis=0)(Y), count_all)
        _, c = self.mean_y([mean_y, count_all])

        # Create a dummy output to have a connected graph.
        # Needs to have the shape (mb_size, 1)
        dummy = keras.layers.Average()([a, b, c])
        return ilayers.Sum(axis=None)(dummy)
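The RunningMeans layer itself is not shown here; a plausible sketch of the count-weighted update it is assumed to perform (hypothetical helper, illustrative numbers):

import numpy as np

def update_running_mean(mean_old, count_old, mean_batch, count_batch):
    # Combine the stored mean and the batch mean, weighted by sample counts.
    count_new = count_old + count_batch
    mean_new = (mean_old * count_old + mean_batch * count_batch) / max(count_new, 1)
    return mean_new, count_new

m, n = 0.0, 0
m, n = update_running_mean(m, n, mean_batch=2.0, count_batch=4)
m, n = update_running_mean(m, n, mean_batch=5.0, count_batch=2)
assert np.isclose(m, (2.0 * 4 + 5.0 * 2) / 6)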
Example #5
    def __init__(self, layer, state, copy_weights=False):
        # The flat rule works with weights equal to one and
        # no biases.
        if copy_weights:
            weights = layer.get_weights()
            if layer.use_bias:
                weights = weights[:-1]
            weights = [np.ones_like(x) for x in weights]
        else:
            weights = layer.weights
            if layer.use_bias:
                weights = weights[:-1]
            weights = [K.ones_like(x) for x in weights]

        self._layer_wo_act_b = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=False,
            weights=weights,
            name_template="reversed_kernel_%s")
Example #6
        def __init__(self, layer, state):
            #print("in AddReverseLayer.init:", layer.__class__.__name__,"-> Dedicated ReverseLayer class" ) #debug
            self._layer_wo_act = kgraph.copy_layer_wo_activation(layer,
                                                                 name_template="reversed_kernel_%s")

            input_channels = [int(i.shape[-1]) for i in layer.input]
            self._merge_layer = keras.layers.Concatenate()

            self._sum_layer_with_kernel = keras.layers.Conv2D(input_channels[0], (1, 1),
                                                              #kernel_initializer=add_init,
                                                              use_bias=False)
            self._sum_layer_with_kernel.build((None, None, None, sum(input_channels)))
            #self._sum_layer_with_kernel.weights[0].initializer.run(session=K.get_session())

            weight_shape = [int(d) for d in self._sum_layer_with_kernel.weights[0].shape]
            self._sum_layer_with_kernel.set_weights([add_init(weight_shape)])

            x = self._merge_layer(layer.input)
            x = self._sum_layer_with_kernel(x)

            self._rule = rule(self._sum_layer_with_kernel, state)
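Here `add_init` and `rule` come from the enclosing scope and are not shown. Assuming `add_init` builds a kernel that stacks two identity matrices, the Concatenate followed by the 1x1 convolution reproduces an elementwise Add, as this NumPy sketch illustrates:

import numpy as np

c = 3
x1 = np.random.rand(1, 4, 4, c)
x2 = np.random.rand(1, 4, 4, c)
concat = np.concatenate([x1, x2], axis=-1)       # (1, 4, 4, 2c)
kernel = np.concatenate([np.eye(c), np.eye(c)])  # (2c, c): stacked identities
summed = concat @ kernel                         # a 1x1 conv is a matmul over channels
assert np.allclose(summed, x1 + x2)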
Example #7
    def __init__(self, layer, state):
        ## print("in AveragePoolingReverseLayer.init:", layer.__class__.__name__, "-> Dedicated ReverseLayer class")  # debug
        self._layer_wo_act = kgraph.copy_layer_wo_activation(
            layer, name_template="reversed_kernel_%s")
Example #8
    def __init__(self, layer, state, epsilon=1e-7, bias=True):
        self._epsilon = rutils.assert_lrp_epsilon_param(epsilon, self)
        self._layer_wo_act = kgraph.copy_layer_wo_activation(
            layer, keep_bias=bias, name_template="reversed_kernel_%s")
Example #9
    def __init__(self, layer, state, bias=True):
        self._layer_wo_act = kgraph.copy_layer_wo_activation(
            layer, keep_bias=bias, name_template="reversed_kernel_%s")
Example #10
    def __init__(self, layer, state):
        self._activation = keras.layers.Activation("relu")
        self._layer_wo_relu = kgraph.copy_layer_wo_activation(
            layer,
            name_template="reversed_%s",
        )
Example #11
import numpy as np
import keras
from keras.models import Model, model_from_json

# copy_layer_wo_activation is assumed to come from iNNvestigate's
# Keras graph utilities.
from innvestigate.utils.keras.graph import copy_layer_wo_activation

segmentshigher = []
segmentslower = []
shapelets_orig = []
shapelets_new = []

# load json and create model
json_file = open('modelsmall0.35.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("smallmodel0.35.h5")

# Create a model without sigmoid for gradient ascent/descent.
layer = model.layers[-1]
layer_wo_act = copy_layer_wo_activation(layer)
output_layer = layer_wo_act(layer.input)
model_wo_sigm = keras.models.Model(inputs=model.inputs, outputs=[output_layer])

model_wo_sigm.save_weights("model_wo_sigm.h5")

# Create an intermediate model for shapelet detection.

int_model = Model(inputs=model.inputs, outputs=[model.layers[-2].output])

# Rename the copied layers to avoid name clashes with the original model.
for i, layer in enumerate(int_model.layers):
    layer.name = 'layer_' + str(i)

train_x = np.load("train_x.npy")

predictions = model.predict(train_x)
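A quick sanity check (assuming the stripped layer's activation was a sigmoid, as the comment above suggests): applying the sigmoid to the new model's logits should reproduce the original predictions.

logits = model_wo_sigm.predict(train_x)
restored = 1.0 / (1.0 + np.exp(-logits))
assert np.allclose(restored, predictions, atol=1e-5)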
Example #12
    def __init__(self, layer: Layer, state, bias: bool = True) -> None:
        self._layer_wo_act = kgraph.copy_layer_wo_activation(
            layer, keep_bias=bias, name_template="reversed_kernel_%s")
        super().__init__(layer, state)