    def backprop_max_pool2d(self,
                            a,
                            r,
                            ksize=(1, 2, 2, 1),
                            strides=(1, 2, 2, 1)):
        z = K.pool2d(a,
                     pool_size=ksize[1:-1],
                     strides=strides[1:-1],
                     padding='valid',
                     pool_mode='max')

        z_p = K.maximum(z, 0.) + self.epsilon
        s_p = r / z_p
        c_p = gen_nn_ops.max_pool_grad_v2(a,
                                          z_p,
                                          s_p,
                                          ksize,
                                          strides,
                                          padding='VALID')

        z_n = K.minimum(z, 0.) - self.epsilon
        s_n = r / z_n
        c_n = gen_nn_ops.max_pool_grad_v2(a,
                                          z_n,
                                          s_n,
                                          ksize,
                                          strides,
                                          padding='VALID')

        return a * (self.alpha * c_p + self.beta * c_n)
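
The pattern above (pool the activations, divide the incoming relevance by the stabilized pool output, call max_pool_grad_v2 to route it back to the winning inputs, then multiply by the activations) recurs in every snippet on this page. A minimal, self-contained sketch of that step, assuming eager TensorFlow 2 and illustrative tensor shapes:

import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops

a = tf.random.uniform([1, 4, 4, 3])                    # activations entering the pool
z = tf.nn.max_pool2d(a, 2, 2, padding='VALID') + 1e-9  # stabilized pool output
r = tf.ones_like(z)                                    # relevance from the layer above
s = r / z
c = gen_nn_ops.max_pool_grad_v2(a, z, s,
                                ksize=[1, 2, 2, 1],
                                strides=[1, 2, 2, 1],
                                padding='VALID')       # routes s to the max locations
r_prev = a * c                                         # relevance of the pool's input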
Example No. 2
    def run_pool(self, R):
        """METHOD:RUN_POOL:
			---
			Arguments:
			---
			>- R {tensor} -- relevance tensor.
			Returns: 
			---
			>- The relevance of a pooling layer."""
        poolSize = (1, self.layer.pool_size[0], self.layer.pool_size[1], 1)
        strdSize = (1, self.layer.strides[0], self.layer.strides[1], 1)
        pooled = tf.nn.max_pool(self.act,
                                ksize=poolSize,
                                strides=strdSize,
                                padding=self.layer.padding.upper())
        Za = K.maximum(pooled, 0.) + K.epsilon()
        Zb = K.minimum(pooled, 0.) - K.epsilon()
        Sa = R / Za
        Sb = R / Zb
        Ca = gen_nn_ops.max_pool_grad_v2(self.act,
                                         Za,
                                         Sa,
                                         poolSize,
                                         strdSize,
                                         padding=self.layer.padding.upper())
        Cb = gen_nn_ops.max_pool_grad_v2(self.act,
                                         Zb,
                                         Sb,
                                         poolSize,
                                         strdSize,
                                         padding=self.layer.padding.upper())
        Rn = self.act * (self.alpha * Ca + self.beta * Cb)
        return K.clip(Rn, self.minValue, self.maxValue)
Example No. 3
    def relprop_pool(self, x, r, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME'):
        """Implements relevance propagation through pooling layers.

        Args:
            x: array of activations
            r: array of relevance scores
            ksize: pooling kernel dimensions used during the forward pass
            strides: step size of the pooling kernel used during the forward pass
            padding: 'SAME' or 'VALID' padding, as used during the forward pass

        Returns:
            array of relevance scores of the same dimensions as x

        """
        if self.pooling_type == "avg":
            z = tf.nn.avg_pool(x, ksize, strides, padding) + self.epsilon
            s = r / z
            c = gen_nn_ops.avg_pool_grad(tf.shape(x), s, ksize, strides, padding)
        elif self.pooling_type == "max":
            z = tf.nn.max_pool(x, ksize, strides, padding) + self.epsilon
            s = r / z
            c = gen_nn_ops.max_pool_grad_v2(x, z, s, ksize, strides, padding)
        else:
            raise Exception("Error: no such unpooling operation implemented.")
        return c * x
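
Note the asymmetry between the two branches: avg_pool_grad needs only the shape of the original input, because average pooling spreads relevance uniformly over each window, while max_pool_grad_v2 needs the actual input and output tensors so it can recover which element of each window was the maximum and route all relevance there.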
Example No. 4
    def backprop_max_pool(self, activation, relevance, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1]):
        z = tf.nn.max_pool(activation, ksize, strides, padding='SAME') + self.epsilon
        # z = nn_ops.avg_pool(activation, ksize, strides, padding='SAME') + self.epsilon
        s = relevance / z
        # c = gen_nn_ops._avg_pool_grad(tf.shape(activation), s, ksize, strides, padding='SAME')
        c = gen_nn_ops.max_pool_grad_v2(activation, z, s, ksize, strides, padding='SAME')
        return activation * c
Example No. 5
def relprop_pooling1(layer, R, inputs, outputs, model):
    placeholder = K.eval(
        gen_nn_ops.max_pool_grad_v2(inputs[layer],
                                    outputs[layer],
                                    R / (outputs[layer] + eps),
                                    (1, 2, 2, 1),
                                    (1, 2, 2, 1),
                                    padding='VALID'))
    R = placeholder * inputs[layer]
    return R
Example No. 6
def _MaxPoolGradV2(op, grad):
  ksize = op.inputs[1]
  strides = op.inputs[2]
  return gen_nn_ops.max_pool_grad_v2(op.inputs[0],
                                     op.outputs[0],
                                     grad,
                                     ksize,
                                     strides,
                                     padding=op.get_attr("padding"),
                                     data_format=op.get_attr("data_format")), None, None
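
In TensorFlow itself this function is the registered gradient of the MaxPoolV2 op, whose inputs are (orig_input, ksize, strides); the two trailing None values are the gradients for the ksize and strides inputs, which are not differentiable. A sketch of the wiring, mirroring tensorflow/python/ops/nn_grad.py (re-registering it in user code would collide with the built-in registration):

from tensorflow.python.framework import ops

@ops.RegisterGradient("MaxPoolV2")
def _MaxPoolGradV2(op, grad):
    ...  # body as in the snippet above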
Example No. 7
    def relprop_pool(self, a, r, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME', operation='avg'):
        if operation == 'avg':
            z = tf.nn.avg_pool(a, ksize, strides, padding) + self.epsilon
            s = r / z
            c = gen_nn_ops.avg_pool_grad(tf.shape(a), s, ksize, strides, padding)
        elif operation == 'max':
            z = tf.nn.max_pool(a, ksize, strides, padding) + self.epsilon
            s = r / z
            c = gen_nn_ops.max_pool_grad_v2(a, z, s, ksize, strides, padding)
        else:
            raise ValueError('Unsupported pooling operation: %s' % operation)
        return c * a
Example No. 8
def backprop_pooling(activation, relevance):
    z = MaxPool2D(pool_size=(2, 2))(activation)

    s = relevance / (z + 1e-10)
    c = gen_nn_ops.max_pool_grad_v2(orig_input=activation,
                                    orig_output=z,
                                    grad=s,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='VALID')

    return activation * c
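
Since backprop_pooling is a free function over plain tensors, it can be exercised directly. A minimal usage sketch, assuming eager TensorFlow 2, the imports below, and illustrative shapes:

import tensorflow as tf
from tensorflow.keras.layers import MaxPool2D
from tensorflow.python.ops import gen_nn_ops

act = tf.random.uniform([1, 8, 8, 16])   # activations entering the pool
rel = tf.ones([1, 4, 4, 16])             # relevance arriving from the layer above
r_in = backprop_pooling(act, rel)        # relevance redistributed to `act`
print(r_in.shape)                        # (1, 8, 8, 16)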
Example No. 9
    def backprop_inception_max_pool(self, activation, name, relevance, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1]):
        activation1 = self.graph.get_tensor_by_name(name + '_pool:0')
        activation2 = activation
        weights = self.graph.get_tensor_by_name(name + '_pool_reduce_w:0')

        new_relevance = self.backprop_conv(activation1, weights, relevance)

        z = tf.nn.max_pool(activation2, ksize, strides, padding='SAME') + self.epsilon
        #z = nn_ops.avg_pool(activation2, ksize, strides, padding='SAME') + self.epsilon
        s = new_relevance / z
        c = gen_nn_ops.max_pool_grad_v2(activation2, z, s, ksize, strides, padding='SAME')
        #c = gen_nn_ops._avg_pool_grad(tf.shape(activation2), s, ksize, strides, padding='SAME')
        return c * activation2
Example No. 10
        def custom_grad(op, grad):
            if self.data_format == 'NHWC':
                ksizes = [1, self.pool_size[0], self.pool_size[1], 1]
                strides = [1, self.strides[0], self.strides[1], 1]
            else:
                ksizes = [1, 1, self.pool_size[0], self.pool_size[1]]
                strides = [1, 1, self.strides[0], self.strides[1]]

            #return gen_nn_ops.max_pool_grad(
            return gen_nn_ops.max_pool_grad_v2(
                op.inputs[0],
                op.outputs[0],
                grad,
                ksizes,
                strides,
                self.padding,
                data_format=self.data_format), K.tf.constant(0.0)
Example No. 11
  def testMaxPoolGradV2(self):
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
      strides = array_ops.placeholder(dtype='int32', shape=[4])
      max_pool_grad = gen_nn_ops.max_pool_grad_v2(conv, conv, conv, ksize,
                                                  strides, 'VALID')
      output = array_ops.identity(max_pool_grad)

      strides_val = [1, 3, 2, 1]
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={strides: strides_val})

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                strides: strides_val
            })

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)

      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-MaxPoolGradV2-0-0',
                    nodes)
      self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_MaxPoolGradV2_4',
                    nodes)
      self.assertIn('LayoutOptimizer-MaxPoolGradV2-Const_2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Example No. 12
    def run_pool(self, R):
        """METHOD:RUN_POOL:
			---
			Arguments:
			---
			>- R {tensor} -- relevance tensor.
			Returns: 
			---
			>- The relevance of a pooling layer."""
        poolSize = (1, self.layer.pool_size[0], self.layer.pool_size[1], 1)
        strdSize = (1, self.layer.strides[0], self.layer.strides[1], 1)
        pooled = tf.nn.max_pool(self.act,
                                ksize=poolSize,
                                strides=strdSize,
                                padding=self.layer.padding.upper())
        Z = K.maximum(pooled, 0.) + K.epsilon()
        S = R / Z
        C = gen_nn_ops.max_pool_grad_v2(self.act,
                                        Z,
                                        S,
                                        poolSize,
                                        strdSize,
                                        padding=self.layer.padding.upper())
        return K.clip(self.act * C, self.minValue, self.maxValue)
Example No. 13
    def backprop_max_pool2d(self, activation, relevance, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1]):
        z = tf.nn.max_pool(activation, ksize, strides, padding='SAME') + self.epsilon
        s = relevance / z
        c = gen_nn_ops.max_pool_grad_v2(activation, z, s, ksize, strides, padding='SAME')
        return c * activation
Example No. 14
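This snippet is a fragment of a longer LRP backward pass: relevance is propagated through a fully connected layer with the z+ rule (positive weights only), reshaped back into feature-map form, pushed through a max-pooling layer via max_pool_grad_v2, and finally through a convolution via conv2d_backprop_input.
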
    R[1] = activations[1] * c

    # Fully connected layer 'fc1': z+ rule (positive weights only).
    w, b = act_weights['fc1']
    w_pos = tf.maximum(0.0, w)
    z = tf.nn.bias_add(tf.matmul(activations[2], w_pos), b) + 1e-10
    s = R[1] / z
    c = tf.matmul(s, tf.transpose(w_pos))
    R[2] = activations[2] * c

    # Undo the flatten so relevance matches the conv feature maps again.
    R[3] = tf.reshape(R[2], [-1, 14, 14, 64])

    # Max-pooling layer: route relevance back to the max locations.
    z = tf.nn.max_pool(activations[4], [1, 2, 2, 1], [1, 2, 2, 1],
                       padding='SAME') + 1e-10
    s = R[3] / z
    c = gen_nn_ops.max_pool_grad_v2(activations[4],
                                    z,
                                    s, [1, 2, 2, 1], [1, 2, 2, 1],
                                    padding='SAME')
    R[4] = activations[4] * c

    # Convolution 'conv2': z+ rule, redistributed via conv2d_backprop_input.
    w, b = act_weights['conv2']
    w_pos = tf.maximum(0.0, w)
    z = tf.nn.bias_add(
        tf.nn.conv2d(activations[5], w_pos, [1, 1, 1, 1], padding='SAME'),
        b) + 1e-10
    s = R[4] / z
    c = tf.nn.conv2d_backprop_input(tf.shape(activations[5]),
                                    w_pos,
                                    s, [1, 1, 1, 1],
                                    padding='SAME')
    R[5] = activations[5] * c