Code example #1
File: rules.py Project: paudom/iNNterpret
    def run_conv(self, R, ignoreBias=True):
        """METHOD::RUN_CONV:
			---
			Arguments:
			---
			>- R {tensor} -- relevance tensor.
			>- ignoreBias {bool} -- flag to add the biases or ignore them (default: {False})
			Returns:
			---
			>The relevance of a dense layer."""
        strdSize = (1, self.layer.strides[0], self.layer.strides[1], 1)
        weights = self.layer.get_weights()
        # Split weights and biases into positive and negative parts for the
        # alpha-beta relevance rule.
        self.maxW = K.maximum(weights[0], 0.)
        self.maxB = K.maximum(weights[1], 0.)
        self.minW = K.minimum(weights[0], 0.)
        self.minB = K.minimum(weights[1], 0.)
        Za = tf.nn.conv2d(self.act,
                          self.maxW,
                          strides=strdSize,
                          padding=self.layer.padding.upper()) + K.epsilon()
        Zb = tf.nn.conv2d(self.act,
                          self.minW,
                          strides=strdSize,
                          padding=self.layer.padding.upper()) - K.epsilon()
        if not ignoreBias:
            Za += self.maxB
            Zb += self.minB
        Sa = R / Za
        Sb = R / Zb
        Ca = nn_ops.conv2d_backprop_input(K.shape(self.act), self.maxW, Sa,
                                          strdSize, self.layer.padding.upper())
        Cb = nn_ops.conv2d_backprop_input(K.shape(self.act), self.minW, Sb,
                                          strdSize, self.layer.padding.upper())
        Rn = self.act * (self.alpha * Ca + self.beta * Cb)
        return K.clip(Rn, self.minValue, self.maxValue)
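The rule above, like every snippet on this page, relies on the fact that nn_ops.conv2d_backprop_input is exactly the gradient of a forward conv2d with respect to its input, applied to a chosen "upstream" tensor. A minimal, self-contained sketch of that identity, assuming TensorFlow 1.x graph mode as used throughout these examples (shapes and names are illustrative):

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import nn_ops

x = tf.placeholder(tf.float32, [1, 8, 8, 3])   # NHWC input
w = tf.placeholder(tf.float32, [3, 3, 3, 5])   # HWIO filter
y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
g = tf.ones_like(y)                            # upstream gradient / relevance-like tensor

# Explicit backprop-to-input op ...
dx_op = nn_ops.conv2d_backprop_input(tf.shape(x), w, g,
                                     strides=[1, 1, 1, 1], padding='SAME')
# ... should agree with what tf.gradients builds for the same graph.
dx_ref = tf.gradients(y, x, grad_ys=g)[0]

with tf.Session() as sess:
    feed = {x: np.random.rand(1, 8, 8, 3).astype(np.float32),
            w: np.random.rand(3, 3, 3, 5).astype(np.float32)}
    a, b = sess.run([dx_op, dx_ref], feed)
    print(np.allclose(a, b, atol=1e-4))  # expected: True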
Code example #2
    def backprop_conv_input(self,
                            activation,
                            weights,
                            relevance,
                            strides,
                            padding,
                            lowest=0.,
                            highest=1.):
        # The passed-in bounds are replaced by the actual min/max of the
        # activation; the weights are then split into positive and negative parts.
        lowest = tf.reduce_min(activation)
        highest = tf.reduce_max(activation)
        W_p = tf.maximum(0., weights)
        W_n = tf.minimum(0., weights)

        L = tf.ones_like(activation, tf.float32) * lowest
        H = tf.ones_like(activation, tf.float32) * highest

        z_o = nn_ops.conv2d(activation, weights, strides, padding)
        z_p = nn_ops.conv2d(L, W_p, strides, padding)
        z_n = nn_ops.conv2d(H, W_n, strides, padding)

        z = z_o - z_p - z_n + 1e-10
        s = relevance / z

        c_o = nn_ops.conv2d_backprop_input(tf.shape(activation), weights, s,
                                           strides, padding)
        c_p = nn_ops.conv2d_backprop_input(tf.shape(activation), W_p, s,
                                           strides, padding)
        c_n = nn_ops.conv2d_backprop_input(tf.shape(activation), W_n, s,
                                           strides, padding)

        return activation * c_o - L * c_p - H * c_n
Code example #3
    def backprop_conv_input(self,
                            X,
                            kernel,
                            relevance,
                            strides,
                            padding='SAME',
                            lowest=0.,
                            highest=1.):
        W_p = tf.maximum(0., kernel)
        W_n = tf.minimum(0., kernel)

        L = tf.ones_like(X, tf.float32) * lowest
        H = tf.ones_like(X, tf.float32) * highest

        z_o = nn_ops.conv2d(X, kernel, strides, padding)
        z_p = nn_ops.conv2d(L, W_p, strides, padding)
        z_n = nn_ops.conv2d(H, W_n, strides, padding)

        z = z_o - z_p - z_n + 1e-10
        s = relevance / z

        c_o = nn_ops.conv2d_backprop_input(tf.shape(X), kernel, s, strides,
                                           padding)
        c_p = nn_ops.conv2d_backprop_input(tf.shape(X), W_p, s, strides,
                                           padding)
        c_n = nn_ops.conv2d_backprop_input(tf.shape(X), W_n, s, strides,
                                           padding)

        return X * c_o - L * c_p - H * c_n
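Examples #2 and #3 implement the same bounded input-layer rule (the z^B rule): the input is clamped between a lower bound L and an upper bound H, the kernel is split into its positive part W_p and negative part W_n, and relevance is redistributed as follows (my reading of the code, with indices running over the flattened convolution; the 1e-10 only stabilizes the denominator):

R_i = \sum_j \frac{x_i w_{ij} - l_i w_{ij}^{+} - h_i w_{ij}^{-}}{\sum_{i'} \left( x_{i'} w_{i'j} - l_{i'} w_{i'j}^{+} - h_{i'} w_{i'j}^{-} \right)} \, R_j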
Code example #4
    def _alphabeta_deep_lrp(self, R, alpha):

        # Note: alpha and beta are hard-coded here, overriding the alpha argument.
        alpha = 2
        beta = 1
        self.R = R
        if len(self.R.shape) == 2:
            self.R = tf.expand_dims(tf.expand_dims(self.R, 1), 1)
        if len(self.weights.shape) == 2:
            self.weights = tf.expand_dims(tf.expand_dims(self.weights, 0), 0)
        if len(self.input_tensor.shape) == 2:
            self.input_tensor = tf.expand_dims(
                tf.expand_dims(self.input_tensor, 1), 1)
        if self.weights.shape[2] == 25088:
            self.weights = tf.reshape(self.weights, [7, 7, 512, 4096])
            self.input_tensor = tf.reshape(self.input_tensor, [10, 7, 7, 512])

        pweight = tf.maximum(1e-9, self.weights)
        nweight = tf.minimum(-1e-9, self.weights)

        if self.first_layer == True:
            X = self.input_tensor
            L = self.input_tensor * 0 + tf.reduce_min(
                self.input_tensor, [1, 2, 3], keep_dims=True)
            H = self.input_tensor * 0 + tf.reduce_max(
                self.input_tensor, [1, 2, 3], keep_dims=True)
            Z = tf.nn.conv2d(X, self.weights, strides=self.strides, padding=self.pad)\
                - tf.nn.conv2d(L, pweight, strides=self.strides, padding=self.pad)\
                -tf.nn.conv2d(H, nweight, strides=self.strides, padding=self.pad)+1e-9
            S = self.R / Z
            result = X*(nn_ops.conv2d_backprop_input(tf.shape(self.input_tensor), self.weights, S, strides=self.strides, padding=self.pad))\
                     -L*(nn_ops.conv2d_backprop_input(tf.shape(self.input_tensor), pweight, S, strides=self.strides,padding=self.pad))-\
                     H*(nn_ops.conv2d_backprop_input(tf.shape(self.input_tensor), nweight, S, strides=self.strides, padding=self.pad))

        else:
            X = self.input_tensor + 1e-9
            Za = tf.nn.conv2d(X,
                              pweight,
                              strides=self.strides,
                              padding=self.pad)
            Sa = alpha * self.R / Za
            Zb = tf.nn.conv2d(X,
                              nweight,
                              strides=self.strides,
                              padding=self.pad)
            Sb = -beta * self.R / Zb
            result = X * (
                nn_ops.conv2d_backprop_input(tf.shape(self.input_tensor),
                                             pweight,
                                             Sa,
                                             strides=self.strides,
                                             padding=self.pad) +
                nn_ops.conv2d_backprop_input(tf.shape(self.input_tensor),
                                             nweight,
                                             Sb,
                                             strides=self.strides,
                                             padding=self.pad))
        return result
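For the non-input layers, the else branch above is the standard alpha-beta decomposition over positive and negative pre-activations; with the hard-coded alpha = 2, beta = 1 it reads (my transcription of the code):

R_i = x_i \sum_j \left( \alpha \, \frac{w_{ij}^{+}}{\sum_{i'} x_{i'} w_{i'j}^{+}} - \beta \, \frac{w_{ij}^{-}}{\sum_{i'} x_{i'} w_{i'j}^{-}} \right) R_j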
Code example #5
def tf_model(padding):
    t1 = tf.constant(input_sizes_nhwc, dtype=tf.int32, name='t1')
    t2 = tf.placeholder(dtype=tf.float32, shape=filter_size_hwio, name='t2')
    t3 = tf.placeholder(dtype=tf.float32,
                        shape=out_backprop_in_sizes[padding],
                        name='t3')
    #reshaping the out_backprop to NHWC since TF does not support NCHW
    t3 = tf.transpose(t3, [0, 2, 3, 1])

    #Cast dtype to bfloat16 for TF because NNP casts ng_model inputs
    t2 = tf.cast(t2, dtype=tf.bfloat16)
    t3 = tf.cast(t3, dtype=tf.bfloat16)

    inp = nn_ops.conv2d_backprop_input(t1,
                                       t2,
                                       t3,
                                       stride_nhwc,
                                       padding=padding,
                                       data_format='NHWC')

    #Reshaping back to NCHW to compare outputs
    inp = tf.transpose(inp, [0, 3, 1, 2])
    #Cast dtype back to float32 similar to NNP
    inp = tf.cast(inp, dtype=tf.float32)
    return inp, t2, t3
Code example #6
 def _Conv2DGrad(op, grad):
     """Weight sharing for symmetric lateral connections."""
     strides = op.get_attr('strides')
     padding = op.get_attr('padding')
     use_cudnn_on_gpu = op.get_attr('use_cudnn_on_gpu')
     data_format = op.get_attr('data_format')
     shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])
     dx = nn_ops.conv2d_backprop_input(shape_0,
                                       op.inputs[1],
                                       grad,
                                       strides=strides,
                                       padding=padding,
                                       use_cudnn_on_gpu=use_cudnn_on_gpu,
                                       data_format=data_format)
     dw = nn_ops.conv2d_backprop_filter(op.inputs[0],
                                        shape_1,
                                        grad,
                                        strides=strides,
                                        padding=padding,
                                        use_cudnn_on_gpu=use_cudnn_on_gpu,
                                        data_format=data_format)
     dw_t = tf.transpose(dw, (2, 3, 0, 1))
     dw_symm_t = (0.5) * (dw_t + tf.transpose(dw_t, (1, 0, 2, 3)))
     dw_symm = tf.transpose(dw_symm_t, (2, 3, 0, 1))
     return dx, dw_symm
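Replacement gradients like this one (and the variants in examples #11 and #14 below) are normally wired into a TF1 graph with RegisterGradient plus gradient_override_map. A sketch under that assumption; the registration name "SymmConv2D" is illustrative and _Conv2DGrad refers to the function above:

import tensorflow as tf
from tensorflow.python.framework import ops

@ops.RegisterGradient("SymmConv2D")
def _symm_conv2d_grad(op, grad):
    # Delegate to the symmetric gradient defined above (assumed to be in scope).
    return _Conv2DGrad(op, grad)

x = tf.placeholder(tf.float32, [1, 8, 8, 3])
w = tf.Variable(tf.random_normal([3, 3, 3, 5]))
graph = tf.get_default_graph()
# Conv2D ops created inside this scope are differentiated via "SymmConv2D".
with graph.gradient_override_map({"Conv2D": "SymmConv2D"}):
    y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
loss = tf.reduce_sum(y)
dx, dw = tf.gradients(loss, [x, w])  # routed through _Conv2DGrad / dw_symm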
Code example #7
File: nn_grad.py Project: sgcm520/tensorflow2
def _Conv2DGrad(op, grad):
    dilations = op.get_attr("dilations")
    strides = op.get_attr("strides")
    padding = op.get_attr("padding")
    use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
    data_format = op.get_attr("data_format")
    shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])
    return [
        nn_ops.conv2d_backprop_input(shape_0,
                                     op.inputs[1],
                                     grad,
                                     dilations=dilations,
                                     strides=strides,
                                     padding=padding,
                                     use_cudnn_on_gpu=use_cudnn_on_gpu,
                                     data_format=data_format),
        nn_ops.conv2d_backprop_filter(op.inputs[0],
                                      shape_1,
                                      grad,
                                      dilations=dilations,
                                      strides=strides,
                                      padding=padding,
                                      use_cudnn_on_gpu=use_cudnn_on_gpu,
                                      data_format=data_format)
    ]
Code example #8
    def testConvBackpropInput(self):
        with self.session() as sess:
            with ops.device("/device:IPU:0"):
                ins = constant_op.constant([2, 8, 8, 3], np.int32)
                fil = array_ops.placeholder(np.float32, [2, 2, 3, 5],
                                            name="inp")
                bck = array_ops.placeholder(np.float32, [2, 8, 8, 5],
                                            name="wei")

                output = nn_ops.conv2d_backprop_input(ins,
                                                      fil,
                                                      bck,
                                                      strides=[1, 1, 1, 1],
                                                      padding="SAME")

            report = tu.ReportJSON(self, sess)
            report.reset()

            fd = {
                fil: np.zeros([2, 2, 3, 5]),
                bck: np.zeros([2, 8, 8, 5]),
            }
            result = sess.run(output, fd)
            self.assertAllClose(result, np.zeros([2, 8, 8, 3]))

            report.parse_log()

            ok = [
                '__seed*', 'Copy_', 'Conv2DBackpropInput/fusion*/Conv_2x2',
                'Conv2DBackpropInput/fusion*/*Transpose'
            ]
            report.assert_all_compute_sets_and_list(ok)
Code example #9
def _MpuSimConv2DGrad(op, grad):
  """Gradient function for MpuSimConv2D."""
  dilations = op.get_attr("dilations")
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  data_format = op.get_attr("data_format")
  shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])

  # We call the gen_nn_ops backprop functions instead of nn_ops backprop
  # functions for performance reasons in Eager mode. gen_nn_ops functions take
  # an `explicit_paddings` parameter, but nn_ops functions do not. So if we
  # were to use the nn_ops functions, we would have to convert `padding` and
  # `explicit_paddings` into a single `padding` parameter, increasing overhead
  # in Eager mode.
  return [
      nn_ops.conv2d_backprop_input(
          shape_0,
          op.inputs[1],
          grad,
          dilations=dilations,
          strides=strides,
          padding=padding,
          use_cudnn_on_gpu=False,
          data_format=data_format),
      nn_ops.conv2d_backprop_filter(
          op.inputs[0],
          shape_1,
          grad,
          dilations=dilations,
          strides=strides,
          padding=padding,
          use_cudnn_on_gpu=False,
          data_format=data_format)
  ]
Code example #10
  def testConvBackpropInput(self):
    with ops.device("/device:IPU:0"):
      ins = constant_op.constant([2, 8, 8, 3], np.int32)
      fil = array_ops.placeholder(np.float32, [2, 2, 3, 5], name="inp")
      bck = array_ops.placeholder(np.float32, [2, 8, 8, 5], name="wei")

      output = nn_ops.conv2d_backprop_input(
          ins, fil, bck, strides=[1, 1, 1, 1], padding="SAME")

    with ops.device('cpu'):
      report = gen_ipu_ops.ipu_event_trace()

    tu.configure_ipu_system()

    with tu.ipu_session() as sess:
      sess.run(report)

      fd = {
          fil: np.zeros([2, 2, 3, 5]),
          bck: np.zeros([2, 8, 8, 5]),
      }
      result = sess.run(output, fd)
      self.assertAllClose(result, np.zeros([2, 8, 8, 3]))

      result = sess.run(report)

      s = tu.extract_all_strings_from_event_trace(result)
      cs_list = tu.get_compute_sets_from_report(s)

      ok = [
          '__seed*', 'Copy_', 'Conv2DBackpropInput/fusion*/Conv_2x2',
          'Conv2DBackpropInput/fusion*/WeightTranspose'
      ]
      self.assertTrue(tu.check_all_compute_sets_and_list(cs_list, ok))
Code example #11
    def _PerturbConv2DGrad(op, grad):
        """Set grads for the middle-most column to 0."""
        strides = op.get_attr('strides')
        padding = op.get_attr('padding')
        use_cudnn_on_gpu = op.get_attr('use_cudnn_on_gpu')
        data_format = op.get_attr('data_format')
        shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])
        dx = nn_ops.conv2d_backprop_input(shape_0,
                                          op.inputs[1],
                                          grad,
                                          strides=strides,
                                          padding=padding,
                                          use_cudnn_on_gpu=use_cudnn_on_gpu,
                                          data_format=data_format)
        dw = nn_ops.conv2d_backprop_filter(op.inputs[0],
                                           shape_1,
                                           grad,
                                           strides=strides,
                                           padding=padding,
                                           use_cudnn_on_gpu=use_cudnn_on_gpu,
                                           data_format=data_format)

        # # Find middle unit
        # h, w = shape_1[1] // 2, shape_1[2] // 2
        # # Set middle unit gradient to 0
        # dx[:, h, w] = 0.

        # Find middle unit of hidden state (these are weights)
        h, w = shape_0[1] // 2, shape_0[2] // 2
        # Set middle unit gradient to 0
        # (note: TF tensors are immutable, so running this assignment would
        # require expressing the zeroing with a mask or scatter-style update)
        dw[:, h, w] = 0.
        return dx, dw
Code example #12
    def _simple_lrp(self, R):
        self.R = R
        if len(self.R.shape) == 2:
            self.R = tf.expand_dims(tf.expand_dims(self.R, 1), 1)
        if len(self.weights.shape) == 2:
            self.weights = tf.expand_dims(tf.expand_dims(self.weights, 0), 0)
        if len(self.input_tensor.shape) == 2:
            self.input_tensor = tf.expand_dims(
                tf.expand_dims(self.input_tensor, 1), 1)
        if self.weights.shape[2] == 25088:
            self.weights = tf.reshape(self.weights, [7, 7, 512, 4096])
            self.input_tensor = tf.reshape(self.input_tensor, [10, 7, 7, 512])
        # self.R = R

        tmp_weight = tf.maximum(0.0, self.weights)
        X = self.input_tensor
        Z = tf.nn.conv2d(X, tmp_weight, strides=self.strides,
                         padding=self.pad) + 1e-9
        S = self.R / Z
        result = X * (nn_ops.conv2d_backprop_input(tf.shape(self.input_tensor),
                                                   tmp_weight,
                                                   S,
                                                   strides=self.strides,
                                                   padding=self.pad))
        return result
Code example #13
File: nn_grad.py Project: Huoxubeiyin/tensorflow
def _Conv2DGrad(op, grad):
  dilations = op.get_attr("dilations")
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
  data_format = op.get_attr("data_format")
  shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])
  return [
      nn_ops.conv2d_backprop_input(
          shape_0,
          op.inputs[1],
          grad,
          dilations=dilations,
          strides=strides,
          padding=padding,
          use_cudnn_on_gpu=use_cudnn_on_gpu,
          data_format=data_format),
      nn_ops.conv2d_backprop_filter(
          op.inputs[0],
          shape_1,
          grad,
          dilations=dilations,
          strides=strides,
          padding=padding,
          use_cudnn_on_gpu=use_cudnn_on_gpu,
          data_format=data_format)
  ]
Code example #14
def _Conv2DGrad(op, grad):
    strides = op.get_attr("strides")
    padding = op.get_attr("padding")
    use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
    data_format = op.get_attr("data_format")
    shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])
    dx = nn_ops.conv2d_backprop_input(
           shape_0,
           op.inputs[1],
           grad,
           strides=strides,
           padding=padding,
           use_cudnn_on_gpu=use_cudnn_on_gpu,
           data_format=data_format)
    dw = nn_ops.conv2d_backprop_filter(
           op.inputs[0],
           shape_1,
           grad,
           strides=strides,
           padding=padding,
           use_cudnn_on_gpu=use_cudnn_on_gpu,
           data_format=data_format)
    dw_t = tf.transpose(dw, (2, 3, 0, 1))
    dw_symm_t = (0.5) * (dw_t + tf.transpose(dw_t, (1, 0, 2, 3)))
    dw_symm = tf.transpose(dw_symm_t, (2, 3, 0, 1))
    return dx, dw_symm
Code example #15
def ng_model(padding):
    t1 = tf.constant(input_sizes_nchw, dtype=tf.int32, name='t1')
    t2 = tf.placeholder(dtype=tf.float32, shape=filter_size_hwio, name='t2')
    t3 = tf.placeholder(
        dtype=tf.float32, shape=out_backprop_in_sizes[padding], name='t3')

    inp = nn_ops.conv2d_backprop_input(
        t1, t2, t3, stride_nchw, padding=padding, data_format='NCHW')
    return inp, t2, t3
Code example #16
File: nn_grad.py Project: swapnilashtekar/tensorflow
def _Conv2DGrad(op, grad):
    return [
        nn_ops.conv2d_backprop_input(
            array_ops.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr("strides"), op.get_attr("padding")
        ),
        nn_ops.conv2d_backprop_filter(
            op.inputs[0], array_ops.shape(op.inputs[1]), grad, op.get_attr("strides"), op.get_attr("padding")
        ),
    ]
Code example #17
File: lrp.py Project: jdlamstein/GEDI3
def fprop_conv_first(F, W, X, lowest, highest, strides=None, padding='SAME'):
    strides = [1, 1, 1, 1] if strides is None else strides

    Wn = tf.minimum(0.0, W)
    Wp = tf.maximum(0.0, W)

    X, L, H = X, X * 0 + lowest, X * 0 + highest

    # z^B rule: the lower bound L pairs with the positive weights and the upper
    # bound H with the negative weights, matching the X * g - L * gp - H * gn
    # combination below.
    c = tf.nn.conv2d(X, W, strides, padding)
    cp = tf.nn.conv2d(L, Wp, strides, padding)
    cn = tf.nn.conv2d(H, Wn, strides, padding)
    Z = c - cp - cn + 1e-9
    S = F / Z

    g = nn_ops.conv2d_backprop_input(tf.shape(X), W, S, strides, padding)
    gp = nn_ops.conv2d_backprop_input(tf.shape(X), Wp, S, strides, padding)
    gn = nn_ops.conv2d_backprop_input(tf.shape(X), Wn, S, strides, padding)
    F = X * g - L * gp - H * gn
    return F
Code example #18
 def testBackwardInputGradient(self):
   np.random.seed(2)
   in_shape = LayerShape(batch=8, height=32, width=32, channels=8)
   filter_shape = FilterShape(
       height=7, width=7, in_channels=8, out_channels=128)
   filter_op = self._random_data_op(filter_shape)
   out_op = self._random_out_op(in_shape, filter_shape)
   input_gradient_op = nn_ops.conv2d_backprop_input(
       in_shape, filter_op, out_op, strides=_STRIDES, padding=_PADDING)
   self._assert_reproducible(input_gradient_op)
Code example #19
    def _simple_deep_lrp(self, R):

        self.R = R
        if len(self.R.shape) == 2:
            self.R = tf.expand_dims(tf.expand_dims(self.R, 1), 1)
        if len(self.weights.shape) == 2:
            self.weights = tf.expand_dims(tf.expand_dims(self.weights, 0), 0)
        if len(self.input_tensor.shape) == 2:
            self.input_tensor = tf.expand_dims(
                tf.expand_dims(self.input_tensor, 1), 1)
        if self.weights.shape[2] == 25088:
            self.weights = tf.reshape(self.weights, [7, 7, 512, 4096])
            self.input_tensor = tf.reshape(self.input_tensor, [10, 7, 7, 512])
        if self.first_layer == True:
            pweight = tf.maximum(1e-9, self.weights)
            nweight = tf.minimum(-1e-9, self.weights)
            X = self.input_tensor
            # Lower/upper input bounds: L takes the minimum and H the maximum,
            # consistent with _alphabeta_deep_lrp above.
            L = self.input_tensor * 0 + tf.reduce_min(
                self.input_tensor, [1, 2, 3], keep_dims=True)
            H = self.input_tensor * 0 + tf.reduce_max(
                self.input_tensor, [1, 2, 3], keep_dims=True)
            Z = tf.nn.conv2d(X, self.weights, strides=self.strides, padding=self.pad)\
                - tf.nn.conv2d(L, pweight, strides=self.strides, padding=self.pad)\
                -tf.nn.conv2d(H, nweight, strides=self.strides, padding=self.pad)+1e-9
            S = self.R / Z
            result = X*(nn_ops.conv2d_backprop_input(tf.shape(self.input_tensor), self.weights, S, strides=self.strides, padding=self.pad))\
                     -L*(nn_ops.conv2d_backprop_input(tf.shape(self.input_tensor), pweight, S, strides=self.strides,padding=self.pad))-\
                     H*(nn_ops.conv2d_backprop_input(tf.shape(self.input_tensor), nweight, S, strides=self.strides, padding=self.pad))

        else:
            tmp_weight = tf.maximum(0.0, self.weights)
            X = self.input_tensor
            Z = tf.nn.conv2d(
                X, tmp_weight, strides=self.strides, padding=self.pad) + 1e-9
            S = self.R / Z
            result = X * (nn_ops.conv2d_backprop_input(tf.shape(
                self.input_tensor),
                                                       tmp_weight,
                                                       S,
                                                       strides=self.strides,
                                                       padding=self.pad))
        return result
Code example #20
 def testBackwardInputGradient(self):
   in_shape = LayerShapeNHWC(batch=8, height=32, width=32, channels=8)
   filter_shape = FilterShape2D(
       height=7, width=7, in_channels=8, out_channels=128)
   filter_op = self._random_data_op(filter_shape)
   strides = [1, 1, 1, 1]
   padding = 'SAME'
   out_op = self._random_out_op(in_shape, filter_shape, strides, padding)
   input_gradient_op = nn_ops.conv2d_backprop_input(
       in_shape, filter_op, out_op, strides=strides, padding=padding)
   self._assert_reproducible(input_gradient_op)
Code example #21
def _Conv2DGrad(op, grad):
    return [
        nn_ops.conv2d_backprop_input(array_ops.shape(op.inputs[0]),
                                     op.inputs[1], grad,
                                     op.get_attr("strides"),
                                     op.get_attr("padding")),
        nn_ops.conv2d_backprop_filter(op.inputs[0],
                                      array_ops.shape(op.inputs[1]), grad,
                                      op.get_attr("strides"),
                                      op.get_attr("padding"))
    ]
Code example #22
def _Conv2DBackpropFilterGrad(op, grad):
    return [
        nn_ops.conv2d_backprop_input(array_ops.shape(op.inputs[0]), grad,
                                     op.inputs[2], op.get_attr("strides"),
                                     op.get_attr("padding"),
                                     op.get_attr("use_cudnn_on_gpu"),
                                     op.get_attr("data_format")), None,
        nn_ops.conv2d(op.inputs[0], grad, op.get_attr("strides"),
                      op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
                      op.get_attr("data_format"))
    ]
Code example #23
 def run_test_ngraph(sess):
     t1 = constant_op.constant(self.INPUT_SIZES_NCHW)
     t2 = constant_op.constant(x2, shape=self.FILTER_IN_SIZES)
     t3 = constant_op.constant(x1, shape=out_backprop_in_sizes)
     inp = nn_ops.conv2d_backprop_input(t1,
                                        t2,
                                        t3,
                                        strides=[1, 1, 2, 2],
                                        padding=padding,
                                        data_format='NCHW')
     return sess.run(inp)
Code example #24
 def backprop_conv(self,
                   activation,
                   kernel,
                   relevance,
                   strides,
                   padding='SAME'):
     W_p = tf.maximum(0., kernel)
     z = nn_ops.conv2d(activation, W_p, strides, padding) + 1e-10
     s = relevance / z
     c = nn_ops.conv2d_backprop_input(tf.shape(activation), W_p, s, strides,
                                      padding)
     return activation * c
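Example #24 keeps only the positive weights, i.e. the plain z+ rule; written out (epsilon is the 1e-10 stabilizer in the code):

R_i = \sum_j \frac{x_i w_{ij}^{+}}{\sum_{i'} x_{i'} w_{i'j}^{+} + \epsilon} \, R_j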
Code example #25
def conv_backprop_input_tf(input_sizes,
                           filters,
                           out_backprop,
                           stride,
                           padding='VALID'):
    strd = [1, stride, stride, 1]
    d_A_backprop_input = nn_ops.conv2d_backprop_input(
        input_sizes=input_sizes,
        filter=filters,
        out_backprop=out_backprop,
        strides=strd,
        padding=padding)
    return np.array(d_A_backprop_input)
Code example #26
 def run_test_tf(sess):
     t1 = constant_op.constant(self.INPUT_SIZES_NHWC)
     t2 = constant_op.constant(x2, shape=self.FILTER_IN_SIZES)
     t3 = constant_op.constant(x1, shape=out_backprop_in_sizes)
     t3 = tf.transpose(t3, [0, 2, 3, 1])
     inp = nn_ops.conv2d_backprop_input(t1,
                                        t2,
                                        t3,
                                        strides=[1, 2, 2, 1],
                                        padding=padding,
                                        data_format='NHWC')
     inp = tf.transpose(inp, [0, 3, 1, 2])
     return sess.run(inp)
Code example #27
def backprop_conv(alpha,
                  activation,
                  kernel,
                  bias,
                  relevance,
                  strides,
                  padding='VALID'):
    W_p = tf.maximum(0., kernel)
    b_p = tf.maximum(0., bias)
    z_p = nn_ops.conv2d(activation, W_p, strides, padding) + b_p
    s_p = relevance / z_p
    c_p = nn_ops.conv2d_backprop_input(tf.shape(activation), W_p, s_p, strides,
                                       padding)

    W_n = tf.minimum(0., kernel)
    b_n = tf.minimum(0., bias)
    z_n = nn_ops.conv2d(activation, W_n, strides, padding) + b_n
    s_n = relevance / z_n
    c_n = nn_ops.conv2d_backprop_input(tf.shape(activation), W_n, s_n, strides,
                                       padding)

    return activation * (alpha * c_p + (1 - alpha) * c_n)
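Example #27 is the alpha-beta rule with the sign-split biases folded into the denominators; since beta = alpha - 1, the negative branch carries the coefficient 1 - alpha (my transcription of the code):

R_i = x_i \sum_j \left( \alpha \, \frac{w_{ij}^{+}}{\sum_{i'} x_{i'} w_{i'j}^{+} + b_j^{+}} + (1 - \alpha) \, \frac{w_{ij}^{-}}{\sum_{i'} x_{i'} w_{i'j}^{-} + b_j^{-}} \right) R_j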
Code example #28
def fprop_conv(F, W, X, strides=None, padding='SAME'):
    #Propagate over conv layer
    xshape = X.get_shape().as_list()
    fshape = F.get_shape().as_list()
    if len(xshape) != len(fshape):
        # Use integer division so the reshaped channel count is an int.
        F = tf.reshape(F, (-1, xshape[1], xshape[2],
                           fshape[-1] // (xshape[1] * xshape[2])))
    strides = [1, 1, 1, 1] if strides is None else strides
    W = tf.maximum(0.0, W)

    Z = tf.nn.conv2d(X, W, strides, padding) + 1e-9
    S = F / Z
    C = nn_ops.conv2d_backprop_input(tf.shape(X), W, S, strides, padding)
    F = X * C
    return F
Code example #29
File: nn_grad.py Project: Jackhuang945/tensorflow
def _Conv2DBackpropFilterGrad(op, grad):
  return [
      nn_ops.conv2d_backprop_input(
          array_ops.shape(op.inputs[0]), grad, op.inputs[2],
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format")),
      None,
      nn_ops.conv2d(
          op.inputs[0], grad,
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format"))
  ]
Code example #30
 def testConvBackwardInputGradient(self, rate=1):
   in_shape = LayerShapeNHWC(batch=1, height=16, width=16, channels=1)
   filter_shape = FilterShape2D(
       height=7, width=7, in_channels=1, out_channels=3)
   filter_op = self._random_data_op(filter_shape)
   strides = [1, 1, 1, 1]
   padding = 'SAME'
   dilations = [1, rate, rate, 1]
   out_op = self._random_out_op(in_shape, filter_shape, strides, padding,
                                dilations)
   self._assert_reproducible(lambda: nn_ops.conv2d_backprop_input(
       in_shape,
       filter_op,
       out_op,
       strides=strides,
       padding=padding,
       dilations=dilations))
Code example #31
def _Conv2DBackpropFilterGrad(op, grad):
    return [
        nn_ops.conv2d_backprop_input(
            array_ops.shape(op.inputs[0]),
            grad,
            op.inputs[2],
            dilations=op.get_attr("dilations"),
            strides=op.get_attr("strides"),
            padding=op.get_attr("padding"),
            use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
            data_format=op.get_attr("data_format").decode()), None,
        nn_ops.conv2d(op.inputs[0],
                      grad,
                      dilations=op.get_attr("dilations"),
                      strides=op.get_attr("strides"),
                      padding=op.get_attr("padding"),
                      use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
                      data_format=op.get_attr("data_format").decode())
    ]
Code example #32
    def test_nhwc(self, padding):
        out_backprop_in_sizes = self.OUT_BACKPROP_IN_SIZES[padding]
        x1, x2 = self.make_filter_and_backprop_args(out_backprop_in_sizes)
        t1 = constant_op.constant(self.INPUT_SIZES_NHWC)
        t2 = constant_op.constant(x2, shape=self.FILTER_IN_SIZES)
        t3 = constant_op.constant(x1, shape=out_backprop_in_sizes)
        t3 = tf.transpose(t3, [0, 2, 3, 1])
        inp = nn_ops.conv2d_backprop_input(t1,
                                           t2,
                                           t3,
                                           strides=[1, 2, 2, 1],
                                           padding=padding,
                                           data_format='NHWC')

        def run_test(sess):
            return sess.run(inp)

        assert (
            self.with_ngraph(run_test) == self.without_ngraph(run_test)).all()
Code example #33
File: nn_grad.py Project: adit-chandra/tensorflow
def _Conv2DBackpropFilterGrad(op, grad):
  return [
      nn_ops.conv2d_backprop_input(
          array_ops.shape(op.inputs[0]),
          grad,
          op.inputs[2],
          dilations=op.get_attr("dilations"),
          strides=op.get_attr("strides"),
          padding=op.get_attr("padding"),
          use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
          data_format=op.get_attr("data_format").decode()), None,
      nn_ops.conv2d(
          op.inputs[0],
          grad,
          dilations=op.get_attr("dilations"),
          strides=op.get_attr("strides"),
          padding=op.get_attr("padding"),
          use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
          data_format=op.get_attr("data_format").decode())
  ]
Code example #34
File: nn_grad.py Project: adsar/tensorflow
def _Conv2DGrad(op, grad):
    return [
        nn_ops.conv2d_backprop_input(
            array_ops.shape(op.inputs[0]),
            op.inputs[1],
            grad,
            op.get_attr("strides"),
            op.get_attr("padding"),
            op.get_attr("use_cudnn_on_gpu"),
            op.get_attr("data_format"),
        ),
        nn_ops.conv2d_backprop_filter(
            op.inputs[0],
            array_ops.shape(op.inputs[1]),
            grad,
            op.get_attr("strides"),
            op.get_attr("padding"),
            op.get_attr("use_cudnn_on_gpu"),
            op.get_attr("data_format"),
        ),
    ]
Code example #35
def tf_model(padding):
    t1 = tf.constant(input_sizes_nhwc, dtype=tf.int32, name='t1')
    t2 = tf.placeholder(dtype=tf.float32, shape=filter_size_hwio, name='t2')
    t3 = tf.placeholder(dtype=tf.float32,
                        shape=out_backprop_in_sizes[padding],
                        name='t3')

    #Cast dtype to bfloat16 for TF because NNP casts ng_model inputs
    t2 = tf.cast(t2, dtype=tf.bfloat16)
    t3 = tf.cast(t3, dtype=tf.bfloat16)

    inp = nn_ops.conv2d_backprop_input(t1,
                                       t2,
                                       t3,
                                       stride,
                                       padding=padding,
                                       data_format='NHWC')

    #Cast dtype back to float32 similar to NNP
    inp = tf.cast(inp, dtype=tf.float32)
    return inp, t2, t3