Example #1
  def _RunAndVerifyBackprop(self, input_sizes, filter_sizes, output_sizes,
                            strides, dilations, padding, data_format, use_gpu,
                            err, mode):
    total_input_size = 1
    total_filter_size = 1
    for s in input_sizes:
      total_input_size *= s
    for s in filter_sizes:
      total_filter_size *= s
    # Initialize the input and filter tensors with incrementing numbers
    # starting from 1.
    x1 = [f * 1.0 for f in range(1, total_input_size + 1)]
    x2 = [f * 1.0 for f in range(1, total_filter_size + 1)]
    default_dilations = (
        dilations[0] == 1 and dilations[1] == 1 and dilations[2] == 1)

    # If any dilation rate is larger than 1, only do test on the GPU
    # because we currently do not have a CPU implementation for arbitrary
    # dilation rates.
    if default_dilations or use_gpu:
      with self.cached_session(use_gpu=use_gpu) as sess:
        if data_format == "NCDHW":
          input_sizes = test_util.NHWCToNCHW(input_sizes)
        t1 = constant_op.constant(x1, shape=input_sizes)
        t2 = constant_op.constant(x2, shape=filter_sizes)
        full_strides = [1] + strides + [1]
        full_dilations = [1] + dilations + [1]
        if data_format == "NCDHW":
          full_strides = test_util.NHWCToNCHW(full_strides)
          full_dilations = test_util.NHWCToNCHW(full_dilations)
        actual = nn_ops.conv3d(
            t1,
            t2,
            strides=full_strides,
            dilations=full_dilations,
            padding=padding,
            data_format=data_format)
        expected = nn_ops.convolution(
            t1,
            t2,
            padding=padding,
            strides=strides,
            dilation_rate=dilations,
            data_format=data_format)
        if data_format == "NCDHW":
          actual = test_util.NCHWToNHWC(actual)
          expected = test_util.NCHWToNHWC(expected)
        # Compute the gradient with respect to either the input or the
        # filter, as selected by `mode`, for both the op and the reference.
        wrt = t1 if mode == "input" else t2
        actual_grad = gradients_impl.gradients(actual, wrt)[0]
        expected_grad = gradients_impl.gradients(expected, wrt)[0]
        actual_value = self.evaluate(actual_grad)
        expected_value = self.evaluate(expected_grad)
        self.assertShapeEqual(actual_value, actual_grad)
        self.assertShapeEqual(expected_value, expected_grad)
      print("expected = ", expected_value)
      print("actual = ", actual_value)
      self.assertArrayNear(expected_value.flatten(), actual_value.flatten(),
                           err)
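A sketch of how this helper might be driven; the test name, shapes, and error bound below are illustrative assumptions, not values from the original suite:

  def testConv3DBackpropInputDilation(self):
    # Hypothetical parameters; output_sizes is accepted but unused by the
    # helper body shown above. Non-default dilations require use_gpu=True.
    self._RunAndVerifyBackprop(
        input_sizes=[1, 2, 3, 3, 1],
        filter_sizes=[2, 2, 2, 1, 1],
        output_sizes=[1, 2, 3, 3, 1],
        strides=[1, 1, 1],
        dilations=[2, 1, 1],
        padding="SAME",
        data_format="NDHWC",
        use_gpu=True,
        err=1e-4,
        mode="input")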
Example #2
    def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
        input_nhwc = math_ops.cast(inputs, dtype)
        with self.cached_session(use_gpu=False):
            # test NHWC (default) on CPU
            x_tf = array_ops.depth_to_space(input_nhwc, block_size)
            self.assertAllEqual(x_tf, outputs)

            # Run this check only when the CPU is the only available device.
            if all(x.device_type == "CPU"
                   for x in device_lib.list_local_devices()):
                input_nchw = test_util.NHWCToNCHW(input_nhwc)
                output_nchw = array_ops.depth_to_space(input_nchw,
                                                       block_size,
                                                       data_format="NCHW")
                output_nhwc = test_util.NCHWToNHWC(output_nchw)
                with self.assertRaisesRegex(
                        errors_impl.InvalidArgumentError,
                        "No OpKernel was registered to support Op 'DepthToSpace'"
                ):
                    output_nhwc.eval()

        if test.is_gpu_available():
            with self.cached_session(use_gpu=True):
                # test NHWC (default) on GPU
                x_tf = array_ops.depth_to_space(input_nhwc, block_size)
                self.assertAllEqual(x_tf, outputs)
                # test NCHW on GPU
                input_nchw = test_util.NHWCToNCHW(input_nhwc)
                output_nchw = array_ops.depth_to_space(input_nchw,
                                                       block_size,
                                                       data_format="NCHW")
                output_nhwc = test_util.NCHWToNHWC(output_nchw)
                self.assertAllEqual(output_nhwc, outputs)
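For orientation, the rearrangement under test can be pinned down with the canonical values from the op's documentation (the test method name here is hypothetical):

    def testBasicDepthToSpace(self):
        # With block_size=2, one pixel with 4 channels becomes a 2x2 spatial
        # block with 1 channel: [1, 1, 1, 4] -> [1, 2, 2, 1].
        x = [[[[1, 2, 3, 4]]]]
        expected = [[[[1], [2]], [[3], [4]]]]
        self._testOne(x, 2, expected)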
Example #3
    def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, bias,
                              strides, padding, activation_mode, data_format,
                              dtype):
        """Verifies the output values of the convolution function.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      bias: 1-D bias tensor of length output_depth.
      strides: Stride: [col_stride, row_stride]
      padding: Padding type.
      activation_mode: Activation mode.
      data_format: Format of the data tensors.
      dtype: Data type for inputs and outputs.
    Returns:
      Symbolic tensor value and reference value that can be used to
      execute the computation and verify the results.
    """
        input_size = np.prod(tensor_in_sizes)
        filter_size = np.prod(filter_in_sizes)
        bias_size = filter_in_sizes[-1]  # equals the output depth
        # Initialize the input and filter tensors with incrementing numbers
        # starting from 1.
        x1 = [f * 1.0 for f in range(1, input_size + 1)]
        x2 = [f * 1.0 for f in range(1, filter_size + 1)]
        # The bias is chosen so that there are always negative values after
        # the bias add, which lets us test whether relu works correctly.
        x3 = bias
        x3 = bias
        with self.test_session(use_gpu=True):
            t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
            t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
            t3 = constant_op.constant(x3, shape=[bias_size], dtype=dtype)
            strides = [1] + strides + [1]
            if data_format == "NCHW":
                t1 = test_util.NHWCToNCHW(t1)
                strides = test_util.NHWCToNCHW(strides)
            output = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
                t1,
                t2,
                t3,
                strides=strides,
                padding=padding,
                data_format=data_format,
                activation_mode=activation_mode)
            ref_conv_output = nn_ops.conv2d(t1,
                                            t2,
                                            strides=strides,
                                            padding=padding,
                                            data_format=data_format)
            ref_bias_output = nn_ops.bias_add(ref_conv_output,
                                              t3,
                                              data_format=data_format)
            ref_output = nn_ops.relu(ref_bias_output)
            if data_format == "NCHW":
                output = test_util.NCHWToNHWC(output)
                ref_output = test_util.NCHWToNHWC(ref_output)

            return output, ref_output
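A possible caller for the returned pair; the method name and tolerances are assumptions for this sketch:

    def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, bias, strides,
                      padding, activation_mode, data_format, dtype):
        output, ref_output = self._SetupValuesForDevice(
            tensor_in_sizes, filter_in_sizes, bias, strides, padding,
            activation_mode, data_format, dtype)
        # The fused op must match the unfused conv2d + bias_add + relu chain.
        self.assertAllClose(self.evaluate(output),
                            self.evaluate(ref_output),
                            atol=1e-5, rtol=1e-5)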
Example #4
    def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
                              padding, data_format, use_gpu):
        total_size_1 = 1
        total_size_2 = 1
        for s in tensor_in_sizes:
            total_size_1 *= s
        for s in filter_in_sizes:
            total_size_2 *= s

        # Initialize the input and filter tensors with incrementing numbers
        # starting from 1.
        x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
        x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
        with self.test_session(use_gpu=use_gpu):
            t1 = constant_op.constant(x1, shape=tensor_in_sizes)
            t2 = constant_op.constant(x2, shape=filter_in_sizes)

            if isinstance(stride, collections_abc.Iterable):
                strides = [1] + list(stride) + [1]
            else:
                strides = [1, stride, stride, stride, 1]

            if data_format == "NCDHW":
                t1 = test_util.NHWCToNCHW(t1)
                strides = test_util.NHWCToNCHW(strides)
            conv = nn_ops.conv3d(t1,
                                 t2,
                                 strides,
                                 padding=padding,
                                 data_format=data_format)
            if data_format == "NCDHW":
                conv = test_util.NCHWToNHWC(conv)

            return conv
Example #5
    def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
                              padding, data_format, dtype, use_gpu):
        total_size_1 = 1
        total_size_2 = 1
        for s in tensor_in_sizes:
            total_size_1 *= s
        for s in filter_in_sizes:
            total_size_2 *= s

        # Initialize the input and filter tensors with values in (0, 1].
        # The values are kept fairly small to avoid overflowing float16
        # during the conv3d.
        x1 = [f * 1.0 / total_size_1 for f in range(1, total_size_1 + 1)]
        x2 = [f * 1.0 / total_size_2 for f in range(1, total_size_2 + 1)]
        with self.test_session(use_gpu=use_gpu):
            t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
            t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)

            if isinstance(stride, collections_abc.Iterable):
                strides = [1] + list(stride) + [1]
            else:
                strides = [1, stride, stride, stride, 1]

            if data_format == "NCDHW":
                t1 = test_util.NHWCToNCHW(t1)
                strides = test_util.NHWCToNCHW(strides)
            conv = nn_ops.conv3d(t1,
                                 t2,
                                 strides,
                                 padding=padding,
                                 data_format=data_format)
            if data_format == "NCDHW":
                conv = test_util.NCHWToNHWC(conv)

            return conv
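Both variants above normalize `stride` identically; a self-contained sketch of that branch:

from collections import abc

def _normalize_stride(stride):
    # A scalar stride is broadcast to all three spatial dimensions; an
    # iterable is taken as explicit [planes, rows, cols] strides.
    if isinstance(stride, abc.Iterable):
        return [1] + list(stride) + [1]
    return [1, stride, stride, stride, 1]

assert _normalize_stride(2) == [1, 2, 2, 2, 1]
assert _normalize_stride((1, 2, 3)) == [1, 1, 2, 3, 1]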
Example #6
  def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,
                                   stride, dilation, padding, data_format,
                                   use_gpu):
    total_size_tensor = 1
    total_size_filter = 1
    for s in tensor_in_sizes:
      total_size_tensor *= s
    for s in filter_in_sizes:
      total_size_filter *= s

    # Initialize the input and filter tensors with incrementing numbers
    # starting from 1.
    x1 = [f * 1.0 for f in range(1, total_size_tensor + 1)]
    x2 = [f * 1.0 for f in range(1, total_size_filter + 1)]
    with self.cached_session(use_gpu=use_gpu):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      if isinstance(stride, collections_abc.Iterable):
        strides = list(stride)
      else:
        strides = [stride, stride, stride]
      if data_format == "NCDHW":
        t1 = test_util.NHWCToNCHW(t1)
        full_strides = [1, 1] + strides
        full_dilation = [1, 1] + dilation
      else:
        full_strides = [1] + strides + [1]
        full_dilation = [1] + dilation + [1]
      expected = nn_ops.convolution(
          t1,
          t2,
          padding=padding,
          strides=strides,
          dilation_rate=dilation,
          data_format=data_format)
      computed = nn_ops.conv3d(
          t1,
          t2,
          strides=full_strides,
          dilations=full_dilation,
          padding=padding,
          data_format=data_format)
      if data_format == "NCDHW":
        expected = test_util.NCHWToNHWC(expected)
        computed = test_util.NCHWToNHWC(computed)
    return expected, computed
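The reference relies on the dilated and plain convolution paths agreeing; recall that a kernel of size k with dilation d spans k + (k - 1)(d - 1) input samples, e.g.:

def effective_kernel_size(k, d):
    # Dilation inserts d - 1 gaps between taps, widening the receptive field.
    return k + (k - 1) * (d - 1)

assert effective_kernel_size(3, 1) == 3
assert effective_kernel_size(3, 2) == 5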
Example #7
    def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
                              padding, data_format, dtype, use_gpu):
        total_size_tensor = 1
        total_size_filter = 1
        for s in tensor_in_sizes:
            total_size_tensor *= s
        for s in filter_in_sizes:
            total_size_filter *= s

        # Initialize the input and filter tensors with values in (0, 1].
        # The values are kept fairly small to avoid overflowing float16
        # during the conv3d.
        x1 = [
            f * 1.0 / total_size_tensor
            for f in range(1, total_size_tensor + 1)
        ]
        x2 = [
            f * 1.0 / total_size_filter
            for f in range(1, total_size_filter + 1)
        ]
        with self.cached_session(use_gpu=use_gpu):
            t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
            t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)

            if isinstance(stride, collections_abc.Iterable):
                strides = [1] + list(stride) + [1]
            else:
                strides = [1, stride, stride, stride, 1]

            if data_format == "NCDHW":
                t1 = test_util.NHWCToNCHW(t1)
                strides = test_util.NHWCToNCHW(strides)
            # For the 1x1x1 filter, the NDHWC conv3d will call blas kernel on GPU,
            # which might lead to deviated results from the reference with tf32.
            if (data_format == 'NDHWC' and use_gpu and filter_in_sizes[0] == 1
                    and filter_in_sizes[1] == 1 and filter_in_sizes[2] == 1
                    and dtype == dtypes.float32):
                with ops.device('/cpu:0'):
                    conv = nn_ops.conv3d(t1,
                                         t2,
                                         strides,
                                         padding=padding,
                                         data_format=data_format)
            else:
                conv = nn_ops.conv3d(t1,
                                     t2,
                                     strides,
                                     padding=padding,
                                     data_format=data_format)
            if data_format == "NCDHW":
                conv = test_util.NCHWToNHWC(conv)

            return conv
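As an aside to the tf32 comment above: instead of pinning the op to the CPU, newer TensorFlow builds (2.4+) can disable TF32 execution globally. A sketch, assuming the public config API is available in the build under test:

from tensorflow.python.framework import config

# Run float32 matmuls/convs at full precision on Ampere+ GPUs.
config.enable_tensor_float_32_execution(False)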
Example #8
  def _SetupVal(data_format, use_gpu):
    with test_util.device(use_gpu):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      strides = [1] + conv_strides + [1]
      if data_format == "NCHW":
        t1 = test_util.NHWCToNCHW(t1)
        strides = test_util.NHWCToNCHW(strides)
      conv = nn_ops.conv2d(
          t1, t2, strides=strides, padding=padding, data_format=data_format)
      if data_format == "NCHW":
        conv = test_util.NCHWToNHWC(conv)
      return conv
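Note that `_SetupVal` is a nested helper: x1, x2, tensor_in_sizes, filter_in_sizes, conv_strides, and padding are captured from the enclosing test. The layout helpers it calls are, for 4-D tensors, plain transposes; a sketch (test_util's versions also handle 5-D tensors and shape lists):

# NHWC -> NCHW moves channels next to the batch dimension, and back again.
nchw = array_ops.transpose(nhwc, [0, 3, 1, 2])
nhwc_again = array_ops.transpose(nchw, [0, 2, 3, 1])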
Example #9
    def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes,
                              dilations, strides, padding, data_format, dtype,
                              use_ve):
        """Verifies the output values of the convolution function.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      dilations: Dilated rate: [col_dilation, row_dilation]
      strides: Stride: [col_stride, row_stride]
      padding: Padding type.
      data_format: Format of the data tensors.
      dtype: Data type for inputs and outputs.
      use_ve: True if the operations should be run on VE
    Returns:
      Symbolic tensor value that can be used to execute the computation
    """
        total_size_1 = 1
        total_size_2 = 1
        for s in tensor_in_sizes:
            total_size_1 *= s
        for s in filter_in_sizes:
            total_size_2 *= s
        # Initialize the input and filter tensors with incrementing numbers
        # starting from 1.
        x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
        x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]

        with test_util.device(use_ve=use_ve, use_gpu=False):
            t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
            t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
            strides = [1] + strides + [1]
            dilations = [1] + dilations + [1]
            if data_format == "NCHW":
                t1 = test_util.NHWCToNCHW(t1)
                strides = test_util.NHWCToNCHW(strides)
                dilations = test_util.NHWCToNCHW(dilations)
            conv = nn_ops.conv2d(t1,
                                 t2,
                                 dilations=dilations,
                                 strides=strides,
                                 padding=padding,
                                 data_format=data_format)
            if data_format == "NCHW":
                conv = test_util.NCHWToNHWC(conv)

            return conv
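An illustrative invocation; the shapes and the test name are assumptions, and `use_ve` exists only in the vector-engine fork these tests come from:

    def testConv2DOnVE(self):
        # Hypothetical shapes; any NHWC conv2d configuration would do.
        conv = self._SetupValuesForDevice(
            tensor_in_sizes=[1, 4, 4, 1],
            filter_in_sizes=[2, 2, 1, 1],
            dilations=[1, 1],
            strides=[1, 1],
            padding="SAME",
            data_format="NHWC",
            dtype=dtypes.float32,
            use_ve=True)
        value = self.evaluate(conv)
        # SAME padding with unit strides preserves the spatial shape.
        self.assertEqual(value.shape, (1, 4, 4, 1))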
Example #10
    def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
        input_nhwc = math_ops.cast(inputs, dtype)
        # test NHWC (default)
        x_tf = array_ops.space_to_depth(input_nhwc, block_size)
        self.assertAllEqual(self.evaluate(x_tf), outputs)

        if test_util.is_gpu_available():
            with test_util.force_gpu():
                # test NCHW on GPU
                input_nchw = test_util.NHWCToNCHW(input_nhwc)
                output_nchw = array_ops.space_to_depth(input_nchw,
                                                       block_size,
                                                       data_format="NCHW")
                output_nhwc = test_util.NCHWToNHWC(output_nchw)
                self.assertAllEqual(self.evaluate(output_nhwc), outputs)
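space_to_depth is the inverse rearrangement of the depth_to_space op in Example #2; pinned down with the documentation's canonical values (hypothetical test method):

    def testBasicSpaceToDepth(self):
        # With block_size=2, a 2x2 spatial block with 1 channel collapses
        # into one pixel with 4 channels: [1, 2, 2, 1] -> [1, 1, 1, 4].
        x = [[[[1], [2]], [[3], [4]]]]
        expected = [[[[1, 2, 3, 4]]]]
        self._testOne(x, 2, expected)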
Example #11
  def _testOne(self, inputs, block_size, outputs):
    input_nhwc = math_ops.to_float(inputs)
    with self.test_session(use_gpu=False):
      # test NHWC (default) on CPU
      x_tf = array_ops.space_to_depth(input_nhwc, block_size)
      self.assertAllEqual(x_tf.eval(), outputs)
    if test.is_gpu_available():
      with self.test_session(use_gpu=True):
        # test NHWC (default) on GPU
        x_tf = array_ops.space_to_depth(input_nhwc, block_size)
        self.assertAllEqual(x_tf.eval(), outputs)
        # test NCHW on GPU
        input_nchw = test_util.NHWCToNCHW(input_nhwc)
        output_nchw = array_ops.space_to_depth(
            input_nchw, block_size, data_format="NCHW")
        output_nhwc = test_util.NCHWToNHWC(output_nchw)
        self.assertAllEqual(output_nhwc.eval(), outputs)
Example #12
        def _SetupVal(data_format, use_gpu):
            t1 = constant_op.constant(x1, shape=tensor_in_sizes)
            t2 = constant_op.constant(x2, shape=filter_in_sizes)
            t3 = constant_op.constant(x3, shape=[filter_in_sizes[-1]])
            strides = [1] + conv_strides + [1]
            if data_format == "NCHW":
                t1 = test_util.NHWCToNCHW(t1)
                strides = test_util.NHWCToNCHW(strides)
            output = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
                t1,
                t2,
                t3,
                strides=strides,
                padding=padding,
                data_format=data_format,
                activation_mode="Relu")

            if data_format == "NCHW":
                output = test_util.NCHWToNHWC(output)
            return output
Example #13
  def _VerifyOneTest(self, pool_func, input_sizes, window, strides, padding,
                     data_format, expected, use_gpu):
    """Verifies the output values of the pooling function.

    Args:
      pool_func: Function to be called: co.MaxPool, co.AvgPool.
      input_sizes: Input tensor dimensions.
      window: Tuple of kernel dims: planes, rows, cols.
      strides: Tuple of strides for dims: planes, rows, cols.
      padding: Padding type.
      data_format: The data format we use to run the pooling operation.
      expected: An array containing the expected operation outputs.
      use_gpu: Whether to run ops on GPU.
    """
    total_size = 1
    for s in input_sizes:
      total_size *= s
    # Initialize the input tensor with incrementing numbers starting from 1.
    x = [f * 1.0 for f in range(1, total_size + 1)]
    with self.cached_session(use_gpu=use_gpu) as sess:
      t = constant_op.constant(x, shape=input_sizes)
      window = [1] + list(window) + [1]
      strides = [1] + list(strides) + [1]
      if data_format == "NCDHW":
        t = test_util.NHWCToNCHW(t)
        window = test_util.NHWCToNCHW(window)
        strides = test_util.NHWCToNCHW(strides)
      t = pool_func(
          t,
          ksize=window,
          strides=strides,
          padding=padding,
          data_format=data_format)
      if data_format == "NCDHW":
        t = test_util.NCHWToNHWC(t)
      vals = self.evaluate(t)
    # Verifies values.
    actual = vals.flatten()
    self.assertAllClose(expected, actual)
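An illustrative configuration, with the expected value worked out for this sketch: the input is filled with 1..8, so a single 2x2x2 max-pool window over a [1, 2, 2, 2, 1] tensor yields 8.0. The test name is hypothetical:

  def testMaxPool3DValid(self):
    self._VerifyOneTest(
        pool_func=nn_ops.max_pool3d,
        input_sizes=[1, 2, 2, 2, 1],
        window=(2, 2, 2),
        strides=(2, 2, 2),
        padding="VALID",
        data_format="NDHWC",
        expected=[8.0],
        use_gpu=False)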
Example #14
        def _SetupVal(data_format, use_gpu):
            # TODO(b/79323979): re-enable memory optimization after this bug is fixed.
            with self.test_session(use_gpu=use_gpu,
                                   config=NoMemoryOptimizationConfig()):
                t1 = constant_op.constant(x1, shape=tensor_in_sizes)
                t2 = constant_op.constant(x2, shape=filter_in_sizes)
                t3 = constant_op.constant(x3, shape=[filter_in_sizes[-1]])
                strides = [1] + conv_strides + [1]
                if data_format == "NCHW":
                    t1 = test_util.NHWCToNCHW(t1)
                    strides = test_util.NHWCToNCHW(strides)
                output = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
                    t1,
                    t2,
                    t3,
                    strides=strides,
                    padding=padding,
                    data_format=data_format,
                    activation_mode="Relu")

                if data_format == "NCHW":
                    output = test_util.NCHWToNHWC(output)
                return output
Example #15
    def _ConstructAndTestGradientForConfig(self, pool_func, input_sizes,
                                           output_sizes, window, strides,
                                           padding, data_format, use_gpu):
        """Verifies the gradients of a pooling function.

    Args:
      pool_func: Function to be called, co.MaxPool, co.AvgPool,
        or the Lua version.
      input_sizes: Input tensor dimensions.
      output_sizes: Output tensor dimensions.
      window: Tuple of kernel dims: planes, rows, cols.
      strides: Tuple of strides for dims: planes, rows, cols.
      padding: Padding type.
      data_format: Data format string.
      use_gpu: Whether to run on GPU.
    """
        total_size = 1
        for s in input_sizes:
            total_size *= s
        # Initialize the input tensor with incrementing numbers starting
        # from 1.
        x = [f * 1.0 for f in range(1, total_size + 1)]
        with self.test_session(use_gpu=use_gpu):
            input_tensor = constant_op.constant(x,
                                                shape=input_sizes,
                                                name="input")
            err_margin = 1e-3
            if pool_func == nn_ops.avg_pool3d:
                func_name = "avg_pool3d"
                x_init_value = None
            else:
                x_init_value = np.asfarray(
                    np.arange(1, total_size + 1),
                    dtype=np.float32).reshape(input_sizes)
                func_name = "max_pool3d"

            ksize = [1, window[0], window[1], window[2], 1]
            strides = [1, strides[0], strides[1], strides[2], 1]
            t = input_tensor

            if data_format == "NCDHW":
                ksize = test_util.NHWCToNCHW(ksize)
                strides = test_util.NHWCToNCHW(strides)
                t = test_util.NHWCToNCHW(t)

            t = pool_func(t,
                          ksize=ksize,
                          strides=strides,
                          padding=padding,
                          data_format=data_format,
                          name=func_name)

            if data_format == "NCDHW":
                t = test_util.NCHWToNHWC(t)

            err = gradient_checker.compute_gradient_error(
                input_tensor,
                input_sizes,
                t,
                output_sizes,
                x_init_value=x_init_value,
                delta=1e-2)
        print("%s gradient error = " % func_name, err)
        self.assertLess(err, err_margin)
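compute_gradient_error returns the maximum absolute difference between the theoretical and numerical Jacobians; a minimal self-contained use on a trivial op (assumes the same TF1 graph-mode test harness; the test name is hypothetical):

    def testGradientCheckerSmoke(self):
        with self.test_session():
            x = constant_op.constant([1.0, 2.0, 3.0], shape=[3], name="x")
            y = x * x
            # Central differences make a quadratic's gradient near-exact.
            err = gradient_checker.compute_gradient_error(x, [3], y, [3])
        self.assertLess(err, 1e-4)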
Example #16
    def _ConstructAndTestGradientForConfig(self, batch, input_shape,
                                           filter_shape, in_depth, out_depth,
                                           stride, padding, test_input,
                                           data_format, use_gpu):

        input_planes, input_rows, input_cols = input_shape
        filter_planes, filter_rows, filter_cols = filter_shape

        input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
        filter_shape = [
            filter_planes, filter_rows, filter_cols, in_depth, out_depth
        ]

        if isinstance(stride, collections_abc.Iterable):
            strides = [1] + list(stride) + [1]
        else:
            strides = [1, stride, stride, stride, 1]

        if padding == "VALID":
            output_planes = int(
                math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
            output_rows = int(
                math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
            output_cols = int(
                math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
        else:
            output_planes = int(math.ceil(float(input_planes) / strides[1]))
            output_rows = int(math.ceil(float(input_rows) / strides[2]))
            output_cols = int(math.ceil(float(input_cols) / strides[3]))
        output_shape = [
            batch, output_planes, output_rows, output_cols, out_depth
        ]
        input_size = 1
        for x in input_shape:
            input_size *= x
        filter_size = 1
        for x in filter_shape:
            filter_size *= x
        input_data = [x * 1.0 / input_size for x in range(0, input_size)]
        filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]

        if test.is_gpu_available() and use_gpu:
            data_type = dtypes.float32
            tolerance = 4e-3
        else:
            data_type = dtypes.float64
            tolerance = 1e-8
        with self.test_session(use_gpu=use_gpu):
            orig_input_tensor = constant_op.constant(input_data,
                                                     shape=input_shape,
                                                     dtype=data_type,
                                                     name="input")
            filter_tensor = constant_op.constant(filter_data,
                                                 shape=filter_shape,
                                                 dtype=data_type,
                                                 name="filter")

            if data_format == "NCDHW":
                input_tensor = test_util.NHWCToNCHW(orig_input_tensor)
                strides = test_util.NHWCToNCHW(strides)
            else:
                input_tensor = orig_input_tensor

            conv = nn_ops.conv3d(input_tensor,
                                 filter_tensor,
                                 strides,
                                 padding,
                                 data_format=data_format,
                                 name="conv")

            if data_format == "NCDHW":
                conv = test_util.NCHWToNHWC(conv)

            if test_input:
                err = gradient_checker.compute_gradient_error(
                    orig_input_tensor, input_shape, conv, output_shape)
            else:
                err = gradient_checker.compute_gradient_error(
                    filter_tensor, filter_shape, conv, output_shape)
        print("conv3d gradient error = ", err)
        self.assertLess(err, tolerance)
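The output extents computed above follow the standard formulas: VALID gives ceil((in - k + 1) / stride) and SAME gives ceil(in / stride). A quick standalone check:

import math

def out_size(in_size, k, stride, padding):
    # VALID keeps only full windows; SAME pads so the stride alone decides.
    if padding == "VALID":
        return int(math.ceil((in_size - k + 1.0) / stride))
    return int(math.ceil(float(in_size) / stride))

assert out_size(7, 3, 2, "VALID") == 3  # ceil(5 / 2)
assert out_size(7, 3, 2, "SAME") == 4   # ceil(7 / 2)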
Example #17
    def _ConstructAndTestGradientForConfig(self, batch, input_shape,
                                           filter_shape, in_depth, out_depth,
                                           stride, padding, test_input,
                                           data_format, use_gpu):

        input_planes, input_rows, input_cols = input_shape
        filter_planes, filter_rows, filter_cols = filter_shape

        input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
        filter_shape = [
            filter_planes, filter_rows, filter_cols, in_depth, out_depth
        ]

        if isinstance(stride, collections_abc.Iterable):
            strides = [1] + list(stride) + [1]
        else:
            strides = [1, stride, stride, stride, 1]

        if padding == "VALID":
            output_planes = int(
                math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
            output_rows = int(
                math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
            output_cols = int(
                math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
        else:
            output_planes = int(math.ceil(float(input_planes) / strides[1]))
            output_rows = int(math.ceil(float(input_rows) / strides[2]))
            output_cols = int(math.ceil(float(input_cols) / strides[3]))
        output_shape = [
            batch, output_planes, output_rows, output_cols, out_depth
        ]
        input_size = 1
        for x in input_shape:
            input_size *= x
        filter_size = 1
        for x in filter_shape:
            filter_size *= x
        input_data = [x * 1.0 / input_size for x in range(0, input_size)]
        filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]

        for data_type in self._DtypesToTest(use_gpu=use_gpu):
            # TODO(mjanusz): Modify gradient_checker to also provide max relative
            # error and synchronize the tolerance levels between the tests for forward
            # and backward computations.
            if data_type == dtypes.float64:
                tolerance = 1e-8
            elif data_type == dtypes.float32:
                tolerance = 5e-3
            elif data_type == dtypes.float16:
                tolerance = 1e-3

            with self.cached_session(use_gpu=use_gpu):
                orig_input_tensor = constant_op.constant(input_data,
                                                         shape=input_shape,
                                                         dtype=data_type,
                                                         name="input")
                filter_tensor = constant_op.constant(filter_data,
                                                     shape=filter_shape,
                                                     dtype=data_type,
                                                     name="filter")

                if data_format == "NCDHW":
                    input_tensor = test_util.NHWCToNCHW(orig_input_tensor)
                    new_strides = test_util.NHWCToNCHW(strides)
                else:
                    input_tensor = orig_input_tensor
                    new_strides = strides

                conv = nn_ops.conv3d(input_tensor,
                                     filter_tensor,
                                     new_strides,
                                     padding,
                                     data_format=data_format,
                                     name="conv")

                if data_format == "NCDHW":
                    conv = test_util.NCHWToNHWC(conv)

                self.assertEqual(conv.shape,
                                 tensor_shape.TensorShape(output_shape))

                if test_input:
                    jacob_t, jacob_n = gradient_checker.compute_gradient(
                        orig_input_tensor, input_shape, conv, output_shape)
                else:
                    jacob_t, jacob_n = gradient_checker.compute_gradient(
                        filter_tensor, filter_shape, conv, output_shape)

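                # Note: this relies on self._DtypesToTest yielding float16
                # after a higher-precision dtype, so reference_jacob_t is
                # already populated when the float16 pass runs.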
                if data_type != dtypes.float16:
                    reference_jacob_t = jacob_t
                    err = np.fabs(jacob_t - jacob_n).max()
                else:
                    # Compare fp16 theoretical gradients to fp32 theoretical gradients,
                    # since fp16 numerical gradients are too imprecise.
                    err = np.fabs(jacob_t - reference_jacob_t).max()

            print("conv3d gradient error = ", err)
            self.assertLess(err, tolerance)
Example #18
    def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                      expected):
        results = []
        for data_format, use_gpu in GetTestConfigs():
            for dtype in self._DtypesToTest(use_gpu):
                total_size_tensor = np.prod(tensor_in_sizes)
                total_size_filter = np.prod(filter_in_sizes)

                # Initialize the input and filter tensors with values in
                # (0, 1]. The values are kept fairly small to avoid
                # overflowing float16 during the conv3d.
                x1 = [
                    f * 1.0 / total_size_tensor
                    for f in range(1, total_size_tensor + 1)
                ]
                x2 = [
                    f * 1.0 / total_size_filter
                    for f in range(1, total_size_filter + 1)
                ]
                with self.cached_session(use_gpu=use_gpu):

                    if isinstance(stride, collections_abc.Iterable):
                        strides = [1] + list(stride) + [1]
                    else:
                        strides = [1, stride, stride, stride, 1]

                    t1 = constant_op.constant(x1,
                                              shape=tensor_in_sizes,
                                              dtype=dtype)
                    t2 = constant_op.constant(x2,
                                              shape=filter_in_sizes,
                                              dtype=dtype)
                    if data_format == "NCDHW":
                        t1 = test_util.NHWCToNCHW(t1)
                        strides = test_util.NHWCToNCHW(strides)
                    # Build the placeholders from the possibly transposed
                    # tensor shapes so the feed below also matches in the
                    # NCDHW case (the pre-transpose shape would not).
                    t1_ph = tf.compat.v1.placeholder(dtype, shape=t1.shape)
                    t2_ph = tf.compat.v1.placeholder(dtype,
                                                     shape=filter_in_sizes)
                    conv = nn_ops.conv3d(t1_ph,
                                         t2_ph,
                                         strides,
                                         padding=padding,
                                         data_format=data_format)
                    if data_format == "NCDHW":
                        conv = test_util.NCHWToNHWC(conv)

                    sess_fn = lambda sess: sess.run(conv,
                                                    feed_dict={
                                                        t1_ph: t1.eval(),
                                                        t2_ph: t2.eval()
                                                    })
                    value = self.with_ngraph(sess_fn)
                    print("expected = ", expected)
                    print("actual = ", value)
                    tol = 1e-6
                    if value.dtype == np.float16:
                        tol = 1e-3

                    self.assertAllClose(expected,
                                        value.flatten(),
                                        atol=tol,
                                        rtol=tol)