Example #1
  def testSimpleGpu(self):
    if not test_util.is_gpu_available():
      self.skipTest('No GPU available')

    np.random.seed(7)
    with test_util.force_gpu():
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
        for dtype in [np.float16, np.float32, np.float64, np.int32, np.int64]:
          data = np.random.randn(*shape).astype(dtype)
          # Convert data to a single TensorFlow tensor
          x = constant_op.constant(data)
          # Unstack into a list of tensors
          cs = array_ops.unstack(x, num=shape[0])
          self.assertEqual(type(cs), list)
          self.assertEqual(len(cs), shape[0])
          cs = [self.evaluate(c) for c in cs]
          self.assertAllEqual(cs, data)
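The property being exercised: unstack splits a tensor along axis 0 into a Python list whose length matches that axis, and stacking the pieces back recovers the original. A minimal standalone sketch of the same round trip, assuming TF 2.x eager mode and the public tf.unstack/tf.stack API rather than the internal array_ops module:

import numpy as np
import tensorflow as tf

# unstack splits a rank-R tensor into a list of rank-(R-1) tensors
# along axis 0; stack is its inverse.
x = tf.constant(np.arange(6.0).reshape(3, 2))
pieces = tf.unstack(x, num=3)          # list of three shape-(2,) tensors
assert isinstance(pieces, list) and len(pieces) == 3
round_trip = tf.stack(pieces)          # shape (3, 2) again
assert np.array_equal(round_trip.numpy(), x.numpy())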
Example #2

  def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
    input_nhwc = math_ops.cast(inputs, dtype)
    with test_util.force_cpu():
      # test NHWC (default) on CPU
      x_tf = array_ops.space_to_depth(input_nhwc, block_size)
      self.assertAllEqual(self.evaluate(x_tf), outputs)

    if test_util.is_gpu_available():
      with test_util.force_gpu():
        # test NHWC (default) on GPU
        x_tf = array_ops.space_to_depth(input_nhwc, block_size)
        self.assertAllEqual(self.evaluate(x_tf), outputs)
        # test NCHW on GPU
        input_nchw = test_util.NHWCToNCHW(input_nhwc)
        output_nchw = array_ops.space_to_depth(
            input_nchw, block_size, data_format="NCHW")
        output_nhwc = test_util.NCHWToNHWC(output_nchw)
        self.assertAllEqual(self.evaluate(output_nhwc), outputs)
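For reference, space_to_depth rearranges non-overlapping block_size x block_size spatial patches into the channel dimension, which is why the NCHW branch can round-trip through the layout converters. A minimal sketch of the canonical case via the public tf.nn.space_to_depth, assuming TF 2.x eager mode:

import tensorflow as tf

# A 1x2x2x1 NHWC input with block_size=2 collapses its four pixels
# into the channels of a single 1x1x1x4 output.
x = tf.constant([[[[1], [2]],
                  [[3], [4]]]], dtype=tf.float32)  # shape (1, 2, 2, 1)
y = tf.nn.space_to_depth(x, block_size=2)          # shape (1, 1, 1, 4)
print(y.numpy())  # [[[[1. 2. 3. 4.]]]]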
Example #5

    def testExceptionThrowing(self):
        with self.session(), test_util.force_gpu():
            for data_type in [
                    np.float16, np.float32, np.float64, np.complex64,
                    np.complex128
            ]:
                sparse_input, dense_input = _gen_data(
                    m=5, k=10, n=7, nnz=20, row_occupied_rate=0.9,
                    data_type=data_type, seed=456)
                with self.assertRaisesRegex(
                        errors.UnimplementedError,
                        "A deterministic GPU implementation of "
                        "SparseTensorDenseMatmulOp is not currently "
                        "available."):
                    result = sparse_ops.sparse_tensor_dense_matmul(
                        sparse_input, dense_input)
                    self.evaluate(result)
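_gen_data is a helper defined elsewhere in the test file and not shown on this page. Below is a hypothetical reconstruction of what it plausibly returns, judged purely from the call site; the sampling scheme and every name in the body are assumptions, not TensorFlow's actual helper:

import numpy as np
import tensorflow as tf

def _gen_data(m, k, n, nnz, row_occupied_rate, data_type, seed):
  # Hypothetical: a random (m, k) SparseTensor with up to `nnz` nonzeros
  # confined to a `row_occupied_rate` fraction of the rows, plus a dense
  # (k, n) matrix of the same dtype.
  rng = np.random.RandomState(seed)
  occupied = rng.choice(m, size=max(1, int(m * row_occupied_rate)),
                        replace=False)
  rows = rng.choice(occupied, size=nnz)
  cols = rng.randint(0, k, size=nnz)
  indices = np.unique(np.stack([rows, cols], axis=1), axis=0)
  values = rng.standard_normal(len(indices)).astype(data_type)
  sparse_input = tf.sparse.reorder(
      tf.sparse.SparseTensor(indices.astype(np.int64), values, (m, k)))
  dense_input = tf.constant(rng.standard_normal((k, n)).astype(data_type))
  return sparse_input, dense_input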
Example #6

    def testExceptionThrowing(self):
        with self.session(), test_util.force_gpu():
            for features_dtype in [dtypes.float16, dtypes.float32]:
                for labels_dtype in [dtypes.int32, dtypes.int64]:
                    features = constant_op.constant(
                        [[0.3, 0.5], [0.2, 0.6]], dtype=features_dtype)
                    labels = constant_op.constant([1, 0], dtype=labels_dtype)
                    with self.assertRaisesRegex(
                            errors_impl.UnimplementedError,
                            "The GPU implementation of "
                            "SparseSoftmaxCrossEntropyWithLogits that would "
                            "have been executed is not deterministic. Note "
                            "that the Python API uses an alternative, "
                            "deterministic, GPU-accelerated path when "
                            "determinism is enabled."):
                        result = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
                            features=features, labels=labels)
                        self.evaluate(result)
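The error message distinguishes the raw gen_nn_ops kernel from the public Python API. A minimal sketch of the deterministic path the message refers to, assuming TF 2.8+ where tf.config.experimental.enable_op_determinism is available:

import tensorflow as tf

tf.config.experimental.enable_op_determinism()

# With determinism enabled, the public API takes a deterministic
# GPU-accelerated path instead of the nondeterministic fused kernel
# that the raw op above dispatches to.
features = tf.constant([[0.3, 0.5], [0.2, 0.6]], dtype=tf.float32)
labels = tf.constant([1, 0], dtype=tf.int32)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=labels, logits=features)
print(loss.numpy())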
Example #7
    def _disabledTestScatterOutOfRangeGpu(self):
        if test.is_gpu_available():
            return
        for op, _ in _TF_OPS_TO_NUMPY.items():
            params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
            updates = np.array([-3, -4, -5]).astype(np.float32)
            # With GPU, the code ignores indices that are out of range.
            # We don't test the implementation; we just check that nothing fails.
            with test_util.force_gpu():
                ref = variables.Variable(params)
                ref.initializer.run()

                # Indices all in range, no problem.
                indices = np.array([2, 0, 5])
                self.evaluate(op(ref, indices, updates))

                # Indices out of range should not fail.
                indices = np.array([-1, 0, 5])
                self.evaluate(op(ref, indices, updates))
                indices = np.array([2, 0, 6])
                self.evaluate(op(ref, indices, updates))
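_TF_OPS_TO_NUMPY is defined elsewhere in the test file; only its keys are used in the loop above. A hypothetical sketch of such a mapping (the pairings are illustrative assumptions, not the test file's actual table):

import numpy as np
from tensorflow.python.ops import state_ops

# Hypothetical: each TF scatter op keyed to the NumPy operation it
# should agree with on in-range indices.
_TF_OPS_TO_NUMPY = {
    state_ops.scatter_update: np.ndarray.__setitem__,
    state_ops.scatter_add: np.add.at,
    state_ops.scatter_sub: np.subtract.at,
}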
Example #9

  def _assert_reproducible(self, operation):
    with test_util.force_gpu():
      result_1 = operation()
      result_2 = operation()
    self.assertAllEqual(result_1, result_2)
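A hypothetical usage sketch: the helper expects a zero-argument callable, so the op under test is wrapped in a closure over fixed inputs and run twice for a bit-for-bit comparison on the GPU:

import tensorflow as tf

# Hypothetical call site: inputs are fixed once; the closure re-runs
# only the op whose determinism is being checked.
logits = tf.random.normal([64, 10], seed=123)
labels = tf.random.uniform([64], maxval=10, dtype=tf.int32, seed=456)
operation = lambda: tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=labels, logits=logits)

# Inside a test case:
#   self._assert_reproducible(operation)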