Example #1
 def testGain(self):
   shape = (10, 10)
   for dtype in [dtypes.float32, dtypes.float64]:
     init_default = init_ops_v2.Identity()
     init_custom = init_ops_v2.Identity(gain=0.9)
     with test_util.use_gpu():
       self.assertAllClose(self.evaluate(init_default(shape, dtype=dtype)),
                           np.eye(*shape))
     with test_util.use_gpu():
       self.assertAllClose(self.evaluate(init_custom(shape, dtype=dtype)),
                           np.eye(*shape) * 0.9)
Example #2
 def _compareScalar(self, func, x, y, dtype):
   with test_util.use_gpu():
     out = func(
         ops.convert_to_tensor(np.array([x]).astype(dtype)),
         ops.convert_to_tensor(np.array([y]).astype(dtype)))
     ret = self.evaluate(out)
   return ret[0]
Example #3
 def testNCHWToNHWC2D(self):
   x_val = [[7, 4], [9, 3], [4, 5], [5, 1]]
   x = constant_op.constant(x_val)
   y = nn_ops.data_format_vec_permute(x, src_format="NCHW", dst_format="NHWC")
   with test_util.use_gpu():
     y_val = self.evaluate(y)
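      # With a 2-D input, data_format_vec_permute reorders whole rows: the
      # [N, C, H, W] rows of x come back in [N, H, W, C] order.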
     self.assertAllEqual(y_val, [[7, 4], [4, 5], [5, 1], [9, 3]])
Example #4
  def Test(self):
    if not use_static_shape_ or a_np_.dtype in (np.int32, np.int64, np.float16):
      self.skipTest("Skipping infeasible gradient test.")

    # Transpose and possibly conjugate a_np_ and b_np_ according to the
    # attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
    # results in a valid matrix multiplication and produces the same result as
    # np.matrix(a_np_) * np.matrix(b_np_)
    effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
    effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)

    epsilon = np.finfo(a_np_.dtype).eps
    delta = epsilon**(1.0 / 3.0)
    tol = 20 * delta
    with self.session(), test_util.use_gpu():
      theoretical, numerical = gradient_checker_v2.compute_gradient(
          lambda x: math_ops.matmul(x, effective_b_np, **kwargs_),
          [effective_a_np],
          delta=delta)
      self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)

      theoretical, numerical = gradient_checker_v2.compute_gradient(
          lambda x: math_ops.matmul(effective_a_np, x, **kwargs_),
          [effective_b_np],
          delta=delta)
      self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
Example #5
 def _testGradientsSimple(self, dtype):
   # Test both positive and negative concat axis.
   # -2 and 1 correspond to the same axis for 3-dimensional tensors.
   for axis in [-2, 1]:
     with test_util.use_gpu():
       inp = []
       inp_tensors = []
       for x in [1, 2, 6]:
         shape = [10, x, 2]
         t = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
         if dtype.is_complex:
           t += -1j * t
         inp.append(t)
         inp_tensors.append(
             constant_op.constant(
                 t.flatten(),
                 shape=shape,
                 dtype=dtype))
       c = array_ops.concat(inp_tensors, axis)
       output_shape = [10, 9, 2]
       grad_inp = np.random.rand(*output_shape).astype(dtype.as_numpy_dtype)
       if dtype.is_complex:
         grad_inp += -1j * grad_inp
       grad_tensor = constant_op.constant(
           grad_inp.flatten(), shape=output_shape)
       grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
       concated_grad = array_ops.concat(grad, axis)
       result = self.evaluate(concated_grad)
   self.assertAllEqual(result, grad_inp)
Example #6
  def _RunAndVerifyGradientsRandom(self):
    # Random dims of rank 5
    input_shape = np.random.randint(1, 5, size=5)
    # Random number of tensors
    num_tensors = np.random.randint(12, 20)
    # Random dim to concat on
    concat_dim = np.random.randint(5)
    concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)
    with test_util.use_gpu():
      inp = []
      inp_tensors = []
      for x in concat_dim_sizes:
        shape = input_shape
        shape[concat_dim] = x
        t = np.random.rand(*shape).astype("f")
        inp.append(t)
        inp_tensors.append(
            constant_op.constant(t.flatten(), shape=shape,
                                 dtype=dtypes.float32))
      c = array_ops.concat(inp_tensors, concat_dim)
      output_shape = input_shape
      output_shape[concat_dim] = concat_dim_sizes.sum()
      grad_inp = np.random.rand(*output_shape).astype("f")
      grad_tensor = constant_op.constant(grad_inp.flatten(), shape=output_shape)
      grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
      concated_grad = array_ops.concat(grad, concat_dim)
      result = self.evaluate(concated_grad)

    self.assertAllEqual(result, grad_inp)
Example #7
 def testZeros(self):
   with test_util.use_gpu():
     for dtype in dtypes.uint8, dtypes.int16, dtypes.int32, dtypes.int64:
       zero = constant_op.constant(0, dtype=dtype)
       one = constant_op.constant(1, dtype=dtype)
       bads = [one // zero]
       if dtype in (dtypes.int32, dtypes.int64):
         bads.append(one % zero)
       for bad in bads:
         try:
           result = self.evaluate(bad)
         except errors_impl.OpError as e:
           # Ideally, we'd get a nice exception.  In theory, this should only
           # happen on CPU, but 32 bit integer GPU division is actually on
           # CPU due to a placer bug.
           # TODO(irving): Make stricter once the placer bug is fixed.
           self.assertIn('Integer division by zero', str(e))
         else:
           # On the GPU, integer division by zero produces all bits set.
           # But apparently on some GPUs "all bits set" for 64 bit division
           # means 32 bits set, so we allow 0xffffffff as well.  This isn't
           # very portable, so we may need to expand this list if other GPUs
           # do different things.
           self.assertTrue(test.is_gpu_available())
           self.assertIn(result, (-1, 0xff, 0xffffffff))
Example #8
 def testNHWCToNCHW(self):
   x_val = [7, 4, 9, 3]
   x = constant_op.constant(x_val)
   y = nn_ops.data_format_vec_permute(x)
   with test_util.use_gpu():
     y_val = self.evaluate(y)
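      # src_format and dst_format default to "NHWC" and "NCHW", so the vector
      # [N, H, W, C] = [7, 4, 9, 3] comes back as [N, C, H, W] = [7, 3, 4, 9].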
     self.assertAllEqual(y_val, [7, 3, 4, 9])
Example #9
 def testOneOpMultipleStepsIndependent(self):
   with test_util.use_gpu():
     sample_op1, _ = self._make_ops(10)
     # Consecutive runs shouldn't yield identical output.
     sample1a = self.evaluate(sample_op1)
     sample1b = self.evaluate(sample_op1)
     self.assertFalse(np.equal(sample1a, sample1b).all())
Example #10
 def testTwoOpsIndependent(self):
   with test_util.use_gpu():
     sample_op1, sample_op2 = self._make_ops(32)
     sample1, sample2 = self.evaluate([sample_op1, sample_op2])
     # We expect sample1 and sample2 to be independent.
     # 1 in 2^32 chance of this assertion failing.
     self.assertFalse(np.equal(sample1, sample2).all())
Example #11
  def _do_sampling(self, logits, num_samples, sampler):
    """Samples using the supplied sampler and inputs.

    Args:
      logits: Numpy ndarray of shape [batch_size, num_classes].
      num_samples: Int; number of samples to draw.
      sampler: A sampler function that takes (1) a [batch_size, num_classes]
        Tensor, (2) num_samples and returns a [batch_size, num_samples] Tensor.

    Returns:
      Frequencies from sampled classes; shape [batch_size, num_classes].
    """
    with test_util.use_gpu():
      random_seed.set_random_seed(1618)
      op = sampler(constant_op.constant(logits), num_samples)
      d = self.evaluate(op)

    batch_size, num_classes = logits.shape
    freqs_mat = []
    for i in range(batch_size):
      cnts = dict(collections.Counter(d[i, :]))

      # Requires drawn class labels be in range.
      self.assertLess(max(cnts.keys()), num_classes)
      self.assertGreaterEqual(min(cnts.keys()), 0)

      freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0)
               for k in range(num_classes)]
      freqs_mat.append(freqs)

    return freqs_mat
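
  # Illustrative usage sketch (not part of the original test): any callable
  # that maps a [batch_size, num_classes] logits Tensor and a sample count to
  # a [batch_size, num_samples] Tensor works as `sampler`;
  # random_ops.multinomial (used in the next example) already fits.
  def testDoSamplingUsageSketch(self):
    freqs = self._do_sampling(
        np.log([[0.1, 0.9]]), 1000, sampler=random_ops.multinomial)
    # One row of class frequencies per batch entry; each row sums to 1.
    self.assertAllClose(np.sum(freqs, axis=1), [1.0])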
Example #12
 def testNegativeMinLogits(self):
   random_seed.set_random_seed(78844)
   with test_util.use_gpu():
     logits = constant_op.constant([[np.finfo(np.float32).min] * 1023 + [0]])
     num_samples = 1000
     samples = self.evaluate(random_ops.multinomial(logits, num_samples))
     self.assertAllEqual([[1023] * num_samples], samples)
Example #13
  def _VerifyValues(self, image, ksizes, strides, padding, patches):
    """Tests input-output pairs for the ExtractVolumePatches op.

    Args:
      image: Input tensor with shape:
             [batch, in_planes, in_rows, in_cols, depth].
      ksizes: Patch size specified as: [ksize_planes, ksize_rows, ksize_cols].
      strides: Output strides, specified as:
               [stride_planes, stride_rows, stride_cols].
      padding: Padding type.
      patches: Expected output.

    Note:
      rates are not supported as of now.
    """
    ksizes = [1] + ksizes + [1]
    strides = [1] + strides + [1]

    with test_util.use_gpu():
      out_tensor = array_ops.extract_volume_patches(
          constant_op.constant(image),
          ksizes=ksizes,
          strides=strides,
          padding=padding,
          name="im2col_3d")
      self.assertAllClose(patches, self.evaluate(out_tensor))
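
  # Illustrative call sketch (not part of the original test): with 1x1x1
  # patches and unit strides under VALID padding, every patch is a single
  # voxel, so the expected output equals the input volume itself.
  def testSingleVoxelPatchesSketch(self):
    image = np.arange(1, 9, dtype=np.float32).reshape([1, 2, 2, 2, 1])
    self._VerifyValues(image, ksizes=[1, 1, 1], strides=[1, 1, 1],
                       padding="VALID", patches=image)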
Example #14
  def testGradientsLastDim(self):
    # Test both positive and negative concat axis.
    # -1 and 2 correspond to the same axis for 3-dimensional tensors.
    for axis in [-1, 2]:
      with test_util.use_gpu():
        inp = []
        inp_tensors = []
        for x in [1, 2, 6]:
          shape = [10, 2, x]
          t = np.random.rand(*shape).astype("f")
          inp.append(t)
          inp_tensors.append(
              constant_op.constant(
                  t.flatten(),
                  shape=shape,
                  dtype=dtypes.float32))
        c = array_ops.concat(inp_tensors, axis)
        output_shape = [10, 2, 9]
        grad_inp = np.random.rand(*output_shape).astype("f")
        grad_tensor = constant_op.constant(
            grad_inp.flatten(), shape=output_shape)
        grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
        concated_grad = array_ops.concat(grad, axis)
        result = self.evaluate(concated_grad)

    self.assertAllEqual(result, grad_inp)
Example #15
 def testHWNCToNHWC(self):
   x_val = [7, 4, 9, 3]
   x = constant_op.constant(x_val)
   y = nn_ops.data_format_vec_permute(x, src_format="HWNC", dst_format="NHWC")
   with test_util.use_gpu():
     y_val = self.evaluate(y)
     self.assertAllEqual(y_val, [9, 7, 4, 3])
Example #16
 def _verifyLogarithm(self, x, np_type):
   inp = x.astype(np_type)
   with test_util.use_gpu():
     # Verify that expm(logm(A)) == A.
     tf_ans = linalg_impl.matrix_exponential(
         gen_linalg_ops.matrix_logarithm(inp))
     out = self.evaluate(tf_ans)
     self.assertAllClose(inp, out, rtol=1e-4, atol=1e-3)
Example #17
 def _verifySquareRoot(self, matrix, np_type):
   matrix = matrix.astype(np_type)
   with test_util.use_gpu():
     # Verify that matmul(sqrtm(A), sqrtm(A)) = A
     sqrt = gen_linalg_ops.matrix_square_root(matrix)
     square = math_ops.matmul(sqrt, sqrt)
     self.assertShapeEqual(matrix, square)
     self.assertAllClose(matrix, square, rtol=1e-4, atol=1e-3)
Example #18
 def testGain(self):
   self.skipTest("Doesn't work without the graphs")
   init1 = init_ops_v2.Orthogonal(seed=1)
   init2 = init_ops_v2.Orthogonal(gain=3.14, seed=1)
   with test_util.use_gpu():
     t1 = self.evaluate(init1(shape=(10, 10)))
     t2 = self.evaluate(init2(shape=(10, 10)))
   self.assertAllClose(t1, t2 / 3.14)
Example #19
 def testTest(self):
   model = rnn_colorbot.RNNColorbot(
       rnn_cell_sizes=[256],
       label_dimension=LABEL_DIMENSION,
       keep_prob=1.0)
   dataset = random_dataset()
   with test_util.use_gpu():
     rnn_colorbot.test(model, dataset)
Example #20
 def testVerifyTensorAllFiniteSucceeds(self):
   x_shape = [5, 4]
   x = np.random.random_sample(x_shape).astype(np.float32)
   with test_util.use_gpu():
     t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
     t_verified = numerics.verify_tensor_all_finite(t,
                                                    "Input is not a number.")
     self.assertAllClose(x, self.evaluate(t_verified))
Example #21
 def testTwoTemporaryVariablesNoLeaks(self):
   with test_util.use_gpu():
     var1 = gen_state_ops.temporary_variable(
         [1, 2], dtypes.float32, var_name="var1")
     var2 = gen_state_ops.temporary_variable(
         [1, 2], dtypes.float32, var_name="var2")
     final = var1 + var2
     self.evaluate(final)
Example #22
 def testTemporaryVariable(self):
   with test_util.use_gpu():
     var = gen_state_ops.temporary_variable(
         [1, 2], dtypes.float32, var_name="foo")
     var = state_ops.assign(var, [[4.0, 5.0]])
     var = state_ops.assign_add(var, [[6.0, 7.0]])
     final = gen_state_ops.destroy_temporary_variable(var, var_name="foo")
     self.assertAllClose([[10.0, 12.0]], self.evaluate(final))
Example #23
 def testNHWCtoWHCN(self):
   x_val = [-4, -3, -2, -1, 0, 1, 2, 3]
   y_val_expected = [3, 1, 0, 2, 3, 1, 0, 2]
   x = constant_op.constant(x_val)
   y = nn_ops.data_format_dim_map(x, src_format="NHWC", dst_format="WHCN")
   with test_util.use_gpu():
     y_val = self.evaluate(y)
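      # data_format_dim_map translates dimension indices from src_format to
      # dst_format (negative indices wrap modulo the rank): N, H, W, C sit at
      # indices 0, 1, 2, 3 in NHWC and at 3, 1, 0, 2 in WHCN.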
     self.assertAllEqual(y_val, y_val_expected)
Example #24
 def testArbitraryASCII(self):
   x_val = [-4, -3, -2, -1, 0, 1, 2, 3]
   y_val_expected = [3, 2, 1, 0, 3, 2, 1, 0]
   x = constant_op.constant(x_val)
   y = nn_ops.data_format_dim_map(x, src_format="qwer", dst_format="rewq")
   with test_util.use_gpu():
     y_val = self.evaluate(y)
     self.assertAllEqual(y_val, y_val_expected)
Example #25
 def testNHWCtoNCHW(self):
   x_val = [1, -3, -2]
   y_val_expected = [2, 2, 3]
   x = constant_op.constant(x_val)
   y = nn_ops.data_format_dim_map(x, src_format="NHWC", dst_format="NCHW")
   with test_util.use_gpu():
     y_val = self.evaluate(y)
     self.assertAllEqual(y_val, y_val_expected)
Example #26
 def testDestroyTemporaryVariableTwice(self):
   with test_util.use_gpu():
     var = gen_state_ops.temporary_variable([1, 2], dtypes.float32)
     val1 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
     val2 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
     final = val1 + val2
     with self.assertRaises(errors.NotFoundError):
       self.evaluate(final)
Example #27
 def testTrainOneEpoch(self):
   model = rnn_colorbot.RNNColorbot(
       rnn_cell_sizes=[256, 128, 64],
       label_dimension=LABEL_DIMENSION,
       keep_prob=1.0)
   optimizer = tf.train.AdamOptimizer(learning_rate=.01)
   dataset = random_dataset()
   with test_util.use_gpu():
     rnn_colorbot.train_one_epoch(model, optimizer, dataset)
Example #28
 def testBasic(self):
   with test_util.use_gpu():
     cdim = constant_op.constant(1, dtypes.int32)
     s0 = constant_op.constant([2, 3, 5], dtypes.int32)
     s1 = constant_op.constant([2, 7, 5], dtypes.int32)
     s2 = constant_op.constant([2, 20, 5], dtypes.int32)
     off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
     ans = self.evaluate(off)
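      # concat_offset returns each input's start offset in the concatenated
      # result: along axis 1 the offsets are the running sums 0, 3, and
      # 3 + 7 = 10, while every other axis stays 0.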
     self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
Example #29
 def _compareGpu(self, x, np_func, tf_func):
   np_ans = np_func(x)
   with test_util.use_gpu():
     result = tf_func(ops.convert_to_tensor(x))
     tf_gpu = self.evaluate(result)
   if x.dtype == np.float16:
     self.assertAllClose(np_ans, tf_gpu, rtol=1e-3, atol=1e-3)
   else:
     self.assertAllClose(np_ans, tf_gpu)
Example #30
 def testOneListOneDimensional(self):
   with test_util.use_gpu():
     indices = [constant_op.constant([1, 6, 2, 3, 5, 0, 4, 7])]
     data = [constant_op.constant([10, 60, 20, 30, 50, 0, 40, 70])]
     stitched_t = self.stitch_op(indices, data)
     stitched_val = self.evaluate(stitched_t)
     self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
     # Dimension 0 is max(flatten(indices))+1.
     self.assertEqual([8], stitched_t.get_shape().as_list())
Example #31
  def testNumericEquivalenceForAmsgrad(self):
    if context.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in eager mode')
    np.random.seed(1331)
    with test_util.use_gpu():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = np_utils.to_categorical(y)

      num_hidden = 5
      model_k_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2.set_weights(model_k_v1.get_weights())

      opt_k_v1 = optimizers.Adam(amsgrad=True)
      opt_k_v2 = adam.Adam(amsgrad=True)

      model_k_v1.compile(
          opt_k_v1,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      model_k_v2.compile(
          opt_k_v2,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())

      hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)

      self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
      self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
      self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
Example #32
 def testSimpleTwoDimensional(self):
     with test_util.use_gpu():
         indices = [
             constant_op.constant([0, 4, 7]),
             constant_op.constant([1, 6]),
             constant_op.constant([2, 3, 5])
         ]
         data = [
             constant_op.constant([[0, 1], [40, 41], [70, 71]]),
             constant_op.constant([[10, 11], [60, 61]]),
             constant_op.constant([[20, 21], [30, 31], [50, 51]])
         ]
         stitched_t = self.stitch_op(indices, data)
         stitched_val = self.evaluate(stitched_t)
         self.assertAllEqual([[0, 1], [10, 11], [20, 21], [30, 31],
                              [40, 41], [50, 51], [60, 61], [70, 71]],
                             stitched_val)
         # Dimension 0 is max(flatten(indices))+1.
         self.assertEqual([8, 2], stitched_t.get_shape().as_list())
Example #33
 def test_random_height_longer_numeric(self):
   for dtype in (np.int64, np.float32):
     with tf_test_util.use_gpu():
       input_image = np.reshape(np.arange(0, 6), (2, 3, 1)).astype(dtype)
       layer = image_preprocessing.RandomHeight(factor=(1., 1.))
        # RandomHeight outputs float32 unless `interpolation` is
        # `ResizeMethod.NEAREST_NEIGHBOR`; cast the output to the desired dtype.
       output_image = math_ops.cast(layer(np.expand_dims(input_image, axis=0)),
                                    dtype=dtype)
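        # factor=(1., 1.) always stretches the height by a factor of two, so
        # the two input rows are bilinearly resized into the four rows below.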
       # pyformat: disable
       expected_output = np.asarray([
           [0, 1, 2],
           [0.75, 1.75, 2.75],
           [2.25, 3.25, 4.25],
           [3, 4, 5]
       ]).astype(dtype)
       # pyformat: enable
       expected_output = np.reshape(expected_output, (1, 4, 3, 1))
       self.assertAllEqual(expected_output, output_image)
Example #34
 def testRandom1D(self):
     with test_util.use_gpu():
         d0 = 100
         x = array_ops.zeros([d0])
         y = np.zeros([d0])
         for _ in range(20):
             idx = np.random.choice(d0, d0 // 10, replace=False)
             val = np.random.randint(10, size=(d0 // 10))
             op = np.random.randint(3)
             if op == 0:
                 x = inplace_ops.inplace_update(x, idx, val)
                 y[idx] = val
             elif op == 1:
                 x = inplace_ops.inplace_add(x, idx, val)
                 y[idx] += val
             elif op == 2:
                 x = inplace_ops.inplace_sub(x, idx, val)
                 y[idx] -= val
             self.assertAllClose(x, y)
Example #35
  def testBasic(self):
    for dtype in _DATA_TYPES:
      with test_util.use_gpu():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        loss = lambda: 5 * var0 + 3 * var1  # pylint: disable=cell-var-from-loop
        sgd = gradient_descent.SGD(3.0)

        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Run 1 step of sgd through optimizer
        opt_op = sgd.minimize(loss, var_list=[var0, var1])
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(opt_op)
        # Validate updated params
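        # Each step applies lr * grad: var0 moves by 3.0 * 5 = 15 per entry
        # and var1 by 3.0 * 3 = 9.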
        self.assertAllClose([-14., -13.], self.evaluate(var0))
        self.assertAllClose([-6., -5.], self.evaluate(var1))
Example #36
 def testInvalidMatrix(self):
     # LU factorization gives an error when the input is singular.
     # Note: A singular matrix may return without error but it won't be a valid
     # factorization.
     with test_util.use_gpu():
         for dtype in self.float_types:
             with self.assertRaises(errors.InvalidArgumentError):
                 self.evaluate(
                     linalg_ops.lu(
                         np.array(
                             [[1., 2., 3.], [2., 4., 6.], [2., 3., 4.]],
                             dtype=dtype)))
             with self.assertRaises(errors.InvalidArgumentError):
                 self.evaluate(
                     linalg_ops.lu(
                         np.array(
                             [[[1., 2., 3.], [2., 4., 6.], [1., 2., 3.]],
                              [[1., 2., 3.], [3., 4., 5.], [5., 6., 7.]]],
                             dtype=dtype)))
Example #37
 def testBasicUpdate(self):
     for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
         with test_util.use_gpu():
             x = array_ops.ones([7, 3], dtype)
             y = np.ones([7, 3], dtype.as_numpy_dtype)
             self.assertAllClose(x, y)
             x = inplace_ops.inplace_update(x, [3],
                                            array_ops.ones([1, 3], dtype))
             y[3, :] = 1
             self.assertAllClose(x, y)
             x = inplace_ops.inplace_update(
                 x, [-1],
                 array_ops.ones([1, 3], dtype) * 2)
             y[-1, :] = 2
             self.assertAllClose(x, y)
             x = inplace_ops.inplace_update(x, 5,
                                            array_ops.ones([3], dtype) * 7)
             y[5, :] = 7
             self.assertAllClose(x, y)
Example #38
 def test_random_translation_left_numeric_constant(self):
   for dtype in (np.int64, np.float32):
     with tf_test_util.use_gpu():
       input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
        # Shifting by -.2 * 5 = -1 pixel, i.e. one pixel to the left.
       layer = image_preprocessing.RandomTranslation(
           height_factor=0., width_factor=(-.2, -.2), fill_mode='constant')
       output_image = layer(input_image)
       # pyformat: disable
       expected_output = np.asarray([
           [1, 2, 3, 4, 0],
           [6, 7, 8, 9, 0],
           [11, 12, 13, 14, 0],
           [16, 17, 18, 19, 0],
           [21, 22, 23, 24, 0]
       ]).astype(dtype)
       # pyformat: enable
       expected_output = np.reshape(expected_output, (1, 5, 5, 1))
       self.assertAllEqual(expected_output, output_image)
Example #39
 def test_random_translation_down_numeric_reflect(self):
   for dtype in (np.int64, np.float32):
     with tf_test_util.use_gpu():
       input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
       # Shifting by .2 * 5 = 1 pixel.
       layer = image_preprocessing.RandomTranslation(
           height_factor=(.2, .2), width_factor=0.)
       output_image = layer(input_image)
       # pyformat: disable
       expected_output = np.asarray([
           [0, 1, 2, 3, 4],
           [0, 1, 2, 3, 4],
           [5, 6, 7, 8, 9],
           [10, 11, 12, 13, 14],
           [15, 16, 17, 18, 19]
       ]).astype(dtype)
       # pyformat: enable
       expected_output = np.reshape(expected_output, (1, 5, 5, 1))
       self.assertAllEqual(expected_output, output_image)
Example #40
 def testSqrt(self):
   for dtype in [np.float16, np.float32, np.float64]:
     fi = np.finfo(dtype)
     for size in [1, 3, 4, 7, 8, 63, 64, 65]:
       # For float32 Eigen uses Carmack's fast vectorized sqrt algorithm.
       # It is not accurate for very large arguments, so we test for
       # fi.max/100 instead of fi.max here.
       for value in [fi.min, -2, -1, 0, fi.tiny, 1, 2, 1000, fi.max / 100]:
         with self.subTest(dtype=dtype, size=size, value=value):
           x = np.full((size,), value, dtype=dtype)
           np_y = np.sqrt(x)
           np_nan = np.isnan(np_y)
           with test_util.use_gpu():
             tf_y = math_ops.sqrt(x)
             tf_nan = math_ops.is_nan(tf_y)
             if value < 0:
               self.assertAllEqual(np_nan, self.evaluate(tf_nan))
             else:
               self.assertAllCloseAccordingToType(np_y, self.evaluate(tf_y))
Example #41
 def _test_match(self, cases):
     # Stateless ops should be the same as stateful ops on the first call
     # after seed scrambling.
     cases = tuple(cases)
     key = 0x3ec8f720, 0x02461e29
     for seed in (7, 17), (11, 5), (2, 3):
         preseed = invert_philox(key,
                                 (seed[0], 0, seed[1], 0)).astype(np.uint64)
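          # Pack the four inverted 32-bit words into the two 64-bit values
          # that the stateless ops expect as their seed.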
         preseed = preseed[::2] | preseed[1::2] << 32
         random_seed.set_random_seed(seed[0])
         with test_util.use_gpu():
             for stateless_op, stateful_op in cases:
                 if context.executing_eagerly():
                      # Call set_random_seed to clear the kernel cache and
                      # prevent kernel reuse for the stateful op.
                     random_seed.set_random_seed(seed[0])
                 stateful = stateful_op(seed=seed[1])
                 pure = stateless_op(seed=preseed)
                 self.assertAllEqual(stateful, pure)
Example #42
  def testWeights(self):
    with test_util.use_gpu():
      opt1 = adam.Adam(learning_rate=1.0)
      var1 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
      loss1 = lambda: 3 * var1
      opt_op_1 = opt1.minimize(loss1, [var1])
      self.evaluate(variables.global_variables_initializer())
      config = opt1.get_config()
      opt2 = adam.Adam.from_config(config)
      var2 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
      loss2 = lambda: 3 * var2
      opt_op_2 = opt2.minimize(loss2, [var2])
      weights = opt1.get_weights()

      # Assert that set_weights updates both variables to the same value.
      self.evaluate(variables.global_variables_initializer())
      opt2.set_weights(weights)
      self.evaluate([opt_op_1, opt_op_2])
      self.assertAllClose(self.evaluate(var1), self.evaluate(var2))
      self.assertEqual(1, self.evaluate(opt1.iterations))
      self.assertEqual(1, self.evaluate(opt2.iterations))

      var3 = variables.Variable([1.0, 2.0, 3.0], dtype=dtypes.float32)
      var4 = variables.Variable([4.0, 5.0, 6.0], dtype=dtypes.float32)
      loss3 = lambda: 3 * var3 + 5 * var4
      opt_op_3 = opt1.minimize(loss3, [var3, var4])

      # Assert that set_weights raises a ValueError because the weight list
      # does not match.
      self.evaluate(variables.global_variables_initializer())
      weights = opt1.get_weights()
      with self.assertRaisesRegex(ValueError, 'but the optimizer was'):
        opt2.set_weights(weights)

      # Assert that set_weights updates the variables to the same value.
      var5 = variables.Variable([1.0, 2.0, 3.0], dtype=dtypes.float32)
      var6 = variables.Variable([4.0, 5.0, 6.0], dtype=dtypes.float32)
      loss4 = lambda: 3 * var5 + 5 * var6
      opt_op_4 = opt2.minimize(loss4, [var5, var6])
      self.evaluate(variables.global_variables_initializer())
      opt2.set_weights(weights)
      self.evaluate([opt_op_3, opt_op_4])
      self.assertAllClose(
          self.evaluate([var3, var4]), self.evaluate([var5, var6]))
Example #43
 def test_random_zoom_out_numeric_preserve_aspect_ratio(self):
   for dtype in (np.int64, np.float32):
     with tf_test_util.use_gpu():
       input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
       layer = image_preprocessing.RandomZoom((.5, .5),
                                              fill_mode='constant',
                                              interpolation='nearest')
       output_image = layer(np.expand_dims(input_image, axis=0))
       # pyformat: disable
       expected_output = np.asarray([
           [0, 0, 0, 0, 0],
           [0, 6, 7, 9, 0],
           [0, 11, 12, 14, 0],
           [0, 21, 22, 24, 0],
           [0, 0, 0, 0, 0]
       ]).astype(dtype)
       # pyformat: enable
       expected_output = np.reshape(expected_output, (1, 5, 5, 1))
       self.assertAllEqual(expected_output, output_image)
Example #44
 def testZeroSize(self):
   # Verify that concat doesn't crash and burn for zero size inputs
   np.random.seed(7)
   with test_util.use_gpu():
     for shape0 in (), (2,):
       axis = len(shape0)
       for shape1 in (), (3,):
         for n0 in 0, 1, 2:
           for n1 in 0, 1, 2:
             x0 = np.random.randn(*(shape0 + (n0,) + shape1))
             x1 = np.random.randn(*(shape0 + (n1,) + shape1))
             correct = np.concatenate([x0, x1], axis=axis)
             # TODO(irving): Make tf.concat handle map, then drop list().
             xs = list(map(constant_op.constant, [x0, x1]))
             c = array_ops.concat(xs, axis)
             self.assertAllEqual(self.evaluate(c), correct)
             # Check gradients
             dc = np.random.randn(*c.get_shape().as_list())
             dxs = self.evaluate(gradients_impl.gradients(c, xs, dc))
             self.assertAllEqual(dc, np.concatenate(dxs, axis=axis))
Example #45
 def _run_test(self, expected_height, expected_width):
   np.random.seed(1337)
   num_samples = 2
   orig_height = 5
   orig_width = 8
   channels = 3
   kwargs = {'height': expected_height, 'width': expected_width}
   input_images = np.random.random(
       (num_samples, orig_height, orig_width, channels)).astype(np.float32)
   expected_output = get_numpy_center_crop(
       input_images, expected_height, expected_width)
   with tf_test_util.use_gpu():
     testing_utils.layer_test(
         image_preprocessing.CenterCrop,
         kwargs=kwargs,
         input_shape=(num_samples, orig_height, orig_width, channels),
         input_data=input_images,
         expected_output=expected_output,
         expected_output_shape=(None, expected_height, expected_width,
                                channels))
Example #46
 def testShapesValues(self):
     for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:
         init = init_ops_v2.Orthogonal()
         tol = 1e-5
         with test_util.use_gpu():
             # Check the shape
             t = self.evaluate(init(shape))
             self.assertAllEqual(shape, t.shape)
             # Check orthogonality by computing the inner product
             t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1]))
             if t.shape[0] > t.shape[1]:
                 self.assertAllClose(np.dot(t.T, t),
                                     np.eye(t.shape[1]),
                                     rtol=tol,
                                     atol=tol)
             else:
                 self.assertAllClose(np.dot(t, t.T),
                                     np.eye(t.shape[0]),
                                     rtol=tol,
                                     atol=tol)
Example #47
  def testShapesValues(self):

    if test.is_built_with_rocm():
      self.skipTest("Disable subtest on ROCm due to missing QR op support")

    for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:
      init = init_ops_v2.Orthogonal()
      tol = 1e-5
      with test_util.use_gpu():
        # Check the shape
        t = self.evaluate(init(shape))
        self.assertAllEqual(shape, t.shape)
        # Check orthogonality by computing the inner product
        t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1]))
        if t.shape[0] > t.shape[1]:
          self.assertAllClose(
              np.dot(t.T, t), np.eye(t.shape[1]), rtol=tol, atol=tol)
        else:
          self.assertAllClose(
              np.dot(t, t.T), np.eye(t.shape[0]), rtol=tol, atol=tol)
Example #48
    def _checkGrad(self, x, block_size, data_format):
        # NCHW is only implemented on GPU.
        if data_format == "NCHW" and not test.is_gpu_available():
            return

        assert 4 == x.ndim

        def func(x):
            return array_ops.space_to_depth(x,
                                            block_size,
                                            data_format=data_format)

        with test_util.use_gpu():
            with self.cached_session():
                theoretical, numerical = gradient_checker_v2.compute_gradient(
                    func, [ops.convert_to_tensor(x)])
                self.assertAllClose(theoretical,
                                    numerical,
                                    rtol=1e-2,
                                    atol=1e-2)
Example #49
    def test_cudnn_rnn_basics(self):
        if not test.is_gpu_available(cuda_only=True):
            self.skipTest('No CUDA GPU available')

        with test_util.use_gpu():
            input_size = 10
            timesteps = 6
            units = 2
            num_samples = 32
            for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
                for return_sequences in [True, False]:
                    with keras.utils.CustomObjectScope({
                            'keras.layers.CuDNNGRU':
                            keras.layers.CuDNNGRU,
                            'keras.layers.CuDNNLSTM':
                            keras.layers.CuDNNLSTM
                    }):
                        testing_utils.layer_test(layer_class,
                                                 kwargs={
                                                     'units':
                                                     units,
                                                     'return_sequences':
                                                     return_sequences
                                                 },
                                                 input_shape=(num_samples,
                                                              timesteps,
                                                              input_size))
                for go_backwards in [True, False]:
                    with keras.utils.CustomObjectScope({
                            'keras.layers.CuDNNGRU':
                            keras.layers.CuDNNGRU,
                            'keras.layers.CuDNNLSTM':
                            keras.layers.CuDNNLSTM
                    }):
                        testing_utils.layer_test(
                            layer_class,
                            kwargs={
                                'units': units,
                                'go_backwards': go_backwards
                            },
                            input_shape=(num_samples, timesteps, input_size))
Example #50
  def testGradientsAsVariables(self):
    self.skipTest('broken test to be fixed')
    for i, dtype in enumerate([
        dtypes.half, dtypes.float32, dtypes.float64, dtypes.complex64,
        dtypes.complex128
    ]):
      with test_util.use_gpu():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        loss = lambda: 5 * var0 + 3 * var1  # pylint: disable=cell-var-from-loop

        sgd = gradient_descent.SGD(3.0)
        grads_and_vars = sgd._compute_gradients(loss, [var0, var1])
        # Convert gradients to tf.Variables
        converted_grads = [
            resource_variable_ops.ResourceVariable(
                array_ops.zeros([2], dtype), name='c_%d_%d' % (i, j))
            for j, gv in enumerate(grads_and_vars)
        ]
        convert_ops = [
            state_ops.assign(converted_grads[j], gv[0])
            for j, gv in enumerate(grads_and_vars)
        ]

        # Run convert_ops to materialize the converted gradients.
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(convert_ops)
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))

        # Run 1 step of sgd through optimizer
        converted_grads_and_vars = list(zip(converted_grads, [var0, var1]))
        opt_op = sgd.apply_gradients(converted_grads_and_vars)
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(convert_ops)
        self.evaluate(opt_op)

        # Validate updated params
        self.assertAllClose([-14., -13.], self.evaluate(var0))
        self.assertAllClose([-6., -5.], self.evaluate(var1))
Example #51
    def testGrad(self):
        np.random.seed(42)
        for num_inputs in range(1, 10):
            with test_util.use_gpu():
                input_vars = [
                    variables.Variable(10.0 * np.random.random())
                    for _ in range(0, num_inputs)
                ]
                self.evaluate(variables.global_variables_initializer())
                if context.executing_eagerly():
                    with backprop.GradientTape() as tape:
                        tape.watch(input_vars)
                        addn = math_ops.add_n(input_vars)
                        add_n_grad = tape.gradient(addn, input_vars)
                else:
                    addn = math_ops.add_n(input_vars)
                    add_n_grad = gradients.gradients(addn, input_vars)

                self.assertAllEqual(
                    np.repeat(1.0, num_inputs),  # d/dx (x + y + ...) = 1
                    [self.evaluate(g) for g in add_n_grad])
Example #52
    def test_trainability(self):
        if not test.is_gpu_available(cuda_only=True):
            self.skipTest('No CUDA GPU available')

        with test_util.use_gpu():
            input_size = 10
            units = 2
            for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
                layer = layer_class(units)
                layer.build((None, None, input_size))
                self.assertEqual(len(layer.weights), 3)
                self.assertEqual(len(layer.trainable_weights), 3)
                self.assertEqual(len(layer.non_trainable_weights), 0)
                layer.trainable = False
                self.assertEqual(len(layer.weights), 3)
                self.assertEqual(len(layer.non_trainable_weights), 3)
                self.assertEqual(len(layer.trainable_weights), 0)
                layer.trainable = True
                self.assertEqual(len(layer.weights), 3)
                self.assertEqual(len(layer.trainable_weights), 3)
                self.assertEqual(len(layer.non_trainable_weights), 0)
Example #53
  def testContains(self):
    with test_util.use_gpu():
      shape = (3, 5, 7)
      target = (2, 3, 4)
      value = np.random.randint(1000000, size=shape)
      iterations = 10
      value_set = set(
          tuple(value[i:i + 2, j:j + 3, k:k + 4].ravel())  # pylint: disable=g-complex-comprehension
          for i in range(2) for j in range(3) for k in range(4))
      test_seeds = [
          tuple(map(lambda x, i=i: x + 1 * i, t))
          for (i, t) in enumerate((1, 2) for _ in range(iterations))
      ]

      # Check that the result is valid by making sure that it is one of all
      # possible values for randomly cropping `value` with `target` shape.
      for seed in test_seeds:
        crop = random_ops.stateless_random_crop(value, size=target, seed=seed)
        y = self.evaluate(crop)
        self.assertAllEqual(y.shape, target)
        self.assertIn(tuple(y.ravel()), value_set)
Example #54
 def test_training_with_mock(self):
   if test.is_built_with_rocm():
     # TODO(rocm):
     # re-enable this test once ROCm adds support for
     # the StatefulUniformFullInt Op (on the GPU)
     self.skipTest('Feature not supported on ROCm')
   np.random.seed(1337)
   height, width = 3, 4
   height_offset = np.random.randint(low=0, high=3)
   width_offset = np.random.randint(low=0, high=5)
   mock_offset = [0, height_offset, width_offset, 0]
   with test.mock.patch.object(
       stateless_random_ops, 'stateless_random_uniform',
       return_value=mock_offset):
     with tf_test_util.use_gpu():
       layer = image_preprocessing.RandomCrop(height, width)
       inp = np.random.random((12, 5, 8, 3))
       actual_output = layer(inp, training=1)
       expected_output = inp[:, height_offset:(height_offset + height),
                             width_offset:(width_offset + width), :]
       self.assertAllClose(expected_output, actual_output)
Example #55
    def testNoEmptyRowsAndUnordered(self):
        with test_util.use_gpu():
            sp_input = sparse_tensor.SparseTensor(indices=np.array([[1, 2],
                                                                    [1, 3],
                                                                    [0, 1],
                                                                    [0, 3]]),
                                                  values=np.array([1, 3, 2,
                                                                   4]),
                                                  dense_shape=np.array([2, 5]))
            sp_output, empty_row_indicator = (
                sparse_ops.sparse_fill_empty_rows(sp_input, -1))

            output, empty_row_indicator_out = self.evaluate(
                [sp_output, empty_row_indicator])

            self.assertAllEqual(output.indices,
                                [[0, 1], [0, 3], [1, 2], [1, 3]])
            self.assertAllEqual(output.values, [2, 4, 1, 3])
            self.assertAllEqual(output.dense_shape, [2, 5])
            self.assertAllEqual(empty_row_indicator_out,
                                np.zeros(2).astype(np.bool_))
Example #56
    def testFreezing(self):
        with test_util.use_gpu():
            # Save an object-based checkpoint using a frozen saver
            directory = self.get_temp_dir()
            prefix = os.path.join(directory, "ckpt")
            v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
            checkpoint = trackable_utils.Checkpoint(v=v)
            self.evaluate(v.assign(3))
            # Create the save counter so assert_consumed doesn't complain about it not
            # existing in the checkpoint on restore.
            self.evaluate(checkpoint.save_counter.assign(12))
            saver = trackable_utils.frozen_saver(checkpoint)
            with ops.device("cpu:0"):
                prefix_tensor = constant_op.constant(prefix)
            self.evaluate(saver.save(prefix_tensor))
            self.evaluate(v.assign(10))
            # Use the frozen saver to restore the same object graph
            self.evaluate(saver.restore(prefix_tensor))
            self.assertEqual(3, self.evaluate(v))

            # Restore using another frozen saver on an identical object graph
            del v, checkpoint, saver
            v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
            checkpoint = trackable_utils.Checkpoint(v=v)
            saver = trackable_utils.frozen_saver(checkpoint)
            self.evaluate(saver.restore(prefix_tensor))
            self.assertEqual(3, self.evaluate(v))

            # Restore as an object-based checkpoint
            del v, checkpoint, saver
            checkpoint = trackable_utils.Checkpoint()
            status = checkpoint.restore(prefix)
            v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
            if context.executing_eagerly():
                self.assertEqual(12, self.evaluate(checkpoint.save_counter))
                self.assertEqual(0, self.evaluate(v))
            checkpoint.v = v
            status.assert_consumed().run_restore_ops()
            self.assertEqual(3, self.evaluate(v))
            self.assertEqual(12, self.evaluate(checkpoint.save_counter))
Example #57
    def testGradientsFirstDim(self):
        with test_util.use_gpu():
            inp = []
            inp_tensors = []
            for x in [1, 2, 6]:
                shape = [x, 10, 2]
                t = np.random.rand(*shape).astype("f")
                inp.append(t)
                inp_tensors.append(
                    constant_op.constant(t.flatten(),
                                         shape=shape,
                                         dtype=dtypes.float32))
            c = array_ops.concat(inp_tensors, 0)
            output_shape = [9, 10, 2]
            grad_inp = np.random.rand(*output_shape).astype("f")
            grad_tensor = constant_op.constant(grad_inp.flatten(),
                                               shape=output_shape)
            grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
            concated_grad = array_ops.concat(grad, 0)
            result = self.evaluate(concated_grad)

        self.assertAllEqual(result, grad_inp)
Example #58
  def testConstraint(self):
    constraint_01 = lambda x: clip_ops.clip_by_value(x, -0.1, 0.)
    constraint_0 = lambda x: clip_ops.clip_by_value(x, 0., 1.)
    with test_util.use_gpu():
      var0 = variables.Variable([1.0, 2.0],
                                constraint=constraint_01)
      var1 = variables.Variable([3.0, 4.0],
                                constraint=constraint_0)
      loss = lambda: 5 * var0 + 3 * var1
      sgd = gradient_descent.SGD(3.0)

      self.evaluate(variables.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      # Run 1 step of sgd through optimizer
      opt_op = sgd.minimize(loss, var_list=[var0, var1])
      self.evaluate(variables.global_variables_initializer())
      self.evaluate(opt_op)
      # Validate updated params
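      # Without constraints the step would give [-14., -13.] and [-6., -5.];
      # the constraints clip var0 into [-0.1, 0.] and var1 into [0., 1.].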
      self.assertAllClose([-0.1, -0.1], self.evaluate(var0))
      self.assertAllClose([0., 0.], self.evaluate(var1))
Example #59
  def testPrecomputedGradient(self):
    for dtype in _DATA_TYPES:
      with test_util.use_gpu():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        loss = lambda: 5 * var0 + 3 * var1  # pylint: disable=cell-var-from-loop
        grad_loss = constant_op.constant([42, -42], dtype=dtype)
        sgd = gradient_descent.SGD(3.0)

        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Run 1 step of sgd through optimizer
        opt_op = sgd.minimize(loss, var_list=[var0, var1], grad_loss=grad_loss)
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(opt_op)
        # Validate updated params
        self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)],
                            self.evaluate(var0))
        self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
                            self.evaluate(var1))
Example #60
  def testString(self):
    # Numpy does not support padding strings so we compare padding manually.
    x = ops.convert_to_tensor([["Hello", "World"],
                               ["Goodnight", "Moon"]])

    constant = array_ops.pad(x, [[1, 0], [0, 1]], mode="CONSTANT",
                             constant_values="PAD")
    reflect = array_ops.pad(x, [[1, 0], [0, 1]], mode="REFLECT",
                            constant_values="PAD")
    symmetric = array_ops.pad(x, [[1, 0], [0, 1]], mode="SYMMETRIC",
                              constant_values="PAD")
    with test_util.use_gpu():
      self.assertAllEqual(
          [[b"PAD", b"PAD", b"PAD"], [b"Hello", b"World", b"PAD"],
           [b"Goodnight", b"Moon", b"PAD"]], self.evaluate(constant))
      self.assertAllEqual([[b"Goodnight", b"Moon", b"Goodnight"],
                           [b"Hello", b"World", b"Hello"],
                           [b"Goodnight", b"Moon", b"Goodnight"]],
                          self.evaluate(reflect))
      self.assertAllEqual(
          [[b"Hello", b"World", b"World"], [b"Hello", b"World", b"World"],
           [b"Goodnight", b"Moon", b"Moon"]], self.evaluate(symmetric))