Code Example #1
    def testLogSpace(self):
        array_transforms = [
            lambda x: x,  # Identity,
            tf.convert_to_tensor,
            np.array,
            lambda x: np.array(x, dtype=np.float32),
            lambda x: np.array(x, dtype=np.float64),
            array_ops.array,
            lambda x: array_ops.array(x, dtype=np.float32),
            lambda x: array_ops.array(x, dtype=np.float64)
        ]

        def run_test(start, stop, **kwargs):
            for fn1 in array_transforms:
                for fn2 in array_transforms:
                    arg1 = fn1(start)
                    arg2 = fn2(stop)
                    self.match(math_ops.logspace(arg1, arg2, **kwargs),
                               np.logspace(arg1, arg2, **kwargs),
                               msg='logspace({}, {})'.format(arg1, arg2),
                               almost=True)

        run_test(0, 5)
        run_test(0, 5, num=10)
        run_test(0, 5, endpoint=False)
        run_test(0, 5, base=2.0)
        run_test(0, -5)
        run_test(0, -5, num=10)
        run_test(0, -5, endpoint=False)
        run_test(0, -5, base=2.0)
Code Example #2
    def _testReduce(self, math_fun, np_fun, name):
        axis_transforms = [
            lambda x: x,  # Identity,
            tf.convert_to_tensor,
            np.array,
            array_ops.array,
            lambda x: array_ops.array(x, dtype=np.float32),
            lambda x: array_ops.array(x, dtype=np.float64),
        ]

        def run_test(a, **kwargs):
            axis = kwargs.pop('axis', None)
            for fn1 in self.array_transforms:
                for fn2 in axis_transforms:
                    arg1 = fn1(a)
                    axis_arg = fn2(axis) if axis is not None else None
                    self.match(math_fun(arg1, axis=axis_arg, **kwargs),
                               np_fun(arg1, axis=axis, **kwargs),
                               msg='{}({}, axis={}, keepdims={})'.format(
                                   name, arg1, axis, kwargs.get('keepdims')))

        run_test(5)
        run_test([2, 3])
        run_test([[2, -3], [-6, 7]])
        run_test([[2, -3], [-6, 7]], axis=0)
        run_test([[2, -3], [-6, 7]], axis=0, keepdims=True)
        run_test([[2, -3], [-6, 7]], axis=1)
        run_test([[2, -3], [-6, 7]], axis=1, keepdims=True)
        run_test([[2, -3], [-6, 7]], axis=(0, 1))
        run_test([[2, -3], [-6, 7]], axis=(1, 0))
Code Example #3
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=float):
    if dtype:
        dtype = utils.result_type(dtype)
    start = array_ops.array(start, dtype=dtype)
    stop = array_ops.array(stop, dtype=dtype)
    if num == 0:
        return empty(dtype)
    if num < 0:
        raise ValueError(
            'Number of samples {} must be non-negative.'.format(num))
    step = np.nan
    if endpoint:
        result = tf.linspace(start.data, stop.data, num)
        if num > 1:
            step = (stop - start) / (num - 1)
    else:
        # tf.linspace does not support endpoint=False so we manually handle it
        # here.
        if num > 1:
            step = (stop - start) / num
            result = tf.linspace(start.data, (stop - step).data, num)
        else:
            result = tf.linspace(start.data, stop.data, num)
    if dtype:
        result = tf.cast(result, dtype)
    if retstep:
        return arrays.tensor_to_ndarray(result), step
    else:
        return arrays.tensor_to_ndarray(result)
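For reference, the `endpoint` and `retstep` semantics this wrapper mirrors are those of `np.linspace`; a quick plain-NumPy illustration (not the TF-backed version above):

import numpy as np

# endpoint=True (default): `stop` is included, step = (stop - start) / (num - 1).
values, step = np.linspace(0., 5., num=6, retstep=True)
# values -> [0. 1. 2. 3. 4. 5.], step -> 1.0

# endpoint=False: `stop` is excluded, step = (stop - start) / num.
values, step = np.linspace(0., 5., num=5, endpoint=False, retstep=True)
# values -> [0. 1. 2. 3. 4.], step -> 1.0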
Code Example #4
File: backprop_test.py Project: yzhen-li/trax
    def test_setitem(self):
        # Single integer index.
        a = array_ops.array([1., 2., 3.])
        b = array_ops.array(5.)
        c = array_ops.array(10.)

        tensors = [arr.data for arr in [a, b, c]]
        with tf.GradientTape() as g:
            g.watch(tensors)
            a[1] = b + c
            loss = array_ops.sum(a)

        gradients = g.gradient(loss.data, tensors)
        self.assertSequenceEqual(
            array_ops.array(gradients[0]).tolist(), [1., 0., 1.])
        self.assertEqual(array_ops.array(gradients[1]).tolist(), 1.)
        self.assertEqual(array_ops.array(gradients[2]).tolist(), 1.)

        # Tuple index.
        a = array_ops.array([[[1., 2.], [3., 4.]], [[5., 6.],
                                                    [7., 8.]]])  # 2x2x2 array.
        b = array_ops.array([10., 11.])

        tensors = [arr.data for arr in [a, b]]
        with tf.GradientTape() as g:
            g.watch(tensors)
            a[(1, 0)] = b
            loss = array_ops.sum(a)

        gradients = g.gradient(loss.data, tensors)
        self.assertSequenceEqual(
            array_ops.array(gradients[0]).tolist(),
            [[[1., 1.], [1., 1.]], [[0., 0.], [1., 1.]]])
        self.assertEqual(array_ops.array(gradients[1]).tolist(), [1., 1.])
Code Example #5
def nan_reduction(a, axis=None, dtype=None, keepdims=False):
    # Note: `reduction`, `init_val` and `isnan` are free variables supplied by
    # the enclosing factory that builds the NaN-ignoring variant of a reduction
    # (e.g. nansum from sum).
    a = array_ops.array(a)
    v = array_ops.array(init_val, dtype=a.dtype)
    return reduction(array_ops.where(isnan(a), v, a),
                     axis=axis,
                     dtype=dtype,
                     keepdims=keepdims)
Code Example #6
def _bin_op(tf_fun, a, b, promote=True):
    if promote:
        a, b = array_ops._promote_dtype(a, b)  # pylint: disable=protected-access
    else:
        a = array_ops.array(a)
        b = array_ops.array(b)
    return utils.tensor_to_ndarray(tf_fun(a.data, b.data))
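A helper like this would typically be used to build NumPy-style binary ops from TF kernels. The wrappers below are an illustrative sketch only (they reuse `_bin_op` and `tf` from the snippet above; the actual module may define these differently):

# Hypothetical wrappers; names and structure are illustrative, not the module's API.
def add(x1, x2):
    return _bin_op(tf.add, x1, x2)

def maximum(x1, x2):
    # promote=True (the default) first casts both inputs to a common dtype.
    return _bin_op(tf.math.maximum, x1, x2)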
Code Example #7
def average(a, axis=None, weights=None, returned=False):  # pylint: disable=missing-docstring
    if axis is not None and not isinstance(axis, six.integer_types):
        # TODO(wangpeng): Support tuple of ints as `axis`
        raise ValueError('`axis` must be an integer. Tuple of ints is not '
                         'supported yet. Got type: %s' % type(axis))
    a = array_ops.array(a)
    if weights is None:  # Treat all weights as 1
        if not np.issubdtype(a.dtype, np.inexact):
            a = a.astype(
                utils.result_type(a.dtype, dtypes.default_float_type()))
        avg = tf.reduce_mean(a.data, axis=axis)
        if returned:
            if axis is None:
                weights_sum = tf.size(a.data)
            else:
                weights_sum = tf.shape(a.data)[axis]
            weights_sum = tf.cast(weights_sum, a.data.dtype)
    else:
        if np.issubdtype(a.dtype, np.inexact):
            out_dtype = utils.result_type(a.dtype, weights)
        else:
            out_dtype = utils.result_type(a.dtype, weights,
                                          dtypes.default_float_type())
        a = array_ops.array(a, out_dtype).data
        weights = array_ops.array(weights, out_dtype).data

        def rank_equal_case():
            tf.debugging.Assert(
                tf.reduce_all(tf.shape(a) == tf.shape(weights)),
                [tf.shape(a), tf.shape(weights)])
            weights_sum = tf.reduce_sum(weights, axis=axis)
            avg = tf.reduce_sum(a * weights, axis=axis) / weights_sum
            return avg, weights_sum

        if axis is None:
            avg, weights_sum = rank_equal_case()
        else:

            def rank_not_equal_case():
                tf.debugging.Assert(tf.rank(weights) == 1, [tf.rank(weights)])
                weights_sum = tf.reduce_sum(weights)
                axes = tf.convert_to_tensor([[axis], [0]])
                avg = tf.tensordot(a, weights, axes) / weights_sum
                return avg, weights_sum

            # We condition on rank rather than shape equality, because if we do the
            # latter, when the shapes are partially unknown but the ranks are known
            # and different, utils.cond will run shape checking on the true branch,
            # which will raise a shape-checking error.
            avg, weights_sum = utils.cond(
                tf.rank(a) == tf.rank(weights), rank_equal_case,
                rank_not_equal_case)

    avg = array_ops.array(avg)
    if returned:
        weights_sum = array_ops.broadcast_to(weights_sum, tf.shape(avg.data))
        return avg, weights_sum
    return avg
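The behavior being reproduced is that of `np.average`, where `returned=True` also yields the sum of the weights used for normalization, broadcast to the shape of the result:

import numpy as np

a = np.array([[2., 4.], [6., 8.]])
w = np.array([1., 3.])
avg, wsum = np.average(a, axis=1, weights=w, returned=True)
# avg  -> [3.5 7.5]  (per-row weighted means: (2*1 + 4*3)/4 and (6*1 + 8*3)/4)
# wsum -> [4. 4.]    (sum of weights, broadcast to the shape of `avg`)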
Code Example #8
def _comparison(tf_fun, x1, x2, cast_bool_to_int=False):
    dtype = utils.result_type(x1, x2)
    # Cast x1 and x2 to the result_type if needed.
    x1 = array_ops.array(x1, dtype=dtype)
    x2 = array_ops.array(x2, dtype=dtype)
    x1 = x1.data
    x2 = x2.data
    if cast_bool_to_int and x1.dtype == tf.bool:
        x1 = tf.cast(x1, tf.int32)
        x2 = tf.cast(x2, tf.int32)
    return utils.tensor_to_ndarray(tf_fun(x1, x2))
Code Example #9
File: math_test.py Project: rouniuyizu/trax
 def setUp(self):
     super(MathTest, self).setUp()
     self.array_transforms = [
         lambda x: x,  # Identity,
         tf.convert_to_tensor,
         np.array,
         lambda x: np.array(x, dtype=np.float32),
         lambda x: np.array(x, dtype=np.float64),
         array_ops.array,
         lambda x: array_ops.array(x, dtype=np.float32),
         lambda x: array_ops.array(x, dtype=np.float64),
     ]
     self.types = [np.int32, np.int64, np.float32, np.float64]
Code Example #10
def tile(a, reps):
    a = array_ops.array(a).data
    reps = array_ops.array(reps, dtype=tf.int32).reshape([-1]).data

    a_rank = tf.rank(a)
    reps_size = tf.size(reps)
    reps = tf.pad(reps, [[tf.math.maximum(a_rank - reps_size, 0), 0]],
                  constant_values=1)
    a_shape = tf.pad(tf.shape(a),
                     [[tf.math.maximum(reps_size - a_rank, 0), 0]],
                     constant_values=1)
    a = tf.reshape(a, a_shape)

    return arrays.tensor_to_ndarray(tf.tile(a, reps))
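Padding `reps` and the input shape with leading 1s reproduces NumPy's promotion rules for `np.tile`, e.g.:

import numpy as np

a = np.array([[1, 2], [3, 4]])   # shape (2, 2)
np.tile(a, 3).shape              # (2, 6): reps shorter than a.ndim is left-padded to (1, 3)
np.tile(a, (2, 1, 1)).shape      # (2, 2, 2): `a` gains a leading length-1 axis first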
Code Example #11
File: math_test.py Project: rouniuyizu/trax
 def testArgMaxArgMin(self):
     data = [
         0,
         5,
         [1],
         [1, 2, 3],
         [[1, 2, 3]],
         [[4, 6], [7, 8]],
         [[[4, 6], [9, 10]], [[7, 8], [12, 34]]],
     ]
     for fn, d in itertools.product(self.array_transforms, data):
         arr = fn(d)
         self.match(math_ops.argmax(arr), np.argmax(arr))
         self.match(math_ops.argmin(arr), np.argmin(arr))
         if hasattr(arr, 'shape'):
             ndims = len(arr.shape)
         else:
             ndims = array_ops.array(arr, copy=False).ndim
         if ndims == 0:
             # Numpy flattens the scalar ndarray and treats it as a 1-d array of
             # size 1.
             ndims = 1
         for axis in range(-ndims, ndims):
             self.match(math_ops.argmax(arr, axis=axis),
                        np.argmax(arr, axis=axis))
             self.match(math_ops.argmin(arr, axis=axis),
                        np.argmin(arr, axis=axis))
Code Example #12
def _argminmax(fn, a, axis=None):
    a = array_ops.array(a)
    if axis is None:
        # When axis is None numpy flattens the array.
        a_t = tf.reshape(a.data, [-1])
    else:
        a_t = array_ops.atleast_1d(a).data
    return utils.tensor_to_ndarray(fn(input=a_t, axis=axis))
Code Example #13
def concatenate(arys, axis=0):
    if not isinstance(arys, (list, tuple)):
        arys = [arys]
    if not arys:
        raise ValueError('Need at least one array to concatenate.')
    dtype = utils.result_type(*arys)
    arys = [array_ops.array(array, dtype=dtype).data for array in arys]
    return arrays.tensor_to_ndarray(tf.concat(arys, axis))
Code Example #14
 def testIndexedSlices(self):
     dtype = tf.int64
     iss = tf.IndexedSlices(values=tf.ones([2, 3], dtype=dtype),
                            indices=tf.constant([1, 9]),
                            dense_shape=[10, 3])
     a = array_ops.array(iss, copy=False)
     expected = tf.scatter_nd([[1], [9]], tf.ones([2, 3], dtype=dtype),
                              [10, 3])
     self.assertAllEqual(expected, a)
Code Example #15
File: math_test.py Project: rouniuyizu/trax
 def run_test(a, b):
     for fn in self.array_transforms:
         arg1 = fn(a)
         arg2 = fn(b)
         self.match(math_fun(arg1, arg2),
                    np_fun(arg1, arg2),
                    msg='{}({}, {})'.format(name, arg1, arg2))
     # Tests type promotion
     for type_a in self.types:
         for type_b in self.types:
             if not check_promotion and type_a != type_b:
                 continue
             arg1 = array_ops.array(a, dtype=type_a)
             arg2 = array_ops.array(b, dtype=type_b)
             self.match(math_fun(arg1, arg2),
                        np_fun(arg1, arg2),
                        msg='{}({}, {})'.format(name, arg1, arg2),
                        check_type=check_promotion_result_type)
Code Example #16
File: math_ops.py Project: victorustc/trax
def nanmean(a, axis=None, dtype=None, keepdims=None):  # pylint: disable=missing-docstring
  a = array_ops.array(a)
  if np.issubdtype(a.dtype, np.bool_) or np.issubdtype(a.dtype, np.integer):
    return array_ops.mean(a, axis=axis, dtype=dtype, keepdims=keepdims)
  nan_mask = logical_not(isnan(a))
  if dtype is None:
    dtype = a.dtype
  normalizer = array_ops.sum(
      nan_mask, axis=axis, dtype=dtype, keepdims=keepdims)
  return nansum(a, axis=axis, dtype=dtype, keepdims=keepdims) / normalizer
Code Example #17
def nanmean(a, axis=None, dtype=None, keepdims=None):
    a = array_ops.array(a)
    if np.issubdtype(a.dtype, np.bool_) or np.issubdtype(a.dtype, np.integer):
        return array_ops.mean(a, axis=axis, dtype=dtype, keepdims=keepdims)
    nan_mask = logical_not(isnan(a))
    normalizer = array_ops.sum(nan_mask,
                               axis=axis,
                               dtype=np.int64,
                               keepdims=keepdims)
    return nansum(a, axis=axis, dtype=dtype, keepdims=keepdims) / normalizer
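Both `nanmean` variants above follow `np.nanmean`: NaNs are dropped from the sum (via `nansum`) and excluded from the count used as the normalizer. For comparison:

import numpy as np

a = np.array([[1., np.nan], [3., 4.]])
np.nanmean(a)           # 2.666... = (1 + 3 + 4) / 3
np.nanmean(a, axis=0)   # [2. 4.]  (column means, ignoring the NaN)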
Code Example #18
    def setUp(self):
        super(ArrayCreationTest, self).setUp()
        python_shapes = [
            0, 1, 2, (), (1, ), (2, ), (1, 2, 3), [], [1], [2], [1, 2, 3]
        ]
        self.shape_transforms = [
            lambda x: x, lambda x: np.array(x, dtype=int),
            lambda x: array_ops.array(x, dtype=int), tf.TensorShape
        ]

        self.all_shapes = []
        for fn in self.shape_transforms:
            self.all_shapes.extend([fn(s) for s in python_shapes])

        if sys.version_info.major == 3:
            # There is a bug in np.empty (and similar functions) in Python 3 that
            # causes a crash when the `shape` argument is an arrays.ndarray scalar
            # (or tf.Tensor scalar).
            def not_ndarray_scalar(s):
                return not (isinstance(s, arrays.ndarray) and s.ndim == 0)

            self.all_shapes = list(filter(not_ndarray_scalar, self.all_shapes))

        self.all_types = [
            int, float, np.int16, np.int32, np.int64, np.float16, np.float32,
            np.float64
        ]

        source_array_data = [
            1,
            5.5,
            7,
            (),
            (8, 10.),
            ((), ()),
            ((1, 4), (2, 8)),
            [],
            [7],
            [8, 10.],
            [[], []],
            [[1, 4], [2, 8]],
            ([], []),
            ([1, 4], [2, 8]),
            [(), ()],
            [(1, 4), (2, 8)],
        ]

        self.array_transforms = [
            lambda x: x,
            tf.convert_to_tensor,
            np.array,
            array_ops.array,
        ]
        self.all_arrays = []
        for fn in self.array_transforms:
            self.all_arrays.extend([fn(s) for s in source_array_data])
Code Example #19
 def run_test(arr, index, value):
     for fn in self.array_transforms:
         value_arg = fn(value)
         tf_array = array_ops.array(arr)
         np_array = np.array(arr)
         tf_array[index] = value_arg
         # TODO(srbs): "setting an array element with a sequence" is thrown
         # if we do not wrap value_arg in a numpy array. Investigate how this can
         # be avoided.
         np_array[index] = np.array(value_arg)
         self.match(tf_array, np_array)
Code Example #20
def normal(key, shape, dtype=tf.float32):
  """Sample standard-normal random values.

  Args:
    key: not used since TF doesn't pass RNG states explicitly.
    shape: the shape of the result.
    dtype: the dtype of the result.

  Returns:
    Random values in standard-normal distribution.
  """
  del key
  return array(tf.random.normal(shape, dtype=dtype), copy=False)
Code Example #21
File: math_ops.py Project: victorustc/trax
def argsort(a, axis=-1, kind='quicksort', order=None):  # pylint: disable=missing-docstring
  # TODO(nareshmodi): make string tensors also work.
  if kind not in ('quicksort', 'stable'):
    raise ValueError("Only 'quicksort' and 'stable' arguments are supported.")
  if order is not None:
    raise ValueError("'order' argument to sort is not supported.")
  stable = (kind == 'stable')

  a = array_ops.array(a).data

  def _argsort(a, axis, stable):
    if axis is None:
      a = tf.reshape(a, [-1])
      axis = 0

    return tf.argsort(a, axis, stable=stable)

  tf_ans = tf.cond(
      tf.rank(a) == 0, lambda: tf.constant([0]),
      lambda: _argsort(a, axis, stable))

  return array_ops.array(tf_ans, dtype=np.intp)
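As in `np.argsort`, `axis=None` sorts the flattened array, which is what the inner `_argsort` handles; a quick NumPy reference:

import numpy as np

a = np.array([[3, 1], [2, 4]])
np.argsort(a, axis=-1)    # [[1 0], [0 1]]  (indices along the last axis)
np.argsort(a, axis=None)  # [1 2 0 3]       (indices into the flattened array)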
Code Example #22
def sort(a, axis=-1, kind='quicksort', order=None):  # pylint: disable=missing-docstring
    if kind != 'quicksort':
        raise ValueError("Only 'quicksort' is supported.")
    if order is not None:
        raise ValueError("'order' argument to sort is not supported.")

    a = array_ops.array(a)

    if axis is None:
        result_t = tf.sort(tf.reshape(a.data, [-1]), 0)
        return utils.tensor_to_ndarray(result_t)
    else:
        return utils.tensor_to_ndarray(tf.sort(a.data, axis))
Code Example #23
    def testDiag(self):
        array_transforms = [
            lambda x: x,  # Identity,
            tf.convert_to_tensor,
            np.array,
            lambda x: np.array(x, dtype=np.float32),
            lambda x: np.array(x, dtype=np.float64),
            array_ops.array,
            lambda x: array_ops.array(x, dtype=np.float32),
            lambda x: array_ops.array(x, dtype=np.float64)
        ]

        def run_test(arr):
            for fn in array_transforms:
                arr = fn(arr)
                self.match(array_ops.diag(arr),
                           np.diag(arr),
                           msg='diag({})'.format(arr))
                for k in range(-3, 3):
                    self.match(array_ops.diag(arr, k),
                               np.diag(arr, k),
                               msg='diag({}, k={})'.format(arr, k))

        # 2-d arrays.
        run_test(np.arange(9).reshape((3, 3)).tolist())
        run_test(np.arange(6).reshape((2, 3)).tolist())
        run_test(np.arange(6).reshape((3, 2)).tolist())
        run_test(np.arange(3).reshape((1, 3)).tolist())
        run_test(np.arange(3).reshape((3, 1)).tolist())
        run_test([[5]])
        run_test([[]])
        run_test([[], []])

        # 1-d arrays.
        run_test([])
        run_test([1])
        run_test([1, 2])
Code Example #24
def linspace(  # pylint: disable=missing-docstring
        start,
        stop,
        num=50,
        endpoint=True,
        retstep=False,
        dtype=float,
        axis=0):
    if dtype:
        dtype = utils.result_type(dtype)
    start = array_ops.array(start, dtype=dtype).data
    stop = array_ops.array(stop, dtype=dtype).data
    if num < 0:
        raise ValueError(
            'Number of samples {} must be non-negative.'.format(num))
    step = tf.convert_to_tensor(np.nan)
    if endpoint:
        result = tf.linspace(start, stop, num, axis=axis)
        if num > 1:
            step = (stop - start) / (num - 1)
    else:
        # tf.linspace does not support endpoint=False so we manually handle it
        # here.
        if num > 1:
            step = ((stop - start) / num)
            new_stop = tf.cast(stop, step.dtype) - step
            start = tf.cast(start, new_stop.dtype)
            result = tf.linspace(start, new_stop, num, axis=axis)
        else:
            result = tf.linspace(start, stop, num, axis=axis)
    if dtype:
        result = tf.cast(result, dtype)
    if retstep:
        return arrays.tensor_to_ndarray(result), arrays.tensor_to_ndarray(step)
    else:
        return arrays.tensor_to_ndarray(result)
Code Example #25
def bernoulli(key, mean=np.float32(0.5), shape=()):
  """Sample Bernoulli random values with given shape and mean.

  Args:
    key: a random key, not used in the TF backend (stored in graph).
    mean: optional, an array_like broadcastable to `shape` for the mean of the
      random variables (default 0.5).
    shape: optional, a tuple of nonnegative integers representing the shape
      (default scalar).

  Returns:
    A random array with the specified shape and boolean dtype.
  """
  # TODO(wangpeng): convert types TF <-> numpy.
  shape = shape or arrays.convert_to_tensor(value=mean).shape
  return array(
      tf.less(uniform(key, shape), mean), copy=False)
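The sampling itself is just a uniform draw compared against the mean. A minimal standalone sketch of the same idea in plain TensorFlow (TF 2.x API assumed):

import tensorflow as tf

mean = 0.3
shape = (4,)
# Boolean samples, each True with probability ~0.3.
samples = tf.less(tf.random.uniform(shape), mean)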
Code Example #26
def uniform(key, shape, dtype=random.DEFAULT_RANDN_DTYPE, minval=0., maxval=1.):
  """Sample uniform random values in range [`minval`, `maxval`).

  Args:
    key: not used by this implementation.
    shape: the shape of the result.
    dtype: the dtype of the result.
    minval: the minimal value (inclusive).
    maxval: the maximal value (exclusive).

  Returns:
    An ndarray with shape `shape` and dtype `dtype`. Each value in the ndarray
    is sampled uniformly randomly in range [`minval`, `maxval`).
  """
  del key
  return array(
      tf.random.uniform(shape, dtype=dtype, minval=minval, maxval=maxval),
      copy=False)
Code Example #27
def _scalar(tf_fn, x, promote_to_float=False):
    """Computes the tf_fn(x) for each element in `x`.

    Args:
      tf_fn: function that takes a single Tensor argument.
      x: array_like. Could be an ndarray, a Tensor or any object that can
        be converted to a Tensor using `tf.convert_to_tensor`.
      promote_to_float: whether to cast the argument to a float dtype
        (`dtypes.default_float_type`) if it is not already.

    Returns:
      An ndarray with the same shape as `x`. The default output dtype is
      determined by `dtypes.default_float_type`, unless x is an ndarray with a
      floating point type, in which case the output type is the same as x.dtype.
    """
    x = array_ops.array(x)
    if promote_to_float and not np.issubdtype(x.dtype, np.floating):
        x = x.astype(dtypes.default_float_type())
    return utils.tensor_to_ndarray(tf_fn(x.data))
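Helpers like `_scalar` are typically used to lift unary TF kernels into NumPy-style functions. The wrappers below are a hypothetical sketch reusing `_scalar` and `tf` from the snippet above, not necessarily how the module defines them:

# Hypothetical wrappers; the real module may name or structure these differently.
def sin(x):
    return _scalar(tf.sin, x, promote_to_float=True)

def exp(x):
    return _scalar(tf.exp, x, promote_to_float=True)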
Code Example #28
def logical_not(x):
    x = array_ops.array(x, dtype=np.bool_)
    return utils.tensor_to_ndarray(tf.logical_not(x.data))
Code Example #29
 def f(x):
   if isinstance(x, (tf.Tensor, tf.IndexedSlices)):
     return array(x, copy=False)
   else:
     return x
Code Example #30
    def testArray(self):
        ndmins = [0, 1, 2, 5]
        for a, dtype, ndmin, copy in itertools.product(self.all_arrays,
                                                       self.all_types, ndmins,
                                                       [True, False]):
            self.match(array_ops.array(a, dtype=dtype, ndmin=ndmin, copy=copy),
                       np.array(a, dtype=dtype, ndmin=ndmin, copy=copy))

        zeros_list = array_ops.zeros(5)

        # TODO(srbs): Test that copy=True when context.device is different from
        # tensor device copies the tensor.

        # Backing tensor is the same if copy=False, other attributes being None.
        self.assertIs(
            array_ops.array(zeros_list, copy=False).data, zeros_list.data)
        self.assertIs(
            array_ops.array(zeros_list.data, copy=False).data, zeros_list.data)

        # Backing tensor is different if ndmin is not satisfied.
        self.assertIsNot(
            array_ops.array(zeros_list, copy=False, ndmin=2).data,
            zeros_list.data)
        self.assertIsNot(
            array_ops.array(zeros_list.data, copy=False, ndmin=2).data,
            zeros_list.data)
        self.assertIs(
            array_ops.array(zeros_list, copy=False, ndmin=1).data,
            zeros_list.data)
        self.assertIs(
            array_ops.array(zeros_list.data, copy=False, ndmin=1).data,
            zeros_list.data)

        # Backing tensor is different if dtype is not satisfied.
        self.assertIsNot(
            array_ops.array(zeros_list, copy=False, dtype=int).data,
            zeros_list.data)
        self.assertIsNot(
            array_ops.array(zeros_list.data, copy=False, dtype=int).data,
            zeros_list.data)
        self.assertIs(
            array_ops.array(zeros_list, copy=False, dtype=float).data,
            zeros_list.data)
        self.assertIs(
            array_ops.array(zeros_list.data, copy=False, dtype=float).data,
            zeros_list.data)