def convert_to_mixed_eager_tensors(values):
  v = [t if isinstance(ag_core.getval(t), tensor.Tensor) else tensor.Tensor(t)
       for t in values]
  types = [t.dtype for t in v]
  return types, v
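# Illustrative sketch only (not part of the original test suite): it relies
# solely on convert_to_mixed_eager_tensors above and on the tensor / dtypes /
# ag_core names already imported in this file. Raw Python values are wrapped
# into eager Tensors while existing Tensors pass through untouched.
def _example_convert_to_mixed_eager_tensors():
  types, tensors = convert_to_mixed_eager_tensors([3, tensor.Tensor(4.0)])
  # 3 defaults to int32 and 4.0 to float32 (see testScalarTensor and
  # testFloatDowncast below).
  assert types == [dtypes.int32, dtypes.float32]
  assert all(isinstance(ag_core.getval(t), tensor.Tensor) for t in tensors)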
def testTensorCreationFailure(self):
  with self.assertRaises(Exception):
    # Should fail because each row of the Python object has a different
    # number of columns.
    self.assertEqual(None, tensor.Tensor([[1], [1, 2]]))
def testMultiLineTensorStr(self):
  t = tensor.Tensor(np.eye(3))
  tensor_str = str(t)
  self.assertIn("shape=%s, dtype=%s, " % (t.shape, t.dtype.name), tensor_str)
  self.assertIn("numpy=\n%s" % t.numpy(), tensor_str)
def testNumpyUnprintableTensor(self):
  t = tensor.Tensor(42)
  # Force change dtype to a numpy-unprintable type.
  t._dtype = dtypes.resource
  self.assertIn("numpy=<unprintable>", str(t))
  self.assertIn("numpy=<unprintable>", repr(t))
def testFloatDowncast(self):
  # Unless explicitly specified, float64 -> float32.
  t = tensor.Tensor(3.0)
  self.assertEqual(dtypes.float32, t.dtype)
  t = tensor.Tensor(3.0, dtype=dtypes.float64)
  self.assertEqual(dtypes.float64, t.dtype)
def fn():
  tape.watch_variable(x)
  b = tensor.Tensor(2.0)
  c = math_ops.add(x.value(), b)
  return math_ops.add(c, tensor.Tensor(3.0))
def testZeroDimTensorRepr(self):
  t = tensor.Tensor(42)
  self.assertTrue(repr(t).startswith("<"))
  self.assertTrue(repr(t).endswith(">"))
  self.assertIn("id=%d, shape=(), dtype=int32, numpy=42" % t._id, repr(t))
def testExecuteBasic(self):
  three = tensor.Tensor(3)
  five = tensor.Tensor(5)
  product = three * five
  self.assertEqual(15, product.numpy())
def testExecuteStringAttr(self):
  three = tensor.Tensor(3.0)
  checked_three = array_ops.check_numerics(three, message='just checking')
  self.assertEqual([[3]], checked_three.numpy())
def testComposition(self):
  x = tensor.Tensor(1, dtype=dtypes.int32)
  three_x = x + x + x
  self.assertEqual(dtypes.int32, three_x.dtype)
  self.assertEqual(3, three_x.numpy())
def testInvalidInputDataType(self):
  # Fill requires the first input to be an int32 tensor.
  with self.assertRaisesRegexp(errors.InvalidArgumentError, 'int64'):
    array_ops.fill(tensor.Tensor([2], dtype=dtypes.int64), tensor.Tensor(1))
def testExecuteListOutputLen0(self):
  empty = tensor.Tensor([], dtype=dtypes.int32)
  result = array_ops.unstack(empty, 0)
  self.assertTrue(isinstance(result, list))
  self.assertEqual(0, len(result))
def testInt32CPUDefault(self):
  if not context.context().num_gpus():
    self.skipTest('No GPUs found')
  with context.device('/gpu:0'):
    r = tensor.Tensor(1) + tensor.Tensor(2)
    self.assertEqual(r.numpy(), 3)
def fn():
  tape.watch(x.handle)
  b = tensor.Tensor(2.0)
  c = math_ops.add(x.value(), b)
  return math_ops.add(c, tensor.Tensor(3.0))
def f(a, b):
  with context.device('/gpu:0'):
    c = math_ops.add(a.as_gpu_tensor(0), b.as_gpu_tensor(0))
  return math_ops.add(c.as_cpu_tensor(), tensor.Tensor(3.0))
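# Illustrative sketch only (assumes a GPU is present, mirroring the
# context.context().num_gpus() guard used in testInt32CPUDefault above): `f`
# copies its inputs to the GPU, adds them there, and brings the sum back to
# the CPU before adding 3.0, so for inputs 1.0 and 2.0 the result is 6.0.
#
#   if context.context().num_gpus():
#     r = f(tensor.Tensor(1.0), tensor.Tensor(2.0))
#     self.assertEqual(6.0, r.numpy())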
def testExecuteIntAttr(self):
  three = tensor.Tensor(3)
  four = tensor.Tensor(4)
  total = math_ops.add_n([three, four])
  self.assertEqual(7, total.numpy())
def testTensoVspaceNoneMutAdd(self):
  t = tensor.Tensor(1.0)
  self.assertEqual(tensor_node.TensorVSpace(t).mut_add(t, None).numpy(), 1.0)
def testExecuteBoolAttr(self):
  three = tensor.Tensor([[3]])
  five = tensor.Tensor([[5]])
  product = math_ops.matmul(three, five, transpose_a=True)
  self.assertEqual([[15]], product.numpy())
def testZeroDimTensorStr(self):
  t = tensor.Tensor(42)
  self.assertIn("shape=(), dtype=int32, numpy=42", str(t))
def f():
  tape.watch(embedding.handle)
  embedded_x = embedding_ops.embedding_lookup(embedding, x)
  return tensor.Tensor(1.0, dtypes.float32) - embedded_x
def testZeroSizeTensorStr(self):
  t = tensor.Tensor(np.zeros(0, dtype=np.float32))
  self.assertIn("shape=(0,), dtype=float32, numpy=[]", str(t))
def loss(x, l):
  return math_ops.reduce_mean(
      nn_ops.softmax_cross_entropy_with_logits(logits=x, labels=l),
      tensor.Tensor([0]))
def testScalarTensor(self):
  t = tensor.Tensor(3)
  self.assertEqual(t.numpy(), tensor.Tensor(np.array(3)).numpy())
  self.assertEqual(dtypes.int32, t.dtype)
  self.assertEqual(0, t.shape.ndims)
  self.assertAllEqual([], t.shape.as_list())
def first(x):
  l = tensor.Tensor([[0.0]])
  x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=x)
  x = math_ops.reduce_sum(x, tensor.Tensor([0]))
  return x
def testBool(self):
  t = tensor.Tensor(False)
  # A False scalar tensor should be falsy, so this branch must not be taken.
  if t:
    self.assertFalse(True)
def second(x):
  grad = backprop.gradients_function(first, [0])(x)[0]
  return math_ops.reduce_sum(grad, tensor.Tensor([0]))
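# Illustrative sketch only (assumes `first` above and the backprop module
# imported elsewhere in this file): `second` already differentiates `first`
# once, so applying backprop.gradients_function to `second` itself yields a
# second-order gradient of the softmax cross-entropy reduction.
#
#   x = tensor.Tensor([[0.1]])
#   grad_of_grad = backprop.gradients_function(second, [0])(x)[0]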
def testNumpyOrderHandling(self):
  n = np.array([[1, 2], [3, 4]], order="F")  # Fortran (column-major) order.
  t = tensor.Tensor(n)
  self.assertAllEqual([[1, 2], [3, 4]], t.numpy())
def fn(x):
  b = tensor.Tensor(2.0)
  c = math_ops.add(x, b)
  return math_ops.add(c, tensor.Tensor(3.0))
def testCopyFromCPUToCPU(self):
  ta = tensor.Tensor([[1, 2], [3, 4]])
  tb = ta.as_cpu_tensor()
  self.assertNotEqual(ta._handle, tb._handle)
  self.assertAllEqual(ta.numpy(), tb.numpy())
def testInvalidDevice(self):
  with self.assertRaises(ValueError):
    with context.device('pu:0'):
      _ = tensor.Tensor(1)