def testAcosh2ndDerivative(self):
    w1 = np.random.rand(2, 2) * 10 + 1.1
    x = np.random.rand(2, 2) + 1

    self.w1_torch = torch.tensor(w1, dtype=torch.float, requires_grad=True)
    self.x_torch = torch.tensor(x, dtype=torch.float)
    y_torch = (self.x_torch * self.w1_torch).acosh()
    dy_dw1_torch = grad_torch(
        y_torch, self.w1_torch,
        grad_outputs=ones_like_torch(y_torch),
        create_graph=True, retain_graph=True)[0]
    d2y_dw12_torch = grad_torch(
        dy_dw1_torch, self.w1_torch,
        grad_outputs=ones_like_torch(dy_dw1_torch))[0]

    cxt = tc.Context()
    self.w1_tc = tc.ml.optimizer.Variable.load(w1.shape, w1.flatten().tolist(), tc.F32)
    self.x_tc = tc.tensor.Dense.load(x.shape, x.flatten().tolist(), tc.F32)
    y_tc = (self.x_tc * self.w1_tc).acosh()
    _dy_dw1_tc = grad_tc(y_tc, ones_like_tc(y_tc), self.w1_tc)
    _d2y_dw2_tc = grad_tc(_dy_dw1_tc, ones_like_tc(_dy_dw1_tc), self.w1_tc)
    cxt.map = tc.Map({
        'the_first_derivative': _dy_dw1_tc,
        'the_second_derivative': _d2y_dw2_tc,
    })

    result = HOST.post(ENDPOINT, cxt)
    dy_dw1_tc = result['the_first_derivative']
    d2y_dw2_tc = result['the_second_derivative']

    self.assertAllClose(dy_dw1_torch, dy_dw1_tc)
    self.assertAllClose(d2y_dw12_torch, d2y_dw2_tc)

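# For reference, the closed forms exercised above (elementwise, with u = x * w1):
#   d/dw1     acosh(x * w1) =  x / sqrt(u**2 - 1)
#   d2/dw1**2 acosh(x * w1) = -x**2 * u / (u**2 - 1)**1.5
# A minimal standalone numpy sanity check of those formulas against finite
# differences; `_check_acosh_derivatives` is a hypothetical helper, not part of
# this suite:
def _check_acosh_derivatives(x, w1, eps=1e-4):
    u = x * w1  # acosh requires u > 1
    first = x / np.sqrt(u**2 - 1)
    second = -x**2 * u / (u**2 - 1)**1.5
    f = lambda w: np.arccosh(x * w)
    fd_first = (f(w1 + eps) - f(w1 - eps)) / (2 * eps)
    fd_second = (f(w1 + eps) - 2 * f(w1) + f(w1 - eps)) / eps**2
    assert np.allclose(first, fd_first, atol=1e-3)
    assert np.allclose(second, fd_second, atol=1e-2)
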
def testSlice(self):
    schema = tc.table.Schema(
        [
            tc.Column("0", tc.U64),
            tc.Column("1", tc.U64),
            tc.Column("2", tc.U64),
            tc.Column("3", tc.U64),
        ],
        [
            tc.Column("value", tc.Number),
        ])

    for i in range(4):
        schema.create_index(str(i), [str(i)])

    data = [
        ([0, 0, 1, 0], 1),
        ([0, 1, 2, 0], 2),
        ([1, 0, 0, 0], 3),
        ([1, 0, 1, 0], 3),
    ]

    cxt = tc.Context()
    cxt.table = tc.table.Table(schema)
    cxt.inserts = [cxt.table.insert(coord, [value]) for (coord, value) in data]
    cxt.result = tc.After(cxt.inserts, cxt.table.where({
        "0": slice(2),
        "1": slice(3),
        "2": slice(4),
        "3": slice(1),
    }))

    expect = expected(schema, [coord + [value] for coord, value in data])
    actual = self.host.post(ENDPOINT, cxt)
    self.assertEqual(actual, expect)

def testAtanh2ndDerivative(self):
    w_torch = self.w1_torch.atanh()
    y_torch = self.x_torch * w_torch
    dy_dw1_torch = grad_torch(
        y_torch, self.w1_torch,
        grad_outputs=ones_like_torch(y_torch),
        create_graph=True, retain_graph=True)[0]
    d2y_dw12_torch = grad_torch(
        dy_dw1_torch, self.w1_torch,
        grad_outputs=ones_like_torch(dy_dw1_torch))[0]

    cxt = tc.Context()
    w_tc = self.w1_tc.atanh()
    y_tc = self.x_tc * w_tc
    _dy_dw1_tc = grad_tc(y_tc, ones_like_tc(y_tc), self.w1_tc)
    _d2y_dw2_tc = grad_tc(_dy_dw1_tc, ones_like_tc(_dy_dw1_tc), self.w1_tc)
    cxt.map = tc.Map({
        'the_first_derivative': _dy_dw1_tc,
        'the_second_derivative': _d2y_dw2_tc,
    })

    result = HOST.post(ENDPOINT, cxt)
    dy_dw1_tc = result['the_first_derivative']
    d2y_dw2_tc = result['the_second_derivative']

    self.assertAllClose(dy_dw1_torch, dy_dw1_tc, 0.01)
    self.assertAllClose(d2y_dw12_torch, d2y_dw2_tc, 0.01)

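# Closed forms for this test (elementwise, valid for |w1| < 1):
#   d/dw1     x * atanh(w1) = x / (1 - w1**2)
#   d2/dw1**2 x * atanh(w1) = 2 * x * w1 / (1 - w1**2)**2
# A minimal standalone numpy check against finite differences;
# `_check_atanh_derivatives` is a hypothetical helper, not part of this suite:
def _check_atanh_derivatives(x, w1, eps=1e-4):
    first = x / (1 - w1**2)
    second = 2 * x * w1 / (1 - w1**2)**2
    f = lambda w: x * np.arctanh(w)
    fd_first = (f(w1 + eps) - f(w1 - eps)) / (2 * eps)
    fd_second = (f(w1 + eps) - 2 * f(w1) + f(w1 - eps)) / eps**2
    assert np.allclose(first, fd_first, atol=1e-3)
    assert np.allclose(second, fd_second, atol=1e-2)
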
def testCreate(self):
    cxt = tc.Context()
    cxt.tree = tc.btree.BTree(SCHEMA)
    cxt.result = tc.After(cxt.tree.insert((1, "one")), cxt.tree.count())

    count = self.host.post(ENDPOINT, cxt)
    self.assertEqual(count, 1)

def execute(self, fmt, *tensors):
    expected = np.einsum(fmt, *[np.array(t) for t in tensors])

    cxt = tc.Context()
    cxt.dense = [to_dense(t) for t in tensors]
    cxt.sparse = [to_sparse(t) for t in tensors]
    cxt.results = (tc.tensor.einsum(fmt, cxt.dense), tc.tensor.einsum(fmt, cxt.sparse))

    (dense, sparse) = self.host.post(ENDPOINT, cxt)

    # debug output, kept for troubleshooting:
    # print("inputs:")
    # for tensor in tensors:
    #     print(tensor.shape)
    #     print(tensor)
    #     print()
    # print("expect", expected.shape, expected)
    # print()
    # print("expect dense", expect_dense(expected))
    # print("actual dense", dense)
    # print()
    # print("expect sparse", expect_sparse(expected))
    # print("actual sparse", sparse)

    if expected.shape:
        self.assertEqual(dense, expect_dense(expected))
        self.assertEqual(sparse, expect_sparse(expected))
    else:
        self.assertEqual(dense, expected)
        self.assertEqual(sparse, expected)

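# Example invocation of the helper above (shapes are arbitrary illustrations):
#   self.execute("ij,jk->ik", np.arange(6).reshape(2, 3), np.arange(12).reshape(3, 4))
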
def testMatMul2ndDerivative(self):
    y_torch = self.x_torch @ self.w1_torch**2 + self.b1_torch
    y2_torch = (y_torch @ self.w2_torch + self.b2_torch)**2
    dy_dw1_torch = grad_torch(
        y2_torch, self.w1_torch,
        grad_outputs=ones_like_torch(y2_torch),
        create_graph=True, retain_graph=True)[0]
    d2y_dw12_torch = grad_torch(
        dy_dw1_torch, self.w1_torch,
        grad_outputs=ones_like_torch(dy_dw1_torch))[0]

    cxt = tc.Context()
    y_tc = self.x_tc @ self.w1_tc**2 + self.b1_tc
    y2_tc = (y_tc @ self.w2_tc + self.b2_tc)**2
    _dy_dw1_tc = grad_tc(y2_tc, ones_like_tc(y2_tc), self.w1_tc)
    _d2y_dw2_tc = grad_tc(_dy_dw1_tc, ones_like_tc(_dy_dw1_tc), self.w1_tc)
    cxt.map = tc.Map({
        'the_first_derivative': _dy_dw1_tc,
        'the_second_derivative': _d2y_dw2_tc,
    })

    result = HOST.post(ENDPOINT, cxt)
    dy_dw1_tc = result['the_first_derivative']
    d2y_dw2_tc = result['the_second_derivative']

    self.assertAllClose(dy_dw1_torch, dy_dw1_tc)
    self.assertAllClose(d2y_dw12_torch, d2y_dw2_tc)

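# For a plain-matmul reference point, the gradient of sum((x @ w + b)**2) with
# respect to w is 2 * x.T @ (x @ w + b). A standalone finite-difference check of
# that identity; `_check_matmul_grad` is a hypothetical helper, not part of this
# suite:
def _check_matmul_grad(x, w, b, eps=1e-6):
    analytic = 2 * x.T @ (x @ w + b)
    f = lambda w_: ((x @ w_ + b)**2).sum()
    fd = np.zeros_like(w)
    for idx in np.ndindex(w.shape):
        wp, wm = w.copy(), w.copy()
        wp[idx] += eps
        wm[idx] -= eps
        fd[idx] = (f(wp) - f(wm)) / (2 * eps)
    assert np.allclose(analytic, fd, atol=1e-4)
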
def testCreate(self):
    cxt = tc.Context()
    cxt.table = tc.table.Table(SCHEMA)
    cxt.result = tc.After(cxt.table.insert(("name",), (0,)), cxt.table.count())

    count = self.host.post(ENDPOINT, cxt)
    self.assertEqual(count, 1)

def testExpandDims(self):
    cxt = tc.Context()
    cxt.dense = tc.tensor.Dense.arange([2, 3], 0, 6)
    cxt.result = cxt.dense.expand_dims(1)

    actual = self.host.post(ENDPOINT, cxt)
    expected = expect_dense(np.arange(0, 6).reshape([2, 1, 3]))
    self.assertEqual(actual, expected)

def testTranspose(self):
    cxt = tc.Context()
    cxt.dense = tc.tensor.Dense.arange([3, 2], 0, 6)
    cxt.result = cxt.dense.transpose()

    actual = self.host.post(ENDPOINT, cxt)
    expected = np.transpose(np.arange(0, 6).reshape([3, 2]))
    expected = expect_dense(expected)
    self.assertEqual(actual, expected)

def testProductAll(self):
    shape = [2, 3]

    cxt = tc.Context()
    cxt.big = tc.tensor.Dense.arange(shape, 1, 7)
    cxt.result = cxt.big.product()

    actual = self.host.post(ENDPOINT, cxt)
    self.assertEqual(actual, np.prod(range(1, 7)))

def testSumAll(self):
    shape = [5, 2]

    cxt = tc.Context()
    cxt.big = tc.tensor.Dense.arange(shape, 0, 10)
    cxt.result = cxt.big.sum()

    actual = self.host.post(ENDPOINT, cxt)
    self.assertEqual(actual, sum(range(10)))

def testPow(self):
    cxt = tc.Context()
    cxt.left = tc.tensor.Dense.load([1, 2], [1, 2], tc.I64)
    cxt.result = cxt.left**2

    actual = self.host.post(ENDPOINT, cxt)
    expected = expect_dense(tc.I64, [1, 2], [1, 4])
    self.assertEqual(actual, expected)

def testRandomUniform(self):
    minval = -1
    maxval = 3

    cxt = tc.Context()
    cxt.x = tc.tensor.Dense.random_uniform([5, 1], minval, maxval)
    cxt.result = (cxt.x >= minval).all().logical_and(
        (cxt.x <= maxval).all()).logical_and(cxt.x.mean() > 0)

    self.assertTrue(self.host.post(ENDPOINT, cxt))

def testDiagonal(self):
    x = np.arange(0, 9).reshape(3, 3)

    cxt = tc.Context()
    cxt.x = tc.tensor.Dense.load(x.shape, x.flatten().tolist(), tc.I32)
    cxt.diag = tc.linalg.diagonal(cxt.x)

    expected = np.diag(x)
    actual = self.host.post(ENDPOINT, cxt)
    self.assertEqual(actual, expect_dense(expected, tc.I32))

def testSliceRange(self):
    keys = [[i, num2words(i)] for i in range(50)]

    cxt = tc.Context()
    cxt.tree = tc.btree.BTree(SCHEMA)
    cxt.inserts = [cxt.tree.insert(key) for key in keys]
    cxt.result = tc.After(cxt.inserts, cxt.tree[29:32])

    result = self.host.post(ENDPOINT, cxt)
    self.assertEqual(result, expected(keys[29:32]))

def testDenseAsSparse(self):
    matrix = np.eye(3).astype(int)

    cxt = tc.Context()
    cxt.dense = load_dense(matrix, tc.I32)
    cxt.sparse = cxt.dense.as_sparse()

    actual = self.host.post(ENDPOINT, cxt)
    expected = expect_sparse(tc.I32, [3, 3], matrix)
    self.assertEqual(actual, expected)

def testExp_simple(self):
    cxt = tc.Context()
    cxt.x = tc.ml.Variable.ones([1])
    cxt.g_x = 4 * cxt.x
    cxt.h_x = cxt.g_x.exp()
    cxt.result = tc.math.derivative_of(cxt.h_x)

    expected = 4 * math.e**4  # d/dx exp(4x) = 4 * exp(4x), evaluated at x = 1
    actual = HOST.post(ENDPOINT, cxt)
    self.assertTrue(np.allclose(load_np(actual), np.array([expected])))

def testAdd(self):
    cxt = tc.Context()
    cxt.x = tc.ml.Variable.ones([1])
    cxt.g_x = -2 * cxt.x + 5
    cxt.f_x = 6 * cxt.g_x + 3
    cxt.d_f_x = tc.math.derivative_of(cxt.f_x)
    cxt.f_x_grad = tc.math.gradients(cxt.f_x, ones_like_tc(cxt.f_x), cxt.x)
    cxt.passed = (cxt.d_f_x == cxt.f_x_grad).all()

    self.assertTrue(HOST.post(ENDPOINT, cxt))

def testExp_withOperatorExponent(self):
    cxt = tc.Context()
    cxt.x = tc.ml.Variable.ones([1])
    cxt.f_g_x = (3 * (cxt.x**2) + 2).exp()
    cxt.result = tc.math.derivative_of(cxt.f_g_x)

    x = np.array([1])
    expected = 6 * x * math.e**(3 * x**2 + 2)  # chain rule: d/dx exp(3x^2 + 2)
    actual = HOST.post(ENDPOINT, cxt)
    self.assertTrue(np.allclose(load_np(actual), expected))

def testLogarithm(self):
    size = 1_000_000
    shape = [10, size // 10]  # integer division so the dimension is an int

    cxt = tc.Context()
    cxt.x = tc.tensor.Dense.arange(shape, 2, size + 2)
    cxt.ln = cxt.x.log()
    cxt.log = cxt.x.log(math.e)
    cxt.test = (cxt.ln == cxt.log).all()

    self.assertTrue(self.host.post(ENDPOINT, cxt))

def testSub(self):
    shape = [1, 3]

    cxt = tc.Context()
    cxt.left = tc.tensor.Dense.arange(shape, 0, 6)
    cxt.right = tc.tensor.Dense.constant([1], 2)
    cxt.result = cxt.left - cxt.right

    actual = self.host.post(ENDPOINT, cxt)
    expected = expect_dense(tc.I64, shape, np.arange(-2, 4, 2))
    self.assertEqual(actual, expected)

def testTruncatedNormal(self):
    tolerance = 0.5

    cxt = tc.Context()
    cxt.x = tc.tensor.Dense.truncated_normal([10, 20])
    cxt.result = cxt.x.mean(), cxt.x.std()

    mean, std = self.host.post(ENDPOINT, cxt)
    self.assertTrue(abs(mean) < tolerance)
    self.assertTrue(abs(std - 1) < tolerance)

def testLog(self):
    cxt = tc.Context()
    cxt.x = tc.ml.Variable.ones([1])
    cxt.g_x = (cxt.x**2 + 1).log()
    cxt.result = tc.math.derivative_of(cxt.g_x)

    x = np.array([1])
    expected = (2 * x) / (x**2 + 1)  # d/dx ln(x^2 + 1)
    actual = HOST.post(ENDPOINT, cxt)
    self.assertTrue(np.allclose(load_np(actual), expected))

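# The closed forms asserted in testExp_simple, testExp_withOperatorExponent, and
# testLog above can be cross-checked symbolically. A standalone sketch; sympy is
# an assumed extra dependency, not otherwise used in this suite:
def _check_closed_forms():
    import sympy as sp

    x = sp.Symbol("x")
    assert sp.simplify(sp.diff(sp.exp(4 * x), x) - 4 * sp.exp(4 * x)) == 0
    assert sp.simplify(sp.diff(sp.exp(3 * x**2 + 2), x) - 6 * x * sp.exp(3 * x**2 + 2)) == 0
    assert sp.simplify(sp.diff(sp.log(x**2 + 1), x) - 2 * x / (x**2 + 1)) == 0
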
def testDiv(self):
    shape = [3]

    cxt = tc.Context()
    cxt.left = tc.tensor.Dense.arange(shape, 2., 8.)
    cxt.right = tc.tensor.Dense.constant([1], 2)
    cxt.result = cxt.left / cxt.right

    actual = self.host.post(ENDPOINT, cxt)
    expected = expect_dense(tc.F64, shape, np.arange(1, 4))
    self.assertEqual(actual, expected)

def testDeleteAll(self):
    keys = [(i, num2words(i)) for i in range(100)]

    cxt = tc.Context()
    cxt.tree = tc.btree.BTree(SCHEMA)
    cxt.inserts = [cxt.tree.insert(key) for key in keys]
    cxt.delete = tc.After(cxt.inserts, cxt.tree.delete())
    cxt.result = tc.After(cxt.delete, cxt.tree)

    result = self.host.post(ENDPOINT, cxt)
    self.assertEqual(result, expected([]))

def testSplitByNumber(self):
    splits = 3
    shape = (6, 30)
    x = np.ones(shape, dtype=np.int64)

    cxt = tc.Context()
    cxt.x1 = tc.tensor.Dense.load(x.shape, x.flatten().tolist(), tc.I64)
    cxt.x2 = tc.tensor.split(cxt.x1, splits, axis=0)
    cxt.result = [tc.tensor.Tensor(cxt.x2[i]).shape for i in range(splits)]

    actual = self.host.post(ENDPOINT, cxt)
    self.assertEqual(actual, [[shape[0] // splits, shape[1]]] * splits)

def testWriteAndSlice(self):
    shape = [2, 5]

    cxt = tc.Context()
    cxt.tensor = tc.tensor.Sparse.zeros(shape)
    cxt.result = tc.After(cxt.tensor[:, 2:-1].write(1), cxt.tensor)

    actual = self.host.post(ENDPOINT, cxt)
    expected = expect_sparse(tc.F32, shape, [
        [[0, 2], 1],
        [[0, 3], 1],
        [[1, 2], 1],
        [[1, 3], 1],
    ])
    self.assertEqual(actual, expected)

def testSparseAsDense(self):
    matrix = np.eye(3).astype(bool)
    data = [
        (list(coord), bool(matrix[coord]))
        for coord in np.ndindex(matrix.shape)
        if matrix[coord] != 0
    ]

    cxt = tc.Context()
    cxt.sparse = tc.tensor.Sparse.load([3, 3], data, tc.Bool)
    cxt.dense = cxt.sparse.as_dense()

    actual = self.host.post(ENDPOINT, cxt)
    expected = expect_dense(tc.Bool, [3, 3], matrix.flatten().tolist())
    self.assertEqual(actual, expected)

def testMul(self):
    cxt = tc.Context()
    cxt.dense = tc.tensor.Dense.arange([3], 0, 3)
    cxt.sparse = tc.tensor.Sparse.zeros([2, 3], tc.I32)
    cxt.result = tc.After(cxt.sparse[0, 1:3].write(2), cxt.dense * cxt.sparse)

    actual = self.host.post(ENDPOINT, cxt)
    expected = np.zeros([2, 3])
    expected[0, 1:3] = 2
    expected = expected * np.arange(0, 3)
    self.assertEqual(actual, expect_sparse(tc.I64, [2, 3], expected))

def testSubAndSum(self):
    x = 300
    y = 250
    z = 2

    cxt = tc.Context()
    cxt.sparse = tc.tensor.Sparse.zeros([1, y, z])
    cxt.dense = tc.tensor.Dense.ones([x, 1, z])
    cxt.result = (cxt.sparse - cxt.dense).sum()

    actual = self.host.post(ENDPOINT, cxt)
    self.assertEqual(actual, -(x * y * z))