def test_float_cast(self):
    """Verify ``__float__`` casting of scalar DNDarrays and the TypeError cases.

    Any tensor holding exactly one element (regardless of dimensionality or
    split) must cast to a Python ``float``; anything else must raise TypeError.
    """
    # simple scalar tensor
    a = ht.ones(1, device=ht_device)
    casted_a = float(a)
    self.assertEqual(casted_a, 1.0)
    self.assertIsInstance(casted_a, float)

    # multi-dimensional scalar tensor: still a single element, still castable
    b = ht.zeros((1, 1, 1, 1), device=ht_device)
    casted_b = float(b)
    self.assertEqual(casted_b, 0.0)
    self.assertIsInstance(casted_b, float)

    # split scalar tensor: distribution must not prevent the cast
    c = ht.full((1,), 5, split=0, device=ht_device)
    casted_c = float(c)
    self.assertEqual(casted_c, 5.0)
    self.assertIsInstance(casted_c, float)

    # exception on non-scalar tensor
    with self.assertRaises(TypeError):
        float(ht.empty(1, 2, 1, 1, device=ht_device))

    # exception on empty tensor
    with self.assertRaises(TypeError):
        float(ht.empty((0, 1, 2), device=ht_device))

    # exception on split tensor, where each chunk has size 1
    if ht.MPI_WORLD.size > 1:
        with self.assertRaises(TypeError):
            # BUGFIX: device= belongs to ht.full, not to float(); previously
            # float() raised TypeError on the unexpected keyword argument, so
            # the assertion passed without testing the split-tensor cast.
            float(ht.full((ht.MPI_WORLD.size,), 2, split=0, device=ht_device))
def test_bool_cast(self):
    """Verify ``__bool__`` conversion of scalar DNDarrays and the TypeError cases."""
    # a one-element tensor converts to its truth value
    one = ht.ones(1, device=ht_device)
    as_bool = bool(one)
    self.assertEqual(as_bool, True)
    self.assertIsInstance(as_bool, bool)

    # exactly one element converts regardless of dimensionality
    nested_zero = ht.zeros((1, 1, 1, 1), device=ht_device)
    as_bool = bool(nested_zero)
    self.assertEqual(as_bool, False)
    self.assertIsInstance(as_bool, bool)

    # distribution does not prevent the cast of a single-element tensor
    distributed_five = ht.full((1,), 5, split=0, device=ht_device)
    as_bool = bool(distributed_five)
    self.assertEqual(as_bool, True)
    self.assertIsInstance(as_bool, bool)

    # more than one element -> TypeError
    with self.assertRaises(TypeError):
        bool(ht.empty(1, 2, 1, 1, device=ht_device))

    # zero elements -> TypeError
    with self.assertRaises(TypeError):
        bool(ht.empty((0, 1, 2), device=ht_device))

    # split tensor whose chunks each hold one element -> still TypeError
    if ht.MPI_WORLD.size > 1:
        with self.assertRaises(TypeError):
            bool(ht.full((ht.MPI_WORLD.size,), 2, split=0, device=ht_device))
def test_int_cast(self):
    """Verify ``__int__`` conversion of scalar DNDarrays and the TypeError cases."""
    # a one-element tensor converts to a Python int
    one = ht.ones(1)
    as_int = int(one)
    self.assertEqual(as_int, 1)
    self.assertIsInstance(as_int, int)

    # exactly one element converts regardless of dimensionality
    nested_zero = ht.zeros((1, 1, 1, 1))
    as_int = int(nested_zero)
    self.assertEqual(as_int, 0)
    self.assertIsInstance(as_int, int)

    # distribution does not prevent the cast of a single-element tensor
    distributed_five = ht.full((1,), 5, split=0)
    as_int = int(distributed_five)
    self.assertEqual(as_int, 5)
    self.assertIsInstance(as_int, int)

    # more than one element -> TypeError
    with self.assertRaises(TypeError):
        int(ht.empty(1, 2, 1, 1))

    # zero elements -> TypeError
    with self.assertRaises(TypeError):
        int(ht.empty((0, 1, 2)))

    # split tensor whose chunks each hold one element -> still TypeError
    if ht.MPI_WORLD.size > 1:
        with self.assertRaises(TypeError):
            int(ht.full((ht.MPI_WORLD.size,), 2, split=0))
def test_cumprod(self):
    """Exercise ht.cumprod / ht.cumproduct across splits, dtypes, out= and error paths."""
    # split = None, cumulative product along the last axis
    t = ht.full((2, 4), 2, dtype=ht.int32)
    expected = ht.array([[2, 4, 8, 16], [2, 4, 8, 16]], dtype=ht.int32)
    self.assertTrue(ht.equal(ht.cumprod(t, 1), expected))
    # the cumproduct alias must behave identically
    self.assertTrue(ht.equal(ht.cumproduct(t, 1), expected))

    # cumulating along the split axis
    t = ht.full((4, 2), 2, dtype=ht.int64, split=0)
    expected = ht.array([[2, 2], [4, 4], [8, 8], [16, 16]], dtype=ht.int64, split=0)
    self.assertTrue(ht.equal(ht.cumprod(t, 0), expected))

    # 3D input written into a preallocated out buffer
    buffer = ht.empty((2, 2, 2), dtype=ht.float32, split=0)
    t = ht.full((2, 2, 2), 2, split=0)
    expected = ht.array([[[2, 2], [2, 2]], [[4, 4], [4, 4]]], dtype=ht.float32, split=0)
    accumulated = ht.cumprod(t, 0, out=buffer)
    self.assertTrue(ht.equal(accumulated, buffer))
    self.assertTrue(ht.equal(accumulated, expected))

    # dtype promotion via the dtype argument
    t = ht.full((2, 2, 2), 2, dtype=ht.int32, split=1)
    expected = ht.array([[[2, 2], [4, 4]], [[2, 2], [4, 4]]], dtype=ht.float32, split=1)
    self.assertTrue(ht.equal(ht.cumprod(t, 1, dtype=ht.float64), expected))

    # cumulating along the last axis, which is also the split axis
    t = ht.full((2, 2, 2), 2, dtype=ht.float32, split=2)
    expected = ht.array([[[2, 4], [2, 4]], [[2, 4], [2, 4]]], dtype=ht.float32, split=2)
    self.assertTrue(ht.equal(ht.cumprod(t, 2), expected))

    # error paths
    with self.assertRaises(NotImplementedError):
        ht.cumprod(ht.ones((2, 2)), axis=None)
    with self.assertRaises(TypeError):
        ht.cumprod(ht.ones((2, 2)), axis="1")
    with self.assertRaises(ValueError):
        # out buffer has the wrong split for this input
        ht.cumprod(t, 2, out=buffer)
    with self.assertRaises(ValueError):
        # axis out of bounds
        ht.cumprod(ht.ones((2, 2)), 2)
def _spectral_embedding(self, X):
    """
    Helper function to embed the dataset X into the eigenvectors of the graph Laplacian matrix

    Returns
    -------
    ht.DNDarray, shape=(m_lanczos):
        Eigenvalues of the graph's Laplacian matrix.
    ht.DNDarray, shape=(n, m_lanczos):
        Eigenvectors of the graph's Laplacian matrix.
    """
    L = self._laplacian.construct(X)
    # 3. Eigenvalue and -vector calculation via Lanczos Algorithm
    # starting vector: uniform unit vector of matching dtype/device/split
    v0 = ht.full(
        (L.shape[0],),
        fill_value=1.0 / math.sqrt(L.shape[0]),
        dtype=L.dtype,
        split=0,
        device=L.device,
    )
    V, T = ht.lanczos(L, self.n_lanczos, v0)

    # 4. Calculate and Sort Eigenvalues and Eigenvectors of tridiagonal matrix T
    try:
        # BUGFIX: torch.eig was deprecated in torch 1.9 and later removed;
        # prefer torch.linalg.eig and fall back to the legacy call below.
        eval, evec = torch.linalg.eig(T._DNDarray__array)
        # T is symmetric tridiagonal, so its spectrum is real — drop the
        # (zero) imaginary parts returned by the complex-valued eig.
        eval, idx = torch.sort(eval.real, dim=0)
        evec = evec.real
    except AttributeError:
        # older torch without torch.linalg.eig: use the legacy interface,
        # which returns eigenvalues as (n, 2) real/imag pairs
        eval, evec = torch.eig(T._DNDarray__array, eigenvectors=True)
        eval, idx = torch.sort(eval[:, 0], dim=0)
    eigenvalues = ht.array(eval)
    # If x is an Eigenvector of T, then y = V@x is the corresponding Eigenvector of L
    eigenvectors = ht.matmul(V, ht.array(evec))[:, idx]
    return eigenvalues, eigenvectors
def test_isreal(self):
    """Check ht.isreal against expected boolean masks for several input kinds."""

    def verify(candidate, expected):
        # shape, dtype, device and every element must match the expectation
        outcome = ht.isreal(candidate)
        self.assertEqual(outcome.shape, expected.shape)
        self.assertEqual(outcome.dtype, expected.dtype)
        self.assertEqual(outcome.device, expected.device)
        self.assertTrue(ht.equal(outcome, expected))

    # mixed values; a complex number with zero imaginary part counts as real
    verify(
        ht.array([1, 1.2, 1 + 1j, 1 + 0j]),
        ht.array([True, True, False, True]),
    )
    # ints, floats and booleans are all real, also when distributed
    verify(
        ht.array([1, 1.2, True], split=0),
        ht.array([True, True, True], split=0),
    )
    # a boolean tensor is entirely real
    verify(
        ht.ones((6, 6), dtype=ht.bool, split=0),
        ht.ones((6, 6), dtype=ht.bool, split=0),
    )
    # complex fill value yields an all-False mask
    verify(
        ht.full((5, 5), 1 + 1j, dtype=ht.int, split=1),
        ht.zeros((5, 5), dtype=ht.bool, split=1),
    )
def _spectral_embedding(self, x: DNDarray) -> Tuple[DNDarray, DNDarray]:
    """
    Helper function for dataset x embedding.
    Returns Tupel(Eigenvalues, Eigenvectors) of the graph's Laplacian matrix.

    Parameters
    ----------
    x : DNDarray
        Sample Matrix for which the embedding should be calculated
    """
    L = self._laplacian.construct(x)
    # 3. Eigenvalue and -vector calculation via Lanczos Algorithm
    # starting vector: uniform unit vector of matching dtype/device/split
    v0 = ht.full(
        (L.shape[0],),
        fill_value=1.0 / math.sqrt(L.shape[0]),
        dtype=L.dtype,
        split=0,
        device=L.device,
    )
    V, T = ht.lanczos(L, self.n_lanczos, v0)

    # 4. Calculate and Sort Eigenvalues and Eigenvectors of tridiagonal matrix T
    try:
        # BUGFIX: torch.eig was deprecated in torch 1.9 and later removed;
        # prefer torch.linalg.eig and fall back to the legacy call below.
        eval, evec = torch.linalg.eig(T.larray)
        # T is symmetric tridiagonal, so its spectrum is real — drop the
        # (zero) imaginary parts returned by the complex-valued eig.
        eval, idx = torch.sort(eval.real, dim=0)
        evec = evec.real
    except AttributeError:
        # older torch without torch.linalg.eig: use the legacy interface,
        # which returns eigenvalues as (n, 2) real/imag pairs
        eval, evec = torch.eig(T.larray, eigenvectors=True)
        eval, idx = torch.sort(eval[:, 0], dim=0)
    eigenvalues = ht.array(eval)
    # If x is an Eigenvector of T, then y = V@x is the corresponding Eigenvector of L
    eigenvectors = ht.matmul(V, ht.array(evec))[:, idx]
    return eigenvalues, eigenvectors
def test_full(self):
    """Validate ht.full for default, typed and split tensors plus its error cases."""
    # default dtype is float32
    filled = ht.full((10, 2), 4, device=ht_device)
    self.assertIsInstance(filled, ht.DNDarray)
    self.assertEqual(filled.shape, (10, 2))
    self.assertEqual(filled.lshape, (10, 2))
    self.assertEqual(filled.dtype, ht.float32)
    self.assertEqual(filled._DNDarray__array.dtype, torch.float32)
    self.assertEqual(filled.split, None)
    self.assertTrue(ht.allclose(filled, ht.float32(4.0, device=ht_device)))

    # explicitly requested integer dtype
    filled = ht.full((10, 2), 4, dtype=ht.int32, device=ht_device)
    self.assertIsInstance(filled, ht.DNDarray)
    self.assertEqual(filled.shape, (10, 2))
    self.assertEqual(filled.lshape, (10, 2))
    self.assertEqual(filled.dtype, ht.int32)
    self.assertEqual(filled._DNDarray__array.dtype, torch.int32)
    self.assertEqual(filled.split, None)
    self.assertTrue(ht.allclose(filled, ht.int32(4, device=ht_device)))

    # splitting along axis 0 distributes the rows
    filled = ht.full((10, 2), 4, split=0, device=ht_device)
    self.assertIsInstance(filled, ht.DNDarray)
    self.assertEqual(filled.shape, (10, 2))
    self.assertLessEqual(filled.lshape[0], 10)
    self.assertEqual(filled.lshape[1], 2)
    self.assertEqual(filled.dtype, ht.float32)
    self.assertEqual(filled._DNDarray__array.dtype, torch.float32)
    self.assertEqual(filled.split, 0)
    self.assertTrue(ht.allclose(filled, ht.float32(4.0, device=ht_device)))

    # invalid arguments
    with self.assertRaises(TypeError):
        ht.full("(2, 3,)", 4, dtype=ht.float64, device=ht_device)
    with self.assertRaises(ValueError):
        ht.full((-1, 3), 2, dtype=ht.float64, device=ht_device)
    with self.assertRaises(TypeError):
        ht.full((2, 3), dtype=ht.float64, split="axis", device=ht_device)
def _spectral_embedding(self, x: DNDarray) -> Tuple[DNDarray, DNDarray]:
    """
    Helper function for dataset x embedding.
    Returns Tupel(Eigenvalues, Eigenvectors) of the graph's Laplacian matrix.

    Parameters
    ----------
    x : DNDarray
        Sample Matrix for which the embedding should be calculated

    Notes
    -----
    This will throw out the complex side of the eigenvalues found during this.
    """
    # Build the graph Laplacian from the sample matrix.
    L = self._laplacian.construct(x)
    # 3. Eigenvalue and -vector calculation via Lanczos Algorithm
    # Starting vector for Lanczos: uniform unit-norm vector matching L's
    # dtype, device and split.
    v0 = ht.full(
        (L.shape[0],),
        fill_value=1.0 / math.sqrt(L.shape[0]),
        dtype=L.dtype,
        split=0,
        device=L.device,
    )
    V, T = ht.lanczos(L, self.n_lanczos, v0)

    # if int(torch.__version__.split(".")[1]) >= 9:
    # The try/except below dispatches on the installed torch version:
    # torch.linalg.eig is available from torch 1.9 on, while older versions
    # only provide the legacy torch.eig.
    try:
        # 4. Calculate and Sort Eigenvalues and Eigenvectors of tridiagonal matrix T
        # torch.linalg.eig returns complex tensors; only the real parts are
        # kept (see Notes), since T is expected to have a real spectrum.
        eval, evec = torch.linalg.eig(T.larray)
        # If x is an Eigenvector of T, then y = V@x is the corresponding Eigenvector of L
        eval, idx = torch.sort(eval.real, dim=0)
        eigenvalues = ht.array(eval)
        eigenvectors = ht.matmul(V, ht.array(evec))[:, idx]
        return eigenvalues.real, eigenvectors.real
    except AttributeError:  # torch version is less than 1.9.0
        # 4. Calculate and Sort Eigenvalues and Eigenvectors of tridiagonal matrix T
        # Legacy torch.eig returns eigenvalues as (n, 2) real/imag pairs;
        # column 0 holds the real parts used for sorting.
        eval, evec = torch.eig(T.larray, eigenvectors=True)
        # If x is an Eigenvector of T, then y = V@x is the corresponding Eigenvector of L
        eval, idx = torch.sort(eval[:, 0], dim=0)
        eigenvalues = ht.array(eval)
        eigenvectors = ht.matmul(V, ht.array(evec))[:, idx]
        return eigenvalues, eigenvectors
def test_prod(self):
    """Exercise DNDarray.prod / ht.prod over dtypes, splits, axes, out= and errors."""
    array_len = 11

    # full reduction of a local 1D float tensor
    ones_1d = ht.ones(array_len)
    total = ones_1d.prod()
    self.assertIsInstance(total, ht.DNDarray)
    self.assertEqual(total.shape, (1,))
    self.assertEqual(total.lshape, (1,))
    self.assertEqual(total.dtype, ht.float32)
    self.assertEqual(total.larray.dtype, torch.float32)
    self.assertEqual(total.split, None)
    self.assertEqual(total.larray, 1)

    buf = ht.zeros((1,))
    ht.prod(ones_1d, out=buf)
    self.assertEqual(buf.larray, 1)

    # full reduction of a split 1D integer range: 10! = 3628800
    split_range = ht.arange(1, array_len, split=0)
    range_total = split_range.prod()
    self.assertIsInstance(range_total, ht.DNDarray)
    self.assertEqual(range_total.shape, (1,))
    self.assertEqual(range_total.lshape, (1,))
    self.assertEqual(range_total.dtype, ht.int64)
    self.assertEqual(range_total.larray.dtype, torch.int64)
    self.assertEqual(range_total.split, None)
    self.assertEqual(range_total, 3628800)

    buf = ht.zeros((1,))
    ht.prod(split_range, out=buf)
    self.assertEqual(buf.larray, 3628800)

    # full reduction of a local 3D tensor: 2**27 = 134217728
    cube = ht.full((3, 3, 3), 2)
    cube_total = cube.prod()
    self.assertIsInstance(cube_total, ht.DNDarray)
    self.assertEqual(cube_total.shape, (1,))
    self.assertEqual(cube_total.lshape, (1,))
    self.assertEqual(cube_total.dtype, ht.float32)
    self.assertEqual(cube_total.larray.dtype, torch.float32)
    self.assertEqual(cube_total.split, None)
    self.assertEqual(cube_total.larray, 134217728)

    buf = ht.zeros((1,))
    ht.prod(cube, out=buf)
    self.assertEqual(buf.larray, 134217728)

    # axis reduction along the split dimension of a 3D tensor
    split_cube = ht.full((3, 3, 3), 2, split=0)
    axis_total = split_cube.prod(axis=0)
    self.assertIsInstance(axis_total, ht.DNDarray)
    self.assertEqual(axis_total.shape, (3, 3))
    self.assertEqual(axis_total.dtype, ht.float32)
    self.assertEqual(axis_total.larray.dtype, torch.float32)
    self.assertEqual(axis_total.split, None)

    # NOTE: intentionally reduces the unsplit `cube` here (as in the original
    # test); each column product is 2**3 = 8
    axis_buf = ht.ones((3, 3))
    ht.prod(cube, axis=0, out=axis_buf)
    self.assertTrue(
        (
            axis_buf.larray
            == torch.full((3,), 8, dtype=torch.float, device=self.device.torch_device)
        ).all()
    )

    # negative axis on a split 5D tensor
    split_5d = ht.full((1, 2, 3, 4, 5), 2, split=1)
    neg_axis_total = split_5d.prod(axis=-2)
    self.assertIsInstance(neg_axis_total, ht.DNDarray)
    self.assertEqual(neg_axis_total.shape, (1, 2, 3, 5))
    self.assertEqual(neg_axis_total.dtype, ht.float32)
    self.assertEqual(neg_axis_total.larray.dtype, torch.float32)
    self.assertEqual(neg_axis_total.split, 1)

    buf = ht.zeros((1, 2, 3, 5), split=1)
    ht.prod(split_5d, axis=-2, out=buf)

    # tuple axis on a split 3D tensor
    split_3d = ht.ones((3, 4, 5), split=1)
    tuple_axis_total = split_3d.prod(axis=(-2, -3))
    expected = ht.ones((5,))
    self.assertIsInstance(tuple_axis_total, ht.DNDarray)
    self.assertEqual(tuple_axis_total.shape, (5,))
    self.assertEqual(tuple_axis_total.dtype, ht.float32)
    self.assertEqual(tuple_axis_total.larray.dtype, torch.float32)
    self.assertEqual(tuple_axis_total.split, None)
    self.assertTrue((tuple_axis_total == expected).all())

    # error paths
    with self.assertRaises(ValueError):
        ht.ones(array_len).prod(axis=1)
    with self.assertRaises(ValueError):
        ht.ones(array_len).prod(axis=-2)
    with self.assertRaises(ValueError):
        # out buffer shape does not match the reduced shape
        ht.ones((4, 4)).prod(axis=0, out=buf)
    with self.assertRaises(TypeError):
        ht.ones(array_len).prod(axis="bad_axis_type")
def test_full(self):
    """Validate ht.full for default, typed and split tensors plus its error cases."""
    # default dtype is float32
    result = ht.full((10, 2), 4)
    self.assertIsInstance(result, ht.tensor)
    self.assertEqual(result.shape, (10, 2))
    self.assertEqual(result.lshape, (10, 2))
    self.assertEqual(result.dtype, ht.float32)
    self.assertEqual(result._tensor__array.dtype, torch.float32)
    self.assertEqual(result.split, None)
    self.assertTrue(ht.allclose(result, ht.float32(4.0)))

    # explicitly requested integer dtype
    result = ht.full((10, 2), 4, dtype=ht.int32)
    self.assertIsInstance(result, ht.tensor)
    self.assertEqual(result.shape, (10, 2))
    self.assertEqual(result.lshape, (10, 2))
    self.assertEqual(result.dtype, ht.int32)
    self.assertEqual(result._tensor__array.dtype, torch.int32)
    self.assertEqual(result.split, None)
    self.assertTrue(ht.allclose(result, ht.int32(4)))

    # splitting along axis 0 distributes the rows
    result = ht.full((10, 2), 4, split=0)
    self.assertIsInstance(result, ht.tensor)
    self.assertEqual(result.shape, (10, 2))
    self.assertLessEqual(result.lshape[0], 10)
    self.assertEqual(result.lshape[1], 2)
    self.assertEqual(result.dtype, ht.float32)
    self.assertEqual(result._tensor__array.dtype, torch.float32)
    self.assertEqual(result.split, 0)
    self.assertTrue(ht.allclose(result, ht.float32(4.0)))

    # invalid arguments
    with self.assertRaises(TypeError):
        ht.full('(2, 3,)', 4, dtype=ht.float64)
    with self.assertRaises(ValueError):
        ht.full((-1, 3), 2, dtype=ht.float64)
    with self.assertRaises(TypeError):
        ht.full((2, 3), dtype=ht.float64, split='axis')
def test_full(self):
    """A complex fill value must promote the result dtype to complex64."""
    filled = ht.full((4, 4), 1 + 1j)
    self.assertIs(filled.dtype, ht.complex64)