def test_eq(self):
    """Element-wise equality (``ht.eq``) across operand combinations, result dtype uint8."""
    expected = ht.uint8([[0, 1], [0, 0]])

    # every supported operand pairing must produce the same truth table
    for lhs, rhs, want in (
        (self.a_scalar, self.a_scalar, ht.uint8([1])),
        (self.a_tensor, self.a_scalar, expected),
        (self.a_scalar, self.a_tensor, expected),
        (self.a_tensor, self.another_tensor, expected),
        (self.a_tensor, self.a_vector, expected),
        (self.a_tensor, self.an_int_scalar, expected),
        (self.a_split_tensor, self.a_tensor, expected),
    ):
        self.assertTrue(ht.equal(ht.eq(lhs, rhs), want))

    # non-broadcastable shapes are rejected
    with self.assertRaises(ValueError):
        ht.eq(self.a_tensor, self.another_vector)
    # unsupported operand types are rejected
    with self.assertRaises(TypeError):
        ht.eq(self.a_tensor, self.errorneous_type)
    with self.assertRaises(TypeError):
        ht.eq("self.a_tensor", "s")
def test_mul(self):
    """Element-wise multiplication (``ht.mul``) across operand combinations."""
    expected = ht.array([[2.0, 4.0], [6.0, 8.0]])

    # all operand pairings multiply to the same expected tensor
    for lhs, rhs, want in (
        (self.a_scalar, self.a_scalar, ht.array([4.0])),
        (self.a_tensor, self.a_scalar, expected),
        (self.a_scalar, self.a_tensor, expected),
        (self.a_tensor, self.another_tensor, expected),
        (self.a_tensor, self.a_vector, expected),
        (self.a_tensor, self.an_int_scalar, expected),
        (self.a_split_tensor, self.a_tensor, expected),
    ):
        self.assertTrue(ht.equal(ht.mul(lhs, rhs), want))

    # non-broadcastable shapes are rejected
    with self.assertRaises(ValueError):
        ht.mul(self.a_tensor, self.another_vector)
    # unsupported operand types are rejected
    with self.assertRaises(TypeError):
        ht.mul(self.a_tensor, self.errorneous_type)
    with self.assertRaises(TypeError):
        ht.mul("T", "s")
def test_ge(self):
    # Element-wise >= comparisons (``ht.ge``).
    #
    # NOTE(review): the operands s, T, T1, v, s_int, T_s, v2, Ts and
    # otherType are not defined inside this method and, unlike the sibling
    # tests in this file, are not read from ``self`` -- presumably they are
    # module- or class-level fixtures defined outside this chunk; confirm,
    # otherwise this test fails with NameError.
    T_r = ht.uint8([[0, 1], [1, 1]])  # expected mask when T is the left operand
    T_inv = ht.uint8([[1, 1], [0, 0]])  # expected mask for the commutated cases
    self.assertTrue(ht.equal(ht.ge(s, s), ht.uint8([1])))
    self.assertTrue(ht.equal(ht.ge(T, s), T_r))
    self.assertTrue(ht.equal(ht.ge(s, T), T_inv))
    self.assertTrue(ht.equal(ht.ge(T, T1), T_r))
    self.assertTrue(ht.equal(ht.ge(T, v), T_r))
    self.assertTrue(ht.equal(ht.ge(T, s_int), T_r))
    self.assertTrue(ht.equal(ht.ge(T_s, T), T_inv))
    # non-broadcastable shapes are rejected
    with self.assertRaises(ValueError):
        ht.ge(T, v2)
    # presumably Ts is a tensor split in a way ht.ge does not support -- verify fixture
    with self.assertRaises(NotImplementedError):
        ht.ge(T, Ts)
    # unsupported operand types are rejected
    with self.assertRaises(TypeError):
        ht.ge(T, otherType)
    with self.assertRaises(TypeError):
        ht.ge('T', 's')
def test_add(self):
    """Element-wise addition (``ht.add``) across operand combinations."""
    expected = ht.array([[3.0, 4.0], [5.0, 6.0]], device=ht_device)

    # all operand pairings add up to the same expected tensor
    for lhs, rhs, want in (
        (self.a_scalar, self.a_scalar, ht.float32([4.0])),
        (self.a_tensor, self.a_scalar, expected),
        (self.a_scalar, self.a_tensor, expected),
        (self.a_tensor, self.another_tensor, expected),
        (self.a_tensor, self.a_vector, expected),
        (self.a_tensor, self.an_int_scalar, expected),
        (self.a_split_tensor, self.a_tensor, expected),
    ):
        self.assertTrue(ht.equal(ht.add(lhs, rhs), want))

    # non-broadcastable shapes are rejected
    with self.assertRaises(ValueError):
        ht.add(self.a_tensor, self.another_vector)
    # unsupported operand types are rejected
    with self.assertRaises(TypeError):
        ht.add(self.a_tensor, self.errorneous_type)
    with self.assertRaises(TypeError):
        ht.add("T", "s")
def test_cumsum(self):
    """Cumulative sum along an axis for 2-D/3-D inputs, with out=, dtype= and error cases."""
    # 2-D, split = None
    x = ht.ones((2, 4), dtype=ht.int32)
    expected = ht.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=ht.int32)
    res = ht.cumsum(x, 1)
    self.assertTrue(ht.equal(res, expected))

    # 2-D, accumulated along the split axis
    x = ht.ones((4, 2), dtype=ht.int64, split=0)
    expected = ht.array([[1, 1], [2, 2], [3, 3], [4, 4]], dtype=ht.int64, split=0)
    res = ht.cumsum(x, 0)
    self.assertTrue(ht.equal(res, expected))

    # 3D: out= buffer must receive the result as well
    buffer = ht.empty((2, 2, 2), dtype=ht.float32, split=0)
    x = ht.ones((2, 2, 2), split=0)
    expected = ht.array([[[1, 1], [1, 1]], [[2, 2], [2, 2]]], dtype=ht.float32, split=0)
    res = ht.cumsum(x, 0, out=buffer)
    self.assertTrue(ht.equal(res, buffer))
    self.assertTrue(ht.equal(res, expected))

    # explicit accumulation dtype
    x = ht.ones((2, 2, 2), dtype=ht.int32, split=1)
    expected = ht.array([[[1, 1], [2, 2]], [[1, 1], [2, 2]]], dtype=ht.float32, split=1)
    res = ht.cumsum(x, 1, dtype=ht.float64)
    self.assertTrue(ht.equal(res, expected))

    # accumulate along the last (split) axis
    x = ht.ones((2, 2, 2), dtype=ht.float32, split=2)
    expected = ht.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]], dtype=ht.float32, split=2)
    res = ht.cumsum(x, 2)
    self.assertTrue(ht.equal(res, expected))

    # axis=None is unsupported
    with self.assertRaises(NotImplementedError):
        ht.cumsum(ht.ones((2, 2)), axis=None)
    # axis must be an integer
    with self.assertRaises(TypeError):
        ht.cumsum(ht.ones((2, 2)), axis="1")
    # out= buffer incompatible with this input (x/buffer bound above)
    with self.assertRaises(ValueError):
        ht.cumsum(x, 2, out=buffer)
    # axis out of range for a 2-D input
    with self.assertRaises(ValueError):
        ht.cumsum(ht.ones((2, 2)), 2)
def test_sub(self):
    """Element-wise subtraction (``ht.sub``), including the non-commutative orderings."""
    expected = ht.array([[-1.0, 0.0], [1.0, 2.0]])
    expected_swapped = ht.array([[1.0, 0.0], [-1.0, -2.0]])

    # operand order matters: swapped operands must negate the result
    for lhs, rhs, want in (
        (self.a_scalar, self.a_scalar, ht.array(0.0)),
        (self.a_tensor, self.a_scalar, expected),
        (self.a_scalar, self.a_tensor, expected_swapped),
        (self.a_tensor, self.another_tensor, expected),
        (self.a_tensor, self.a_vector, expected),
        (self.a_tensor, self.an_int_scalar, expected),
        (self.a_split_tensor, self.a_tensor, expected_swapped),
    ):
        self.assertTrue(ht.equal(ht.sub(lhs, rhs), want))

    # non-broadcastable shapes are rejected
    with self.assertRaises(ValueError):
        ht.sub(self.a_tensor, self.another_vector)
    # unsupported operand types are rejected
    with self.assertRaises(TypeError):
        ht.sub(self.a_tensor, self.erroneous_type)
    with self.assertRaises(TypeError):
        ht.sub("T", "s")
def test_pow(self):
    """Element-wise exponentiation (``ht.pow``), base/exponent in both orders."""
    expected = ht.array([[1.0, 4.0], [9.0, 16.0]])
    expected_swapped = ht.array([[2.0, 4.0], [8.0, 16.0]])

    # base**exponent vs exponent**base give different tables
    for base, exponent, want in (
        (self.a_scalar, self.a_scalar, ht.array(4.0)),
        (self.a_tensor, self.a_scalar, expected),
        (self.a_scalar, self.a_tensor, expected_swapped),
        (self.a_tensor, self.another_tensor, expected),
        (self.a_tensor, self.a_vector, expected),
        (self.a_tensor, self.an_int_scalar, expected),
        (self.a_split_tensor, self.a_tensor, expected_swapped),
    ):
        self.assertTrue(ht.equal(ht.pow(base, exponent), want))

    # non-broadcastable shapes are rejected
    with self.assertRaises(ValueError):
        ht.pow(self.a_tensor, self.another_vector)
    # unsupported operand types are rejected
    with self.assertRaises(TypeError):
        ht.pow(self.a_tensor, self.erroneous_type)
    with self.assertRaises(TypeError):
        ht.pow("T", "s")
def test_eq(self):
    """Element-wise equality (``ht.eq``) with boolean results."""
    expected = ht.array([[False, True], [False, False]])

    # every supported operand pairing must produce the same truth table
    for lhs, rhs, want in (
        (self.a_scalar, self.a_scalar, ht.array(True)),
        (self.a_tensor, self.a_scalar, expected),
        (self.a_scalar, self.a_tensor, expected),
        (self.a_tensor, self.another_tensor, expected),
        (self.a_tensor, self.a_vector, expected),
        (self.a_tensor, self.an_int_scalar, expected),
        (self.a_split_tensor, self.a_tensor, expected),
    ):
        self.assertTrue(ht.equal(ht.eq(lhs, rhs), want))

    # the comparison result dtype is boolean
    self.assertEqual(ht.eq(self.a_split_tensor, self.a_tensor).dtype, ht.bool)

    # non-broadcastable shapes are rejected
    with self.assertRaises(ValueError):
        ht.eq(self.a_tensor, self.another_vector)
    # unsupported operand types are rejected
    with self.assertRaises(TypeError):
        ht.eq(self.a_tensor, self.errorneous_type)
    with self.assertRaises(TypeError):
        ht.eq("self.a_tensor", "s")
def test_gt(self):
    """Element-wise strict greater-than (``ht.gt``), both operand orders."""
    expected = ht.uint8([[0, 0], [1, 1]], device=ht_device)
    expected_swapped = ht.uint8([[1, 0], [0, 0]], device=ht_device)

    # swapping operands flips which elements compare greater
    for lhs, rhs, want in (
        (self.a_scalar, self.a_scalar, ht.uint8([0])),
        (self.a_tensor, self.a_scalar, expected),
        (self.a_scalar, self.a_tensor, expected_swapped),
        (self.a_tensor, self.another_tensor, expected),
        (self.a_tensor, self.a_vector, expected),
        (self.a_tensor, self.an_int_scalar, expected),
        (self.a_split_tensor, self.a_tensor, expected_swapped),
    ):
        self.assertTrue(ht.equal(ht.gt(lhs, rhs), want))

    # non-broadcastable shapes are rejected
    with self.assertRaises(ValueError):
        ht.gt(self.a_tensor, self.another_vector)
    # unsupported operand types are rejected
    with self.assertRaises(TypeError):
        ht.gt(self.a_tensor, self.errorneous_type)
    with self.assertRaises(TypeError):
        ht.gt("self.a_tensor", "s")
def test_div(self):
    """Element-wise true division (``ht.div``), both operand orders."""
    expected = ht.array([[0.5, 1.0], [1.5, 2.0]])
    expected_swapped = ht.array([[2.0, 1.0], [2.0 / 3.0, 0.5]])

    # numerator/denominator order matters
    for lhs, rhs, want in (
        (self.a_scalar, self.a_scalar, ht.float32(1.0)),
        (self.a_tensor, self.a_scalar, expected),
        (self.a_scalar, self.a_tensor, expected_swapped),
        (self.a_tensor, self.another_tensor, expected),
        (self.a_tensor, self.a_vector, expected),
        (self.a_tensor, self.an_int_scalar, expected),
        (self.a_split_tensor, self.a_tensor, expected_swapped),
    ):
        self.assertTrue(ht.equal(ht.div(lhs, rhs), want))

    # non-broadcastable shapes are rejected
    with self.assertRaises(ValueError):
        ht.div(self.a_tensor, self.another_vector)
    # unsupported operand types are rejected
    with self.assertRaises(TypeError):
        ht.div(self.a_tensor, self.erroneous_type)
    with self.assertRaises(TypeError):
        ht.div("T", "s")
def test_fill_diagonal(self):
    """fill_diagonal(0) on identity matrices must yield all-zero matrices."""
    sz = ht.MPI_WORLD.size
    # (ht.eye argument, reference shape, dtype, split) -- the eye argument
    # mirrors the original scalar-vs-tuple invocations exactly
    cases = [
        (sz * 2, (sz * 2, sz * 2), ht.float32, 0),
        (sz * 2, (sz * 2, sz * 2), ht.int32, 0),
        (sz * 2, (sz * 2, sz * 2), ht.float32, 1),
        ((sz * 2, sz * 3), (sz * 2, sz * 3), ht.float32, 0),
        # ToDo: uneven tensor dimensions x and y when bug in factories.eye is fixed
        ((sz * 3, sz * 3), (sz * 3, sz * 3), ht.float32, 1),
        # ToDo: uneven tensor dimensions x and y when bug in factories.eye is fixed
        ((sz * 4, sz * 4), (sz * 4, sz * 4), ht.float32, 0),
    ]
    for eye_arg, ref_shape, dtype, split in cases:
        ref = ht.zeros(ref_shape, dtype=dtype, split=split)
        a = ht.eye(eye_arg, dtype=dtype, split=split)
        a.fill_diagonal(0)
        self.assertTrue(ht.equal(a, ref))

    # a 1-D tensor has no diagonal to fill
    a = ht.ones((sz * 2,), dtype=ht.float32, split=0)
    with self.assertRaises(ValueError):
        a.fill_diagonal(0)
def test_load_csv(self):
    """Load a known 150x4 CSV file with various separators, splits and header handling."""
    # fixture dimensions of the CSV file at self.CSV_PATH
    csv_file_length = 150
    csv_file_cols = 4
    # spot-check values: first data row and the tenth data row of the file
    first_value = torch.tensor([5.1, 3.5, 1.4, 0.2], dtype=torch.float32, device=self.device.torch_device)
    tenth_value = torch.tensor([4.9, 3.1, 1.5, 0.1], dtype=torch.float32, device=self.device.torch_device)

    # unsplit load: full shape and content locally available
    a = ht.load_csv(self.CSV_PATH, sep=";")
    self.assertEqual(len(a), csv_file_length)
    self.assertEqual(a.shape, (csv_file_length, csv_file_cols))
    self.assertTrue(torch.equal(a.larray[0], first_value))
    self.assertTrue(torch.equal(a.larray[9], tenth_value))

    # row-split load: local shape must match the communicator's row counts
    a = ht.load_csv(self.CSV_PATH, sep=";", split=0)
    rank = a.comm.Get_rank()
    expected_gshape = (csv_file_length, csv_file_cols)
    self.assertEqual(a.gshape, expected_gshape)
    counts, _, _ = a.comm.counts_displs_shape(expected_gshape, 0)
    expected_lshape = (counts[rank], csv_file_cols)
    self.assertEqual(a.lshape, expected_lshape)
    if rank == 0:
        # only rank 0 holds the first row of the file
        self.assertTrue(torch.equal(a.larray[0], first_value))

    # skipping 9 header lines shifts the first loaded row to the tenth value
    a = ht.load_csv(self.CSV_PATH, sep=";", header_lines=9, dtype=ht.float32, split=0)
    expected_gshape = (csv_file_length - 9, csv_file_cols)
    counts, _, _ = a.comm.counts_displs_shape(expected_gshape, 0)
    expected_lshape = (counts[rank], csv_file_cols)
    self.assertEqual(a.gshape, expected_gshape)
    self.assertEqual(a.lshape, expected_lshape)
    self.assertEqual(a.dtype, ht.float32)
    if rank == 0:
        self.assertTrue(torch.equal(a.larray[0], tenth_value))

    # column-split load: every process keeps all rows
    a = ht.load_csv(self.CSV_PATH, sep=";", split=1)
    self.assertEqual(a.shape, (csv_file_length, csv_file_cols))
    self.assertEqual(a.lshape[0], csv_file_length)

    # ht.load must dispatch to the CSV loader for .csv paths
    a = ht.load_csv(self.CSV_PATH, sep=";", split=0)
    b = ht.load(self.CSV_PATH, sep=";", split=0)
    self.assertTrue(ht.equal(a, b))

    # Test for csv where the header is longer than the first process's share of lines
    a = ht.load_csv(self.CSV_PATH, sep=";", header_lines=100, split=0)
    self.assertEqual(a.shape, (50, 4))

    # invalid argument types are rejected
    with self.assertRaises(TypeError):
        ht.load_csv(12314)
    with self.assertRaises(TypeError):
        ht.load_csv(self.CSV_PATH, sep=11)
    with self.assertRaises(TypeError):
        ht.load_csv(self.CSV_PATH, header_lines="3", sep=";", split=0)
def test_add(self):
    """Element-wise addition, including distributed edge cases (single-element
    splits, differently distributed operands, broadcasting over the split axis)."""
    # test basics
    result = ht.array([[3.0, 4.0], [5.0, 6.0]])
    self.assertTrue(ht.equal(ht.add(self.a_scalar, self.a_scalar), ht.float32(4.0)))
    self.assertTrue(ht.equal(ht.add(self.a_tensor, self.a_scalar), result))
    self.assertTrue(ht.equal(ht.add(self.a_scalar, self.a_tensor), result))
    self.assertTrue(ht.equal(ht.add(self.a_tensor, self.another_tensor), result))
    self.assertTrue(ht.equal(ht.add(self.a_tensor, self.a_vector), result))
    self.assertTrue(ht.equal(ht.add(self.a_tensor, self.an_int_scalar), result))
    self.assertTrue(ht.equal(ht.add(self.a_split_tensor, self.a_tensor), result))

    # Single element split: result stays correct when some ranks hold no data
    a = ht.array([1], split=0)
    b = ht.array([1, 2], split=0)
    c = ht.add(a, b)
    self.assertTrue(ht.equal(c, ht.array([2, 3])))
    if c.comm.size > 1:
        # only the first two ranks hold one element each, the rest are empty
        if c.comm.rank < 2:
            self.assertEqual(c.larray.size()[0], 1)
        else:
            self.assertEqual(c.larray.size()[0], 0)

    # test with differently distributed DNDarrays
    a = ht.ones(10, split=0)
    b = ht.zeros(10, split=0)
    c = a[:-1] + b[1:]
    self.assertTrue((c == 1).all())
    self.assertTrue(c.lshape == a[:-1].lshape)

    c = a[1:-1] + b[1:-1]  # test unbalanced
    self.assertTrue((c == 1).all())
    self.assertTrue(c.lshape == a[1:-1].lshape)

    # test one unsplit: the result follows the split operand's distribution
    a = ht.ones(10, split=None)
    b = ht.zeros(10, split=0)
    c = a[:-1] + b[1:]
    self.assertTrue((c == 1).all())
    self.assertEqual(c.lshape, b[1:].lshape)
    c = b[:-1] + a[1:]
    self.assertTrue((c == 1).all())
    self.assertEqual(c.lshape, b[:-1].lshape)

    # broadcast in split dimension
    a = ht.ones((1, 10), split=0)
    b = ht.zeros((2, 10), split=0)
    c = a + b
    self.assertTrue((c == 1).all())
    self.assertTrue(c.lshape == b.lshape)
    c = b + a
    self.assertTrue((c == 1).all())
    self.assertTrue(c.lshape == b.lshape)

    # non-broadcastable shapes and unsupported operand types are rejected
    with self.assertRaises(ValueError):
        ht.add(self.a_tensor, self.another_vector)
    with self.assertRaises(TypeError):
        ht.add(self.a_tensor, self.erroneous_type)
    with self.assertRaises(TypeError):
        ht.add("T", "s")
def test_equal(self):
    """``ht.equal`` is True only when both operands match element-wise."""
    # identical operands compare equal
    self.assertTrue(ht.equal(self.a_tensor, self.a_tensor))
    # any value or shape mismatch yields False
    for lhs, rhs in (
        (self.a_tensor, self.another_tensor),
        (self.a_tensor, self.a_scalar),
        (self.another_tensor, self.a_scalar),
    ):
        self.assertFalse(ht.equal(lhs, rhs))
def test_resplit(self):
    """Redistribute DNDarrays across split axes (in place via resplit_ and via
    ht.manipulations.resplit) and verify shapes, local shapes and content.

    NOTE(review): ``ht_device`` and ``device`` are not defined in this method
    and presumably come from module scope outside this chunk -- confirm.
    """
    # resplitting with same axis, should leave everything unchanged
    shape = (ht.MPI_WORLD.size, ht.MPI_WORLD.size)
    data = ht.zeros(shape, split=None, device=ht_device)
    data.resplit_(None)
    self.assertIsInstance(data, ht.DNDarray)
    self.assertEqual(data.shape, shape)
    self.assertEqual(data.lshape, shape)
    self.assertEqual(data.split, None)

    # resplitting with same axis, should leave everything unchanged
    shape = (ht.MPI_WORLD.size, ht.MPI_WORLD.size)
    data = ht.zeros(shape, split=1, device=ht_device)
    data.resplit_(1)
    self.assertIsInstance(data, ht.DNDarray)
    self.assertEqual(data.shape, shape)
    self.assertEqual(data.lshape, (data.comm.size, 1))
    self.assertEqual(data.split, 1)

    # splitting an unsplit tensor should result in slicing the tensor locally
    shape = (ht.MPI_WORLD.size, ht.MPI_WORLD.size)
    data = ht.zeros(shape, device=ht_device)
    data.resplit_(-1)  # negative axis: -1 resolves to the last axis (1)
    self.assertIsInstance(data, ht.DNDarray)
    self.assertEqual(data.shape, shape)
    self.assertEqual(data.lshape, (data.comm.size, 1))
    self.assertEqual(data.split, 1)

    # unsplitting, aka gathering a tensor
    shape = (ht.MPI_WORLD.size + 1, ht.MPI_WORLD.size)
    data = ht.ones(shape, split=0, device=ht_device)
    data.resplit_(None)
    self.assertIsInstance(data, ht.DNDarray)
    self.assertEqual(data.shape, shape)
    self.assertEqual(data.lshape, shape)
    self.assertEqual(data.split, None)

    # assign and entirely new split axis
    shape = (ht.MPI_WORLD.size + 2, ht.MPI_WORLD.size + 1)
    data = ht.ones(shape, split=0, device=ht_device)
    data.resplit_(1)
    self.assertIsInstance(data, ht.DNDarray)
    self.assertEqual(data.shape, shape)
    self.assertEqual(data.lshape[0], ht.MPI_WORLD.size + 2)
    # size+1 columns over size processes: every rank holds 1 or 2 columns
    self.assertTrue(data.lshape[1] == 1 or data.lshape[1] == 2)
    self.assertEqual(data.split, 1)

    # test sorting order of resplit
    a_tensor = self.reference_tensor.copy()
    N = ht.MPI_WORLD.size

    # split along axis = 0
    a_tensor.resplit_(axis=0)
    local_shape = (1, N + 1, 2 * N)
    local_tensor = self.reference_tensor[ht.MPI_WORLD.rank, :, :]
    self.assertEqual(a_tensor.lshape, local_shape)
    self.assertTrue((a_tensor._DNDarray__array == local_tensor._DNDarray__array).all())

    # unsplit
    a_tensor.resplit_(axis=None)
    self.assertTrue((a_tensor._DNDarray__array == self.reference_tensor._DNDarray__array).all())

    # split along axis = 1: rank 0 gets the 2-row remainder slice
    a_tensor.resplit_(axis=1)
    if ht.MPI_WORLD.rank == 0:
        local_shape = (N, 2, 2 * N)
        local_tensor = self.reference_tensor[:, 0:2, :]
    else:
        local_shape = (N, 1, 2 * N)
        local_tensor = self.reference_tensor[:, ht.MPI_WORLD.rank + 1:ht.MPI_WORLD.rank + 2, :]
    self.assertEqual(a_tensor.lshape, local_shape)
    self.assertTrue((a_tensor._DNDarray__array == local_tensor._DNDarray__array).all())

    # unsplit
    a_tensor.resplit_(axis=None)
    self.assertTrue((a_tensor._DNDarray__array == self.reference_tensor._DNDarray__array).all())

    # split along axis = 2
    a_tensor.resplit_(axis=2)
    local_shape = (N, N + 1, 2)
    local_tensor = self.reference_tensor[:, :, 2 * ht.MPI_WORLD.rank:2 * ht.MPI_WORLD.rank + 2]
    self.assertEqual(a_tensor.lshape, local_shape)
    self.assertTrue((a_tensor._DNDarray__array == local_tensor._DNDarray__array).all())

    # gathering preserves dtype and the underlying torch dtype (int64)
    expected = torch.ones((ht.MPI_WORLD.size, 100), dtype=torch.int64, device=device)
    data = ht.array(expected, split=1, device=ht_device)
    data.resplit_(None)
    self.assertTrue(torch.equal(data._DNDarray__array, expected))
    self.assertFalse(data.is_distributed())
    self.assertIsNone(data.split)
    self.assertEqual(data.dtype, ht.int64)
    self.assertEqual(data._DNDarray__array.dtype, expected.dtype)

    # same for uint8, gathering from split=0
    expected = torch.zeros((100, ht.MPI_WORLD.size), dtype=torch.uint8, device=device)
    data = ht.array(expected, split=0, device=ht_device)
    data.resplit_(None)
    self.assertTrue(torch.equal(data._DNDarray__array, expected))
    self.assertFalse(data.is_distributed())
    self.assertIsNone(data.split)
    self.assertEqual(data.dtype, ht.uint8)
    self.assertEqual(data._DNDarray__array.dtype, expected.dtype)

    # "in place"
    length = torch.tensor([i + 20 for i in range(2)], device=device)
    test = torch.arange(torch.prod(length), dtype=torch.float64, device=device).reshape([i + 20 for i in range(2)])
    a = ht.array(test, split=1)
    a.resplit_(axis=0)
    self.assertTrue(ht.equal(a, ht.array(test, split=0)))
    self.assertEqual(a.split, 0)
    self.assertEqual(a.dtype, ht.float64)
    del a

    test = torch.arange(torch.prod(length), device=device)
    a = ht.array(test, split=0)
    a.resplit_(axis=None)
    self.assertTrue(ht.equal(a, ht.array(test, split=None)))
    self.assertEqual(a.split, None)
    self.assertEqual(a.dtype, ht.int64)
    del a

    a = ht.array(test, split=None)
    a.resplit_(axis=0)
    self.assertTrue(ht.equal(a, ht.array(test, split=0)))
    self.assertEqual(a.split, 0)
    self.assertEqual(a.dtype, ht.int64)
    del a

    # out-of-place variant: ht.manipulations.resplit returns a new array
    a = ht.array(test, split=0)
    resplit_a = ht.manipulations.resplit(a, axis=None)
    self.assertTrue(ht.equal(resplit_a, ht.array(test, split=None)))
    self.assertEqual(resplit_a.split, None)
    self.assertEqual(resplit_a.dtype, ht.int64)
    del a

    a = ht.array(test, split=None)
    resplit_a = ht.manipulations.resplit(a, axis=0)
    self.assertTrue(ht.equal(resplit_a, ht.array(test, split=0)))
    self.assertEqual(resplit_a.split, 0)
    self.assertEqual(resplit_a.dtype, ht.int64)
    del a
def test_matmul(self):
    """Matrix-matrix, vector-matrix and matrix-vector multiplication over every
    combination of operand split axes, checked against a local torch reference.

    NOTE(review): the original indentation of this method was lost; the extent
    of the ``if a.comm.size > 1:`` block below (through the last matrix-vector
    case) is reconstructed -- confirm against the original file.
    """
    # inner dimensions must agree
    with self.assertRaises(ValueError):
        ht.matmul(ht.ones((25, 25)), ht.ones((42, 42)))

    # cases to test:
    n, m = 21, 31
    j, k = m, 45
    # local torch reference operands with distinctive first row / last (first) column
    a_torch = torch.ones((n, m), device=self.device.torch_device)
    a_torch[0] = torch.arange(1, m + 1, device=self.device.torch_device)
    a_torch[:, -1] = torch.arange(1, n + 1, device=self.device.torch_device)
    b_torch = torch.ones((j, k), device=self.device.torch_device)
    b_torch[0] = torch.arange(1, k + 1, device=self.device.torch_device)
    b_torch[:, 0] = torch.arange(1, j + 1, device=self.device.torch_device)

    # splits None None
    a = ht.ones((n, m), split=None)
    b = ht.ones((j, k), split=None)
    a[0] = ht.arange(1, m + 1)
    a[:, -1] = ht.arange(1, n + 1)
    b[0] = ht.arange(1, k + 1)
    b[:, 0] = ht.arange(1, j + 1)
    ret00 = ht.matmul(a, b)
    self.assertEqual(ht.all(ret00 == ht.array(a_torch @ b_torch)), 1)
    self.assertIsInstance(ret00, ht.DNDarray)
    self.assertEqual(ret00.shape, (n, k))
    self.assertEqual(ret00.dtype, ht.float)
    self.assertEqual(ret00.split, None)
    self.assertEqual(a.split, None)
    self.assertEqual(b.split, None)

    # splits None None -- allow_resplit lets matmul redistribute a to split=0
    a = ht.ones((n, m), split=None)
    b = ht.ones((j, k), split=None)
    a[0] = ht.arange(1, m + 1)
    a[:, -1] = ht.arange(1, n + 1)
    b[0] = ht.arange(1, k + 1)
    b[:, 0] = ht.arange(1, j + 1)
    ret00 = ht.matmul(a, b, allow_resplit=True)
    self.assertEqual(ht.all(ret00 == ht.array(a_torch @ b_torch)), 1)
    self.assertIsInstance(ret00, ht.DNDarray)
    self.assertEqual(ret00.shape, (n, k))
    self.assertEqual(ret00.dtype, ht.float)
    self.assertEqual(ret00.split, None)
    self.assertEqual(a.split, 0)
    self.assertEqual(b.split, None)

    if a.comm.size > 1:
        # splits 00
        a = ht.ones((n, m), split=0, dtype=ht.float64)
        b = ht.ones((j, k), split=0)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = a @ b
        ret_comp00 = ht.array(a_torch @ b_torch, split=0)
        self.assertTrue(ht.equal(ret00, ret_comp00))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float64)
        self.assertEqual(ret00.split, 0)

        # splits 00 (numpy)
        a = ht.array(np.ones((n, m)), split=0)
        b = ht.array(np.ones((j, k)), split=0)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = a @ b
        ret_comp00 = ht.array(a_torch @ b_torch, split=0)
        self.assertTrue(ht.equal(ret00, ret_comp00))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float64)
        self.assertEqual(ret00.split, 0)

        # splits 01
        a = ht.ones((n, m), split=0)
        b = ht.ones((j, k), split=1, dtype=ht.float64)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp01 = ht.array(a_torch @ b_torch, split=0)
        self.assertTrue(ht.equal(ret00, ret_comp01))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float64)
        self.assertEqual(ret00.split, 0)

        # splits 10
        a = ht.ones((n, m), split=1)
        b = ht.ones((j, k), split=0)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp10 = ht.array(a_torch @ b_torch, split=1)
        self.assertTrue(ht.equal(ret00, ret_comp10))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 1)

        # splits 11
        a = ht.ones((n, m), split=1)
        b = ht.ones((j, k), split=1)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp11 = ht.array(a_torch @ b_torch, split=1)
        self.assertTrue(ht.equal(ret00, ret_comp11))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 1)

        # splits 11 (torch)
        a = ht.array(torch.ones((n, m), device=self.device.torch_device), split=1)
        b = ht.array(torch.ones((j, k), device=self.device.torch_device), split=1)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp11 = ht.array(a_torch @ b_torch, split=1)
        self.assertTrue(ht.equal(ret00, ret_comp11))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 1)

        # splits 0 None
        a = ht.ones((n, m), split=0)
        b = ht.ones((j, k), split=None)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp0 = ht.array(a_torch @ b_torch, split=0)
        self.assertTrue(ht.equal(ret00, ret_comp0))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)

        # splits 1 None
        a = ht.ones((n, m), split=1)
        b = ht.ones((j, k), split=None)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp1 = ht.array(a_torch @ b_torch, split=1)
        self.assertTrue(ht.equal(ret00, ret_comp1))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 1)

        # splits None 0
        a = ht.ones((n, m), split=None)
        b = ht.ones((j, k), split=0)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=0)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)

        # splits None 1
        a = ht.ones((n, m), split=None)
        b = ht.ones((j, k), split=1)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=1)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 1)

        # vector matrix mult:
        # a -> vector
        a_torch = torch.ones((m), device=self.device.torch_device)
        b_torch = torch.ones((j, k), device=self.device.torch_device)
        b_torch[0] = torch.arange(1, k + 1, device=self.device.torch_device)
        b_torch[:, 0] = torch.arange(1, j + 1, device=self.device.torch_device)

        # splits None None
        a = ht.ones((m), split=None)
        b = ht.ones((j, k), split=None)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=None)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (k, ))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, None)

        # splits None 0
        a = ht.ones((m), split=None)
        b = ht.ones((j, k), split=0)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=None)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (k, ))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)

        # splits None 1
        a = ht.ones((m), split=None)
        b = ht.ones((j, k), split=1)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=0)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (k, ))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)

        # splits 0 None
        # NOTE(review): despite the label, a is created with split=None here,
        # duplicating the "splits None 0" case above -- likely a copy-paste
        # slip for ht.ones((m), split=0) / ht.ones((j, k), split=None); confirm.
        a = ht.ones((m), split=None)
        b = ht.ones((j, k), split=0)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=None)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (k, ))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)

        # splits 0 0
        a = ht.ones((m), split=0)
        b = ht.ones((j, k), split=0)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=None)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (k, ))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)

        # splits 0 1
        a = ht.ones((m), split=0)
        b = ht.ones((j, k), split=1)
        b[0] = ht.arange(1, k + 1)
        b[:, 0] = ht.arange(1, j + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=None)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (k, ))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)

        # b -> vector
        a_torch = torch.ones((n, m), device=self.device.torch_device)
        a_torch[0] = torch.arange(1, m + 1, device=self.device.torch_device)
        a_torch[:, -1] = torch.arange(1, n + 1, device=self.device.torch_device)
        b_torch = torch.ones((j), device=self.device.torch_device)

        # splits None None
        a = ht.ones((n, m), split=None)
        b = ht.ones((j), split=None)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=None)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, ))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, None)

        # splits 0 None
        a = ht.ones((n, m), split=0)
        b = ht.ones((j), split=None)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array((a_torch @ b_torch), split=None)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, ))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)

        # splits 1 None
        a = ht.ones((n, m), split=1)
        b = ht.ones((j), split=None)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array((a_torch @ b_torch), split=None)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, ))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)

        # splits None 0
        a = ht.ones((n, m), split=None)
        b = ht.ones((j), split=0)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array((a_torch @ b_torch), split=None)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, ))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)

        # splits 0 0
        a = ht.ones((n, m), split=0)
        b = ht.ones((j), split=0)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array((a_torch @ b_torch), split=None)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, ))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)

        # splits 1 0
        a = ht.ones((n, m), split=1)
        b = ht.ones((j), split=0)
        a[0] = ht.arange(1, m + 1)
        a[:, -1] = ht.arange(1, n + 1)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array((a_torch @ b_torch), split=None)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, ))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)

    # batched (3-D) matmul with split=2 is unsupported
    with self.assertRaises(NotImplementedError):
        a = ht.zeros((3, 3, 3), split=2)
        b = a.copy()
        a @ b
def test_cdist(self):
    """Test ht.spatial.cdist / ht.spatial.rbf over X/Y split combinations.

    X is all ones and Y all zeros with 4 features, so every X-to-X distance
    is 0 and every X-to-Y Euclidean distance is sqrt(4) = 2; the rbf kernel
    with sigma = sqrt(2) then yields exp(-1). The tail of the test checks
    dtype promotion of the result against torch.cdist.
    """
    n = ht.communication.MPI_WORLD.size
    X = ht.ones((n * 2, 4), dtype=ht.float32, split=None)
    Y = ht.zeros((n * 2, 4), dtype=ht.float32, split=None)
    # expected matrices for the undistributed cases
    res_XX_cdist = ht.zeros((n * 2, n * 2), dtype=ht.float32, split=None)
    res_XX_rbf = ht.ones((n * 2, n * 2), dtype=ht.float32, split=None)
    res_XY_cdist = ht.ones(
        (n * 2, n * 2), dtype=ht.float32, split=None) * 2
    res_XY_rbf = ht.ones(
        (n * 2, n * 2), dtype=ht.float32, split=None) * math.exp(-1.0)
    # Case 1a: X.split == None, Y == None
    d = ht.spatial.cdist(X, quadratic_expansion=False)
    self.assertTrue(ht.equal(d, res_XX_cdist))
    self.assertEqual(d.split, None)
    d = ht.spatial.cdist(X, quadratic_expansion=True)
    self.assertTrue(ht.equal(d, res_XX_cdist))
    self.assertEqual(d.split, None)
    d = ht.spatial.rbf(X, quadratic_expansion=False)
    self.assertTrue(ht.equal(d, res_XX_rbf))
    self.assertEqual(d.split, None)
    d = ht.spatial.rbf(X, quadratic_expansion=True)
    self.assertTrue(ht.equal(d, res_XX_rbf))
    self.assertEqual(d.split, None)
    # Case 1b: X.split == None, Y != None, Y.split == None
    d = ht.spatial.cdist(X, Y, quadratic_expansion=False)
    self.assertTrue(ht.equal(d, res_XY_cdist))
    self.assertEqual(d.split, None)
    d = ht.spatial.cdist(X, Y, quadratic_expansion=True)
    self.assertTrue(ht.equal(d, res_XY_cdist))
    self.assertEqual(d.split, None)
    d = ht.spatial.rbf(X, Y, sigma=math.sqrt(2.0), quadratic_expansion=False)
    self.assertTrue(ht.equal(d, res_XY_rbf))
    self.assertEqual(d.split, None)
    d = ht.spatial.rbf(X, Y, sigma=math.sqrt(2.0), quadratic_expansion=True)
    self.assertTrue(ht.equal(d, res_XY_rbf))
    self.assertEqual(d.split, None)
    # Case 1c: X.split == None, Y != None, Y.split == 0
    # A split-0 Y against an undistributed X produces a result split along
    # axis 1 (asserted below).
    Y = ht.zeros((n * 2, 4), dtype=ht.float32, split=0)
    res_XX_cdist = ht.zeros((n * 2, n * 2), dtype=ht.float32, split=1)
    res_XX_rbf = ht.ones((n * 2, n * 2), dtype=ht.float32, split=1)
    res_XY_cdist = ht.ones((n * 2, n * 2), dtype=ht.float32, split=1) * 2
    res_XY_rbf = ht.ones(
        (n * 2, n * 2), dtype=ht.float32, split=1) * math.exp(-1.0)
    d = ht.spatial.cdist(X, Y, quadratic_expansion=False)
    self.assertTrue(ht.equal(d, res_XY_cdist))
    self.assertEqual(d.split, 1)
    d = ht.spatial.cdist(X, Y, quadratic_expansion=True)
    self.assertTrue(ht.equal(d, res_XY_cdist))
    self.assertEqual(d.split, 1)
    d = ht.spatial.rbf(X, Y, sigma=math.sqrt(2.0), quadratic_expansion=False)
    self.assertTrue(ht.equal(d, res_XY_rbf))
    self.assertEqual(d.split, 1)
    d = ht.spatial.rbf(X, Y, sigma=math.sqrt(2.0), quadratic_expansion=True)
    self.assertTrue(ht.equal(d, res_XY_rbf))
    self.assertEqual(d.split, 1)
    # Case 2a: X.split == 0, Y == None
    X = ht.ones((n * 2, 4), dtype=ht.float32, split=0)
    Y = ht.zeros((n * 2, 4), dtype=ht.float32, split=None)
    res_XX_cdist = ht.zeros((n * 2, n * 2), dtype=ht.float32, split=0)
    res_XX_rbf = ht.ones((n * 2, n * 2), dtype=ht.float32, split=0)
    res_XY_cdist = ht.ones((n * 2, n * 2), dtype=ht.float32, split=0) * 2
    res_XY_rbf = ht.ones(
        (n * 2, n * 2), dtype=ht.float32, split=0) * math.exp(-1.0)
    d = ht.spatial.cdist(X, quadratic_expansion=False)
    self.assertTrue(ht.equal(d, res_XX_cdist))
    self.assertEqual(d.split, 0)
    d = ht.spatial.cdist(X, quadratic_expansion=True)
    self.assertTrue(ht.equal(d, res_XX_cdist))
    self.assertEqual(d.split, 0)
    d = ht.spatial.rbf(X, quadratic_expansion=False)
    self.assertTrue(ht.equal(d, res_XX_rbf))
    self.assertEqual(d.split, 0)
    d = ht.spatial.rbf(X, quadratic_expansion=True)
    self.assertTrue(ht.equal(d, res_XX_rbf))
    self.assertEqual(d.split, 0)
    # Case 2b: X.split == 0, Y != None, Y.split == None
    d = ht.spatial.cdist(X, Y, quadratic_expansion=False)
    self.assertTrue(ht.equal(d, res_XY_cdist))
    self.assertEqual(d.split, 0)
    d = ht.spatial.cdist(X, Y, quadratic_expansion=True)
    self.assertTrue(ht.equal(d, res_XY_cdist))
    self.assertEqual(d.split, 0)
    d = ht.spatial.rbf(X, Y, sigma=math.sqrt(2.0), quadratic_expansion=False)
    self.assertTrue(ht.equal(d, res_XY_rbf))
    self.assertEqual(d.split, 0)
    d = ht.spatial.rbf(X, Y, sigma=math.sqrt(2.0), quadratic_expansion=True)
    self.assertTrue(ht.equal(d, res_XY_rbf))
    self.assertEqual(d.split, 0)
    # Case 2c: X.split == 0, Y != None, Y.split == 0
    Y = ht.zeros((n * 2, 4), dtype=ht.float32, split=0)
    d = ht.spatial.cdist(X, Y, quadratic_expansion=False)
    self.assertTrue(ht.equal(d, res_XY_cdist))
    self.assertEqual(d.split, 0)
    d = ht.spatial.cdist(X, Y, quadratic_expansion=True)
    self.assertTrue(ht.equal(d, res_XY_cdist))
    self.assertEqual(d.split, 0)
    d = ht.spatial.rbf(X, Y, sigma=math.sqrt(2.0), quadratic_expansion=False)
    self.assertTrue(ht.equal(d, res_XY_rbf))
    self.assertEqual(d.split, 0)
    d = ht.spatial.rbf(X, Y, sigma=math.sqrt(2.0), quadratic_expansion=True)
    self.assertTrue(ht.equal(d, res_XY_rbf))
    self.assertEqual(d.split, 0)
    # Case 3 X.split == 1 -- split along the feature axis is unsupported,
    # as are inputs with more than two dimensions.
    X = ht.ones((n * 2, 4), dtype=ht.float32, split=1)
    with self.assertRaises(NotImplementedError):
        ht.spatial.cdist(X)
    with self.assertRaises(NotImplementedError):
        ht.spatial.cdist(X, Y, quadratic_expansion=False)
    X = ht.ones((n * 2, 4), dtype=ht.float32, split=None)
    Y = ht.zeros((n * 2, 4), dtype=ht.float32, split=1)
    with self.assertRaises(NotImplementedError):
        ht.spatial.cdist(X, Y, quadratic_expansion=False)
    Z = ht.ones((n * 2, 6, 3), dtype=ht.float32, split=None)
    with self.assertRaises(NotImplementedError):
        ht.spatial.cdist(Z, quadratic_expansion=False)
    with self.assertRaises(NotImplementedError):
        ht.spatial.cdist(X, Z, quadratic_expansion=False)
    # dtype-promotion checks: build a matrix with distinct row values and
    # compare the distributed result against torch.cdist on the local copy.
    n = ht.communication.MPI_WORLD.size
    A = ht.ones((n * 2, 6), dtype=ht.float32, split=None)
    for i in range(n):
        A[2 * i, :] = A[2 * i, :] * (2 * i)
        A[2 * i + 1, :] = A[2 * i + 1, :] * (2 * i + 1)
    res = torch.cdist(A._DNDarray__array, A._DNDarray__array)
    A = ht.ones((n * 2, 6), dtype=ht.float32, split=0)
    for i in range(n):
        A[2 * i, :] = A[2 * i, :] * (2 * i)
        A[2 * i + 1, :] = A[2 * i + 1, :] * (2 * i + 1)
    B = A.astype(ht.int32)
    d = ht.spatial.cdist(A, B, quadratic_expansion=False)
    result = ht.array(res, dtype=ht.float64, split=0)
    # looser tolerance here than in the repeated checks below
    self.assertTrue(ht.allclose(d, result, atol=1e-5))
    # NOTE(review): the block below repeats the same A/B setup as above with
    # a tighter tolerance — presumably intentional duplication; confirm.
    n = ht.communication.MPI_WORLD.size
    A = ht.ones((n * 2, 6), dtype=ht.float32, split=None)
    for i in range(n):
        A[2 * i, :] = A[2 * i, :] * (2 * i)
        A[2 * i + 1, :] = A[2 * i + 1, :] * (2 * i + 1)
    res = torch.cdist(A._DNDarray__array, A._DNDarray__array)
    A = ht.ones((n * 2, 6), dtype=ht.float32, split=0)
    for i in range(n):
        A[2 * i, :] = A[2 * i, :] * (2 * i)
        A[2 * i + 1, :] = A[2 * i + 1, :] * (2 * i + 1)
    # float32 x int32 -> float64
    B = A.astype(ht.int32)
    d = ht.spatial.cdist(A, B, quadratic_expansion=False)
    result = ht.array(res, dtype=ht.float64, split=0)
    self.assertTrue(ht.allclose(d, result, atol=1e-8))
    # float32 x float64 -> float64
    B = A.astype(ht.float64)
    d = ht.spatial.cdist(A, B, quadratic_expansion=False)
    result = ht.array(res, dtype=ht.float64, split=0)
    self.assertTrue(ht.allclose(d, result, atol=1e-8))
    # float32 x int16 -> float32
    B = A.astype(ht.int16)
    d = ht.spatial.cdist(A, B, quadratic_expansion=False)
    result = ht.array(res, dtype=ht.float32, split=0)
    self.assertTrue(ht.allclose(d, result, atol=1e-8))
    # single-argument promotion: int16 -> float32
    d = ht.spatial.cdist(B, quadratic_expansion=False)
    result = ht.array(res, dtype=ht.float32, split=0)
    self.assertTrue(ht.allclose(d, result, atol=1e-8))
    # single-argument promotion: int32 -> float64
    B = A.astype(ht.int32)
    d = ht.spatial.cdist(B, quadratic_expansion=False)
    result = ht.array(res, dtype=ht.float64, split=0)
    self.assertTrue(ht.allclose(d, result, atol=1e-8))
    # single-argument promotion: float64 -> float64
    B = A.astype(ht.float64)
    d = ht.spatial.cdist(B, quadratic_expansion=False)
    result = ht.array(res, dtype=ht.float64, split=0)
    self.assertTrue(ht.allclose(d, result, atol=1e-8))
def test_where(self):
    """Exercise ht.where in its one-argument (coordinates of truthy entries)
    and three-argument (conditional select) forms across split layouts."""
    # One-argument form on an undistributed array.
    arr = ht.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], split=None, device=ht_device)
    mask = arr > 3
    coords = ht.where(mask)
    self.assertEqual(coords.gshape, (6, 2))
    self.assertEqual(coords.dtype, ht.int64)
    self.assertEqual(coords.split, None)

    # One-argument form with the condition split along axis 1; the returned
    # coordinate array is expected to be split along axis 0.
    arr = ht.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], split=1, device=ht_device)
    mask = arr > 3
    coords = ht.where(mask)
    self.assertEqual(coords.gshape, (6, 2))
    self.assertEqual(coords.dtype, ht.int64)
    self.assertEqual(coords.split, 0)

    # Three-argument form, undistributed condition.
    arr = ht.array([[0.0, 1.0, 2.0], [0.0, 2.0, 4.0], [0.0, 3.0, 6.0]],
                   split=None, device=ht_device)
    expected = ht.array([[0.0, 1.0, 2.0], [0.0, 2.0, -1.0], [0.0, 3.0, -1.0]],
                        split=None, device=ht_device)
    selected = ht.where(arr < 4.0, arr, -1.0)
    self.assertTrue(
        ht.equal(
            arr[ht.nonzero(arr < 4)],
            ht.array([0.0, 1.0, 2.0, 0.0, 2.0, 0.0, 3.0], device=ht_device),
        ))
    self.assertTrue(ht.equal(selected, expected))
    self.assertEqual(selected.gshape, (3, 3))
    self.assertEqual(selected.dtype, ht.float)

    # Three-argument form, condition split along axis 0.
    arr = ht.array([[0.0, 1.0, 2.0], [0.0, 2.0, 4.0], [0.0, 3.0, 6.0]],
                   split=0, device=ht_device)
    expected = ht.array([[0.0, 1.0, 2.0], [0.0, 2.0, -1.0], [0.0, 3.0, -1.0]],
                        split=0, device=ht_device)
    selected = ht.where(arr < 4.0, arr, -1)
    self.assertTrue(ht.all(selected[ht.nonzero(arr >= 4)], -1))
    self.assertTrue(ht.equal(selected, expected))
    self.assertEqual(selected.gshape, (3, 3))
    self.assertEqual(selected.dtype, ht.float)
    self.assertEqual(selected.split, 0)

    # Three-argument form, condition split along axis 1.
    arr = ht.array([[0.0, 1.0, 2.0], [0.0, 2.0, 4.0], [0.0, 3.0, 6.0]],
                   split=1, device=ht_device)
    expected = ht.array([[0.0, 1.0, 2.0], [0.0, 2.0, -1.0], [0.0, 3.0, -1.0]],
                        split=1, device=ht_device)
    selected = ht.where(arr < 4.0, arr, -1)
    self.assertTrue(ht.equal(selected, expected))
    self.assertEqual(selected.gshape, (3, 3))
    self.assertEqual(selected.dtype, ht.float)
    self.assertEqual(selected.split, 1)

    # Supplying exactly two arguments is a TypeError; differing splits for
    # the two value operands are not implemented.
    with self.assertRaises(TypeError):
        ht.where(mask, arr)
    with self.assertRaises(NotImplementedError):
        ht.where(
            mask,
            ht.ones((3, 3), split=0, device=ht_device),
            ht.ones((3, 3), split=1, device=ht_device),
        )
def test_equal(self):
    """Verify ht.equal returns True only for elementwise-identical operands."""
    # NOTE(review): T, T1 and s are not defined inside this method and no
    # self.* fixture is used (unlike the surrounding tests, which use
    # self.a_tensor etc.) — presumably module-level fixtures defined
    # elsewhere in the file; confirm they exist, else this raises NameError.
    self.assertTrue(ht.equal(T, T))
    self.assertFalse(ht.equal(T, T1))
    self.assertFalse(ht.equal(T, s))
    self.assertFalse(ht.equal(T1, s))
def test_sort(self):
    """Test ht.sort against torch.sort for 2D/3D inputs over every split axis.

    The rank-local expected values depend on MPI size/rank because the input
    is constructed so that each process holds a known slice.
    """
    size = ht.MPI_WORLD.size
    rank = ht.MPI_WORLD.rank
    # size x size matrix whose every row is 0..size-1
    tensor = torch.arange(size, device=device).repeat(size).reshape(size, size)
    # undistributed: must match torch.sort exactly on both axes
    data = ht.array(tensor, split=None, device=ht_device)
    result, result_indices = ht.sort(data, axis=0, descending=True)
    expected, exp_indices = torch.sort(tensor, dim=0, descending=True)
    self.assertTrue(torch.equal(result._DNDarray__array, expected))
    self.assertTrue(torch.equal(result_indices._DNDarray__array, exp_indices))
    result, result_indices = ht.sort(data, axis=1, descending=True)
    expected, exp_indices = torch.sort(tensor, dim=1, descending=True)
    self.assertTrue(torch.equal(result._DNDarray__array, expected))
    self.assertTrue(torch.equal(result_indices._DNDarray__array, exp_indices))
    # split 0: sorting along axis 0 leaves each local row 0..size-1 and the
    # index of every element is this process' rank
    data = ht.array(tensor, split=0, device=ht_device)
    exp_axis_zero = torch.arange(size, device=device).reshape(1, size)
    exp_indices = torch.tensor([[rank] * size], device=device)
    result, result_indices = ht.sort(data, descending=True, axis=0)
    self.assertTrue(torch.equal(result._DNDarray__array, exp_axis_zero))
    self.assertTrue(torch.equal(result_indices._DNDarray__array, exp_indices))
    exp_axis_one, exp_indices = (
        torch.arange(size, device=device).reshape(1, size).sort(dim=1, descending=True)
    )
    result, result_indices = ht.sort(data, descending=True, axis=1)
    self.assertTrue(torch.equal(result._DNDarray__array, exp_axis_one))
    self.assertTrue(torch.equal(result_indices._DNDarray__array, exp_indices))
    # default axis is the last one, so axis=1 and no axis must agree here
    result1 = ht.sort(data, axis=1, descending=True)
    result2 = ht.sort(data, descending=True)
    self.assertTrue(ht.equal(result1[0], result2[0]))
    self.assertTrue(ht.equal(result1[1], result2[1]))
    # split 1
    data = ht.array(tensor, split=1, device=ht_device)
    exp_axis_zero = torch.tensor(rank, device=device).repeat(size).reshape(size, 1)
    indices_axis_zero = torch.arange(size, dtype=torch.int64, device=device).reshape(size, 1)
    result, result_indices = ht.sort(data, axis=0, descending=True)
    self.assertTrue(torch.equal(result._DNDarray__array, exp_axis_zero))
    # comparison value is only true on CPU
    if result_indices._DNDarray__array.is_cuda is False:
        self.assertTrue(torch.equal(result_indices._DNDarray__array, indices_axis_zero))
    exp_axis_one = torch.tensor(size - rank - 1, device=device).repeat(size).reshape(size, 1)
    result, result_indices = ht.sort(data, descending=True, axis=1)
    self.assertTrue(torch.equal(result._DNDarray__array, exp_axis_one))
    self.assertTrue(torch.equal(result_indices._DNDarray__array, exp_axis_one))
    # fixed 3D input: results checked on rank 0 only
    tensor = torch.tensor(
        [
            [[2, 8, 5], [7, 2, 3]],
            [[6, 5, 2], [1, 8, 7]],
            [[9, 3, 0], [1, 2, 4]],
            [[8, 4, 7], [0, 8, 9]],
        ],
        dtype=torch.int32,
        device=device,
    )
    data = ht.array(tensor, split=0, device=ht_device)
    exp_axis_zero = torch.tensor([[2, 3, 0], [0, 2, 3]], dtype=torch.int32, device=device)
    # ties may be broken differently on GPU with few processes
    if torch.cuda.is_available() and data.device == ht.gpu and size < 4:
        indices_axis_zero = torch.tensor(
            [[0, 2, 2], [3, 2, 0]], dtype=torch.int32, device=device
        )
    else:
        indices_axis_zero = torch.tensor(
            [[0, 2, 2], [3, 0, 0]], dtype=torch.int32, device=device
        )
    result, result_indices = ht.sort(data, axis=0)
    first = result[0]._DNDarray__array
    first_indices = result_indices[0]._DNDarray__array
    if rank == 0:
        self.assertTrue(torch.equal(first, exp_axis_zero))
        self.assertTrue(torch.equal(first_indices, indices_axis_zero))
    data = ht.array(tensor, split=1, device=ht_device)
    exp_axis_one = torch.tensor([[2, 2, 3]], dtype=torch.int32, device=device)
    indices_axis_one = torch.tensor([[0, 1, 1]], dtype=torch.int32, device=device)
    result, result_indices = ht.sort(data, axis=1)
    first = result[0]._DNDarray__array[:1]
    first_indices = result_indices[0]._DNDarray__array[:1]
    if rank == 0:
        self.assertTrue(torch.equal(first, exp_axis_one))
        self.assertTrue(torch.equal(first_indices, indices_axis_one))
    data = ht.array(tensor, split=2, device=ht_device)
    exp_axis_two = torch.tensor([[2], [2]], dtype=torch.int32, device=device)
    indices_axis_two = torch.tensor([[0], [1]], dtype=torch.int32, device=device)
    result, result_indices = ht.sort(data, axis=2)
    first = result[0]._DNDarray__array[:, :1]
    first_indices = result_indices[0]._DNDarray__array[:, :1]
    if rank == 0:
        self.assertTrue(torch.equal(first, exp_axis_two))
        self.assertTrue(torch.equal(first_indices, indices_axis_two))
    # out= parameter: values land in `out`, indices are returned
    out = ht.empty_like(data, device=ht_device)
    indices = ht.sort(data, axis=2, out=out)
    self.assertTrue(ht.equal(out, result))
    self.assertTrue(ht.equal(indices, result_indices))
    # invalid axis values
    with self.assertRaises(ValueError):
        ht.sort(data, axis=3)
    with self.assertRaises(TypeError):
        ht.sort(data, axis="1")
    # random data: locally sorted result must be strictly increasing
    rank = ht.MPI_WORLD.rank
    data = ht.random.randn(100, 1, split=0, device=ht_device)
    result, _ = ht.sort(data, axis=0)
    counts, _, _ = ht.get_comm().counts_displs_shape(data.gshape, axis=0)
    for i, c in enumerate(counts):
        for idx in range(c - 1):
            if rank == i:
                self.assertTrue(
                    torch.lt(
                        result._DNDarray__array[idx], result._DNDarray__array[idx + 1]
                    ).all()
                )
def test_properties(self):
    """Test SquareDiagTiles properties for m==n, m>n and m<n matrices with
    split 0 and split 1.

    The hard-coded index/count expectations only hold for exactly three MPI
    processes, hence the `comm.size == 3` guards.
    """
    # ---- m = n ------------- properties ------ s0 -----------
    m_eq_n_s0 = ht.random.randn(47, 47, split=0)
    # m_eq_n_s0.create_square_diag_tiles(tiles_per_proc=1)
    m_eq_n_s0_t1 = ht.tiling.SquareDiagTiles(m_eq_n_s0, tiles_per_proc=1)
    m_eq_n_s0_t2 = ht.tiling.SquareDiagTiles(m_eq_n_s0, tiles_per_proc=2)
    # arr property must hand back the wrapped DNDarray unchanged
    self.assertTrue(ht.equal(m_eq_n_s0_t1.arr, m_eq_n_s0))
    self.assertTrue(ht.equal(m_eq_n_s0_t2.arr, m_eq_n_s0))
    # lshape_map
    self.assertTrue(torch.equal(m_eq_n_s0_t1.lshape_map, m_eq_n_s0.create_lshape_map()))
    self.assertTrue(torch.equal(m_eq_n_s0_t2.lshape_map, m_eq_n_s0.create_lshape_map()))
    if m_eq_n_s0.comm.size == 3:
        # col_inds
        self.assertEqual(m_eq_n_s0_t1.col_indices, [0, 16, 32])
        self.assertEqual(m_eq_n_s0_t2.col_indices, [0, 8, 16, 24, 32, 40])
        # row inds
        self.assertEqual(m_eq_n_s0_t1.row_indices, [0, 16, 32])
        self.assertEqual(m_eq_n_s0_t2.row_indices, [0, 8, 16, 24, 32, 40])
        # tile cols per proc
        self.assertEqual(m_eq_n_s0_t1.tile_columns_per_process, [3, 3, 3])
        self.assertEqual(m_eq_n_s0_t2.tile_columns_per_process, [6, 6, 6])
        # tile rows per proc
        self.assertEqual(m_eq_n_s0_t1.tile_rows_per_process, [1, 1, 1])
        self.assertEqual(m_eq_n_s0_t2.tile_rows_per_process, [2, 2, 2])
        # last diag pr
        self.assertEqual(m_eq_n_s0_t1.last_diagonal_process, m_eq_n_s0.comm.size - 1)
        self.assertEqual(m_eq_n_s0_t2.last_diagonal_process, m_eq_n_s0.comm.size - 1)
        # tile cols
        self.assertEqual(m_eq_n_s0_t1.tile_columns, m_eq_n_s0.comm.size)
        self.assertEqual(m_eq_n_s0_t2.tile_columns, m_eq_n_s0.comm.size * 2)
        # tile rows
        self.assertEqual(m_eq_n_s0_t1.tile_rows, m_eq_n_s0.comm.size)
        self.assertEqual(m_eq_n_s0_t2.tile_rows, m_eq_n_s0.comm.size * 2)
    # ---- m = n ------------- properties ------ s1 -----------
    m_eq_n_s1 = ht.random.randn(47, 47, split=1)
    m_eq_n_s1_t1 = ht.core.tiling.SquareDiagTiles(m_eq_n_s1, tiles_per_proc=1)
    m_eq_n_s1_t2 = ht.core.tiling.SquareDiagTiles(m_eq_n_s1, tiles_per_proc=2)
    # lshape_map
    self.assertTrue(torch.equal(m_eq_n_s1_t1.lshape_map, m_eq_n_s1.create_lshape_map()))
    self.assertTrue(torch.equal(m_eq_n_s1_t2.lshape_map, m_eq_n_s1.create_lshape_map()))
    if m_eq_n_s1.comm.size == 3:
        # col_inds
        self.assertEqual(m_eq_n_s1_t1.col_indices, [0, 16, 32])
        self.assertEqual(m_eq_n_s1_t2.col_indices, [0, 8, 16, 24, 32, 40])
        # row inds
        self.assertEqual(m_eq_n_s1_t1.row_indices, [0, 16, 32])
        self.assertEqual(m_eq_n_s1_t2.row_indices, [0, 8, 16, 24, 32, 40])
        # tile cols per proc
        self.assertEqual(m_eq_n_s1_t1.tile_columns_per_process, [1, 1, 1])
        self.assertEqual(m_eq_n_s1_t2.tile_columns_per_process, [2, 2, 2])
        # tile rows per proc
        self.assertEqual(m_eq_n_s1_t1.tile_rows_per_process, [3, 3, 3])
        self.assertEqual(m_eq_n_s1_t2.tile_rows_per_process, [6, 6, 6])
        # last diag pr
        self.assertEqual(m_eq_n_s1_t1.last_diagonal_process, m_eq_n_s1.comm.size - 1)
        self.assertEqual(m_eq_n_s1_t2.last_diagonal_process, m_eq_n_s1.comm.size - 1)
        # tile cols
        self.assertEqual(m_eq_n_s1_t1.tile_columns, m_eq_n_s1.comm.size)
        self.assertEqual(m_eq_n_s1_t2.tile_columns, m_eq_n_s1.comm.size * 2)
        # tile rows
        self.assertEqual(m_eq_n_s1_t1.tile_rows, m_eq_n_s1.comm.size)
        self.assertEqual(m_eq_n_s1_t2.tile_rows, m_eq_n_s1.comm.size * 2)
    # ---- m > n ------------- properties ------ s0 -----------
    m_gr_n_s0 = ht.random.randn(38, 128, split=0)
    m_gr_n_s0_t1 = ht.core.tiling.SquareDiagTiles(m_gr_n_s0, tiles_per_proc=1)
    m_gr_n_s0_t2 = ht.core.tiling.SquareDiagTiles(m_gr_n_s0, tiles_per_proc=2)
    if m_eq_n_s1.comm.size == 3:
        # col_inds
        self.assertEqual(m_gr_n_s0_t1.col_indices, [0, 13, 26])
        self.assertEqual(m_gr_n_s0_t2.col_indices, [0, 7, 13, 20, 26, 32])
        # row inds
        self.assertEqual(m_gr_n_s0_t1.row_indices, [0, 13, 26])
        self.assertEqual(m_gr_n_s0_t2.row_indices, [0, 7, 13, 20, 26, 32])
        # tile cols per proc
        self.assertEqual(m_gr_n_s0_t1.tile_columns_per_process, [3, 3, 3])
        self.assertEqual(m_gr_n_s0_t2.tile_columns_per_process, [6, 6, 6])
        # tile rows per proc
        self.assertEqual(m_gr_n_s0_t1.tile_rows_per_process, [1, 1, 1])
        self.assertEqual(m_gr_n_s0_t2.tile_rows_per_process, [2, 2, 2])
        # last diag pr
        self.assertEqual(m_gr_n_s0_t1.last_diagonal_process, m_eq_n_s1.comm.size - 1)
        self.assertEqual(m_gr_n_s0_t2.last_diagonal_process, m_eq_n_s1.comm.size - 1)
        # tile cols
        self.assertEqual(m_gr_n_s0_t1.tile_columns, m_eq_n_s1.comm.size)
        self.assertEqual(m_gr_n_s0_t2.tile_columns, m_eq_n_s1.comm.size * 2)
        # tile rows
        self.assertEqual(m_gr_n_s0_t1.tile_rows, m_eq_n_s1.comm.size)
        self.assertEqual(m_gr_n_s0_t2.tile_rows, m_eq_n_s1.comm.size * 2)
    # ---- m > n ------------- properties ------ s1 -----------
    m_gr_n_s1 = ht.random.randn(38, 128, split=1)
    m_gr_n_s1_t1 = ht.core.tiling.SquareDiagTiles(m_gr_n_s1, tiles_per_proc=1)
    m_gr_n_s1_t2 = ht.core.tiling.SquareDiagTiles(m_gr_n_s1, tiles_per_proc=2)
    if m_eq_n_s1.comm.size == 3:
        # col_inds
        self.assertEqual(m_gr_n_s1_t1.col_indices, [0, 38, 43, 86, 128, 171])
        self.assertEqual(m_gr_n_s1_t2.col_indices, [0, 19, 38, 43, 86, 128, 171])
        # row inds
        self.assertEqual(m_gr_n_s1_t1.row_indices, [0])
        self.assertEqual(m_gr_n_s1_t2.row_indices, [0, 19])
        # tile cols per proc
        self.assertEqual(m_gr_n_s1_t1.tile_columns_per_process, [2, 1, 1])
        self.assertEqual(m_gr_n_s1_t2.tile_columns_per_process, [3, 1, 1])
        # tile rows per proc
        self.assertEqual(m_gr_n_s1_t1.tile_rows_per_process, [1, 1, 1])
        self.assertEqual(m_gr_n_s1_t2.tile_rows_per_process, [2, 2, 2])
        # last diag pr: the whole diagonal fits on process 0 here
        self.assertEqual(m_gr_n_s1_t1.last_diagonal_process, 0)
        self.assertEqual(m_gr_n_s1_t2.last_diagonal_process, 0)
        # tile cols
        self.assertEqual(m_gr_n_s1_t1.tile_columns, 6)
        self.assertEqual(m_gr_n_s1_t2.tile_columns, 7)
        # tile rows
        self.assertEqual(m_gr_n_s1_t1.tile_rows, 1)
        self.assertEqual(m_gr_n_s1_t2.tile_rows, 2)
    # ---- m < n ------------- properties ------ s0 -----------
    m_ls_n_s0 = ht.random.randn(323, 49, split=0)
    m_ls_n_s0_t1 = ht.core.tiling.SquareDiagTiles(m_ls_n_s0, tiles_per_proc=1)
    m_ls_n_s0_t2 = ht.core.tiling.SquareDiagTiles(m_ls_n_s0, tiles_per_proc=2)
    if m_eq_n_s1.comm.size == 3:
        # col_inds
        self.assertEqual(m_ls_n_s0_t1.col_indices, [0])
        self.assertEqual(m_ls_n_s0_t2.col_indices, [0, 25])
        # row inds
        self.assertEqual(m_ls_n_s0_t1.row_indices, [0, 49, 109, 216])
        self.assertEqual(m_ls_n_s0_t2.row_indices, [0, 25, 49, 110, 163, 216, 270])
        # tile cols per proc
        self.assertEqual(m_ls_n_s0_t1.tile_columns_per_process, [1])
        self.assertEqual(m_ls_n_s0_t2.tile_columns_per_process, [2])
        # tile rows per proc
        self.assertEqual(m_ls_n_s0_t1.tile_rows_per_process, [2, 1, 1])
        self.assertEqual(m_ls_n_s0_t2.tile_rows_per_process, [3, 2, 2])
        # last diag pr
        self.assertEqual(m_ls_n_s0_t1.last_diagonal_process, 0)
        self.assertEqual(m_ls_n_s0_t2.last_diagonal_process, 0)
        # tile cols
        self.assertEqual(m_ls_n_s0_t1.tile_columns, 1)
        self.assertEqual(m_ls_n_s0_t2.tile_columns, 2)
        # tile rows
        self.assertEqual(m_ls_n_s0_t1.tile_rows, 4)
        self.assertEqual(m_ls_n_s0_t2.tile_rows, 7)
    # ---- m < n ------------- properties ------ s1 -----------
    m_ls_n_s1 = ht.random.randn(323, 49, split=1)
    m_ls_n_s1_t1 = ht.core.tiling.SquareDiagTiles(m_ls_n_s1, tiles_per_proc=1)
    m_ls_n_s1_t2 = ht.core.tiling.SquareDiagTiles(m_ls_n_s1, tiles_per_proc=2)
    if m_eq_n_s1.comm.size == 3:
        # col_inds
        self.assertEqual(m_ls_n_s1_t1.col_indices, [0, 17, 33])
        self.assertEqual(m_ls_n_s1_t2.col_indices, [0, 9, 17, 25, 33, 41])
        # row inds
        self.assertEqual(m_ls_n_s1_t1.row_indices, [0, 17, 33, 49])
        self.assertEqual(m_ls_n_s1_t2.row_indices, [0, 9, 17, 25, 33, 41, 49])
        # tile cols per proc
        self.assertEqual(m_ls_n_s1_t1.tile_columns_per_process, [1, 1, 1])
        self.assertEqual(m_ls_n_s1_t2.tile_columns_per_process, [2, 2, 2])
        # tile rows per proc
        self.assertEqual(m_ls_n_s1_t1.tile_rows_per_process, [4, 4, 4])
        self.assertEqual(m_ls_n_s1_t2.tile_rows_per_process, [7, 7, 7])
        # last diag pr
        self.assertEqual(m_ls_n_s1_t1.last_diagonal_process, 2)
        self.assertEqual(m_ls_n_s1_t2.last_diagonal_process, 2)
        # tile cols
        self.assertEqual(m_ls_n_s1_t1.tile_columns, 3)
        self.assertEqual(m_ls_n_s1_t2.tile_columns, 6)
        # tile rows
        self.assertEqual(m_ls_n_s1_t1.tile_rows, 4)
        self.assertEqual(m_ls_n_s1_t2.tile_rows, 7)
def test_dot(self):
    """Test ht.dot for 1D.1D, 2D.2D, scalar and mixed operands against numpy.

    ONLY TESTING CORRECTNESS! ALL CALLS IN DOT ARE PREVIOUSLY TESTED.
    """
    data2d = np.ones((10, 10))
    data3d = np.ones((10, 10, 10))
    data1d = np.arange(10)
    a1d = ht.array(data1d, dtype=ht.float32, split=0, device=ht_device)
    b1d = ht.array(data1d, dtype=ht.float32, split=0, device=ht_device)
    # 2 1D arrays, every split combination must match np.dot
    self.assertEqual(ht.dot(a1d, b1d), np.dot(data1d, data1d))
    ret = []
    # NOTE(review): `ret` is passed as out= here but its content is never
    # asserted afterwards — only the return value is checked; confirm intent.
    self.assertEqual(ht.dot(a1d, b1d, out=ret), np.dot(data1d, data1d))
    a1d = ht.array(data1d, dtype=ht.float32, split=None, device=ht_device)
    b1d = ht.array(data1d, dtype=ht.float32, split=0, device=ht_device)
    self.assertEqual(ht.dot(a1d, b1d), np.dot(data1d, data1d))
    a1d = ht.array(data1d, dtype=ht.float32, split=None, device=ht_device)
    b1d = ht.array(data1d, dtype=ht.float32, split=None, device=ht_device)
    self.assertEqual(ht.dot(a1d, b1d), np.dot(data1d, data1d))
    a1d = ht.array(data1d, dtype=ht.float32, split=0, device=ht_device)
    b1d = ht.array(data1d, dtype=ht.float32, split=0, device=ht_device)
    self.assertEqual(ht.dot(a1d, b1d), np.dot(data1d, data1d))
    a2d = ht.array(data2d, split=1, device=ht_device)
    b2d = ht.array(data2d, split=1, device=ht_device)
    # 2 2D arrays: the difference to the numpy result must be all zeros
    res = ht.dot(a2d, b2d) - ht.array(np.dot(data2d, data2d), device=ht_device)
    self.assertEqual(ht.equal(res, ht.zeros(res.shape, device=ht_device)), 1)
    # out= variant: the result is written into `ret`
    ret = ht.array(data2d, split=1, device=ht_device)
    ht.dot(a2d, b2d, out=ret)
    res = ret - ht.array(np.dot(data2d, data2d), device=ht_device)
    self.assertEqual(ht.equal(res, ht.zeros(res.shape, device=ht_device)), 1)
    const1 = 5
    const2 = 6
    # a is const
    res = ht.dot(const1, b2d) - ht.array(np.dot(const1, data2d), device=ht_device)
    ret = 0
    # NOTE(review): the out= call below is only checked not to raise; `ret`
    # itself is never asserted — confirm whether that is intentional.
    ht.dot(const1, b2d, out=ret)
    self.assertEqual(ht.equal(res, ht.zeros(res.shape, device=ht_device)), 1)
    # b is const
    res = ht.dot(a2d, const2) - ht.array(np.dot(data2d, const2), device=ht_device)
    self.assertEqual(ht.equal(res, ht.zeros(res.shape, device=ht_device)), 1)
    # a and b and const
    self.assertEqual(ht.dot(const2, const1), 5 * 6)
    # 3D operands are not supported
    with self.assertRaises(NotImplementedError):
        ht.dot(ht.array(data3d, device=ht_device), ht.array(data1d, device=ht_device))
def test_diag(self):
    """Test ht.diag (extract/construct diagonals with offsets) against
    torch.diag, including split and is_split inputs and error cases."""
    size = ht.MPI_WORLD.size
    rank = ht.MPI_WORLD.rank
    data = torch.arange(size * 2, device=device)
    # undistributed vector -> diagonal matrix, all offsets
    a = ht.array(data, device=ht_device)
    res = ht.diag(a)
    self.assertTrue(torch.equal(res._DNDarray__array, torch.diag(data)))
    res = ht.diag(a, offset=size)
    self.assertTrue(torch.equal(res._DNDarray__array, torch.diag(data, diagonal=size)))
    res = ht.diag(a, offset=-size)
    self.assertTrue(torch.equal(res._DNDarray__array, torch.diag(data, diagonal=-size)))
    # split vector: check split/shape metadata and spot-check the local
    # diagonal entries this rank owns
    a = ht.array(data, split=0, device=ht_device)
    res = ht.diag(a)
    self.assertEqual(res.split, a.split)
    self.assertEqual(res.shape, (size * 2, size * 2))
    self.assertEqual(res.lshape[res.split], 2)
    exp = torch.diag(data)
    for i in range(rank * 2, (rank + 1) * 2):
        self.assertTrue(torch.equal(res[i, i]._DNDarray__array, exp[i, i]))
    res = ht.diag(a, offset=size)
    self.assertEqual(res.split, a.split)
    self.assertEqual(res.shape, (size * 3, size * 3))
    self.assertEqual(res.lshape[res.split], 3)
    exp = torch.diag(data, diagonal=size)
    for i in range(rank * 3, min((rank + 1) * 3, a.shape[0])):
        self.assertTrue(torch.equal(res[i, i + size]._DNDarray__array, exp[i, i + size]))
    res = ht.diag(a, offset=-size)
    self.assertEqual(res.split, a.split)
    self.assertEqual(res.shape, (size * 3, size * 3))
    self.assertEqual(res.lshape[res.split], 3)
    exp = torch.diag(data, diagonal=-size)
    for i in range(max(size, rank * 3), (rank + 1) * 3):
        self.assertTrue(torch.equal(res[i, i - size]._DNDarray__array, exp[i, i - size]))
    # diag is its own inverse for vectors: diag(diag(v)) == v
    self.assertTrue(ht.equal(ht.diag(ht.diag(a)), a))
    # for >2D input diag falls through to diagonal
    a = ht.random.rand(15, 20, 5, split=1, device=ht_device)
    res_1 = ht.diag(a)
    res_2 = ht.diagonal(a)
    self.assertTrue(ht.equal(res_1, res_2))
    # error cases: raw torch tensor, bad offset types, empty input
    with self.assertRaises(ValueError):
        ht.diag(data)
    with self.assertRaises(ValueError):
        ht.diag(a, offset=None)
    a = ht.arange(size, device=ht_device)
    with self.assertRaises(ValueError):
        ht.diag(a, offset="3")
    a = ht.empty([], device=ht_device)
    with self.assertRaises(ValueError):
        ht.diag(a)
    # unbalanced is_split input: only rank 0 contributes data
    if rank == 0:
        data = torch.ones(size, dtype=torch.int32, device=device)
    else:
        data = torch.empty(0, dtype=torch.int32, device=device)
    a = ht.array(data, is_split=0, device=ht_device)
    res = ht.diag(a)
    self.assertTrue(
        torch.equal(
            res[rank, rank]._DNDarray__array, torch.tensor(1, dtype=torch.int32, device=device)
        )
    )
    # cross-check heat against numpy via the shared test helpers
    self.assert_func_equal_for_tensor(
        np.arange(23),
        heat_func=ht.diag,
        numpy_func=np.diag,
        heat_args={"offset": 2},
        numpy_args={"k": 2},
    )
    self.assert_func_equal(
        (27,),
        heat_func=ht.diag,
        numpy_func=np.diag,
        heat_args={"offset": -3},
        numpy_args={"k": -3},
    )
def test_conjugate(self):
    """Check ht.conjugate / ht.conj / DNDarray.conj on complex and real
    inputs across split layouts and complex dtypes."""

    def _assert_complex_equal(actual, reference):
        # equal on complex numbers does not work on PyTorch, so compare the
        # real and imaginary parts separately.
        self.assertTrue(ht.equal(ht.real(actual), ht.real(reference)))
        self.assertTrue(ht.equal(ht.imag(actual), ht.imag(reference)))

    # 1D complex64 input, no split
    arr = ht.array([1.0, 1.0j, 1 + 1j, -2 + 2j, 3 - 3j])
    conjugated = ht.conjugate(arr)
    expected = ht.array(
        [1 - 0j, -1j, 1 - 1j, -2 - 2j, 3 + 3j], dtype=ht.complex64, device=self.device
    )
    self.assertIs(conjugated.device, self.device)
    self.assertIs(conjugated.dtype, ht.complex64)
    self.assertEqual(conjugated.shape, (5,))
    _assert_complex_equal(conjugated, expected)

    # 2D complex64 input, split along axis 0
    arr = ht.array([[1.0, 1.0j], [1 + 1j, -2 + 2j], [3 - 3j, -4 - 4j]], split=0)
    conjugated = ht.conjugate(arr)
    expected = ht.array(
        [[1 - 0j, -1j], [1 - 1j, -2 - 2j], [3 + 3j, -4 + 4j]],
        dtype=ht.complex64,
        device=self.device,
        split=0,
    )
    self.assertIs(conjugated.device, self.device)
    self.assertIs(conjugated.dtype, ht.complex64)
    self.assertEqual(conjugated.shape, (3, 2))
    _assert_complex_equal(conjugated, expected)

    # 2D complex128 input, split along axis 1
    arr = ht.array(
        [[1.0, 1.0j], [1 + 1j, -2 + 2j], [3 - 3j, -4 - 4j]], dtype=ht.complex128, split=1
    )
    conjugated = ht.conjugate(arr)
    expected = ht.array(
        [[1 - 0j, -1j], [1 - 1j, -2 - 2j], [3 + 3j, -4 + 4j]],
        dtype=ht.complex128,
        device=self.device,
        split=1,
    )
    self.assertIs(conjugated.device, self.device)
    self.assertIs(conjugated.dtype, ht.complex128)
    self.assertEqual(conjugated.shape, (3, 2))
    _assert_complex_equal(conjugated, expected)

    # Not complex: conjugation leaves real values untouched
    arr = ht.ones((4, 4))
    conjugated = ht.conj(arr)
    expected = ht.ones((4, 4))
    self.assertIs(conjugated.device, self.device)
    self.assertIs(conjugated.dtype, ht.float32)
    self.assertEqual(conjugated.shape, (4, 4))
    self.assertTrue(ht.equal(conjugated, expected))

    # DNDarray method form
    arr = ht.array([1 + 1j, 1 - 1j])
    conjugated = arr.conj()
    expected = ht.array([1 - 1j, 1 + 1j])
    self.assertIs(conjugated.device, self.device)
    self.assertTrue(ht.equal(conjugated, expected))
def test_diff(self):
    """Test ht.diff against np.diff over every (slice-dimension, axis, split,
    order) combination, including prepend/append and the error paths."""
    ht_array = ht.random.rand(20, 20, 20, split=None)
    # arb_slice selects a 1D/2D/3D view of the cube depending on `dim`
    arb_slice = [0] * 3
    for dim in range(0, 3):  # loop over 3 dimensions
        arb_slice[dim] = slice(None)
        tup_arb = tuple(arb_slice)
        np_array = ht_array[tup_arb].numpy()
        for ax in range(dim + 1):  # loop over the possible axis values
            for sp in range(dim + 1):  # loop over the possible split values
                lp_array = ht.manipulations.resplit(ht_array[tup_arb], sp)
                # loop to 3 for the number of times to do the diff
                for nl in range(1, 4):
                    # only generating the number once and then
                    ht_diff = ht.diff(lp_array, n=nl, axis=ax)
                    np_diff = ht.array(np.diff(np_array, n=nl, axis=ax))
                    self.assertTrue(ht.equal(ht_diff, np_diff))
                    # diff must preserve split and dtype
                    self.assertEqual(ht_diff.split, sp)
                    self.assertEqual(ht_diff.dtype, lp_array.dtype)
                    # test prepend/append. Note heat's intuitive casting vs.
                    # numpy's safe casting
                    append_shape = lp_array.gshape[:ax] + (
                        1, ) + lp_array.gshape[ax + 1:]
                    ht_append = ht.ones(append_shape, dtype=lp_array.dtype, split=lp_array.split)
                    ht_diff_pend = ht.diff(lp_array, n=nl, axis=ax, prepend=0, append=ht_append)
                    np_append = np.ones(
                        append_shape, dtype=lp_array.larray.cpu().numpy().dtype)
                    np_diff_pend = ht.array(
                        np.diff(np_array, n=nl, axis=ax, prepend=0, append=np_append))
                    self.assertTrue(ht.equal(ht_diff_pend, np_diff_pend))
                    self.assertEqual(ht_diff_pend.split, sp)
                    # the int prepend promotes the result to float64
                    self.assertEqual(ht_diff_pend.dtype, ht.float64)
    # full 3D cube, default axis, second-order diff, undistributed
    np_array = ht_array.numpy()
    ht_diff = ht.diff(ht_array, n=2)
    np_diff = ht.array(np.diff(np_array, n=2))
    self.assertTrue(ht.equal(ht_diff, np_diff))
    self.assertEqual(ht_diff.split, None)
    self.assertEqual(ht_diff.dtype, ht_array.dtype)
    # same but distributed along axis 1 with float64 data
    ht_array = ht.random.rand(20, 20, 20, split=1, dtype=ht.float64)
    np_array = ht_array.copy().numpy()
    ht_diff = ht.diff(ht_array, n=2)
    np_diff = ht.array(np.diff(np_array, n=2))
    self.assertTrue(ht.equal(ht_diff, np_diff))
    self.assertEqual(ht_diff.split, 1)
    self.assertEqual(ht_diff.dtype, ht_array.dtype)
    # raises
    with self.assertRaises(ValueError):
        ht.diff(ht_array, n=-2)
    with self.assertRaises(TypeError):
        ht.diff(ht_array, axis="string")
    with self.assertRaises(TypeError):
        ht.diff("string", axis=2)
    # prepend must be a DNDarray/scalar, not a raw torch tensor
    t_prepend = torch.zeros(ht_array.gshape)
    with self.assertRaises(TypeError):
        ht.diff(ht_array, prepend=t_prepend)
    # append shape must match the input everywhere except `axis`
    append_wrong_shape = ht.ones(ht_array.gshape)
    with self.assertRaises(ValueError):
        ht.diff(ht_array, axis=0, append=append_wrong_shape)
def test_rand(self):
    """Exercise ht.random.rand: seed reproducibility, Threefry counter
    overflow behavior, split-axis independence, value ordering across
    shapes/splits, uniqueness, uniform distribution in [0, 1), error
    paths, and the float32 code path.

    NOTE(review): the statement order is significant — every call to
    ht.random.* advances the global Threefry counter, so do not reorder.
    """
    # int64 tests
    # Resetting seed works
    seed = 12345
    ht.random.seed(seed)
    a = ht.random.rand(2, 5, 7, 3, split=0)
    self.assertEqual(a.dtype, ht.float32)
    self.assertEqual(a.larray.dtype, torch.float32)
    b = ht.random.rand(2, 5, 7, 3, split=0)
    self.assertFalse(ht.equal(a, b))
    ht.random.seed(seed)
    c = ht.random.rand(2, 5, 7, 3, dtype=ht.float32, split=0)
    self.assertTrue(ht.equal(a, c))
    # Random numbers with overflow
    ht.random.set_state(("Threefry", seed, 0xFFFFFFFFFFFFFFF0))
    a = ht.random.rand(2, 3, 4, 5, split=0)
    ht.random.set_state(("Threefry", seed, 0x10000000000000000))
    b = ht.random.rand(2, 44, split=0)
    a = a.numpy().flatten()
    b = b.numpy().flatten()
    self.assertEqual(a.dtype, np.float32)
    # values generated after the 64-bit counter wrapped must continue the stream
    self.assertTrue(np.array_equal(a[32:], b))
    # Check that random numbers don't repeat after first overflow
    seed = 12345
    ht.random.set_state(("Threefry", seed, 0x100000000))
    a = ht.random.rand(2, 44)
    ht.random.seed(seed)
    b = ht.random.rand(2, 44)
    self.assertFalse(ht.equal(a, b))
    # Check that we start from beginning after 128 bit overflow
    ht.random.seed(seed)
    a = ht.random.rand(2, 34, split=0)
    ht.random.set_state(("Threefry", seed, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0))
    b = ht.random.rand(2, 50, split=0)
    a = a.numpy().flatten()
    b = b.numpy().flatten()
    self.assertTrue(np.array_equal(a, b[32:]))
    # different split axis with resetting seed
    ht.random.seed(seed)
    a = ht.random.rand(3, 5, 2, 9, split=3)
    ht.random.seed(seed)
    c = ht.random.rand(3, 5, 2, 9, split=3)
    self.assertTrue(ht.equal(a, c))
    # Random values are in correct order
    ht.random.seed(seed)
    a = ht.random.rand(2, 50, split=0)
    ht.random.seed(seed)
    b = ht.random.rand(100, split=None)
    a = a.numpy().flatten()
    b = b.larray.cpu().numpy()
    self.assertTrue(np.array_equal(a, b))
    # On different shape and split the same random values are used
    ht.random.seed(seed)
    a = ht.random.rand(3, 5, 2, 9, split=3)
    ht.random.seed(seed)
    b = ht.random.rand(30, 9, split=1)
    # sort because the element order differs between the two layouts
    a = np.sort(a.numpy().flatten())
    b = np.sort(b.numpy().flatten())
    self.assertTrue(np.array_equal(a, b))
    # One large array does not have two similar values
    a = ht.random.rand(11, 15, 3, 7, split=2)
    a = a.numpy()
    _, counts = np.unique(a, return_counts=True)
    # Assert that no value appears more than once
    self.assertTrue((counts == 1).all())
    # Two large arrays that were created after each other don't share any values
    b = ht.random.rand(14, 7, 3, 12, 18, 42, split=5, comm=ht.MPI_WORLD, dtype=ht.float64)
    c = np.concatenate((a.flatten(), b.numpy().flatten()))
    _, counts = np.unique(c, return_counts=True)
    self.assertTrue((counts == 1).all())
    # Values should be spread evenly across the range [0, 1)
    mean = np.mean(c)
    median = np.median(c)
    std = np.std(c)
    self.assertTrue(0.49 < mean < 0.51)
    self.assertTrue(0.49 < median < 0.51)
    self.assertTrue(std < 0.3)
    self.assertTrue(((0 <= c) & (c < 1)).all())
    # No arguments work correctly
    ht.random.seed(seed)
    a = ht.random.rand()
    ht.random.seed(seed)
    b = ht.random.rand(1)
    self.assertTrue(ht.equal(a, b))
    # Too big arrays cant be created
    with self.assertRaises(ValueError):
        ht.random.randn(0x7FFFFFFFFFFFFFFF)
    with self.assertRaises(ValueError):
        ht.random.rand(3, 2, -2, 5, split=1)
    # randn only supports float dtypes
    with self.assertRaises(ValueError):
        ht.random.randn(12, 43, dtype=ht.int32, split=0)
    # 32 Bit tests
    ht.random.seed(9876)
    shape = (13, 43, 13, 23)
    a = ht.random.rand(*shape, dtype=ht.float32, split=0)
    self.assertEqual(a.dtype, ht.float32)
    self.assertEqual(a.larray.dtype, torch.float32)
    ht.random.seed(9876)
    b = ht.random.rand(np.prod(shape), dtype=ht.float32)
    a = a.numpy().flatten()
    b = b.larray.cpu().numpy()
    self.assertTrue(np.array_equal(a, b))
    self.assertEqual(a.dtype, np.float32)
    a = ht.random.rand(21, 16, 17, 21, dtype=ht.float32, split=2)
    b = ht.random.rand(15, 11, 19, 31, dtype=ht.float32, split=0)
    a = a.numpy().flatten()
    b = b.numpy().flatten()
    c = np.concatenate((a, b))
    # Values should be spread evenly across the range [0, 1)
    mean = np.mean(c)
    median = np.median(c)
    std = np.std(c)
    self.assertTrue(0.49 < mean < 0.51)
    self.assertTrue(0.49 < median < 0.51)
    self.assertTrue(std < 0.3)
    self.assertTrue(((0 <= c) & (c < 1)).all())
    ht.random.seed(11111)
    a = ht.random.rand(12, 32, 44, split=1, dtype=ht.float32).numpy()
    # Overflow reached
    ht.random.set_state(("Threefry", 11111, 0x10000000000000000))
    b = ht.random.rand(12, 32, 44, split=1, dtype=ht.float32).numpy()
    self.assertTrue(np.array_equal(a, b))
    ht.random.set_state(("Threefry", 11111, 0x100000000))
    c = ht.random.rand(12, 32, 44, split=1, dtype=ht.float32).numpy()
    self.assertFalse(np.array_equal(a, c))
    self.assertFalse(np.array_equal(b, c))
def test_randint(self):
    """Exercise ht.random.randint: range containment, int/tuple/None size
    arguments, seed reproducibility across shapes and splits, distribution
    statistics, error paths, the int32 code path, and the
    ``random_integer`` alias.

    NOTE(review): statement order matters — each ht.random.* call advances
    the shared Threefry counter.
    """
    # Checked that the random values are in the correct range
    a = ht.random.randint(low=0, high=10, size=(10, 10), dtype=ht.int64)
    self.assertEqual(a.dtype, ht.int64)
    a = a.numpy()
    self.assertTrue(((0 <= a) & (a < 10)).all())
    a = ht.random.randint(low=100000, high=150000, size=(31, 25, 11), dtype=ht.int64, split=2)
    a = a.numpy()
    self.assertTrue(((100000 <= a) & (a < 150000)).all())
    # For the range [0, 1) only the value 0 is allowed
    a = ht.random.randint(1, size=(10,), split=0, dtype=ht.int64)
    b = ht.zeros((10,), dtype=ht.int64, split=0)
    self.assertTrue(ht.equal(a, b))
    # size parameter allows int arguments
    a = ht.random.randint(1, size=10, split=0, dtype=ht.int64)
    self.assertTrue(ht.equal(a, b))
    # size is None
    a = ht.random.randint(0, 10)
    self.assertEqual(a.shape, ())
    # Two arrays with the same seed and same number of elements have the same random values
    ht.random.seed(13579)
    shape = (15, 13, 9, 21, 65)
    a = ht.random.randint(15, 100, size=shape, split=0, dtype=ht.int64)
    a = a.numpy().flatten()
    ht.random.seed(13579)
    elements = np.prod(shape)
    b = ht.random.randint(low=15, high=100, size=(elements,), dtype=ht.int64)
    b = b.numpy()
    self.assertTrue(np.array_equal(a, b))
    # Two arrays with the same seed and shape have identical values
    ht.random.seed(13579)
    a = ht.random.randint(10000, size=shape, split=2, dtype=ht.int64)
    a = a.numpy()
    ht.random.seed(13579)
    b = ht.random.randint(low=0, high=10000, size=shape, split=2, dtype=ht.int64)
    b = b.numpy()
    ht.random.seed(13579)
    # a scalar draw with the same seed must equal the first element of b
    c = ht.random.randint(low=0, high=10000, dtype=ht.int64)
    self.assertTrue(np.equal(b[0, 0, 0, 0, 0], c))
    self.assertTrue(np.array_equal(a, b))
    mean = np.mean(a)
    median = np.median(a)
    std = np.std(a)
    # Mean and median should be in the center while the std is very high due to an even distribution
    self.assertTrue(4900 < mean < 5100)
    self.assertTrue(4900 < median < 5100)
    self.assertTrue(std < 2900)
    # low must be strictly less than high
    with self.assertRaises(ValueError):
        ht.random.randint(5, 5, size=(10, 10), split=0)
    with self.assertRaises(ValueError):
        ht.random.randint(low=0, high=10, size=(3, -4))
    # only integer dtypes are allowed
    with self.assertRaises(ValueError):
        ht.random.randint(low=0, high=10, size=(15,), dtype=ht.float32)
    # int32 tests
    ht.random.seed(4545)
    a = ht.random.randint(50, 1000, size=(13, 45), dtype=ht.int32, split=0)
    # a state past 64-bit counter overflow must reproduce the same stream
    ht.random.set_state(("Threefry", 4545, 0x10000000000000000))
    b = ht.random.randint(50, 1000, size=(13, 45), dtype=ht.int32, split=0)
    self.assertEqual(a.dtype, ht.int32)
    self.assertEqual(a.larray.dtype, torch.int32)
    self.assertEqual(b.dtype, ht.int32)
    a = a.numpy()
    b = b.numpy()
    self.assertEqual(a.dtype, np.int32)
    self.assertTrue(np.array_equal(a, b))
    self.assertTrue(((50 <= a) & (a < 1000)).all())
    self.assertTrue(((50 <= b) & (b < 1000)).all())
    c = ht.random.randint(50, 1000, size=(13, 45), dtype=ht.int32, split=0)
    c = c.numpy()
    self.assertFalse(np.array_equal(a, c))
    self.assertFalse(np.array_equal(b, c))
    self.assertTrue(((50 <= c) & (c < 1000)).all())
    ht.random.seed(0xFFFFFFF)
    a = ht.random.randint(
        10000, size=(123, 42, 13, 21), split=3, dtype=ht.int32, comm=ht.MPI_WORLD
    )
    a = a.numpy()
    mean = np.mean(a)
    median = np.median(a)
    std = np.std(a)
    # Mean and median should be in the center while the std is very high due to an even distribution
    self.assertTrue(4900 < mean < 5100)
    self.assertTrue(4900 < median < 5100)
    self.assertTrue(std < 2900)
    # test aliases
    ht.random.seed(234)
    a = ht.random.randint(10, 50)
    ht.random.seed(234)
    b = ht.random.random_integer(10, 50)
    self.assertTrue(ht.equal(a, b))
def test_equal(self): self.assertTrue(ht.equal(self.a_tensor, self.a_tensor)) self.assertFalse(ht.equal(self.a_tensor[1:], self.a_tensor)) self.assertFalse(ht.equal(self.a_split_tensor[1:], self.a_tensor[1:])) self.assertFalse(ht.equal(self.a_tensor[1:], self.a_split_tensor[1:])) self.assertFalse(ht.equal(self.a_tensor, self.another_tensor)) self.assertFalse(ht.equal(self.a_tensor, self.a_scalar)) self.assertFalse(ht.equal(self.a_scalar, self.a_tensor)) self.assertFalse(ht.equal(self.a_scalar, self.a_tensor[0, 0])) self.assertFalse(ht.equal(self.a_tensor[0, 0], self.a_scalar)) self.assertFalse(ht.equal(self.another_tensor, self.a_scalar)) self.assertTrue( ht.equal(self.split_ones_tensor[:, 0], self.split_ones_tensor[:, 1])) self.assertTrue( ht.equal(self.split_ones_tensor[:, 1], self.split_ones_tensor[:, 0])) self.assertFalse(ht.equal(self.a_tensor, self.a_split_tensor)) self.assertFalse(ht.equal(self.a_split_tensor, self.a_tensor)) arr = ht.array([[1, 2], [3, 4]], comm=ht.MPI_SELF) with self.assertRaises(NotImplementedError): ht.equal(self.a_tensor, arr) with self.assertRaises(ValueError): ht.equal(self.a_split_tensor, self.a_split_tensor.resplit(1))
def test_diagonal(self):
    """Validate ht.diagonal on 2-D and 3-D arrays for all split axes,
    positive/negative/zero offsets, and swapped dim1/dim2, comparing the
    per-process chunk against the expected torch tensor; then check the
    error paths and cross-check against np.diagonal.

    NOTE(review): ``device`` and ``ht_device`` are module-level globals
    defined outside this view; expected chunks are phrased in terms of the
    calling process's MPI ``rank`` and communicator ``size``.
    """
    size = ht.MPI_WORLD.size
    rank = ht.MPI_WORLD.rank
    # square matrix whose every row is 0..size-1 -> main diagonal is 0..size-1
    data = torch.arange(size, device=device).repeat(size).reshape(size, size)
    a = ht.array(data, device=ht_device)
    res = ht.diagonal(a)
    self.assertTrue(torch.equal(res._DNDarray__array, torch.arange(size, device=device)))
    self.assertEqual(res.split, None)
    a = ht.array(data, split=0, device=ht_device)
    res = ht.diagonal(a)
    # split diagonal: each process holds exactly its own entry
    self.assertTrue(torch.equal(res._DNDarray__array, torch.tensor([rank], device=device)))
    self.assertEqual(res.split, 0)
    a = ht.array(data, split=1, device=ht_device)
    res2 = ht.diagonal(a, dim1=1, dim2=0)
    self.assertTrue(ht.equal(res, res2))
    res = ht.diagonal(a)
    self.assertTrue(torch.equal(res._DNDarray__array, torch.tensor([rank], device=device)))
    self.assertEqual(res.split, 0)
    a = ht.array(data, split=0, device=ht_device)
    res2 = ht.diagonal(a, dim1=1, dim2=0)
    self.assertTrue(ht.equal(res, res2))
    # (size+1) x (size+1) matrix to exercise offsets on an unevenly divisible shape
    data = torch.arange(size + 1, device=device).repeat(size + 1).reshape(size + 1, size + 1)
    a = ht.array(data, device=ht_device)
    res = ht.diagonal(a, offset=0)
    self.assertTrue(torch.equal(res._DNDarray__array, torch.arange(size + 1, device=device)))
    res = ht.diagonal(a, offset=1)
    self.assertTrue(torch.equal(res._DNDarray__array, torch.arange(1, size + 1, device=device)))
    res = ht.diagonal(a, offset=-1)
    self.assertTrue(torch.equal(res._DNDarray__array, torch.arange(0, size, device=device)))
    a = ht.array(data, split=0, device=ht_device)
    res = ht.diagonal(a, offset=1)
    res.balance_()  # rebalance so each process holds one element
    self.assertTrue(torch.equal(res._DNDarray__array, torch.tensor([rank + 1], device=device)))
    res = ht.diagonal(a, offset=-1)
    res.balance_()
    self.assertTrue(torch.equal(res._DNDarray__array, torch.tensor([rank], device=device)))
    a = ht.array(data, split=1, device=ht_device)
    res = ht.diagonal(a, offset=1)
    res.balance_()
    self.assertTrue(torch.equal(res._DNDarray__array, torch.tensor([rank + 1], device=device)))
    res = ht.diagonal(a, offset=-1)
    res.balance_()
    self.assertTrue(torch.equal(res._DNDarray__array, torch.tensor([rank], device=device)))
    # larger matrix so offset=±10 still leaves 2 elements per process
    data = (
        torch.arange(size * 2 + 10, device=device)
        .repeat(size * 2 + 10)
        .reshape(size * 2 + 10, size * 2 + 10)
    )
    a = ht.array(data, device=ht_device)
    res = ht.diagonal(a, offset=10)
    self.assertTrue(
        torch.equal(res._DNDarray__array, torch.arange(10, 10 + size * 2, device=device))
    )
    res = ht.diagonal(a, offset=-10)
    self.assertTrue(torch.equal(res._DNDarray__array, torch.arange(0, size * 2, device=device)))
    a = ht.array(data, split=0, device=ht_device)
    res = ht.diagonal(a, offset=10)
    res.balance_()
    self.assertTrue(
        torch.equal(
            res._DNDarray__array, torch.tensor([10 + rank * 2, 11 + rank * 2], device=device)
        )
    )
    res = ht.diagonal(a, offset=-10)
    res.balance_()
    self.assertTrue(
        torch.equal(res._DNDarray__array, torch.tensor([rank * 2, 1 + rank * 2], device=device))
    )
    a = ht.array(data, split=1, device=ht_device)
    res = ht.diagonal(a, offset=10)
    res.balance_()
    self.assertTrue(
        torch.equal(
            res._DNDarray__array, torch.tensor([10 + rank * 2, 11 + rank * 2], device=device)
        )
    )
    res = ht.diagonal(a, offset=-10)
    res.balance_()
    self.assertTrue(
        torch.equal(res._DNDarray__array, torch.tensor([rank * 2, 1 + rank * 2], device=device))
    )
    # 3-D cube to exercise dim1/dim2 combinations
    data = (
        torch.arange(size + 1, device=device)
        .repeat((size + 1) * (size + 1))
        .reshape(size + 1, size + 1, size + 1)
    )
    a = ht.array(data, device=ht_device)
    res = ht.diagonal(a)
    self.assertTrue(
        torch.equal(
            res._DNDarray__array,
            torch.arange(size + 1, device=device)
            .repeat(size + 1)
            .reshape(size + 1, size + 1)
            .t(),
        )
    )
    res = ht.diagonal(a, offset=1)
    self.assertTrue(
        torch.equal(
            res._DNDarray__array,
            torch.arange(size + 1, device=device).repeat(size).reshape(size, size + 1).t(),
        )
    )
    res = ht.diagonal(a, offset=-1)
    self.assertTrue(
        torch.equal(
            res._DNDarray__array,
            torch.arange(size + 1, device=device).repeat(size).reshape(size, size + 1).t(),
        )
    )
    res = ht.diagonal(a, dim1=1, dim2=2)
    self.assertTrue(
        torch.equal(
            res._DNDarray__array,
            torch.arange(size + 1, device=device).repeat(size + 1).reshape(size + 1, size + 1),
        )
    )
    res = ht.diagonal(a, offset=1, dim1=1, dim2=2)
    self.assertTrue(
        torch.equal(
            res._DNDarray__array,
            torch.arange(1, size + 1, device=device).repeat(size + 1).reshape(size + 1, size),
        )
    )
    res = ht.diagonal(a, offset=-1, dim1=1, dim2=2)
    self.assertTrue(
        torch.equal(
            res._DNDarray__array,
            torch.arange(size, device=device).repeat(size + 1).reshape(size + 1, size),
        )
    )
    res = ht.diagonal(a, dim1=0, dim2=2)
    self.assertTrue(
        torch.equal(
            res._DNDarray__array,
            torch.arange(size + 1, device=device).repeat(size + 1).reshape(size + 1, size + 1),
        )
    )
    a = ht.array(data, split=0, device=ht_device)
    res = ht.diagonal(a, offset=1, dim1=0, dim2=1)
    res.balance_()
    self.assertTrue(
        torch.equal(
            res._DNDarray__array, torch.arange(size + 1, device=device).reshape(size + 1, 1)
        )
    )
    self.assertEqual(res.split, 1)
    res = ht.diagonal(a, offset=-1, dim1=0, dim2=1)
    res.balance_()
    self.assertTrue(
        torch.equal(
            res._DNDarray__array, torch.arange(size + 1, device=device).reshape(size + 1, 1)
        )
    )
    self.assertEqual(res.split, 1)
    # offset past the edge yields an empty diagonal
    res = ht.diagonal(a, offset=size + 1, dim1=0, dim2=1)
    res.balance_()
    self.assertTrue(
        torch.equal(
            res._DNDarray__array, torch.empty((size + 1, 0), dtype=torch.int64, device=device)
        )
    )
    self.assertTrue(res.shape[res.split] == 0)
    # error paths: invalid offset type, equal dims, duplicate dims, non-DNDarray input
    with self.assertRaises(ValueError):
        ht.diagonal(a, offset=None)
    with self.assertRaises(ValueError):
        ht.diagonal(a, dim1=1, dim2=1)
    with self.assertRaises(ValueError):
        ht.diagonal(a, dim1=1, dim2=-2)
    with self.assertRaises(ValueError):
        ht.diagonal(data)
    # cross-check against numpy for several shapes and axis pairs
    self.assert_func_equal(
        (5, 5, 5),
        heat_func=ht.diagonal,
        numpy_func=np.diagonal,
        heat_args={"dim1": 0, "dim2": 2},
        numpy_args={"axis1": 0, "axis2": 2},
    )
    self.assert_func_equal(
        (5, 4, 3, 2),
        heat_func=ht.diagonal,
        numpy_func=np.diagonal,
        heat_args={"dim1": 1, "dim2": 2},
        numpy_args={"axis1": 1, "axis2": 2},
    )
    self.assert_func_equal(
        (4, 6, 3),
        heat_func=ht.diagonal,
        numpy_func=np.diagonal,
        heat_args={"dim1": 0, "dim2": 1},
        numpy_args={"axis1": 0, "axis2": 1},
    )
def test_randn(self):
    """Exercise ht.random.randn: standard-normal distribution statistics,
    seed reproducibility across shapes and split axes, uniqueness of
    consecutive draws, and the float32 path including Threefry counter
    overflow.

    NOTE(review): statement order matters — each ht.random.* call advances
    the shared Threefry counter.
    """
    # Test that the random values have the correct distribution
    ht.random.seed(54321)
    shape = (5, 10, 13, 23, 15, 20)
    a = ht.random.randn(*shape, split=0, dtype=ht.float64)
    self.assertEqual(a.dtype, ht.float64)
    a = a.numpy()
    mean = np.mean(a)
    median = np.median(a)
    std = np.std(a)
    # standard normal: mean/median near 0, std near 1
    self.assertTrue(-0.01 < mean < 0.01)
    self.assertTrue(-0.01 < median < 0.01)
    self.assertTrue(0.99 < std < 1.01)
    # Compare to a second array with a different shape but same number of elements and same seed
    ht.random.seed(54321)
    elements = np.prod(shape)
    b = ht.random.randn(elements, split=0, dtype=ht.float64)
    b = b.numpy()
    a = a.flatten()
    self.assertTrue(np.allclose(a, b))
    # Creating the same array two times without resetting seed results in different elements
    c = ht.random.randn(elements, split=0, dtype=ht.float64)
    c = c.numpy()
    self.assertEqual(c.shape, b.shape)
    self.assertFalse(np.allclose(b, c))
    # All the created values should be different
    d = np.concatenate((b, c))
    _, counts = np.unique(d, return_counts=True)
    self.assertTrue((counts == 1).all())
    # Two arrays are the same for same seed and split-axis != 0
    ht.random.seed(12345)
    a = ht.random.randn(*shape, split=5, dtype=ht.float64)
    ht.random.seed(12345)
    b = ht.random.randn(*shape, split=5, dtype=ht.float64)
    self.assertTrue(ht.equal(a, b))
    a = a.numpy()
    b = b.numpy()
    self.assertTrue(np.allclose(a, b))
    # Tests with float32
    ht.random.seed(54321)
    a = ht.random.randn(30, 30, 30, dtype=ht.float32, split=2)
    self.assertEqual(a.dtype, ht.float32)
    self.assertEqual(a.larray[0, 0, 0].dtype, torch.float32)
    a = a.numpy()
    self.assertEqual(a.dtype, np.float32)
    mean = np.mean(a)
    median = np.median(a)
    std = np.std(a)
    self.assertTrue(-0.01 < mean < 0.01)
    self.assertTrue(-0.01 < median < 0.01)
    self.assertTrue(0.99 < std < 1.01)
    # a state past 64-bit counter overflow must reproduce the same stream
    ht.random.set_state(("Threefry", 54321, 0x10000000000000000))
    b = ht.random.randn(30, 30, 30, dtype=ht.float32, split=2).numpy()
    self.assertTrue(np.allclose(a, b))
    c = ht.random.randn(30, 30, 30, dtype=ht.float32, split=2).numpy()
    self.assertFalse(np.allclose(a, c))
    self.assertFalse(np.allclose(b, c))