def test_add(self):
    """Element-wise addition of scalars, tensors and vectors; bad operands raise."""
    expected = ht.array([[3.0, 4.0], [5.0, 6.0]])

    # scalar + scalar stays a scalar of the promoted type
    self.assertTrue(ht.equal(ht.add(self.a_scalar, self.a_scalar), ht.float32(4.0)))

    # every tensor/scalar/vector pairing below must broadcast to the same result
    operand_pairs = (
        (self.a_tensor, self.a_scalar),
        (self.a_scalar, self.a_tensor),
        (self.a_tensor, self.another_tensor),
        (self.a_tensor, self.a_vector),
        (self.a_tensor, self.an_int_scalar),
        (self.a_split_tensor, self.a_tensor),
    )
    for lhs, rhs in operand_pairs:
        self.assertTrue(ht.equal(ht.add(lhs, rhs), expected))

    # shape mismatches and unsupported operand types must fail loudly
    with self.assertRaises(ValueError):
        ht.add(self.a_tensor, self.another_vector)
    with self.assertRaises(TypeError):
        ht.add(self.a_tensor, self.errorneous_type)
    with self.assertRaises(TypeError):
        ht.add("T", "s")
def test_add(self):
    """Addition across scalars, tensors, splits, unbalanced slices and broadcasts."""
    # basics: all of these operand pairings broadcast to the same 2x2 result
    expected = ht.array([[3.0, 4.0], [5.0, 6.0]])
    self.assertTrue(ht.equal(ht.add(self.a_scalar, self.a_scalar), ht.float32(4.0)))
    for lhs, rhs in (
        (self.a_tensor, self.a_scalar),
        (self.a_scalar, self.a_tensor),
        (self.a_tensor, self.another_tensor),
        (self.a_tensor, self.a_vector),
        (self.a_tensor, self.an_int_scalar),
        (self.a_split_tensor, self.a_tensor),
    ):
        self.assertTrue(ht.equal(ht.add(lhs, rhs), expected))

    # single-element operand along the split dimension
    a = ht.array([1], split=0)
    b = ht.array([1, 2], split=0)
    c = ht.add(a, b)
    self.assertTrue(ht.equal(c, ht.array([2, 3])))
    if c.comm.size > 1:
        # only the first two ranks hold one element each; the rest hold none
        local_len = 1 if c.comm.rank < 2 else 0
        self.assertEqual(c.larray.size()[0], local_len)

    # differently distributed DNDarrays (shifted slices of split arrays)
    a = ht.ones(10, split=0)
    b = ht.zeros(10, split=0)
    c = a[:-1] + b[1:]
    self.assertTrue((c == 1).all())
    self.assertTrue(c.lshape == a[:-1].lshape)

    # unbalanced distribution: both operands trimmed at both ends
    c = a[1:-1] + b[1:-1]
    self.assertTrue((c == 1).all())
    self.assertTrue(c.lshape == a[1:-1].lshape)

    # one operand unsplit, the other split
    a = ht.ones(10, split=None)
    b = ht.zeros(10, split=0)
    c = a[:-1] + b[1:]
    self.assertTrue((c == 1).all())
    self.assertEqual(c.lshape, b[1:].lshape)
    c = b[:-1] + a[1:]
    self.assertTrue((c == 1).all())
    self.assertEqual(c.lshape, b[:-1].lshape)

    # broadcasting along the split dimension itself
    a = ht.ones((1, 10), split=0)
    b = ht.zeros((2, 10), split=0)
    for c in (a + b, b + a):
        self.assertTrue((c == 1).all())
        self.assertTrue(c.lshape == b.lshape)

    # shape mismatches and unsupported operand types must fail loudly
    with self.assertRaises(ValueError):
        ht.add(self.a_tensor, self.another_vector)
    with self.assertRaises(TypeError):
        ht.add(self.a_tensor, self.erroneous_type)
    with self.assertRaises(TypeError):
        ht.add("T", "s")
def test___binary_op_broadcast(self):
    """Shape broadcasting of binary ops under every split configuration."""
    # no split on either operand
    lhs = ht.ones((4, 1), device=ht_device)
    rhs = ht.ones((1, 2), device=ht_device)
    self.assertEqual((lhs + rhs).shape, (4, 2))
    self.assertEqual((rhs + lhs).shape, (4, 2))

    # both operands split along dimension 0
    lhs = ht.ones((4, 1), split=0, device=ht_device)
    rhs = ht.ones((1, 2), split=0, device=ht_device)
    self.assertEqual((lhs + rhs).shape, (4, 2))
    self.assertEqual((rhs + lhs).shape, (4, 2))

    # both operands split along dimension 1
    lhs = ht.ones((4, 1), split=1, device=ht_device)
    rhs = ht.ones((1, 2), split=1, device=ht_device)
    self.assertEqual((lhs + rhs).shape, (4, 2))
    self.assertEqual((rhs + lhs).shape, (4, 2))

    # only the second operand split (along dimension 1)
    lhs = ht.ones((4, 1), device=ht_device)
    rhs = ht.ones((1, 2), split=1, device=ht_device)
    self.assertEqual((lhs - rhs).shape, (4, 2))
    self.assertEqual((rhs + lhs).shape, (4, 2))

    # only the first operand split (along dimension 0)
    lhs = ht.ones((4, 1), split=0, device=ht_device)
    rhs = ht.ones((1, 2), device=ht_device)
    self.assertEqual((lhs - rhs).shape, (4, 2))
    self.assertEqual((rhs + lhs).shape, (4, 2))

    # unequal ranks with one split tensor
    lhs = ht.ones((2, 4, 1), split=0, device=ht_device)
    rhs = ht.ones((1, 2), device=ht_device)
    self.assertEqual((lhs + rhs).shape, (2, 4, 2))
    self.assertEqual((rhs - lhs).shape, (2, 4, 2))

    # unequal ranks with both tensors split
    lhs = ht.ones((4, 1, 3, 1, 2), split=0, dtype=torch.uint8, device=ht_device)
    rhs = ht.ones((1, 3, 1), split=0, dtype=torch.uint8, device=ht_device)
    self.assertEqual((lhs + rhs).shape, (4, 1, 3, 3, 2))
    self.assertEqual((rhs + lhs).shape, (4, 1, 3, 3, 2))

    # non-array operand is rejected
    with self.assertRaises(TypeError):
        ht.add(ht.ones((1, 2), device=ht_device), "wrong type")
    # mismatching split axes are not supported
    with self.assertRaises(NotImplementedError):
        ht.add(
            ht.ones((1, 2), split=0, device=ht_device),
            ht.ones((1, 2), split=1, device=ht_device),
        )