import numpy as np
import oneflow as flow


def test_stack_index_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x1 = flow.ones((2, 1), dtype=flow.float32, requires_grad=True)
        x2 = flow.ones((2, 1), dtype=flow.float32, requires_grad=True)
        y = flow.stack([x1, x2], dim=4)
    test_case.assertTrue("Dimension out of range" in str(context.exception))

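# NOTE: a hedged sketch (not part of the original suite), assuming flow.stack
# follows the PyTorch convention: stacking n-d tensors inserts one new
# dimension, so dim must lie in [-(n + 1), n]. For the 2-d inputs above,
# dim=4 is out of range, while dim=2 is the largest valid positive value.
def _example_stack_valid_dim():
    x1 = flow.ones((2, 1), dtype=flow.float32)
    x2 = flow.ones((2, 1), dtype=flow.float32)
    return flow.stack([x1, x2], dim=2)  # shape: (2, 1, 2)
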
def test_broadcast_like_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x = flow.ones((1, 0), dtype=flow.float32, requires_grad=True)
        like = flow.ones((2, 2, 2), dtype=flow.float32, requires_grad=True)
        y = flow.broadcast_like(x, like)
    test_case.assertTrue("The expanded size of the tensor" in str(context.exception))

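# NOTE: a minimal sketch of a succeeding call for contrast, assuming
# broadcast_like performs right-aligned, expand-style broadcasting so that
# size-1 dimensions of x are expanded to match `like`.
def _example_broadcast_like_valid():
    x = flow.ones((1, 2, 2), dtype=flow.float32)
    like = flow.ones((2, 2, 2), dtype=flow.float32)
    return flow.broadcast_like(x, like)  # shape: (2, 2, 2)
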
def test_stack_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x1 = flow.ones((2, 1), dtype=flow.float32, requires_grad=True)
        x2 = flow.ones((2, 2), dtype=flow.float32, requires_grad=True)
        y = flow.stack([x1, x2])
    test_case.assertTrue("stack expects each tensor to be equal size" in str(context.exception))

def test_to_dtype(test_case):
    x = flow.ones((2, 3), dtype=flow.int32, device="cpu")
    placement = flow.placement("cpu", ranks=[0, 1])
    c_x = flow.ones(
        (2, 3), dtype=flow.int32, placement=placement, sbp=flow.sbp.broadcast
    )

    class CastModule(flow.nn.Module):
        def __init__(self, dtype):
            super().__init__()
            self.dtype = dtype

        def forward(self, x):
            return x.to(dtype=self.dtype)

    m = CastModule(flow.float32)
    g = MyGraph(m)

    e_x = m(x)
    e_c_x = m(c_x)
    # NOTE(chengcheng): There are two bugs in this test script:
    #   1. The first call and the second call do not have the same input tensor meta.
    #   2. nn.Graph does not support local input with multi-rank yet.
    # g_x = g(x)
    g_c_x = g(c_x)

    test_case.assertTrue(e_x.dtype == flow.float32)
    # test_case.assertTrue(g_x.dtype == flow.float32)
    test_case.assertTrue(e_c_x.dtype == flow.float32)
    test_case.assertTrue(g_c_x.dtype == flow.float32)

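# NOTE: the test above relies on a MyGraph wrapper defined elsewhere in the
# file; a minimal nn.Graph wrapper of that kind usually looks like the
# following sketch (the class name here is illustrative, not the original).
class _ExampleGraph(flow.nn.Graph):
    def __init__(self, module):
        super().__init__()
        self.m = module

    def build(self, x):
        return self.m(x)
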
def test_expand_l_shape_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x1 = flow.ones((2, 2), dtype=flow.float32, requires_grad=True)
        x2 = flow.ones((2, 0), dtype=flow.float32, requires_grad=True)
        y = flow.expand(x1, x2.shape)
    test_case.assertTrue("The expanded size of the tensor" in str(context.exception))

def _test_different_dtype(test_case, device, shape):
    y1 = flow.ones(shape, dtype=flow.int32, device=flow.device(device))
    test_case.assertTrue(np.array_equal(np.ones(shape, dtype=np.int32), y1.numpy()))
    y2 = flow.ones(shape, dtype=flow.uint8, device=flow.device(device))
    test_case.assertTrue(np.array_equal(np.ones(shape, dtype=np.uint8), y2.numpy()))
    y3 = flow.ones(shape, dtype=flow.float64, device=flow.device(device))
    test_case.assertTrue(np.array_equal(np.ones(shape, dtype=np.float64), y3.numpy()))

def test_matmul_dimension_error2(test_case):
    with test_case.assertRaises(Exception) as ctx:
        x = flow.ones((4, 4), dtype=flow.float32)
        w = flow.ones((4,), dtype=flow.float32)
        out = flow._C.matmul(x, w, False, False, 1.0)
    test_case.assertTrue("Tensor b's dim should >= 2" in str(ctx.exception))

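# NOTE: for contrast, a hedged sketch through the public API. flow.matmul
# broadcasts like PyTorch's matmul, but the internal flow._C.matmul checked
# above requires b to have dim >= 2, so w is kept 2-d here.
def _example_matmul_valid():
    x = flow.ones((4, 4), dtype=flow.float32)
    w = flow.ones((4, 1), dtype=flow.float32)
    return flow.matmul(x, w)  # shape: (4, 1)
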
def test_concat_match_size_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x1 = flow.ones((2, 2), dtype=flow.float32, requires_grad=True)
        x2 = flow.ones((2, 3), dtype=flow.float32, requires_grad=True)
        y = flow.concat([x1, x2])
    test_case.assertTrue("Sizes of tensors must match except in dimension" in str(context.exception))

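# NOTE: an illustrative sketch (assumption: flow.concat follows the PyTorch
# rule that tensors may differ only along the concatenation dimension).
def _example_concat_matching_sizes():
    x1 = flow.ones((2, 2), dtype=flow.float32)
    x2 = flow.ones((2, 3), dtype=flow.float32)
    return flow.concat([x1, x2], dim=1)  # shape: (2, 5)
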
def _test_global_tensor_str(test_case, device):
    placement = flow.placement(device, range(1))
    # split global tensor
    x = flow.ones((10, 10), placement=placement, sbp=[flow.sbp.split(0)])
    tensor_str = str(x)
    test_case.assertTrue("1." in tensor_str)
    # broadcast global tensor
    x = flow.ones((10, 10), placement=placement, sbp=[flow.sbp.broadcast])
    tensor_str = str(x)
    test_case.assertTrue("1." in tensor_str)
    # partial_sum global tensor
    x = flow.ones((10, 10), placement=placement, sbp=[flow.sbp.partial_sum])
    tensor_str = str(x)
    test_case.assertTrue("1." in tensor_str)
    # summarized global tensor
    x = flow.ones((100, 100), placement=placement, sbp=[flow.sbp.split(0)])
    tensor_str = str(x)
    test_case.assertTrue("1." in tensor_str)
    test_case.assertTrue("..." in tensor_str)
    # empty global tensor
    x = flow.ones((0, 10), placement=placement, sbp=[flow.sbp.split(0)])
    tensor_str = str(x)
    test_case.assertTrue("[]" in tensor_str)

def test_gather_size_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x1 = flow.ones((2, 2), dtype=flow.float32, requires_grad=True)
        x2 = flow.ones((4, 2), dtype=flow.int64)
        y = flow.gather(x1, 1, x2)
    test_case.assertTrue("Size does not match at dimension" in str(context.exception))

def test_meshgrid_indexing_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x1 = flow.ones((2,), dtype=flow.float32, requires_grad=True)
        x2 = flow.ones((2,), dtype=flow.float32, requires_grad=True)
        y = flow.meshgrid(x1, x2, indexing="ab")
    test_case.assertTrue("meshgrid: indexing must be one of" in str(context.exception))

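# NOTE: a minimal sketch of the accepted values hinted at by the message
# above; assuming OneFlow matches NumPy/PyTorch, indexing must be "ij"
# (matrix order) or "xy" (Cartesian order).
def _example_meshgrid_indexing():
    x = flow.arange(2, dtype=flow.float32)
    y = flow.arange(3, dtype=flow.float32)
    gx, gy = flow.meshgrid(x, y, indexing="ij")
    return gx.shape, gy.shape  # ((2, 3), (2, 3))
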
def test_gather_dim_value_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x1 = flow.ones((2, 2), dtype=flow.float32, requires_grad=True)
        x2 = flow.ones((2, 2), dtype=flow.int64)
        y = flow.gather(x1, 2, x2)
    test_case.assertTrue("Dimension out of range" in str(context.exception))

def test_masked_select_broadcast(test_case):
    x = flow.ones(2, 3, 3)
    # The (3, 3) mask is broadcast over the leading batch dimension of x;
    # triu(..., 1) keeps the 3 strictly-upper-triangular entries per slice,
    # so 2 * 3 = 6 elements are selected.
    mask = flow.triu(flow.ones(3, 3), 1)
    flow_res = flow.masked_select(x, mask)
    np_res = [1, 1, 1, 1, 1, 1]
    test_case.assertTrue(np.allclose(flow_res.numpy(), np_res, 1e-05, 1e-05))

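# NOTE: a hedged sketch with distinct values to make the broadcast visible.
# The bool cast is an assumption added for clarity; the test above passes a
# float mask directly.
def _example_masked_select_broadcast():
    x = flow.arange(18, dtype=flow.float32).reshape(2, 3, 3)
    mask = flow.triu(flow.ones(3, 3), 1).to(flow.bool)
    return flow.masked_select(x, mask)  # 6 elements: 3 per batch slice
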
def test_concat_dim_equal_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x1 = flow.ones((2, 2), dtype=flow.float32, requires_grad=True)
        x2 = flow.ones((2, 2, 2), dtype=flow.float32, requires_grad=True)
        y = flow.concat([x1, x2])
    test_case.assertTrue("Tensors must have same number of dimensions" in str(context.exception))

def _test_consistent_tensor_str(test_case, device):
    placement = flow.placement(device, {0: range(1)})
    # split consistent tensor
    x = flow.ones((10, 10), placement=placement, sbp=[flow.sbp.split(0)])
    tensor_str = str(x)
    test_case.assertTrue("1." in tensor_str)
    # broadcast consistent tensor
    x = flow.ones((10, 10), placement=placement, sbp=[flow.sbp.broadcast])
    tensor_str = str(x)
    test_case.assertTrue("1." in tensor_str)
    # partial_sum consistent tensor
    x = flow.ones((10, 10), placement=placement, sbp=[flow.sbp.partial_sum])
    tensor_str = str(x)
    test_case.assertTrue("1." in tensor_str)
    # summarized consistent tensor
    x = flow.ones((100, 100), placement=placement, sbp=[flow.sbp.split(0)])
    tensor_str = str(x)
    test_case.assertTrue("1." in tensor_str)
    test_case.assertTrue("..." in tensor_str)
    # empty consistent tensor
    x = flow.ones((0, 10), placement=placement, sbp=[flow.sbp.split(0)])
    tensor_str = str(x)
    test_case.assertTrue("[]" in tensor_str)

def __init__(self, contiguous: bool, device):
    super().__init__()
    if contiguous:
        self.weight = flow.nn.Parameter(flow.ones(4, 3, device=device))
    else:
        # Same (4, 3) shape, but built via transpose, so the underlying
        # memory is non-contiguous.
        self.weight = flow.nn.Parameter(
            flow.ones(3, 4, device=device).transpose(0, 1)
        )

def test_gather_index_type_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x1 = flow.ones((2, 2), dtype=flow.float32, requires_grad=True)
        x2 = flow.ones((2, 2), dtype=flow.float32)
        y = flow.gather(x1, 1, x2)
    test_case.assertTrue("gather(): Expected dtype int32 or int64 for index" in str(context.exception))

def test_cross_entropy_reduction_type_error(test_case):
    with test_case.assertRaises(Exception) as ctx:
        x = flow.ones((4, 4), dtype=flow.float32)
        target = flow.ones((4, 4), dtype=flow.float32)
        out = flow._C.cross_entropy(x, target, None, 0, "just_test")
    test_case.assertTrue("Reduction should be none, sum or mean." in str(ctx.exception))

def test_meshgrid_tensors_dtype_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x1 = flow.ones((2,), dtype=flow.float32, requires_grad=True)
        x2 = flow.ones((2,), dtype=flow.float16, requires_grad=True)
        y = flow.meshgrid(x1, x2)
    test_case.assertTrue("meshgrid expects all tensors to have the same dtype" in str(context.exception))

def test_gather_dim_equal_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x1 = flow.ones((2, 2), dtype=flow.float32, requires_grad=True)
        x2 = flow.ones((2, 2, 2), dtype=flow.int64)
        y = flow.gather(x1, 1, x2)
    test_case.assertTrue("Index tensor must have the same number of dimensions as input tensor" in str(context.exception))

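# NOTE: a minimal sketch satisfying all of the constraints the gather error
# tests in this section exercise: the index tensor is int64, has the same
# number of dimensions as the input, matches the input's size on the
# non-gather dimensions, and dim is in range.
def _example_gather_valid():
    x = flow.ones((2, 2), dtype=flow.float32)
    index = flow.zeros((2, 2), dtype=flow.int64)
    return flow.gather(x, 1, index)  # shape: (2, 2)
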
def test_add_inplace_runtime_error(test_case):
    with test_case.assertRaises(RuntimeError) as context:
        x = flow.ones((4, 4), dtype=flow.float32, requires_grad=True)
        y = flow.ones((4, 4), dtype=flow.float32, requires_grad=True)
        x.add_(y)
    test_case.assertTrue("a leaf Tensor that requires grad is being used in an in-place operation" in str(context.exception))

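# NOTE: a hedged sketch of the usual workaround for the error tested above,
# assuming OneFlow mirrors PyTorch here: wrap the in-place update of a leaf
# tensor in no_grad so autograd does not record the mutation.
def _example_inplace_update_on_leaf():
    x = flow.ones((4, 4), dtype=flow.float32, requires_grad=True)
    y = flow.ones((4, 4), dtype=flow.float32)
    with flow.no_grad():
        x.add_(y)  # allowed: not tracked by autograd
    return x
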
def test_expand_dim_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x1 = flow.ones((2, 1), dtype=flow.float32, requires_grad=True)
        x2 = flow.ones((2,), dtype=flow.float32, requires_grad=True)
        y = flow.expand(x1, x2.shape)
    test_case.assertTrue("be greater or equal to the number of dimensions in the tensor" in str(context.exception))

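# NOTE: a hedged example of the rule checked above (PyTorch-style expand
# semantics assumed): the target shape must have at least as many dimensions
# as the input; new leading dimensions may be added and size-1 dimensions
# expanded.
def _example_expand_valid():
    x = flow.ones((2, 1), dtype=flow.float32)
    return x.expand(3, 2, 4)  # shape: (3, 2, 4)
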
def test_parameter_set_data(test_case):
    a = flow.nn.Parameter(flow.ones(2, 3), False)
    old_id = id(a)
    b = flow.nn.Parameter(flow.ones(4, 5), True)
    a.data = b
    # Assigning .data replaces the storage (and shape) in place: the Python
    # object identity, requires_grad flag, and leaf status of `a` are kept.
    test_case.assertEqual(old_id, id(a))
    test_case.assertTrue(a.shape == (4, 5))
    test_case.assertFalse(a.requires_grad)
    test_case.assertTrue(a.is_leaf)

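# NOTE: a small illustration (not from the original suite) of the semantics
# asserted above, with illustrative shapes and names.
def _example_parameter_set_data():
    p = flow.nn.Parameter(flow.zeros(2, 2), requires_grad=False)
    p.data = flow.ones(3, 3)
    return p.shape, p.requires_grad, p.is_leaf  # ((3, 3), False, True)
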
def test_copy(test_case):
    # local -> local
    x = flow.zeros(2, 3)
    y = flow.ones(2, 3)
    x.copy_(y)
    test_case.assertTrue(np.array_equal(x.numpy(), y.numpy()))

    # global: cuda destination, cpu source
    x = flow.zeros(4, 6, placement=flow.placement("cuda", [0, 1]), sbp=flow.sbp.broadcast)
    y = flow.ones(4, 6, placement=flow.placement("cpu", [0]), sbp=flow.sbp.broadcast)
    x.copy_(y)
    test_case.assertTrue(np.array_equal(x.numpy(), y.numpy()))

    # global: different rank sets
    x = flow.zeros(4, 6, placement=flow.placement("cuda", [0, 1]), sbp=flow.sbp.broadcast)
    y = flow.ones(4, 6, placement=flow.placement("cuda", [0]), sbp=flow.sbp.broadcast)
    x.copy_(y)
    test_case.assertTrue(np.array_equal(x.numpy(), y.numpy()))

    # global: split destination, broadcast source
    x = flow.zeros(4, 6, placement=flow.placement("cuda", [0, 1]), sbp=flow.sbp.split(0))
    y = flow.ones(4, 6, placement=flow.placement("cuda", [0, 1]), sbp=flow.sbp.broadcast)
    x.copy_(y)
    test_case.assertTrue(np.array_equal(x.numpy(), y.numpy()))

    # global: identical placement and sbp
    x = flow.zeros(4, 6, placement=flow.placement("cuda", [0, 1]), sbp=flow.sbp.broadcast)
    y = flow.ones(4, 6, placement=flow.placement("cuda", [0, 1]), sbp=flow.sbp.broadcast)
    x.copy_(y)
    test_case.assertTrue(np.array_equal(x.numpy(), y.numpy()))

    # global destination, numpy source
    x = flow.zeros(4, 6, placement=flow.placement("cuda", [0, 1]), sbp=flow.sbp.broadcast)
    y = np.ones((4, 6), dtype=np.float32)
    x.copy_(y)
    test_case.assertTrue(np.array_equal(x.numpy(), y))

def test_indices_on_different_devices(test_case):
    x = flow.ones(3, 10)
    y = flow.ones(3, 10, device=flow.device("cuda:0"))
    # Index tensors may live on a different device than the indexed tensor.
    x_idx = [flow.tensor([1, 2]), flow.tensor([2, 0], device=flow.device("cuda:0"))]
    y_idx = [flow.tensor([1, 2], device=flow.device("cuda:0")), flow.tensor([2, 0])]
    test_case.assertTrue(np.allclose(x[x_idx].numpy(), np.array([1, 1])))
    test_case.assertTrue(np.allclose(y[y_idx].numpy(), np.array([1, 1])))

def test_fuse_mlp_weight_size_error(test_case):
    with test_case.assertRaises(Exception) as ctx:
        x = flow.ones((4, 4), dtype=flow.float32)
        bias = flow.ones((4,), dtype=flow.float32)
        out = flow._C.fused_mlp(x, [], [bias], False)
    test_case.assertTrue("The number of weights should be greater equal than 1" in str(ctx.exception))

def test_matmul_dimension_error3(test_case):
    with test_case.assertRaises(Exception) as ctx:
        x = flow.ones((4, 1, 2, 1), dtype=flow.float32)
        w = flow.ones((4, 4, 4), dtype=flow.float32)
        out = flow._C.matmul(x, w, False, False, 1.0)
    test_case.assertTrue("Not support number of dimensions of a being less than number of dimensions of b!" in str(ctx.exception))

def test_bias_add_dimension_match_error(test_case):
    with test_case.assertRaises(Exception) as ctx:
        x = flow.ones((4, 4), dtype=flow.float32)
        bias = flow.ones((5,), dtype=flow.float32)
        out = flow._C.bias_add(x, bias, axis=1)
    test_case.assertTrue("The size of tensor x (4,4) must match the size of tensor b (5,) at dimension 1" in str(ctx.exception))

def test_bias_add_index_error(test_case):
    with test_case.assertRaises(Exception) as ctx:
        x = flow.ones((4, 4), dtype=flow.float32)
        bias = flow.ones((5,), dtype=flow.float32)
        out = flow._C.bias_add(x, bias, axis=3)
    test_case.assertTrue("Dimension out of range (expected to be in range of [-2,1], but got 3)" in str(ctx.exception))

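# NOTE: a hypothetical helper (not in the suite) spelling out the range the
# message above reports: for an n-d tensor, a valid axis lies in [-n, n - 1].
def _valid_axis_range(ndim):
    return -ndim, ndim - 1  # e.g. ndim=2 -> (-2, 1), matching "[-2,1]"
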
def test_view_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        # permute produces a non-contiguous tensor, which view cannot handle
        x1 = flow.ones((2, 3, 4), dtype=flow.float32, requires_grad=True).permute(1, 0, 2)
        x2 = flow.ones((4, 6), dtype=flow.float32, requires_grad=True)
        y = flow.view(x1, x2.shape)
    test_case.assertTrue("view size is not compatible with input tensor's size" in str(context.exception))

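# NOTE: a minimal sketch of how the error above is typically avoided,
# assuming OneFlow's reshape, like PyTorch's, copies when the input is
# non-contiguous, while view requires contiguous memory.
def _example_view_on_non_contiguous():
    x = flow.ones((2, 3, 4), dtype=flow.float32).permute(1, 0, 2)
    y = x.contiguous().view(4, 6)  # make memory contiguous, then view
    z = x.reshape(4, 6)            # or let reshape copy as needed
    return y, z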