Code example #1
 def test_binary_op_scalarlist_fastpath(self, device, dtype, op):
     for N in N_values:
         for type_str, scalarlist in getScalarLists(N):
             # torch.div promotes integral/bool inputs to floating point, so
             # division on those dtypes always falls back to the slowpath.
             bool_int_div = op.ref == torch.div and dtype in integral_types_and(torch.bool)
             disable_fastpath = bool_int_div
             if type_str == "int":
                 disable_fastpath |= dtype == torch.bool
             if type_str == "float":
                 disable_fastpath |= dtype in integral_types_and(torch.bool)
             if type_str == "complex":
                 disable_fastpath |= dtype not in complex_types()
             if type_str == "mixed":
                 disable_fastpath |= dtype not in complex_types()
             self._test_binary_op_scalarlist(device, dtype, op, N, scalarlist, True, disable_fastpath)
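The scalar-list checks above lean on PyTorch's dtype-group helpers. As a minimal sketch (assuming it runs inside a PyTorch checkout, where torch.testing._internal is importable), this is how those helpers classify a dtype:

 import torch
 from torch.testing._internal.common_dtype import complex_types, integral_types_and

 for dt in (torch.bool, torch.int32, torch.float32, torch.complex64):
     print(dt, dt in integral_types_and(torch.bool), dt in complex_types())
 # torch.bool and torch.int32 are in integral_types_and(torch.bool);
 # only torch.complex64 is in complex_types().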
Code example #2
 def _test_unary(self, device, dtype, opinfo, N, is_fastpath):
     op, ref, inplace_op, inplace_ref = self._get_funcs(opinfo, 1)
     # A 1-tuple holding the argument list: the unary helpers below consume
     # inputs as a tuple of per-argument tensor lists.
     inputs = (opinfo.sample_inputs(device, dtype, N, noncontiguous=not is_fastpath),)
     # note(mkozuki): Complex inputs for `_foreach_abs` go through slowpath.
     if opinfo.name == "_foreach_abs" and dtype in complex_types():
         is_fastpath = False
     self._regular_unary_test(dtype, op, ref, inputs, is_fastpath)
     self._inplace_unary_test(dtype, inplace_op, inplace_ref, inputs, is_fastpath)
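For context, a hedged sketch of what the unary test compares in spirit: the horizontally fused foreach op against a per-tensor reference loop. torch._foreach_abs is a real but internal PyTorch API, so treat this as illustration only:

 import torch

 tensors = [torch.randn(3) for _ in range(4)]
 fused = torch._foreach_abs(tensors)          # one fused call over the list
 reference = [torch.abs(t) for t in tensors]  # the slowpath-equivalent loop
 assert all(torch.equal(a, b) for a, b in zip(fused, reference))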
Code example #3
 def test_binary_op_scalar_fastpath(self, device, dtype, op):
     for N, scalar in itertools.product(N_values, Scalars):
         # True division promotes integral/bool inputs, which the fused
         # foreach kernels cannot do, so torch.div skips the fastpath there.
         disable_fastpath = op.ref == torch.div and dtype in integral_types_and(torch.bool)
         if isinstance(scalar, int):
             disable_fastpath |= dtype == torch.bool
         if isinstance(scalar, float):
             disable_fastpath |= dtype in integral_types_and(torch.bool)
         if isinstance(scalar, bool):
             disable_fastpath |= dtype == torch.bool
             # add and mul are the only ops expected to stay on the fastpath
             # with a bool scalar, so the flag is cleared for them.
             if op.ref in (torch.add, torch.mul):
                 disable_fastpath = False
         if isinstance(scalar, complex):
             disable_fastpath |= dtype not in complex_types()
         self._test_binary_op_scalar(device, dtype, op, N, scalar, True, disable_fastpath)
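The torch.div special case exists because true division promotes integral inputs to floating point, which is why the tests expect the slowpath for those dtypes. A small sketch of that promotion (torch._foreach_div is internal, so this is illustrative, not authoritative):

 import torch

 tensors = [torch.tensor([4, 6]), torch.tensor([8])]
 out = torch._foreach_div(tensors, 2)
 print(out[0].dtype)  # a floating dtype (e.g. torch.float32), not torch.int64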
Code example #4
File: test_complex.py Project: yuguo68/pytorch
import torch
from torch.testing._internal.common_device_type import dtypes
from torch.testing._internal.common_dtype import complex_types
from torch.testing._internal.common_utils import TestCase


class TestComplexTensor(TestCase):
    @dtypes(*complex_types())
    def test_to_list(self, device, dtype):
        # test that the complex float tensor has expected values and
        # there's no garbage value in the resultant list
        self.assertEqual(
            torch.zeros((2, 2), device=device, dtype=dtype).tolist(),
            [[0j, 0j], [0j, 0j]])

    @dtypes(torch.float32, torch.float64)
    def test_dtype_inference(self, device, dtype):
        # issue: https://github.com/pytorch/pytorch/issues/36834
        # Complex inference follows the default float dtype; restore the
        # default even if tensor construction raises.
        default_dtype = torch.get_default_dtype()
        torch.set_default_dtype(dtype)
        try:
            x = torch.tensor([3., 3. + 5.j], device=device)
        finally:
            torch.set_default_dtype(default_dtype)
        self.assertEqual(
            x.dtype, torch.cdouble if dtype == torch.float64 else torch.cfloat)
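The inference under test can be reproduced standalone: complex literals pick their precision from the current default float dtype. A minimal sketch:

 import torch

 default = torch.get_default_dtype()
 try:
     torch.set_default_dtype(torch.float64)
     x = torch.tensor([3., 3. + 5.j])
 finally:
     torch.set_default_dtype(default)
 print(x.dtype)  # torch.complex128, i.e. torch.cdouble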
Code example #5
     # Fragment of an OpInfo definition: the tail of its skips tuple, followed
     # by its decorators and sample_inputs_func; the earlier fields are elided.
     DecorateInfo(
         unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
     ),
     DecorateInfo(
         unittest.skip("Failing on some jobs"),
         "TestReductions",
         "test_reference_masked",
         dtypes=(torch.bool, torch.int8, torch.int16, torch.int32),
     ),
     # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs)
     DecorateInfo(
         unittest.skip("Skipped!"),
         "TestMasked",
         "test_mask_layout",
         device_type="cuda",
         dtypes=(torch.bool, *integral_types(), *complex_types()),
     ),
 ),
 decorators=[
     DecorateInfo(
         toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)}),
         "TestReductions",
         "test_reference_masked",
     ),
     DecorateInfo(
         toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),
         "TestReductions",
         "test_ref_duplicate_values",
     ),
 ],
 sample_inputs_func=sample_inputs_masked_reduction,
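A minimal sketch of the test-infrastructure pieces this fragment uses. The imports are PyTorch-internal utilities whose module paths can shift between releases, so treat them as assumptions:

 import unittest

 import torch
 from torch.testing._internal.common_device_type import tol, toleranceOverride
 from torch.testing._internal.common_methods_invocations import DecorateInfo

 # tol is a plain (atol, rtol) record; toleranceOverride maps dtypes to one,
 # loosening comparison tolerances for the decorated test.
 override = toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)})

 # DecorateInfo binds a decorator to the generated test it should wrap,
 # optionally narrowed by device_type and dtypes.
 info = DecorateInfo(
     unittest.skip("example"),
     "TestReductions",
     "test_reference_masked",
     dtypes=(torch.float16,),
 )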