Example #1
    def test_aliases(self):
        # Tests that op aliases are correctly normalized to their canonical ops.
        # Other properties, such as correctness, are not checked here because the
        # common method registry is already tested for those in test_jit.py.

        op_registry = {}
        for op in method_tests():
            op_registry[op[0]] = op

        for alias, mapping in op_alias_mappings.items():
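            # For a hypothetical entry such as 'absolute' -> 'abs' (illustrative only),
            # `alias` would be 'absolute' and `mapping` the canonical name 'abs'.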
            assert alias in op_registry, "Test not found for {} alias".format(alias)

            name, self_size, args, kwargs, output_process_fn = get_defaults(*op_registry[alias])

            def fn(*inputs, **kwargs):
                attr = getattr(inputs[0], name)
                output = attr(*inputs[1:], **kwargs)
                return output_process_fn(output)

            self_variable = create_input((self_size,))[0][0]
            args_variable, kwargs_variable = create_input(args, requires_grad=False, call_kwargs=kwargs)

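            # Tracing: the traced graph must contain the canonical name (`mapping`)
            # and must not mention the alias.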
            traced_fn = create_traced_fn(self, fn)
            inputs = (self_variable,) + args_variable
            traced_fn(*inputs, **kwargs)
            last_graph = traced_fn.last_graph
            FileCheck().check(mapping).check_not(alias).run(last_graph)

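            # Scripting: the scripted graph must normalize the alias in the same way.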
            script_fn = create_script_fn(self, name, 'method', output_process_fn)
            script_fn(*inputs, **kwargs)
            last_graph = script_fn.last_graph
            FileCheck().check(mapping).check_not(alias).run(last_graph)
Example #2
from torch.testing._internal.common_utils import \
    (TestCase, is_iterable_of_tensors, run_tests, IS_SANDCASTLE, clone_input_helper, make_tensor)
from torch.testing._internal.common_methods_invocations import \
    (op_db, method_tests)
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, ops, onlyCPU, onlyOnCPUAndCUDA, skipCUDAIfRocm, OpDTypes)
from torch.testing._internal.common_jit import JitCommonTestCase, check_against_reference
from torch.autograd.gradcheck import gradcheck, gradgradcheck

from torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, \
    check_alias_annotation
from torch.testing._internal.jit_utils import disable_autodiff_subgraph_inlining

# Get names of all the operators which have an entry in `method_tests` (legacy testing infra)
method_tested_operators = set(
    map(lambda test_details: test_details[0], method_tests()))

# Tests that apply to all operators


class TestOpInfo(TestCase):
    exact_dtype = True

    # Verifies that ops have their unsupported dtypes
    #   registered correctly by testing that each claimed unsupported dtype
    #   throws a runtime error
    @skipCUDAIfRocm
    @onlyOnCPUAndCUDA
    @ops(op_db, dtypes=OpDTypes.unsupported)
    def test_unsupported_dtypes(self, device, dtype, op):
        # sample_inputs may rely on a helper function to generate its inputs, and that
        # helper may not work for the specified dtype
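        # The rest of the body is truncated here. A minimal sketch of how the check
        # can proceed (illustrative, not the verbatim upstream implementation):
        # generate sample inputs for the claimed-unsupported dtype and expect the op
        # to raise a RuntimeError.
        try:
            samples = op.sample_inputs(device, dtype)
        except RuntimeError as e:
            self.skipTest("Skipped! Could not generate sample inputs: {}".format(e))

        if len(samples) == 0:
            self.skipTest("Skipped! No sample inputs!")

        # Only the first sample is exercised; invoking the op with an unsupported
        # dtype is expected to raise.
        sample = samples[0]
        with self.assertRaises(RuntimeError):
            op(sample.input, *sample.args, **sample.kwargs)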
Example #3
from torch.testing._internal.common_utils import \
    (TestCase, is_iterable_of_tensors, run_tests, IS_SANDCASTLE, clone_input_helper, make_tensor,
     gradcheck, gradgradcheck)
from torch.testing._internal.common_methods_invocations import \
    (op_db, method_tests)
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, ops, onlyCPU, onlyOnCPUAndCUDA, skipCUDAIfRocm, OpDTypes)
from torch.testing._internal.common_jit import JitCommonTestCase, check_against_reference

from torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, \
    check_alias_annotation
from torch.testing._internal.jit_utils import disable_autodiff_subgraph_inlining


# Get names of all the operators which have an entry in `method_tests` (legacy testing infra)
method_tested_operators = set(map(lambda test_details: test_details[0], method_tests()))

# Tests that apply to all operators

class TestOpInfo(TestCase):
    exact_dtype = True

    # Verifies that ops have their unsupported dtypes
    #   registered correctly by testing that each claimed unsupported dtype
    #   throws a runtime error
    @skipCUDAIfRocm
    @onlyOnCPUAndCUDA
    @ops(op_db, dtypes=OpDTypes.unsupported)
    def test_unsupported_dtypes(self, device, dtype, op):
        # sample_inputs may rely on a helper function to generate its inputs, and that
        # helper may not work for the specified dtype
        # https://github.com/pytorch/pytorch/issues/49024
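        # The body is truncated here as well; the same sample-inputs /
        # assertRaises(RuntimeError) sketch shown after Example #2 applies.
        ...


# The imported instantiate_device_type_tests and run_tests helpers are what turn the
# generic TestOpInfo class into per-device (e.g. CPU and CUDA) test cases. A typical
# epilogue for a test module of this shape (a sketch of the usual pattern, not the
# truncated original file):
instantiate_device_type_tests(TestOpInfo, globals())

if __name__ == '__main__':
    run_tests()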