Example #1
    def test_named_grads_and_indexed_grads(self) -> None:
        specification = "func(Tensor a, Tensor b) -> (Tensor x, Tensor y)"
        schema = torchgen.model.FunctionSchema.parse(specification)
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
                                              func=schema)

        with self.assertRaisesRegex(
                RuntimeError, 'illegally mixes use of "grad_RETURN_NAME"'):
            load_derivatives.create_differentiability_info(
                defn_dict={
                    "name": specification,
                    # Uh-oh, the derivatives reference gradients by
                    # name and by index.
                    "dispatch": {
                        "Default": {
                            "a": "grad_x",
                            "b": "grads[1]",
                        }
                    },
                },
                functions_by_signature={schema.signature(): [native_function]},
                functions_by_schema={specification: native_function},
                op_counter=typing.Counter[str](),
                used_dispatch_keys=set(),
            )
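For contrast, a definition that refers to every gradient consistently by name should be accepted; a minimal sketch of such a defn_dict for the same schema (my assumption, not a test from the suite):

# Hypothetical defn_dict for the same schema, using named gradients only,
# so the "illegally mixes" check should not fire.
consistent_defn_dict = {
    "name": "func(Tensor a, Tensor b) -> (Tensor x, Tensor y)",
    "dispatch": {
        "Default": {
            "a": "grad_x",
            "b": "grad_y",
        }
    },
}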
Example #2
    def test_register_bogus_dispatch_key(self) -> None:
        specification = "func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)"
        schema = torchgen.model.FunctionSchema.parse(specification)
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
                                              func=schema)

        with self.assertRaisesRegex(
                RuntimeError,
                "Invalid dispatch key AutogradRandomTensor in derivatives.yaml for",
        ):
            load_derivatives.create_differentiability_info(
                defn_dict={
                    "name": specification,
                    "dispatch": {
                        "Default": {
                            "a": "grad_x",
                            "b": "grad_z",
                        },
                        "AutogradRandomTensor": {
                            "a": "grad_x",
                            "b": "grad_z",
                        },
                    },
                },
                functions_by_signature={schema.signature(): [native_function]},
                functions_by_schema={specification: native_function},
                op_counter=typing.Counter[str](),
                used_dispatch_keys=set(),
            )
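For reference, the same definition restricted to dispatch keys the loader is expected to accept ("Default" plus "AutogradNestedTensor", which real derivatives.yaml entries use; treat the exact allow-list as an assumption) is sketched below:

# Hypothetical defn_dict whose dispatch keys are assumed to be valid; only
# unrecognized keys such as "AutogradRandomTensor" should be rejected.
valid_dispatch_defn_dict = {
    "name": "func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)",
    "dispatch": {
        "Default": {
            "a": "grad_x",
            "b": "grad_z",
        },
        "AutogradNestedTensor": {
            "a": "grad_z",
            "b": "grad_x",
        },
    },
}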
Example #3
    def test_non_differentiable_output(self) -> None:
        specification = "func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)"
        schema = torchgen.model.FunctionSchema.parse(specification)
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
                                              func=schema)

        _, differentiability_info = load_derivatives.create_differentiability_info(
            defn_dict={
                "name": specification,
                "dispatch": {
                    "Default": {
                        "a": "grads[0]",
                        "b": "grads[2]"
                    }
                },
            },
            functions_by_signature={schema.signature(): [native_function]},
            functions_by_schema={specification: native_function},
            op_counter=typing.Counter[str](),
            used_dispatch_keys=set(),
        )

        self.assertSequenceEqual(
            differentiability_info["Default"].available_named_gradients,
            # grad_y is not present because y is a
            # bool and thus not differentiable.
            ["grad_x", "grad_z"],
        )
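A minimal standalone sketch (not torchgen code) of the naming rule this assertion relies on: only Tensor-typed returns contribute a named gradient, formed as "grad_" plus the return name.

def expected_named_gradients(returns):
    # returns: (name, type) pairs in declaration order; non-Tensor returns
    # such as the bool y above get no named gradient.
    return [f"grad_{name}" for name, type_ in returns if type_ == "Tensor"]

assert expected_named_gradients(
    [("x", "Tensor"), ("y", "bool"), ("z", "Tensor")]
) == ["grad_x", "grad_z"]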
Example #4
    def test_non_differentiable_output_invalid_type(self) -> None:
        specification = "func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)"
        schema = torchgen.model.FunctionSchema.parse(specification)
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
                                              func=schema)

        _, differentiability_info = load_derivatives.create_differentiability_info(
            defn_dict={
                "name": specification,
                "dispatch": {
                    "Default": {
                        "a": "grad_x",
                        "b": "grad_z",
                    }
                },
            },
            functions_by_signature={schema.signature(): [native_function]},
            functions_by_schema={specification: native_function},
            op_counter=typing.Counter[str](),
            used_dispatch_keys=set(),
        )
        definition = gen_autograd_functions.process_function(
            differentiability_info["Default"],
            gen_autograd_functions.FUNCTION_DEFINITION,
        )
        # grad_z should map to grads[1], not grads[2] because output 1
        # (y) is not differentiable.
        assert "grad_z = grads[2]" not in definition
        assert "grad_z = grads[1]" in definition
Example #5
    def test_named_grads_and_indexed_grads(self) -> None:
        specification = 'func(Tensor a, Tensor b) -> (Tensor x, Tensor y)'
        schema = tools.codegen.model.FunctionSchema.parse(specification)
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
                                              func=schema)

        with self.assertRaisesRegex(
                RuntimeError, 'illegally mixes use of "grad_RETURN_NAME"'):
            load_derivatives.create_differentiability_info(
                defn={
                    'name': specification,
                    # Uh-oh, the derivatives reference gradients by
                    # name and by index.
                    'a': 'grad_x',
                    'b': 'grads[1]',
                },
                functions_by_signature={schema.signature(): [native_function]},
                functions_by_schema={specification: native_function},
                op_counter=typing.Counter[str](),
            )
Example #6
    def test_non_differentiable_output_invalid_type(self) -> None:
        specification = 'func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)'
        schema = tools.codegen.model.FunctionSchema.parse(specification)
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
                                              func=schema)

        differentiability_info = load_derivatives.create_differentiability_info(
            defn={
                'name': specification,
                'a': 'grad_x',
                'b': 'grad_z',
            },
            functions_by_signature={schema.signature(): [native_function]},
            functions_by_schema={specification: native_function},
            op_counter=typing.Counter[str](),
        )
        definition = gen_autograd_functions.process_function(
            differentiability_info, gen_autograd_functions.FUNCTION_DEFINITION)
        # grad_z should map to grads[1], not grads[2] because output 1
        # (y) is not differentiable.
        assert 'grad_z = grads[2]' not in definition
        assert 'grad_z = grads[1]' in definition
Example #7
    def test_non_differentiable_output(self) -> None:
        specification = 'func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)'
        schema = tools.codegen.model.FunctionSchema.parse(specification)
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
                                              func=schema)

        differentiability_info = load_derivatives.create_differentiability_info(
            defn={
                'name': specification,
                'a': 'grads[0]',
                'b': 'grads[2]',
            },
            functions_by_signature={schema.signature(): [native_function]},
            functions_by_schema={specification: native_function},
            op_counter=typing.Counter[str](),
        )

        self.assertSequenceEqual(
            differentiability_info.available_named_gradients,
            # grad_y is not present because y is a
            # bool and thus not differentiable.
            ['grad_x', 'grad_z'])
Example #8
    def test_non_differentiable_output_output_differentiability(self) -> None:
        specification = "func(Tensor a, Tensor b) -> (Tensor x, Tensor y, Tensor z)"
        schema = torchgen.model.FunctionSchema.parse(specification)
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
                                              func=schema)

        differentiability_info = load_derivatives.create_differentiability_info(
            defn={
                "name": specification,
                "a": "grad_x",
                "b": "grad_z",
                "output_differentiability": [True, False, True],
            },
            functions_by_signature={schema.signature(): [native_function]},
            functions_by_schema={specification: native_function},
            op_counter=typing.Counter[str](),
        )
        definition = gen_autograd_functions.process_function(
            differentiability_info, gen_autograd_functions.FUNCTION_DEFINITION)
        # grad_z should map to grads[1], not grads[2] because output 1
        # (y) is marked non-differentiable by output_differentiability.
        assert "grad_z = grads[2]" not in definition
        assert "grad_z = grads[1]" in definition