Code Example #1
File: test_codegen.py  Project: huaxz1986/pytorch
    def setUp(self) -> None:
        self.native_functions: List[NativeFunction] = []
        self.backend_indices: Dict[DispatchKey,
                                   Dict[OperatorName,
                                        BackendMetadata]] = defaultdict(dict)
        yaml_entry = """
- func: op(Tensor self) -> Tensor
  dispatch:
    CompositeExplicitAutograd: op
  autogen: op.out
        """
        es = yaml.load(yaml_entry, Loader=LineLoader)
        self.one_return_func, m = NativeFunction.from_yaml(
            es[0], loc=Location(__file__, 1), valid_tags=set()
        )

        BackendIndex.grow_index(self.backend_indices, m)

        self.two_returns_func, two_returns_backend_index = NativeFunction.from_yaml(
            {
                "func": "op_2() -> (Tensor, Tensor)",
                "dispatch": {
                    "CPU": "kernel_1"
                },
                "autogen": "op_2.out",
            },
            loc=torchgen.model.Location(__file__, 1),
            valid_tags=set(),
        )
        BackendIndex.grow_index(self.backend_indices,
                                two_returns_backend_index)
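
For context, the raw dispatch-key dict grown in this setUp is normally wrapped into BackendIndex objects before anything queries it. Below is a minimal sketch of that step, mirroring Code Example #2 further down; the attribute name backend_index_objects and the constructor flags other than dispatch_key and index are illustrative choices, not taken from this test.

        # Hypothetical continuation of setUp: wrap each per-dispatch-key dict into a
        # BackendIndex, following the same pattern as Code Example #2 below.
        self.backend_index_objects = {
            k: BackendIndex(
                dispatch_key=k,
                use_out_as_primary=True,  # flags copied from Code Example #2 as plausible defaults
                external=False,
                symint=False,
                device_guard=False,
                index=self.backend_indices[k],
            )
            for k in self.backend_indices
        }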
Code Example #2
File: test_codegen.py  Project: huaxz1986/pytorch
    def setUp(self) -> None:
        self.op_1_native_function, op_1_backend_index = NativeFunction.from_yaml(
            {
                "func": "op_1() -> bool",
                "dispatch": {
                    "CPU": "kernel_1"
                }
            },
            loc=torchgen.model.Location(__file__, 1),
            valid_tags=set(),
        )
        self.op_2_native_function, op_2_backend_index = NativeFunction.from_yaml(
            {
                "func": "op_2() -> bool",
                "dispatch": {
                    "CPU": "kernel_2",
                    "QuantizedCPU": "custom::kernel_3"
                },
            },
            loc=torchgen.model.Location(__file__, 1),
            valid_tags=set(),
        )

        backend_indices: Dict[DispatchKey,
                              Dict[OperatorName, BackendMetadata]] = {
                                  DispatchKey.CPU: {},
                                  DispatchKey.QuantizedCPU: {},
                              }
        BackendIndex.grow_index(backend_indices, op_1_backend_index)
        BackendIndex.grow_index(backend_indices, op_2_backend_index)
        self.backend_indices = {
            k: BackendIndex(
                dispatch_key=k,
                use_out_as_primary=True,
                external=False,
                symint=False,
                device_guard=False,
                index=backend_indices[k],
            )
            for k in backend_indices
        }
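
A hedged sketch of assertions a test method might then make against these fixtures. It assumes BackendIndex exposes has_kernel and get_kernel taking a NativeFunction (with get_kernel returning a BackendMetadata or None); the test method name is invented for illustration.

    def test_backend_index_lookup(self) -> None:
        # Assumed API: BackendIndex.has_kernel / BackendIndex.get_kernel.
        # Kernel names match the ones registered in setUp above.
        cpu_index = self.backend_indices[DispatchKey.CPU]
        self.assertTrue(cpu_index.has_kernel(self.op_1_native_function))
        metadata = cpu_index.get_kernel(self.op_2_native_function)
        assert metadata is not None
        self.assertEqual(metadata.kernel, "kernel_2")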
Code Example #3
    def test_custom_namespace_selected_correctly(self):
        yaml_config = """
operators:
  aten::add.int:
    is_used_for_training: No
    is_root_operator: Yes
    include_all_overloads: No
  custom::add:
    is_used_for_training: Yes
    is_root_operator: No
    include_all_overloads: Yes
"""
        selector = SelectiveBuilder.from_yaml_str(yaml_config)
        native_function, _ = NativeFunction.from_yaml(
            {"func": "custom::add() -> Tensor"},
            loc=Location(__file__, 1),
            valid_tags=set(),
        )
        self.assertTrue(selector.is_native_function_selected(native_function))
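
A possible companion check for the negative case. The operator name custom::sub is invented for illustration, and the expected False assumes that operators absent from the YAML are not selected when include_all_operators is not enabled.

        unlisted_function, _ = NativeFunction.from_yaml(
            {"func": "custom::sub() -> Tensor"},  # hypothetical op, not listed in yaml_config
            loc=Location(__file__, 1),
            valid_tags=set(),
        )
        self.assertFalse(selector.is_native_function_selected(unlisted_function))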
Code Example #4
def generate_function(
    f: NativeFunction, k: SchemaKind
) -> Tuple[NativeFunction, Dict[DispatchKey, Dict["OperatorName", "BackendMetadata"]]]:
    from torchgen.api import cpp

    if k == SchemaKind.functional:
        assert f.func.kind() != SchemaKind.functional
        # The new "functional" NativeFunction has:
        # - any mutable arguments have been converted into (immutable) returns.
        #   (if a mutable argument was not also a return, it gets converted to one)
        # - "_functional" appended to the base name, ONLY IF this op has a mutable variant.
        #   See Note [Overload Ambiguity With Functional Variants]
        # The default grouping logic in signature() actually already does this,
        # so we can piggy-back off it (but we still want return names)
        func = f.func.signature(keep_return_names=True).with_name(
            OperatorName(
                name=BaseOperatorName(
                    base=f.func.name.name.base,
                    inplace=False,
                    dunder_method=f.func.name.name.dunder_method,
                    # See Note [Overload Ambiguity With Functional Variants]
                    functional_overload=f.func.kind() == SchemaKind.mutable,
                ),
                overload_name=f.func.name.overload_name,
            )
        )
    elif k == SchemaKind.out:
        # We generate out= ops mostly just so that we can pair up NativeFunctions into groups easily,
        # but at least today, there is no good reason to actually use them.
        # we'll generate a dispatcher entry for them, but won't actually register any kernels for them.
        if f.func.kind() == SchemaKind.inplace:
            func = self_to_out_signature(f.func)
        elif f.func.kind() == SchemaKind.mutable:
            func = mutable_to_out_signature(f.func)
        elif f.func.kind() == SchemaKind.functional:
            func = functional_to_out_signature(f.func)
        else:
            raise AssertionError(
                "We only bother generating out= functions from either inplace or mutable or functional variants"
            )
    else:
        raise AssertionError(
            "We currently only generate either functional or out= NativeFunctions"
        )

    # Generated kernel naming convention for out: <op_name>_<overload_name>. The reason for this is to
    # disambiguate operator with the same name but different overload name, e.g., `randn.names_out` and
    # `randn.generator_with_names_out`.
    kernel_name = (
        func.name.unambiguous_name()
        if func.kind() == SchemaKind.out
        else cpp.name(func)
    )
    backend_metadata = {
        DispatchKey.CompositeExplicitAutograd: {
            func.name: BackendMetadata(
                kernel=kernel_name,
                structured=False,
                cpp_namespace=DEFAULT_KERNEL_NAMESPACE,
            )
        }
    }

    return (
        NativeFunction(
            func=func,
            use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors,
            # These generated fn's aren't meant to be user friendly- don't generate methods.
            variants=set([Variant.function]),
            structured=False,
            structured_delegate=None,
            structured_inherits=None,
            precomputed=None,
            autogen=[],
            ufunc_inner_loop={},
            manual_kernel_registration=False,
            manual_cpp_binding=False,
            python_module=None,
            category_override=None,
            device_guard=False,
            device_check=DeviceCheckType.NoCheck,
            loc=f.loc,
            cpp_no_default_args=set(),
            is_abstract=f.is_abstract,
            has_composite_implicit_autograd_kernel=False,
            has_composite_explicit_autograd_kernel=True,
            has_composite_explicit_autograd_non_functional_kernel=False,
            # Every generated NativeFunction gets a "generated" tag, so it's easy to tell
            # which NativeFunction objects did not come directly from native_functions.yaml.
            tags=set(["generated"]) | (f.tags & {"nondeterministic_seeded"}),
            namespace=f.namespace,
        ),
        backend_metadata,
    )
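
A minimal usage sketch for this version of generate_function, assuming the same imports as the test snippets above and that generate_function is importable from the torchgen module that defines it; the inplace op schema below is made up.

# Hypothetical inplace op, parsed the same way as in the test fixtures above.
f, _ = NativeFunction.from_yaml(
    {"func": "op_(Tensor(a!) self) -> Tensor(a!)", "dispatch": {"CPU": "op_"}},
    loc=Location(__file__, 1),
    valid_tags=set(),
)
out_fn, out_metadata = generate_function(f, SchemaKind.out)
# For the out= kind the kernel name comes from func.name.unambiguous_name(), and
# out_metadata holds a single CompositeExplicitAutograd entry pointing at that kernel.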
Code Example #5
def generate_function(
    f: NativeFunction, k: SchemaKind
) -> Tuple[NativeFunction, Dict[DispatchKey, Dict["OperatorName",
                                                  "BackendMetadata"]]]:
    from torchgen.api import cpp

    if k == SchemaKind.functional:
        assert f.func.kind() != SchemaKind.functional
        gets_composite_kernel = True
        # The new "functional" NativeFunction has:
        # - any mutable arguments have been converted into (immutable) returns.
        #   (if a mutable argument was not also a return, it gets converted to one)
        # - a "functional" overload name.
        # The default grouping logic in signature() actually already does this,
        # so we can piggy-back off it (but we still want return names)
        func = f.func.signature(keep_return_names=True).with_name(
            f.func.name.remove_inplace().with_overload(
                "functional" if not f.func.name.overload_name else
                f"{f.func.name.overload_name}_functional"))
    elif k == SchemaKind.out:
        # We generate out= ops mostly just so that we can pair up NativeFunctions into groups easily,
        # but at least today, there is no good reason to actually use them.
        # we'll generate a dispatcher entry for them, but won't actually register any kernels for them.
        gets_composite_kernel = False
        if f.func.kind() == SchemaKind.inplace:
            func = self_to_out_signature(f.func)
        elif f.func.kind() == SchemaKind.mutable:
            func = mutable_to_out_signature(f.func)
        else:
            raise AssertionError(
                "We only bother generating out= functions from either inplace or mutable variants"
            )
    else:
        raise AssertionError(
            "We currently only generate either functional or out= NativeFunctions"
        )

    if gets_composite_kernel:
        backend_metadata = {
            DispatchKey.CompositeExplicitAutograd: {
                func.name: BackendMetadata(cpp.name(func), structured=False)
            }
        }
    else:
        backend_metadata = {}

    return (
        NativeFunction(
            func=func,
            use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors,
            # These generated fn's aren't meant to be user friendly- don't generate methods.
            variants=set([Variant.function]),
            structured=False,
            structured_delegate=None,
            structured_inherits=None,
            precomputed=None,
            autogen=[],
            ufunc_inner_loop={},
            manual_kernel_registration=False,
            manual_cpp_binding=False,
            python_module=None,
            category_override=None,
            device_guard=False,
            device_check=DeviceCheckType.NoCheck,
            loc=f.loc,
            cpp_no_default_args=set(),
            is_abstract=f.is_abstract,
            has_composite_implicit_autograd_kernel=False,
            has_composite_explicit_autograd_kernel=gets_composite_kernel,
            # Every generated NativeFunction gets a "generated" tag, so it's easy to tell
            # which NativeFunction objects did not come directly from native_functions.yaml.
            tags=set(["generated"]),
        ),
        backend_metadata,
    )