Example #1
def gen_variable_type_func(
        fn: NativeFunctionWithDifferentiabilityInfo) -> Dict[str, List[str]]:
    f = fn.func
    with native_function_manager(f):
        name = cpp.name(f.func)
        formals = gen_formals(f)

        type_definition = METHOD_DEFINITION.substitute(
            return_type=cpp.returns_type(f.func.returns).cpp_type(),
            type_wrapper_name=type_wrapper_name(f),
            type_definition_body=emit_body(fn),
            formals=formals,
        )
        wrapper_registration = gen_wrapper_registration(f)

    # See Note [Manual Backend kernels]
    assert (name in MANUAL_BACKEND) == f.manual_kernel_registration
    # If you want to register a kernel to Autograd, you must make the op abstract.
    # In other words, this op must have dispatch section in native_functions.yaml.
    if name in MANUAL_AUTOGRAD_AND_TRACER or (fn.info
                                              and fn.info.has_derivatives):
        msg = (
            f'There\'s a formula for {name} (or its functional variant) in derivatives.yaml. '
            f'It\'s required to add a dispatch section for it with explicit supported backends e.g. CPU/CUDA '
            f'or CompositeExplicitAutograd in native_functions.yaml. Please see '
            f'https://github.com/pytorch/pytorch/tree/master/aten/src/ATen/native#choosing-the-right-dispatch-keyword '
            f'for instructions to choose the right dispatch keyword.')
        assert f.is_abstract, msg

    return {
        'type_derived_method_definitions': [type_definition],
        'wrapper_registrations': [wrapper_registration],
    }
Example #2
def gen_variable_type_func(
        fn: NativeFunctionWithDifferentiabilityInfo) -> Dict[str, List[str]]:
    f = fn.func
    with native_function_manager(f):
        name = cpp.name(f.func)
        formals = gen_formals(f)

        if fn.info is None and not get_base_name(f) in RESET_GRAD_ACCUMULATOR \
                and not get_base_name(f) in DONT_REQUIRE_DERIVATIVE \
                and len(gen_differentiable_outputs(fn)) > 0 \
                and not get_base_name(f) in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE \
                and not get_base_name(f) in DONT_ENFORCE_STORAGE_IMPL_USE_COUNT \
                and not get_base_name(f) in DONT_ENFORCE_TENSOR_IMPL_USE_COUNT:
            # NOTE: [ Registering AutogradNotImplemented boxed kernel ]
            #
            # When there is no derivatives.yaml entry, we register a generic boxed
            # NotImplemented kernel to set grad_fn to be NotImplemented, so that forward
            # proceeds as usual but an error is properly produced on backward.
            # TODO: it would be nice to not have these special cases
            #
            # There are several cases where we still let codegen handle it:
            # 1) ops that need to reset grad accumulator (we let codegen handle this case
            #    because the list is (currently) only accessible in Python).
            # 2) User explicitly specifies DONT_REQUIRE_DERIVATIVE. This basically makes
            #    autograd a fallthrough with NDEBUG checks. This can be useful for when all
            #    outputs are integral.
            # 3) When there are no differentiable outputs. This is similar to (2).
            # 4) There are certain ops where we skip certain NDEBUG checks. This is
            #    similar to (1).
            type_definition = ""
            wrapper_registration = AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION.substitute(
                unqual_operator_name_with_overload=f.func.name)
        else:
            type_definition = METHOD_DEFINITION.substitute(
                return_type=cpp.returns_type(f.func.returns).cpp_type(),
                type_wrapper_name=type_wrapper_name(f),
                type_definition_body=emit_body(fn),
                formals=formals,
            )
            wrapper_registration = gen_wrapper_registration(f)

    # See Note [Manual Backend kernels]
    assert (name in MANUAL_BACKEND) == f.manual_kernel_registration
    # If you want to register a kernel to Autograd, you must make the op abstract.
    # In other words, this op must have dispatch section in native_functions.yaml.
    if name in MANUAL_AUTOGRAD_AND_TRACER or (fn.info
                                              and fn.info.has_derivatives):
        msg = (
            f'There\'s a formula for {name} (or its functional variant) in derivatives.yaml. '
            f'It\'s required to add a dispatch section for it with explicit supported backends e.g. CPU/CUDA '
            f'or CompositeExplicitAutograd in native_functions.yaml. Please see '
            f'https://github.com/pytorch/pytorch/tree/master/aten/src/ATen/native#choosing-the-right-dispatch-keyword '
            f'for instructions to choose the right dispatch keyword.')
        assert f.is_abstract, msg

    return {
        'type_derived_method_definitions': [type_definition],
        'wrapper_registrations': [wrapper_registration],
    }
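
Each call above returns a dict with one list of generated lines per template key, and the surrounding driver is responsible for concatenating those lists into the template environment for the output file. A minimal sketch of that aggregation step, with a hypothetical helper name (the real caller in gen_variable_type.py may organize this differently):

from collections import defaultdict
from typing import Dict, List

def merge_variable_type_entries(fns_with_diff_infos) -> Dict[str, List[str]]:
    # Concatenate the per-operator lists produced by gen_variable_type_func
    # into a single environment usable by the file templates.
    env: Dict[str, List[str]] = defaultdict(list)
    for fn in fns_with_diff_infos:
        for key, lines in gen_variable_type_func(fn).items():
            env[key].extend(lines)
    return dict(env)
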
Example #3
    def __call__(self, f: NativeFunction) -> Optional[str]:
        # We unconditionally generate function variants of the redispatch API.
        # This is mainly because we can namespace functions separately, but not methods,
        if Variant.function not in f.variants and not self.is_redispatching_fn:
            return None

        with native_function_manager(f):
            return self.callImpl(f)
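
Because __call__ returns Optional[str], a None result means "emit nothing for this operator", and the caller is expected to filter before joining. A small usage sketch; the class name and input list are made up for illustration and are not taken from the source:

# Hypothetical driver: 'ComputeRedispatchFunction' and 'native_functions'
# stand in for whatever the real codegen script passes around.
gen = ComputeRedispatchFunction(is_redispatching_fn=True)
declarations = [s for s in map(gen, native_functions) if s is not None]
output = '\n'.join(declarations)
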
Example #4
def compute_meta_function_declaration(g: NativeFunctionsGroup) -> Optional[str]:
    if not g.structured:
        return None
    with native_function_manager(g.out):
        name = meta.name(g)
        args = structured.meta_arguments(g)
        args_str = ', '.join(a.decl() for a in args)
        parent_class = g.out.structured_inherits
        if parent_class is None:
            parent_class = "at::impl::MetaBase"
        return f"""\
Example #5
    def __call__(self, groups: Sequence[NativeFunctionsGroup]) -> str:
        if not groups:
            return ""
        generated_type_variants = []
        for g in groups:
            with native_function_manager(g):
                assert is_supported(g)
                assert isinstance(g, NativeFunctionsGroup)
                generated_type_variant = self.gen_structured_test_case(g)
                generated_type_variants.append(generated_type_variant)
        return "\n".join(generated_type_variants)
Example #6
def gen_variable_type_shard(
    fm: FileManager,
    fns_with_diff_infos: List[NativeFunctionWithDifferentiabilityInfo],
    template_name: str,
    output_name: str,
) -> None:
    type_definitions: List[str] = []
    wrapper_registrations: List[str] = []

    filtered_fns_with_diff_infos = list(
        filter(use_derived, fns_with_diff_infos))
    for fn in filtered_fns_with_diff_infos:
        f = fn.func
        with native_function_manager(f):
            name = cpp.name(f.func)
            formals = gen_formals(f)

            type_definitions.append(
                METHOD_DEFINITION.substitute(
                    return_type=cpp.returns_type(f.func.returns).cpp_type(),
                    type_wrapper_name=type_wrapper_name(f),
                    type_definition_body=emit_body(fn),
                    formals=formals,
                ))
            wrapper_registrations.append(gen_wrapper_registration(f))

        # See Note [Manual Backend kernels]
        assert (name in MANUAL_BACKEND) == f.manual_kernel_registration
        # If you want to register a kernel to Autograd, you must make the op abstract.
        # In other words, this op must have dispatch section in native_functions.yaml.
        if name in MANUAL_AUTOGRAD_AND_TRACER or (fn.info
                                                  and fn.info.has_derivatives):
            msg = (
                f'There\'s a formula for {name} (or its functional variant) in derivatives.yaml. '
                f'It\'s required to add a dispatch section for it with explicit supported backends e.g. CPU/CUDA '
                f'or CompositeExplicitAutograd in native_functions.yaml. Please see '
                f'https://github.com/pytorch/pytorch/tree/master/aten/src/ATen/native#choosing-the-right-dispatch-keyword '
                f'for instructions to choose the right dispatch keyword.')
            assert f.is_abstract, msg

    fm.write_with_template(
        output_name, template_name, lambda: {
            'generated_comment': '@'
            f'generated from {fm.template_dir}/{template_name}',
            'type_derived_method_definitions': type_definitions,
            'wrapper_registrations': wrapper_registrations,
        })
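
gen_variable_type_shard only writes a single output file; generating the whole VariableType surface is typically done by splitting the operator list into shards and calling the function once per shard. A rough sketch of such a loop, where the shard count and output file names are assumptions rather than values taken from the source:

num_shards = 5  # assumed shard count
shards = [fns_with_diff_infos[i::num_shards] for i in range(num_shards)]
for i, shard in enumerate(shards):
    gen_variable_type_shard(
        fm, shard,
        template_name='VariableType.cpp',
        output_name=f'VariableType_{i}.cpp')
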
Example #7
    def __call__(self, groups: Sequence[NativeFunctionsGroup]) -> str:
        if not groups:
            return ""
        generated_type_variants = []
        for g in groups:
            with native_function_manager(g):
                assert is_supported(g)
                assert isinstance(g, NativeFunctionsGroup)
                generated_type_variant = self.gen_structured(g)
                generated_type_variants.append(generated_type_variant)
        op_name = op_name_from_group(groups[0])
        body = "\n".join(generated_type_variants)
        generated = f"""
REGISTER_OPERATOR_FUNCTOR(
    aten::{op_name},
    aten_{op_name},
    [](Node* n) -> SROperator {{
      {body}
      LogAndDumpSchema(n);
      return nullptr;
    }});
"""
        return generated
Example #8
def create_decl(f: NativeFunction) -> str:
    with native_function_manager(f):
        return DispatcherSignature.from_schema(f.func).decl()
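
All of these examples wrap their per-operator work in native_function_manager so that any exception raised while generating code can be traced back to the NativeFunction (or NativeFunctionsGroup) being processed. The sketch below shows one way such a context manager can be written; it is illustrative only and is not the actual implementation in the codegen's context module, which also threads per-function local settings:

import contextlib
from typing import Iterator

@contextlib.contextmanager
def native_function_manager_sketch(f_or_group) -> Iterator[None]:
    # Pick a representative NativeFunction when given a group, mirroring how
    # the examples above pass either a NativeFunction or a NativeFunctionsGroup.
    f = f_or_group.out if hasattr(f_or_group, 'out') else f_or_group
    try:
        yield
    except Exception as e:
        # Attach the offending schema so codegen failures point back to the
        # entry in native_functions.yaml that triggered them.
        raise RuntimeError(f'while processing {f.func}:\n{e}') from e
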
Example #9
    def gen_unstructured(
            self,
            f: NativeFunction,
            g: Optional[NativeFunctionsGroup] = None) -> Optional[str]:
        with native_function_manager(f):
            inplace_meta = False
            gets_out_inplace_wrapper = False
            if not self.backend_index.has_kernel(f):
                if (self.backend_index.dispatch_key == DispatchKey.Meta
                        and f.func.kind() is SchemaKind.inplace and
                        # Defer to composites for meta implementation
                        not f.has_composite_kernel and
                        # Inplace list operations are not supported
                        len(f.func.returns) == 1):
                    inplace_meta = True
                elif (not self.backend_index.use_out_as_primary
                      and g is not None and gets_generated_out_inplace_wrapper(
                          f, g, self.backend_index)):
                    # We want to generate inplace/out wrappers for ops that don't have a kernel for the backend.
                    gets_out_inplace_wrapper = True
                else:
                    return None
            if f.manual_kernel_registration:
                return None

            if self.target is Target.REGISTRATION and not self.selector.is_native_function_selected(
                    f):
                return None

            sig = self.wrapper_kernel_sig(f)

            name = sig.name()
            returns_type = sig.returns_type().cpp_type()
            args = sig.arguments()
            args_str = ', '.join(a.defn() for a in args)

            # See Note [Direct dispatch bindings]
            cpp_sig_group = CppSignatureGroup.from_native_function(
                f, method=False, fallback_binding=False)

            if self.target is Target.NAMESPACED_DECLARATION:
                result = f"TORCH_API {cpp_sig_group.signature.decl()};\n"
                if cpp_sig_group.faithful_signature is not None:
                    result += f"TORCH_API {cpp_sig_group.faithful_signature.decl()};\n"
                return result
            elif self.target is Target.NAMESPACED_DEFINITION:

                def generate_defn(cpp_sig: CppSignature) -> str:
                    return f"""
{cpp_sig.defn()} {{
return {sig.name()}({', '.join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
}}
"""

                result = generate_defn(cpp_sig_group.signature)
                if cpp_sig_group.faithful_signature is not None:
                    result += generate_defn(cpp_sig_group.faithful_signature)
                return result
            elif self.target is Target.ANONYMOUS_DEFINITION:
                # short circuit for inplace_meta
                if inplace_meta:
                    assert f.func.arguments.self_arg is not None
                    self_arg_name = f.func.arguments.self_arg.argument.name
                    # TODO: handle in place on tensor list
                    return f"""
{returns_type} {name}({args_str}) {{
  TORCH_CHECK_NOT_IMPLEMENTED({self_arg_name}.is_meta(),
    "Cannot inplace into non-meta tensor with meta tensor argument");
  return {self_arg_name};
}}
"""

                # short circuit for generated inplace/out wrappers
                if gets_out_inplace_wrapper:
                    return self.gen_out_inplace_wrapper(f, g)

                metadata = self.backend_index.get_kernel(f)
                if metadata is None:
                    return None
                if self.class_method_name is None:
                    impl_name = f"{self.cpp_namespace}::{metadata.kernel}"
                else:
                    impl_name = f"{self.cpp_namespace}::{self.class_method_name}::{metadata.kernel}"

                args_exprs_str = ', '.join(a.name for a in args)

                device_check = '  // No device check\n'
                if is_cuda_dispatch_key(self.backend_index.dispatch_key):
                    device_check_args = itertools.chain(
                        f.func.arguments.out, f.func.arguments.flat_positional)
                    device_check = RegisterDispatchKey.gen_device_check(
                        f.device_check, list(device_check_args), name)

                device_guard = "// DeviceGuard omitted"  # default
                if f.device_guard and is_cuda_dispatch_key(
                        self.backend_index.dispatch_key):
                    has_tensor_options = any(
                        isinstance(a.argument, TensorOptionsArguments)
                        for a in args)
                    if has_tensor_options:
                        # kernel is creating a tensor
                        device_guard = """globalContext().lazyInitCUDA();
  const DeviceGuard device_guard(device_or_default(device));"""
                    else:
                        # kernel is operating on existing tensors

                        # There is precedence for which argument we use to do
                        # device guard.  This describes the precedence order.
                        self_arg = [
                            f.func.arguments.self_arg.argument
                        ] if f.func.arguments.self_arg is not None else []
                        candidate_args = itertools.chain(
                            self_arg, f.func.arguments.out,
                            f.func.arguments.flat_positional)

                        # Only tensor like arguments are eligible
                        device_of = next((f'{a.name}' for a in candidate_args
                                          if a.type.is_tensor_like()), None)
                        if device_of is not None:
                            device_guard = f"const OptionalDeviceGuard device_guard(device_of({device_of}));"

                return f"""\
namespace {{

{returns_type} {name}({args_str}) {{
  {device_check}

  {device_guard}
  return {impl_name}({args_exprs_str});
}}

}} // anonymous namespace
"""

            elif self.target is Target.REGISTRATION:
                if f.manual_kernel_registration:
                    return None
                else:
                    payload = f"TORCH_FN({name})"
                    return f'm.impl("{f.func.name}",\n{payload});\n'
            else:
                assert_never(self.target)
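
gen_unstructured branches on self.target, so the same per-operator logic can emit namespaced declarations, namespaced definitions, the anonymous-namespace kernel wrappers, and the m.impl(...) registrations. A sketch of how a Target-parameterized generator like this is typically driven; the factory function and the surrounding names are assumptions, not the real torchgen entry point:

# Hypothetical driver: run the generator once per Target and collect the
# non-None snippets into separate lists for the output templates.
targets = (Target.NAMESPACED_DECLARATION, Target.NAMESPACED_DEFINITION,
           Target.ANONYMOUS_DEFINITION, Target.REGISTRATION)
outputs = {}
for target in targets:
    gen = make_dispatch_key_generator(target)  # hypothetical factory
    outputs[target] = [s for f in native_functions
                       if (s := gen.gen_unstructured(f)) is not None]
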
Example #10
def wrapper(f: NFWDI) -> T:
    with native_function_manager(f.func):
        return func(f)
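
The wrapper above is the inner function of a with_native_function-style decorator: it lets a per-operator routine assume the manager is already active. A sketch of the full decorator this body would sit inside, with the type names treated as placeholders:

import functools
from typing import Callable, TypeVar

T = TypeVar('T')

def with_native_function_sketch(func: Callable[['NFWDI'], T]) -> Callable[['NFWDI'], T]:
    # Enter native_function_manager around every call to the wrapped routine.
    @functools.wraps(func)
    def wrapper(f: 'NFWDI') -> T:
        with native_function_manager(f.func):
            return func(f)
    return wrapper
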
Example #11
def is_supported(g: NativeFunctionsGroup) -> bool:
    with native_function_manager(g):
        return gen_structured.is_supported(g)