def run(source_yaml: str, output_dir: str, dry_run: bool, impl_path: Optional[str]) -> None:
    # Assumes that this file lives at PYTORCH_ROOT/tools/codegen/gen_backend_stubs.py
    pytorch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
    template_dir = os.path.join(pytorch_root, "aten/src/ATen/templates")

    def make_file_manager(install_dir: str) -> FileManager:
        return FileManager(install_dir=install_dir, template_dir=template_dir, dry_run=dry_run)

    fm = make_file_manager(output_dir)

    native_yaml_path = os.path.join(pytorch_root, 'aten/src/ATen/native/native_functions.yaml')
    parsed_yaml = parse_native_yaml(native_yaml_path)
    native_functions, backend_indices = parsed_yaml.native_functions, parsed_yaml.backend_indices
    grouped_native_functions = get_grouped_native_functions(native_functions)
    parsed_backend_yaml = parse_backend_yaml(source_yaml, grouped_native_functions, backend_indices)
    backend_key = parsed_backend_yaml.backend_key
    autograd_key = parsed_backend_yaml.autograd_key
    cpp_namespace = parsed_backend_yaml.cpp_namespace
    backend_indices = parsed_backend_yaml.backend_indices

    selector = SelectiveBuilder.get_nop_selector()

    # TODO: handle cases when yaml contains zero ops properly in a later PR.
    if backend_key is not None and autograd_key is not None:
        backend_dispatch_key: DispatchKey = backend_key
        autograd_dispatch_key: DispatchKey = autograd_key
        class_name = backend_indices[backend_dispatch_key].native_function_class_name()

        if impl_path is not None:
            error_on_missing_kernels(native_functions, backend_indices, backend_key, autograd_key, impl_path)

        assert class_name is not None
        generated_comment = 'Autogenerated file by gen_backend_stubs.py. Do not edit directly!'
        fm.write_with_template(f'{backend_dispatch_key}NativeFunctions.h', 'DispatchKeyNativeFunctions.h', lambda: {
            'generated_comment': generated_comment,
            'cpp_namespace': cpp_namespace,
            'class_name': class_name,
            # Convert to a set first to remove duplicate kernel names.
            # Backends are allowed to repeat kernel names; only generate the declaration once!
            'dispatch_declarations': list(set(concatMap(
                lambda f: dest.compute_native_function_declaration(f, backend_indices[backend_dispatch_key]),
                grouped_native_functions
            ))) + list(set(concatMap(
                lambda f: dest.compute_native_function_declaration(f, backend_indices[autograd_dispatch_key]),
                grouped_native_functions
            ))),
        })

        for dispatch_key in [backend_dispatch_key, autograd_dispatch_key]:
            fm.write_with_template(f'Register{dispatch_key}.cpp', 'RegisterDispatchKey.cpp', lambda: {
                'extra_cuda_headers': '',
                'external_backend_headers': f'#include "{output_dir}/{backend_key}NativeFunctions.h"',
                'namespaced_headers': '',
                'DispatchKey': dispatch_key,
                'dispatch_namespace': dispatch_key.lower(),
                'dispatch_helpers': dest.gen_registration_helpers(backend_indices[dispatch_key]),
                'dispatch_namespaced_definitions': list(concatMap(
                    dest.RegisterDispatchKey(
                        backend_indices[dispatch_key],
                        Target.NAMESPACED_DEFINITION,
                        selector,
                        rocm=False,
                        cpp_namespace=cpp_namespace,
                        class_method_name=f'{backend_dispatch_key}NativeFunctions'),
                    grouped_native_functions
                )),
                'dispatch_anonymous_definitions': list(concatMap(
                    dest.RegisterDispatchKey(
                        backend_indices[dispatch_key],
                        Target.ANONYMOUS_DEFINITION,
                        selector,
                        rocm=False,
                        cpp_namespace=cpp_namespace,
                        class_method_name=f'{backend_dispatch_key}NativeFunctions'),
                    grouped_native_functions
                )),
                'dispatch_registrations': list(concatMap(
                    dest.RegisterDispatchKey(
                        backend_indices[dispatch_key],
                        Target.REGISTRATION,
                        selector,
                        rocm=False,
                        cpp_namespace=cpp_namespace,
                        class_method_name=f'{backend_dispatch_key}NativeFunctions'),
                    grouped_native_functions
                )),
            })
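
# The snippets above and below lean heavily on concatMap and mapMaybe. In
# PyTorch these helpers live in tools/codegen/utils.py; this is a minimal
# sketch of their behavior (assumed, not the verbatim upstream implementation):

def concatMap(func, xs):
    # Apply func to each element and flatten the resulting iterables into one stream.
    for x in xs:
        for r in func(x):
            yield r

def mapMaybe(func, xs):
    # Apply func to each element, keeping only non-None results.
    for x in xs:
        r = func(x)
        if r is not None:
            yield r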
def main() -> None:
    parser = argparse.ArgumentParser(description='Generate ATen source files')
    parser.add_argument(
        '-s', '--source-path',
        help='path to source directory for ATen',
        default='aten/src/ATen')
    parser.add_argument(
        '-o', '--output-dependencies',
        help='output a list of dependencies into the given file and exit')
    parser.add_argument(
        '-d', '--install_dir', help='output directory', default='build/aten/src/ATen')
    parser.add_argument(
        '--rocm', action='store_true',
        help='reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly')
    # TODO: --op_registration_whitelist will be removed when all call-sites
    # for gen.py are moved over to using the operator YAML file for mobile
    # custom build.
    parser.add_argument(
        '--op_registration_whitelist',
        nargs='*',
        help='filter op registrations by the whitelist (if set); '
             'each item is `namespace`::`operator name` without overload name; '
             'e.g.: aten::empty aten::conv2d ...')
    parser.add_argument(
        '--op_selection_yaml_path',
        help='Provide a path to the operator selection (for custom build) YAML '
             'that contains the information about the set of selected operators '
             'and their categories (training, ...). Each operator is either a '
             'full operator name with overload or just a bare operator name. '
             'The operator names also contain the namespace prefix (e.g. aten::)')
    parser.add_argument(
        '--backend_whitelist',
        nargs='*',
        help='filter dispatch backend by the whitelist (if set), '
             'e.g.: CPU CUDA QuantizedCPU ...')
    parser.add_argument(
        '--static_dispatch_backend',
        help='generate static dispatch code for the specific backend (if set)')
    parser.add_argument(
        '--force_schema_registration',
        action='store_true',
        help='force it to generate schema-only registrations for all ops, including '
             'those that are not listed on --op_registration_whitelist')
    options = parser.parse_args()

    selector = get_custom_build_selector(
        options.op_registration_whitelist,
        options.op_selection_yaml_path,
    )

    native_yaml_path = os.path.join(options.source_path, 'native/native_functions.yaml')
    parsed_yaml = parse_native_yaml(native_yaml_path)
    native_functions, backend_indices = parsed_yaml.native_functions, parsed_yaml.backend_indices

    grouped_native_functions = get_grouped_native_functions(native_functions)
    structured_native_functions = [g for g in grouped_native_functions
                                   if isinstance(g, NativeFunctionsGroup)]

    template_dir = os.path.join(options.source_path, "templates")

    # NB: It is mandatory to NOT use os.path.join here, as the install directory
    # will eventually be ingested by cmake, which does not respect Windows style
    # path slashes. If you switch this to use os.path.join, you'll get an error
    # like:
    #
    #   Syntax error in cmake code when parsing string
    #
    #     C:/Jenkins/workspace/pytorch-builds/pytorch-win-ws2016-cuda9-cudnn7-py3-build/build/aten/src/ATen\core/TensorMethods.h
    #
    #   Invalid character escape '\c'.
    core_install_dir = f'{options.install_dir}/core'
    pathlib.Path(core_install_dir).mkdir(parents=True, exist_ok=True)

    def make_file_manager(install_dir: str) -> FileManager:
        return FileManager(install_dir=install_dir, template_dir=template_dir,
                           dry_run=options.output_dependencies)

    core_fm = make_file_manager(core_install_dir)
    cpu_fm = make_file_manager(options.install_dir)
    cuda_fm = make_file_manager(options.install_dir)

    extra_cuda_headers = '''\
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDADevice.h>
#include <ATen/cuda/CUDAContext.h>'''
    if options.rocm:
        extra_cuda_headers = '''\
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/ATenHIPGeneral.h>
#include <ATen/hip/HIPDevice.h>
#include <ATen/hip/HIPContext.h>'''

    dispatch_keys = [
        DispatchKey.CPU,
        DispatchKey.SparseCPU,
        DispatchKey.SparseCsrCPU,
        DispatchKey.MkldnnCPU,
        DispatchKey.CUDA,
        DispatchKey.SparseCUDA,
        DispatchKey.SparseCsrCUDA,
        DispatchKey.QuantizedCPU,
        DispatchKey.QuantizedCUDA,
        DispatchKey.CompositeImplicitAutograd,
        DispatchKey.CompositeExplicitAutograd,
        # Meta is a magic key: it is automatically generated for structured
        # kernels
        DispatchKey.Meta,
    ]
    # Only a limited set of dispatch keys get CPUFunctions.h headers generated
    # for them; this is the set
    functions_keys = {
        DispatchKey.CPU,
        DispatchKey.CUDA,
        DispatchKey.CompositeImplicitAutograd,
        DispatchKey.CompositeExplicitAutograd,
        DispatchKey.Meta,
    }
    if options.backend_whitelist:
        dispatch_keys = [k for k in dispatch_keys
                         if is_generic_dispatch_key(k) or str(k) in options.backend_whitelist]

    static_dispatch_idx: Optional[BackendIndex] = None
    if options.static_dispatch_backend:
        static_dispatch_idx = backend_indices[DispatchKey.parse(options.static_dispatch_backend)]

    for dispatch_key in dispatch_keys:
        fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm

        fm.write_with_template(f'Register{dispatch_key}.cpp', 'RegisterDispatchKey.cpp', lambda: {
            'extra_cuda_headers': extra_cuda_headers if is_cuda_dispatch_key(dispatch_key) else '',
            'legacy_th_headers':
                '#include <ATen/LegacyTHFunctionsCUDA.h>' if dispatch_key == DispatchKey.CUDA else
                '',
            'external_backend_headers': '',
            'namespaced_headers': f'#include <ATen/{dispatch_key}Functions.h>' if dispatch_key in functions_keys else '',
            'DispatchKey': dispatch_key,
            'dispatch_namespace': dispatch_key.lower(),
            'dispatch_namespaced_definitions': list(concatMap(
                dest.RegisterDispatchKey(
                    backend_indices[dispatch_key],
                    Target.NAMESPACED_DEFINITION, selector, rocm=options.rocm, cpp_namespace='at::native'),
                grouped_native_functions
            )),
            'dispatch_anonymous_definitions': list(concatMap(
                dest.RegisterDispatchKey(
                    backend_indices[dispatch_key],
                    Target.ANONYMOUS_DEFINITION, selector, rocm=options.rocm, cpp_namespace='at::native'),
                grouped_native_functions
            )),
            'dispatch_registrations': list(concatMap(
                dest.RegisterDispatchKey(
                    backend_indices[dispatch_key],
                    Target.REGISTRATION, selector, rocm=options.rocm, cpp_namespace='at::native'),
                grouped_native_functions
            )),
        })

        if dispatch_key in functions_keys:
            if dispatch_key in static_dispatch_keys(static_dispatch_idx):
                # See Note [Avoiding Include Cycles In Static Dispatch]
                inl_headers = ''
            else:
                inl_headers = f'#include <ATen/{dispatch_key}Functions_inl.h>'

            fm.write_with_template(f'{dispatch_key}Functions.h', 'DispatchKeyFunctions.h', lambda: {
                'dispatch_key': str(dispatch_key),
                'inline_headers_for_nonstatic_build': inl_headers,
            })
            fm.write_with_template(f'{dispatch_key}Functions_inl.h', 'DispatchKeyFunctions_inl.h', lambda: {
                'dispatch_namespace': dispatch_key.lower(),
                'dispatch_namespaced_declarations': list(concatMap(
                    dest.RegisterDispatchKey(
                        backend_indices[dispatch_key],
                        Target.NAMESPACED_DECLARATION, selector, rocm=options.rocm, cpp_namespace='at::native'),
                    grouped_native_functions
                )),
            })

    del fm

    # BackendSelect is generated specially
    cpu_fm.write('RegisterBackendSelect.cpp', lambda: {
        'backend_select_method_definitions':
            list(mapMaybe(ComputeBackendSelect(Target.DEFINITION, selector), native_functions)),
        'backend_select_function_registrations':
            list(mapMaybe(ComputeBackendSelect(Target.REGISTRATION, selector), native_functions)),
    })

    cpu_fm.write('NativeMetaFunctions.h', lambda: {
        'declarations': list(mapMaybe(compute_meta_function_declaration, structured_native_functions)),
    })

    schema_selector = selector
    if options.force_schema_registration:
        schema_selector = SelectiveBuilder.get_nop_selector()
    cpu_fm.write('RegisterSchema.cpp', lambda: {
        'schema_registrations': list(mapMaybe(RegisterSchema(schema_selector), native_functions)),
    })

    cpu_fm.write('Operators.cpp', lambda: {
        'definitions': list(mapMaybe(ComputeOperators(Target.DEFINITION), native_functions)),
    })
    cpu_fm.write('Operators.h', lambda: {
        'declarations': list(mapMaybe(ComputeOperators(Target.DECLARATION), native_functions)),
    })

    cpu_fm.write('Functions.h', lambda: {
        'static_dispatch_extra_headers': static_dispatch_extra_headers(static_dispatch_idx),
        'function_definitions': list(mapMaybe(ComputeFunction(
            static_dispatch_backend_index=static_dispatch_idx), native_functions)),
    })
    cpu_fm.write('Functions.cpp', lambda: {})

    core_fm.write('TensorBody.h', lambda: {
        'static_dispatch_extra_headers': static_dispatch_extra_headers(static_dispatch_idx, skip_tensor_include=True),
        'tensor_method_declarations': list(mapMaybe(ComputeTensorMethod(
            target=Target.DECLARATION, static_dispatch_backend_index=static_dispatch_idx), native_functions)),
        'tensor_method_definitions': list(mapMaybe(ComputeTensorMethod(
            target=Target.DEFINITION, static_dispatch_backend_index=static_dispatch_idx), native_functions)),
    })
    core_fm.write('TensorMethods.cpp', lambda: {})

    cpu_fm.write('RedispatchFunctions.h', lambda: {
        'function_redispatch_definitions': list(mapMaybe(ComputeRedispatchFunction(), native_functions)),
    })

    core_fm.write('ATenOpList.cpp', lambda: {
        'aten_ops': list(mapMaybe(compute_aten_op, native_functions)),
    })

    cpu_fm.write('NativeFunctions.h', lambda: {
        'native_function_declarations': list(concatMap(
            # Convert to a set first to remove duplicate kernel names.
            # Backends are allowed to repeat kernel names; only generate the declaration once!
            lambda f: list(OrderedDict.fromkeys(concatMap(
                lambda backend_idx: dest.compute_native_function_declaration(f, backend_idx),
                backend_indices.values()))),
            grouped_native_functions)),
    })

    cpu_fm.write('Declarations.yaml', lambda: format_yaml([compute_declaration_yaml(f) for f in native_functions]))
    cpu_fm.write('RegistrationDeclarations.h', lambda: {
        'registration_declarations': [compute_registration_declarations(f, backend_indices)
                                      for f in native_functions],
    })

    if options.output_dependencies:
        cpu_fm.write_outputs(options.output_dependencies)
        core_fm.write_outputs(f"{options.output_dependencies}-core")
        cuda_fm.write_outputs(f"{options.output_dependencies}-cuda")
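
# NativeFunctions.h above dedups declarations with list(OrderedDict.fromkeys(...))
# rather than set(): both drop duplicates, but OrderedDict preserves first-seen
# order, which keeps the generated header stable across runs. A small demo:

from collections import OrderedDict

decls = ['void add(Tensor&);', 'void mul(Tensor&);', 'void add(Tensor&);']
assert list(OrderedDict.fromkeys(decls)) == ['void add(Tensor&);', 'void mul(Tensor&);']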
def main() -> None:
    parser = argparse.ArgumentParser(description='Generate ATen source files')
    parser.add_argument('-s', '--source-path',
                        help='path to source directory for ATen',
                        default='aten/src/ATen')
    parser.add_argument(
        '-o', '--output-dependencies',
        help='output a list of dependencies into the given file and exit')
    parser.add_argument('-d', '--install_dir', help='output directory',
                        default='build/aten/src/ATen')
    parser.add_argument(
        '--rocm', action='store_true',
        help='reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly')
    # TODO: --op_registration_whitelist will be removed when all call-sites
    # for gen.py are moved over to using the operator YAML file for mobile
    # custom build.
    parser.add_argument(
        '--op_registration_whitelist',
        nargs='*',
        help='filter op registrations by the whitelist (if set); '
             'each item is `namespace`::`operator name` without overload name; '
             'e.g.: aten::empty aten::conv2d ...')
    parser.add_argument(
        '--op_selection_yaml_path',
        help='Provide a path to the operator selection (for custom build) YAML '
             'that contains the information about the set of selected operators '
             'and their categories (training, ...). Each operator is either a '
             'full operator name with overload or just a bare operator name. '
             'The operator names also contain the namespace prefix (e.g. aten::)')
    parser.add_argument(
        '--backend_whitelist',
        nargs='*',
        help='filter dispatch backend by the whitelist (if set), '
             'e.g.: CPU CUDA QuantizedCPU ...')
    parser.add_argument(
        '--force_schema_registration',
        action='store_true',
        help='force it to generate schema-only registrations for all ops, including '
             'those that are not listed on --op_registration_whitelist')
    options = parser.parse_args()

    selector = get_custom_build_selector(
        options.op_registration_whitelist,
        options.op_selection_yaml_path,
    )

    native_functions = parse_native_yaml(
        os.path.join(options.source_path, 'native/native_functions.yaml'))

    pre_grouped_native_functions: Dict[FunctionSchema, Dict[SchemaKind, NativeFunction]]
    pre_grouped_native_functions = defaultdict(dict)
    for f in native_functions:
        d = pre_grouped_native_functions[f.func.signature()]
        assert f.func.kind() not in d
        d[f.func.kind()] = f
    grouped_native_functions = [
        NativeFunctionGroup.from_dict(v)
        for v in pre_grouped_native_functions.values()
    ]
    # NB: At the moment, grouped_native_functions isn't used by anything,
    # this code lives here to help potential future consumers; for a live
    # example see https://github.com/pytorch/pytorch/pull/45277

    template_dir = os.path.join(options.source_path, "templates")

    # NB: It is mandatory to NOT use os.path.join here, as the install directory
    # will eventually be ingested by cmake, which does not respect Windows style
    # path slashes. If you switch this to use os.path.join, you'll get an error
    # like:
    #
    #   Syntax error in cmake code when parsing string
    #
    #     C:/Jenkins/workspace/pytorch-builds/pytorch-win-ws2016-cuda9-cudnn7-py3-build/build/aten/src/ATen\core/TensorMethods.h
    #
    #   Invalid character escape '\c'.
    core_install_dir = f'{options.install_dir}/core'
    pathlib.Path(core_install_dir).mkdir(parents=True, exist_ok=True)

    def make_file_manager(install_dir: str) -> FileManager:
        return FileManager(install_dir=install_dir, template_dir=template_dir,
                           dry_run=options.output_dependencies)

    core_fm = make_file_manager(core_install_dir)
    cpu_fm = make_file_manager(options.install_dir)
    cuda_fm = make_file_manager(options.install_dir)

    extra_cuda_headers = '''\
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDADevice.h>
#include <ATen/cuda/CUDAContext.h>'''
    if options.rocm:
        extra_cuda_headers = '''\
#include <ATen/hip/ATenHIPGeneral.h>
#include <ATen/hip/HIPDevice.h>
#include <ATen/hip/HIPContext.h>'''

    # NB: substrings in these dispatch keys matter, we do tests to see if
    # a key contains, e.g., CUDA to classify it as a CUDA backend
    dispatch_keys = [
        "CPU",
        "SparseCPU",
        "MkldnnCPU",
        "CUDA",
        "SparseCUDA",
        "QuantizedCPU",
        "QuantizedCUDA",
        "Math",
        "DefaultBackend",
    ]
    if options.backend_whitelist:
        dispatch_keys = [
            k for k in dispatch_keys
            if is_generic_dispatch_key(k) or k in options.backend_whitelist
        ]

    for dispatch_key in dispatch_keys:
        cpp_template = 'RegisterDispatchKey.cpp'
        fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm

        fm.write_with_template(f'Register{dispatch_key}.cpp', cpp_template, lambda: {
            'extra_cuda_headers': extra_cuda_headers if is_cuda_dispatch_key(dispatch_key) else '',
            'legacy_th_headers':
                '#include <ATen/LegacyTHFunctionsCPU.h>' if dispatch_key == "CPU" else
                '#include <ATen/LegacyTHFunctionsCUDA.h>' if dispatch_key == "CUDA" else
                '',
            'DispatchKey': dispatch_key,
            'dispatch_definitions': list(mapMaybe(
                RegisterDispatchKey(dispatch_key, Target.DEFINITION, selector),
                native_functions)),
            'dispatch_registrations': list(mapMaybe(
                RegisterDispatchKey(dispatch_key, Target.REGISTRATION, selector),
                native_functions)),
        })
    del fm

    # BackendSelect is generated specially
    cpu_fm.write('RegisterBackendSelect.cpp', lambda: {
        'backend_select_method_definitions':
            list(mapMaybe(ComputeBackendSelect(Target.DEFINITION), native_functions)),
        'backend_select_function_registrations':
            list(mapMaybe(ComputeBackendSelect(Target.REGISTRATION), native_functions)),
    })

    schema_selector = selector
    if options.force_schema_registration:
        schema_selector = SelectiveBuilder.get_nop_selector()
    cpu_fm.write('RegisterSchema.cpp', lambda: {
        'schema_registrations': list(mapMaybe(RegisterSchema(schema_selector), native_functions)),
    })

    cpu_fm.write('Functions.h', lambda: {
        'function_declarations': list(mapMaybe(ComputeFunction(Target.DECLARATION), native_functions)),
    })
    cpu_fm.write('Functions.cpp', lambda: {
        'function_definitions': list(mapMaybe(ComputeFunction(Target.DEFINITION), native_functions)),
    })
    core_fm.write('TensorBody.h', lambda: {
        'tensor_method_declarations': list(mapMaybe(ComputeTensorMethod(Target.DECLARATION), native_functions)),
    })
    core_fm.write('TensorMethods.cpp', lambda: {
        'tensor_method_definitions': list(mapMaybe(ComputeTensorMethod(Target.DEFINITION), native_functions)),
    })
    core_fm.write('ATenOpList.cpp', lambda: {
        'aten_ops': list(mapMaybe(compute_aten_op, native_functions)),
    })
    cpu_fm.write('NativeFunctions.h', lambda: {
        'native_function_declarations': list(concatMap(compute_native_function_declaration, native_functions)),
    })
    cpu_fm.write('Declarations.yaml', lambda: format_yaml(
        [compute_declaration_yaml(f) for f in native_functions]))
    cpu_fm.write('RegistrationDeclarations.h', lambda: {
        'registration_declarations': [compute_registration_declarations(f) for f in native_functions],
    })

    if options.output_dependencies:
        cpu_fm.write_outputs(options.output_dependencies)
        core_fm.write_outputs(f"{options.output_dependencies}-core")
        cuda_fm.write_outputs(f"{options.output_dependencies}-cuda")
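
# The pre-grouping step above buckets native functions by their schema signature
# and SchemaKind (functional / inplace / out), asserting that each signature has
# at most one function per kind. A self-contained sketch of the same pattern,
# with a hypothetical make_group standing in for NativeFunctionGroup.from_dict:

from collections import defaultdict

def group_by_signature(fns, signature, kind, make_group):
    # signature(f) and kind(f) are accessors; make_group turns one bucket into a group.
    buckets = defaultdict(dict)
    for f in fns:
        d = buckets[signature(f)]
        assert kind(f) not in d, "at most one function per kind per signature"
        d[kind(f)] = f
    return [make_group(v) for v in buckets.values()]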
def run(source_yaml: str, output_dir: str, dry_run: bool, impl_path: Optional[str],
        gen_ts_lowerings: bool, node_base: str, node_base_hdr: Optional[str],
        tensor_class: str, tensor_class_hdr: str) -> None:
    # Assumes that this file lives at PYTORCH_ROOT/tools/codegen/gen_backend_stubs.py
    pytorch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
    template_dir = os.path.join(pytorch_root, "aten/src/ATen/templates")

    def make_file_manager(install_dir: str) -> FileManager:
        return FileManager(install_dir=install_dir, template_dir=template_dir, dry_run=dry_run)

    fm = make_file_manager(output_dir)

    native_yaml_path = os.path.join(pytorch_root, 'aten/src/ATen/native/native_functions.yaml')
    parsed_yaml = parse_native_yaml(native_yaml_path)
    native_functions, backend_indices = parsed_yaml.native_functions, parsed_yaml.backend_indices
    grouped_native_functions = get_grouped_native_functions(native_functions)

    def sort_native_function(f: Union[NativeFunctionsGroup, NativeFunction]) -> str:
        """
        We sort the native functions because of the note in concat_map_codegen.
        TODO(alanwaketan): Remove this sorting hack once all ops are grouped properly.
        """
        func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
        return str(func.name.name)

    grouped_native_functions = sorted(grouped_native_functions, key=sort_native_function)
    parsed_backend_yaml = parse_backend_yaml(source_yaml, grouped_native_functions, backend_indices)
    backend_key = parsed_backend_yaml.backend_key
    autograd_key = parsed_backend_yaml.autograd_key
    cpp_namespace = parsed_backend_yaml.cpp_namespace
    backend_indices = parsed_backend_yaml.backend_indices
    full_codegen = parse_full_codegen_ops(source_yaml, grouped_native_functions)

    def concat_map_codegen(func: Callable[[NativeFunction], Sequence[str]],
                           xs: Iterable[Union[NativeFunctionsGroup, NativeFunction]],
                           *, codegenInplaceVariant: bool = False) -> Iterator[str]:
        """
        We code-gen for the functional variant, which is all we need for IR
        classes/lowerings/shape inferences, but we only code-gen additional entries
        for the inplace variant for the native functions.

        Note: If xs is not sorted, there may be an edge case when generating IR
        classes. Considering relu and relu_, if we encounter relu_ before relu,
        we will then generate an IR class with op = at::aten::relu_ for both relu
        and relu_, which will cause problems for relu.
        TODO(alanwaketan): Once all ops are grouped properly, we should no longer
        need this hack.
        """
        generated = set()

        def gen_key(func: FunctionSchema) -> Tuple[str, str]:
            # We want to generate unique entries for overloads of functional variants,
            # but not for inplace variants unless explicitly told `codegenInplaceVariant`.
            return (func.name.name.base, func.name.overload_name)

        for x in xs:
            f = x.functional if isinstance(x, NativeFunctionsGroup) else x
            # For the 'or'd terms:
            # 1. codegenInplaceVariant means we can generate the in-place variant's corresponding items.
            # 2. not f.func.name.name.inplace means the op is not an in-place variant, so we can generate the item.
            # 3. gen_key(f.func) not in generated means that even for in-place ops we still need to generate
            #    the item once, as if they were the functional variants.
            if f.func.name in full_codegen and \
                    (codegenInplaceVariant or not f.func.name.name.inplace or gen_key(f.func) not in generated):
                generated.add(gen_key(f.func))
                for r in func(f):
                    yield r

    selector = SelectiveBuilder.get_nop_selector()

    assert backend_key is not None
    class_name = backend_indices[backend_key].native_function_class_name()

    if impl_path is not None:
        error_on_missing_kernels(native_functions, backend_indices, backend_key, autograd_key,
                                 impl_path, full_codegen)

    assert class_name is not None

    # Generate nativefunction declarations
    gen_dispatchkey_nativefunc_headers(fm, class_name, cpp_namespace, backend_indices,
                                       grouped_native_functions, backend_key, autograd_key)

    # Generate Dispatcher registrations which hook up the nativefunctions
    for dispatch_key in [backend_key] if autograd_key is None else [backend_key, autograd_key]:
        gen_dispatcher_registrations(fm, output_dir, cpp_namespace, backend_indices,
                                     grouped_native_functions, backend_key, dispatch_key, selector)

    # Generate native function impls that build IR nodes
    fm.write_with_template(f'{backend_key}NativeFunctions.cpp', 'DispatchKeyNativeFunctions.cpp', lambda: {
        'includes': [f'#include <{path}>' for path in [
            tensor_class_hdr,
            "ATen/MetaFunctions.h",
            "torch/csrc/lazy/core/shape.h",
            "lazy_tensor_core/csrc/aten_ltc_bridge.h",
            "lazy_tensors/computation_client/metrics.h",
            f"{output_dir}/{backend_key}NativeFunctions.h",
            f"{output_dir}/{backend_key}LazyIr.h",
            f"{output_dir}/{backend_key}ShapeInference.h",
        ]],
        'native_functions_include': '',
        'backend_namespace': 'torch_lazy_tensors',  # this is wrong
        'native_function_definitions': list(concat_map_codegen(
            dest.GenLazyNativeFuncDefinition(f'{backend_key}NativeFunctions',
                                             backend_indices[backend_key],
                                             tensor_class),
            grouped_native_functions,
            codegenInplaceVariant=True
        )),
    })

    # Generate headers for shape/dtype funcs for non-meta kernels
    fm.write_with_template(f'{backend_key}ShapeInference.h', 'ShapeInference.h', lambda: {
        'lazy_ir_sysinc': [f'#include <{path}>' for path in [
            "ATen/Tensor.h",
            "c10/core/ScalarType.h",
            "c10/util/Optional.h",
            "torch/csrc/lazy/core/ir.h",
            "torch/csrc/lazy/core/shape.h",
            "vector",
        ]],
        'lazy_ir_inc': [],
        'DispatchKey': backend_key,
        'dispatch_namespace': backend_key.lower(),
        'func_declarations': list(concat_map_codegen(
            dest.GenLazyShapeInferenceDefinition(backend_indices[backend_key], tensor_class),
            grouped_native_functions,
            codegenInplaceVariant=True
        )),
    })

    # Generate IR node classes
    fm.write_with_template(f'{backend_key}LazyIr.h', 'LazyIr.h', lambda: {
        'lazy_ir_sysinc': [f'#include <{path}>' for path in [
            "ATen/core/Formatting.h",
            "c10/core/ScalarType.h",
            "c10/util/Optional.h",
            "torch/csrc/lazy/core/hash.h",
            "torch/csrc/lazy/core/ir.h",
            "vector",
        ]],
        'lazy_ir_inc': [f'#include "{path}"' for path in [
            node_base_hdr if node_base_hdr is not None else None
        ] if path is not None],
        'external_backend_headers': f'#include "{output_dir}/{backend_key}NativeFunctions.h"',
        'namespaced_headers': '',
        'DispatchKey': backend_key,
        'dispatch_namespace': backend_key.lower(),
        'ir_declarations': list(concat_map_codegen(
            dest.LazyIR(backend_indices[backend_key], node_base),
            grouped_native_functions
        )),
    })
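
# The (base, overload_name) dedup key in concat_map_codegen is what prevents the
# relu/relu_ problem described in its docstring. A standalone demo of the same
# bookkeeping, using plain tuples instead of FunctionSchema:

def dedup_by_key(ops):
    # ops: iterable of (base_name, overload_name, is_inplace) tuples, sorted by name.
    generated = set()
    for base, overload, inplace in ops:
        key = (base, overload)
        if not inplace or key not in generated:
            generated.add(key)
            yield base + ('_' if inplace else '')

# Sorted input guarantees 'relu' is seen before 'relu_', so the functional
# variant claims the key first and the in-place variant is skipped:
assert list(dedup_by_key([('relu', '', False), ('relu', '', True)])) == ['relu']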
def main() -> None:
    parser = argparse.ArgumentParser(description='Generate ATen source files')
    parser.add_argument(
        '-s', '--source-path',
        help='path to source directory for ATen',
        default='aten/src/ATen')
    parser.add_argument(
        '-o', '--output-dependencies',
        help='output a list of dependencies into the given file and exit')
    parser.add_argument(
        '-d', '--install_dir', help='output directory', default='build/aten/src/ATen')
    parser.add_argument(
        '--rocm', action='store_true',
        help='reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly')
    # TODO: --op_registration_whitelist will be removed when all call-sites
    # for gen.py are moved over to using the operator YAML file for mobile
    # custom build.
    parser.add_argument(
        '--op_registration_whitelist',
        nargs='*',
        help='filter op registrations by the whitelist (if set); '
             'each item is `namespace`::`operator name` without overload name; '
             'e.g.: aten::empty aten::conv2d ...')
    parser.add_argument(
        '--op_selection_yaml_path',
        help='Provide a path to the operator selection (for custom build) YAML '
             'that contains the information about the set of selected operators '
             'and their categories (training, ...). Each operator is either a '
             'full operator name with overload or just a bare operator name. '
             'The operator names also contain the namespace prefix (e.g. aten::)')
    parser.add_argument(
        '--backend_whitelist',
        nargs='*',
        help='filter dispatch backend by the whitelist (if set), '
             'e.g.: CPU CUDA QuantizedCPU ...')
    parser.add_argument(
        '--force_schema_registration',
        action='store_true',
        help='force it to generate schema-only registrations for all ops, including '
             'those that are not listed on --op_registration_whitelist')
    options = parser.parse_args()

    selector = get_custom_build_selector(
        options.op_registration_whitelist,
        options.op_selection_yaml_path,
    )

    native_functions = parse_native_yaml(
        os.path.join(options.source_path, 'native/native_functions.yaml'))

    pre_grouped_native_functions: Dict[FunctionSchema, Dict[SchemaKind, NativeFunction]]
    pre_grouped_native_functions = defaultdict(dict)
    for f in native_functions:
        d = pre_grouped_native_functions[f.func.signature()]
        assert f.func.kind() not in d
        d[f.func.kind()] = f

    def flatten_pre_group(d: Dict[SchemaKind, NativeFunction]) -> Sequence[Union[NativeFunction, StructuredNativeFunctions]]:
        r = StructuredNativeFunctions.from_dict(d)
        if r is None:
            return list(d.values())
        else:
            return [r]

    # TODO: how come ValuesView isn't a Sequence lol
    grouped_native_functions = list(concatMap(flatten_pre_group, list(pre_grouped_native_functions.values())))
    structured_native_functions = [g for g in grouped_native_functions
                                   if isinstance(g, StructuredNativeFunctions)]

    template_dir = os.path.join(options.source_path, "templates")

    # NB: It is mandatory to NOT use os.path.join here, as the install directory
    # will eventually be ingested by cmake, which does not respect Windows style
    # path slashes. If you switch this to use os.path.join, you'll get an error
    # like:
    #
    #   Syntax error in cmake code when parsing string
    #
    #     C:/Jenkins/workspace/pytorch-builds/pytorch-win-ws2016-cuda9-cudnn7-py3-build/build/aten/src/ATen\core/TensorMethods.h
    #
    #   Invalid character escape '\c'.
    core_install_dir = f'{options.install_dir}/core'
    pathlib.Path(core_install_dir).mkdir(parents=True, exist_ok=True)

    def make_file_manager(install_dir: str) -> FileManager:
        return FileManager(install_dir=install_dir, template_dir=template_dir,
                           dry_run=options.output_dependencies)

    core_fm = make_file_manager(core_install_dir)
    cpu_fm = make_file_manager(options.install_dir)
    cuda_fm = make_file_manager(options.install_dir)

    extra_cuda_headers = '''\
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDADevice.h>
#include <ATen/cuda/CUDAContext.h>'''
    if options.rocm:
        extra_cuda_headers = '''\
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/ATenHIPGeneral.h>
#include <ATen/hip/HIPDevice.h>
#include <ATen/hip/HIPContext.h>'''

    dispatch_keys = [
        DispatchKey.CPU,
        DispatchKey.SparseCPU,
        DispatchKey.MkldnnCPU,
        DispatchKey.CUDA,
        DispatchKey.SparseCUDA,
        DispatchKey.QuantizedCPU,
        DispatchKey.QuantizedCUDA,
        DispatchKey.Math,
        DispatchKey.DefaultBackend,
        # Meta is a magic key: it is automatically generated for structured
        # kernels
        DispatchKey.Meta,
    ]
    # Only a limited set of dispatch keys get CPUFunctions.h headers generated
    # for them; this is the set
    functions_keys = {
        DispatchKey.CPU,
        DispatchKey.CUDA,
    }
    if options.backend_whitelist:
        dispatch_keys = [k for k in dispatch_keys
                         if is_generic_dispatch_key(k) or str(k) in options.backend_whitelist]

    for dispatch_key in dispatch_keys:
        fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm

        fm.write_with_template(f'Register{dispatch_key}.cpp', 'RegisterDispatchKey.cpp', lambda: {
            'extra_cuda_headers': extra_cuda_headers if is_cuda_dispatch_key(dispatch_key) else '',
            'legacy_th_headers':
                '#include <ATen/LegacyTHFunctionsCPU.h>' if dispatch_key == DispatchKey.CPU else
                '#include <ATen/LegacyTHFunctionsCUDA.h>' if dispatch_key == DispatchKey.CUDA else
                '',
            'DispatchKey': dispatch_key,
            'dispatch_namespace': dispatch_key.lower(),
            'dispatch_namespaced_definitions': list(concatMap(
                dest.RegisterDispatchKey(
                    dispatch_key, Target.NAMESPACED_DEFINITION, selector, rocm=options.rocm),
                grouped_native_functions
            )),
            'dispatch_anonymous_definitions': list(concatMap(
                dest.RegisterDispatchKey(
                    dispatch_key, Target.ANONYMOUS_DEFINITION, selector, rocm=options.rocm),
                grouped_native_functions
            )),
            'dispatch_registrations': list(concatMap(
                dest.RegisterDispatchKey(
                    dispatch_key, Target.REGISTRATION, selector, rocm=options.rocm),
                grouped_native_functions
            )),
        })

        if dispatch_key in functions_keys:
            fm.write_with_template(f'{dispatch_key}Functions.h', 'DispatchKeyFunctions.h', lambda: {
                'dispatch_namespace': dispatch_key.lower(),
                'dispatch_namespaced_declarations': list(concatMap(
                    dest.RegisterDispatchKey(
                        dispatch_key, Target.NAMESPACED_DECLARATION, selector, rocm=options.rocm),
                    grouped_native_functions
                )),
            })

    del fm

    # BackendSelect is generated specially
    cpu_fm.write('RegisterBackendSelect.cpp', lambda: {
        'backend_select_method_definitions':
            list(mapMaybe(ComputeBackendSelect(Target.DEFINITION), native_functions)),
        'backend_select_function_registrations':
            list(mapMaybe(ComputeBackendSelect(Target.REGISTRATION), native_functions)),
    })

    cpu_fm.write('MetaFunctions.h', lambda: {
        'declarations': list(map(compute_meta_function_declaration, structured_native_functions)),
    })

    schema_selector = selector
    if options.force_schema_registration:
        schema_selector = SelectiveBuilder.get_nop_selector()
    cpu_fm.write('RegisterSchema.cpp', lambda: {
        'schema_registrations': list(mapMaybe(RegisterSchema(schema_selector), native_functions)),
    })

    cpu_fm.write('Functions.h', lambda: {
        'function_declarations': list(mapMaybe(ComputeFunction(Target.DECLARATION), native_functions)),
    })
    cpu_fm.write('Functions.cpp', lambda: {
        'function_definitions': list(mapMaybe(ComputeFunction(Target.DEFINITION), native_functions)),
    })
    core_fm.write('TensorBody.h', lambda: {
        'tensor_method_declarations': list(mapMaybe(ComputeTensorMethod(Target.DECLARATION), native_functions)),
    })
    core_fm.write('TensorMethods.cpp', lambda: {
        'tensor_method_definitions': list(mapMaybe(ComputeTensorMethod(Target.DEFINITION), native_functions)),
    })
    core_fm.write('ATenOpList.cpp', lambda: {
        'aten_ops': list(mapMaybe(compute_aten_op, native_functions)),
    })
    cpu_fm.write('NativeFunctions.h', lambda: {
        'native_function_declarations': list(concatMap(
            compute_native_function_declaration, grouped_native_functions)),
    })
    cpu_fm.write('Declarations.yaml', lambda: format_yaml(
        [compute_declaration_yaml(f) for f in native_functions]))
    cpu_fm.write('RegistrationDeclarations.h', lambda: {
        'registration_declarations': [compute_registration_declarations(f) for f in native_functions],
    })

    if options.output_dependencies:
        cpu_fm.write_outputs(options.output_dependencies)
        core_fm.write_outputs(f"{options.output_dependencies}-core")
        cuda_fm.write_outputs(f"{options.output_dependencies}-cuda")
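
# In all three mains, -o/--output-dependencies doubles as the dry_run flag for
# every FileManager: when set, files are recorded but not written, and
# write_outputs dumps the recorded filenames for the build system. A minimal
# sketch of that contract (assumed; the real FileManager lives in
# tools/codegen/utils.py and renders CodeTemplate files):

class SketchFileManager:
    def __init__(self, install_dir, dry_run=False):
        self.install_dir = install_dir
        self.dry_run = dry_run
        self.filenames = []

    def write(self, filename, env_callable):
        self.filenames.append(f"{self.install_dir}/{filename}")
        if self.dry_run:
            return  # record the dependency, skip rendering the template
        # ... render env_callable() into the template and write the file ...

    def write_outputs(self, depfile):
        # Dump every filename this manager would have produced.
        with open(depfile, 'w') as f:
            f.write(';'.join(self.filenames))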
def run_gen_lazy_tensor(
        aten_path: str,
        source_yaml: str,
        output_dir: str,
        dry_run: bool,
        impl_path: Optional[str],
        node_base: str = default_args.node_base,
        node_base_hdr: Optional[str] = default_args.node_base_hdr,
        tensor_class: str = default_args.tensor_class,
        tensor_class_hdr: str = default_args.tensor_class_hdr,
        shape_inference_hdr: str = default_args.shape_inference_hdr,
        lazy_ir_cls: Type[LazyIR] = default_args.lazy_ir_cls,
        # build_in_tree is true for the TS backend and affects include paths
        build_in_tree: bool = False,
        # per_operator_headers changes whether ATen/Functions.h or individual
        # operator headers are used; it must match how ATen was built
        per_operator_headers: bool = False,
        backend_name: str = default_args.backend_name,
        gen_forced_fallback_code: bool = False) -> None:

    template_dir = os.path.join(aten_path, "templates")

    def make_file_manager(install_dir: str) -> FileManager:
        return FileManager(install_dir=install_dir, template_dir=template_dir, dry_run=dry_run)

    fm = make_file_manager(output_dir)

    native_yaml_path = os.path.join(aten_path, 'native/native_functions.yaml')
    parsed_yaml = parse_native_yaml(native_yaml_path)
    native_functions, backend_indices = parsed_yaml.native_functions, parsed_yaml.backend_indices
    grouped_native_functions = get_grouped_native_functions(native_functions)

    def sort_native_function(f: Union[NativeFunctionsGroup, NativeFunction]) -> str:
        """
        We sort the native functions because of the note in concat_map_codegen.
        TODO(alanwaketan): Remove this sorting hack once all ops are grouped properly.
        """
        func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
        return str(func.name.name)

    grouped_native_functions = sorted(grouped_native_functions, key=sort_native_function)
    parsed_backend_yaml = parse_backend_yaml(source_yaml, grouped_native_functions, backend_indices)
    backend_key = parsed_backend_yaml.backend_key
    autograd_key = parsed_backend_yaml.autograd_key
    cpp_namespace = parsed_backend_yaml.cpp_namespace
    backend_indices = parsed_backend_yaml.backend_indices
    full_codegen = parse_full_codegen_ops(source_yaml, grouped_native_functions)

    def concat_map_codegen(func: Callable[[NativeFunction], Sequence[str]],
                           xs: Iterable[Union[NativeFunctionsGroup, NativeFunction]],
                           *, codegenInplaceVariant: bool = False) -> Iterator[str]:
        """
        We code-gen for the functional variant, which is all we need for IR
        classes/lowerings/shape inferences, but we only code-gen additional entries
        for the inplace variant for the native functions.

        Note: If xs is not sorted, there may be an edge case when generating IR
        classes. Considering relu and relu_, if we encounter relu_ before relu,
        we will then generate an IR class with op = at::aten::relu_ for both relu
        and relu_, which will cause problems for relu.
        TODO(alanwaketan): Once all ops are grouped properly, we should no longer
        need this hack.
        """
        generated = set()

        def gen_key(func: FunctionSchema) -> Tuple[str, str]:
            # We want to generate unique entries for overloads of functional variants,
            # but not for inplace variants unless explicitly told `codegenInplaceVariant`.
            return (func.name.name.base, func.name.overload_name)

        for x in xs:
            f = x.functional if isinstance(x, NativeFunctionsGroup) else x
            # For the 'or'd terms:
            # 1. codegenInplaceVariant means we can generate the in-place variant's corresponding items.
            # 2. not f.func.name.name.inplace means the op is not an in-place variant, so we can generate the item.
            # 3. gen_key(f.func) not in generated means that even for in-place ops we still need to generate
            #    the item once, as if they were the functional variants.
            if f.func.name in full_codegen and \
                    (codegenInplaceVariant or not f.func.name.name.inplace or gen_key(f.func) not in generated):
                generated.add(gen_key(f.func))
                for r in func(f):
                    yield r

    selector = SelectiveBuilder.get_nop_selector()

    assert backend_key is not None
    class_name = backend_indices[backend_key].native_function_class_name()

    if impl_path is not None:
        error_on_missing_kernels(native_functions, backend_indices, backend_key, autograd_key,
                                 class_name, impl_path, full_codegen)

    """ Validate Shape Inference Definitions

    Generated lazy native functions all perform shape inference, by first using a meta:: kernel
    if available for that op, and otherwise using a 'compute_shape_{op}' function instead.
    The generator knows the call signature for compute_shape_{op} because it matches the
    nativefunction (and meta::) signature, so it just has to check whether the op is structured
    and generate a call for one or the other. It's up to the dev to supply the missing
    compute_shape_{op} function, but the codegen at least warns you about this and provides
    the expected signature, which can be copy-pasted into shape_inference.h.

    compute_shape_{op} functions are handwritten and should be replaced over time as ops
    get ported to structured kernels.

    See torch/csrc/lazy/core/shape_inference.cpp #READ THIS! for more information.
    """
    if shape_inference_hdr is not None:
        expected_shape_infr_decls = list(concat_map_codegen(
            dest.GenLazyShapeInferenceDefinition(backend_indices[backend_key], tensor_class),
            grouped_native_functions,
            codegenInplaceVariant=True))
        validate_shape_inference_header(shape_inference_hdr, expected_shape_infr_decls)

    assert class_name is not None

    # Generate nativefunction declarations
    # Note, eager registrations is set to False for the lazy TS backend as another LTC backend
    # may want to register their own lazy kernels instead of registering the TS ones.
    # The registration will lazily happen when init_ts_backend is called.
    gen_dispatchkey_nativefunc_headers(fm, class_name, cpp_namespace, backend_indices,
                                       grouped_native_functions, backend_key, autograd_key,
                                       backend_name)

    # Generate Dispatcher registrations which hook up the nativefunctions
    for dispatch_key in [backend_key] if autograd_key is None else [backend_key, autograd_key]:
        gen_dispatcher_registrations(fm, output_dir, class_name, cpp_namespace, backend_indices,
                                     grouped_native_functions, backend_key, dispatch_key, selector,
                                     build_in_tree=build_in_tree,
                                     per_operator_headers=per_operator_headers,
                                     backend_name=backend_name,
                                     eager_registration=False)

    # Generate native function impls that build IR nodes
    ns_helper = NamespaceHelper(cpp_namespace)
    fm.write_with_template(f'{backend_key}NativeFunctions.cpp', 'DispatchKeyNativeFunctions.cpp', lambda: {
        'includes': [f'#include <{path}>' for path in [
            tensor_class_hdr,
            shape_inference_hdr,
            "ATen/Functions.h",
            "ATen/MetaFunctions.h",
            "ATen/Operators.h",
            "ATen/native/CPUFallback.h",
            "torch/csrc/lazy/core/lazy_graph_executor.h",
            "torch/csrc/lazy/core/metrics.h",
            "torch/csrc/lazy/core/shape.h",
            f"{output_dir}/{backend_key}NativeFunctions.h",
            f"{output_dir}/LazyIr.h",
        ] + (["torch/csrc/lazy/ts_backend/ts_eager_fallback.h"] if gen_forced_fallback_code else [])],
        'native_functions_include': '',
        'namespace_prologue': ns_helper.prologue,
        'namespace_epilogue': ns_helper.epilogue,
        'native_function_definitions': list(concat_map_codegen(
            dest.GenLazyNativeFuncDefinition(f'{backend_key}NativeFunctions',
                                             backend_indices[backend_key],
                                             tensor_class,
                                             gen_forced_fallback_code),
            grouped_native_functions,
            codegenInplaceVariant=True)),
    })

    # Generate IR node classes
    fm.write_with_template('LazyIr.h', 'LazyIr.h', lambda: {
        'lazy_ir_sysinc': [f'#include <{path}>' for path in [
            "ATen/core/Formatting.h",
            "c10/core/ScalarType.h",
            "c10/util/Optional.h",
            "torch/csrc/lazy/core/hash.h",
            "torch/csrc/lazy/core/ir.h",
            "torch/csrc/lazy/core/shape.h",
            "vector",
        ]],
        'lazy_ir_inc': [f'#include "{path}"' for path in [
            node_base_hdr if node_base_hdr is not None else None
        ] if path is not None],
        'ir_declarations': list(concat_map_codegen(
            lazy_ir_cls(backend_indices[backend_key], node_base),
            grouped_native_functions)),
        'namespace_prologue': ns_helper.prologue,
        'namespace_epilogue': ns_helper.epilogue,
    })
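
# A hypothetical call site for run_gen_lazy_tensor, relying only on the
# signature above; the path and YAML values are illustrative, and every
# parameter not passed falls back to default_args:

run_gen_lazy_tensor(
    aten_path='aten/src/ATen',
    source_yaml='torch/csrc/lazy/ts_backend/ts_native_functions.yaml',  # illustrative path
    output_dir='torch/csrc/lazy/generated',
    dry_run=True,      # record what would be generated without writing files
    impl_path=None,    # skip the missing-kernel check
    gen_forced_fallback_code=True,
)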
def run(source_yaml: str, output_dir: str, dry_run: bool) -> None:
    # Assumes that this file lives at PYTORCH_ROOT/tools/codegen/gen_backend_stubs.py
    pytorch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
    template_dir = os.path.join(pytorch_root, "aten/src/ATen/templates")

    def make_file_manager(install_dir: str) -> FileManager:
        return FileManager(install_dir=install_dir, template_dir=template_dir, dry_run=dry_run)

    fm = make_file_manager(output_dir)

    native_yaml_path = os.path.join(pytorch_root, 'aten/src/ATen/native/native_functions.yaml')
    parsed_yaml = parse_native_yaml(native_yaml_path)
    native_functions, backend_indices = parsed_yaml.native_functions, parsed_yaml.backend_indices
    grouped_native_functions = get_grouped_native_functions(native_functions)
    parsed_backend_yaml = parse_backend_yaml(source_yaml, grouped_native_functions, backend_indices)
    backend_key = parsed_backend_yaml.backend_key
    autograd_key = parsed_backend_yaml.autograd_key
    cpp_namespace = parsed_backend_yaml.cpp_namespace
    backend_indices = parsed_backend_yaml.backend_indices

    selector = SelectiveBuilder.get_nop_selector()

    # TODO: handle cases when yaml contains zero ops properly in a later PR.
    if backend_key is not None and autograd_key is not None:
        backend_dispatch_key: DispatchKey = backend_key
        autograd_dispatch_key: DispatchKey = autograd_key

        generated_comment = 'Autogenerated file by gen_backend_stubs.py. Do not edit directly!'
        fm.write('aten_xla_type.h', lambda: {
            'generated_comment': generated_comment,
            'cpp_namespace': cpp_namespace,
            # Convert to a set first to remove duplicate kernel names.
            # Backends are allowed to repeat kernel names; only generate the declaration once!
            'dispatch_xla_declarations': list(set(concatMap(
                lambda f: dest.compute_native_function_declaration(f, backend_indices[backend_dispatch_key]),
                grouped_native_functions
            ))) + list(set(concatMap(
                lambda f: dest.compute_native_function_declaration(f, backend_indices[autograd_dispatch_key]),
                grouped_native_functions
            ))),
        })

        external_backend_headers = '''\
#include <tensorflow/compiler/xla/xla_client/debug_macros.h>
#include <tensorflow/compiler/xla/xla_client/metrics.h>
#include <tensorflow/compiler/xla/xla_client/tf_logging.h>
#include <torch_xla/csrc/function_call_tracker.h>
#include <torch_xla/csrc/aten_xla_type.h>
#include <torch_xla/csrc/aten_xla_type_default.h>'''

        for dispatch_key in [backend_dispatch_key, autograd_dispatch_key]:
            fm.write_with_template(f'Register{dispatch_key}.cpp', 'RegisterDispatchKey.cpp', lambda: {
                'extra_cuda_headers': '',
                'legacy_th_headers': '',
                'external_backend_headers': external_backend_headers,
                'DispatchKey': dispatch_key,
                'dispatch_namespace': dispatch_key.lower(),
                'dispatch_namespaced_definitions': list(concatMap(
                    dest.RegisterDispatchKey(
                        backend_indices[dispatch_key],
                        Target.NAMESPACED_DEFINITION, selector, rocm=False, cpp_namespace=cpp_namespace),
                    grouped_native_functions
                )),
                'dispatch_anonymous_definitions': list(concatMap(
                    dest.RegisterDispatchKey(
                        backend_indices[dispatch_key],
                        Target.ANONYMOUS_DEFINITION, selector, rocm=False, cpp_namespace=cpp_namespace),
                    grouped_native_functions
                )),
                'dispatch_registrations': list(concatMap(
                    dest.RegisterDispatchKey(
                        backend_indices[dispatch_key],
                        Target.REGISTRATION, selector, rocm=False, cpp_namespace=cpp_namespace),
                    grouped_native_functions
                )),
            })

        fm.write('aten_xla_type_default.h', lambda: {
            'generated_comment': generated_comment,
            'cpp_namespace': cpp_namespace,
            'dispatch_aten_fallback_declarations': list(concatMap(
                dest.GenExternalAtenFallback(Target.NAMESPACED_DECLARATION,
                                             backend_indices[backend_dispatch_key]),
                grouped_native_functions
            )),
        })

        fm.write('aten_xla_type_default.cpp', lambda: {
            'generated_comment': generated_comment,
            'cpp_namespace': cpp_namespace,
            # TODO: after cpu fallbacks are moved to a boxed kernel,
            # merge registrations / definitions into RegisterDispatchKey
            'dispatch_aten_fallback_definitions': list(concatMap(
                dest.GenExternalAtenFallback(Target.NAMESPACED_DEFINITION,
                                             backend_indices[backend_dispatch_key]),
                grouped_native_functions
            )),
            'dispatch_registrations': list(concatMap(
                dest.GenExternalAtenFallback(Target.REGISTRATION,
                                             backend_indices[backend_dispatch_key]),
                grouped_native_functions
            )),
        })
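
# gen_backend_stubs.py is normally invoked via a small argparse main that
# forwards to run(); a sketch matching this oldest run() signature (the flag
# names here are assumptions, not the verbatim upstream CLI):

import argparse

def main() -> None:
    parser = argparse.ArgumentParser(description='Generate backend stub files')
    parser.add_argument('-s', '--source_yaml',
                        help='path to the external backend YAML (e.g. xla_native_functions.yaml)')
    parser.add_argument('-o', '--output_dir',
                        help='output directory for the generated files')
    parser.add_argument('--dry_run', action='store_true',
                        help='report files that would be written, without writing them')
    options = parser.parse_args()
    run(options.source_yaml, options.output_dir, options.dry_run)

if __name__ == '__main__':
    main()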