def main() -> None:
    parser = argparse.ArgumentParser(description='Generate ATen source files')
    parser.add_argument(
        '-s', '--source-path',
        help='path to source directory for ATen',
        default='aten/src/ATen')
    parser.add_argument(
        '-o', '--output-dependencies',
        help='output a list of dependencies into the given file and exit')
    parser.add_argument(
        '-d', '--install_dir',
        help='output directory',
        default='build/aten/src/ATen')
    parser.add_argument(
        '--rocm',
        action='store_true',
        help='reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly')
    # TODO: --op_registration_whitelist will be removed when all call-sites
    # for gen.py are moved over to using the operator YAML file for mobile
    # custom build.
    parser.add_argument(
        '--op_registration_whitelist',
        nargs='*',
        help='filter op registrations by the whitelist (if set); '
             'each item is `namespace`::`operator name` without overload name; '
             'e.g.: aten::empty aten::conv2d ...')
    parser.add_argument(
        '--op_selection_yaml_path',
        help='Provide a path to the operator selection (for custom build) YAML '
             'that contains the information about the set of selected operators '
             'and their categories (training, ...). Each operator is either a '
             'full operator name with overload or just a bare operator name. '
             'The operator names also contain the namespace prefix (e.g. aten::)')
    parser.add_argument(
        '--backend_whitelist',
        nargs='*',
        help='filter dispatch backend by the whitelist (if set), '
             'e.g.: CPU CUDA QuantizedCPU ...')
    parser.add_argument(
        '--static_dispatch_backend',
        help='generate static dispatch code for the specific backend (if set)')
    parser.add_argument(
        '--force_schema_registration',
        action='store_true',
        help='force it to generate schema-only registrations for all ops, '
             'including those that are not listed on --op_registration_whitelist')
    options = parser.parse_args()

    selector = get_custom_build_selector(
        options.op_registration_whitelist,
        options.op_selection_yaml_path,
    )

    native_functions = parse_native_yaml(
        os.path.join(options.source_path, 'native/native_functions.yaml'))

    # Group the functional/inplace/out variants of each operator by their
    # shared schema signature.
    pre_grouped_native_functions: Dict[FunctionSchema, Dict[SchemaKind, NativeFunction]]
    pre_grouped_native_functions = defaultdict(dict)
    for f in native_functions:
        d = pre_grouped_native_functions[f.func.signature()]
        assert f.func.kind() not in d
        d[f.func.kind()] = f

    def flatten_pre_group(
            d: Dict[SchemaKind, NativeFunction]
    ) -> Sequence[Union[NativeFunction, NativeFunctionsGroup]]:
        r = NativeFunctionsGroup.from_dict(d)
        if r is None:
            return list(d.values())
        else:
            return [r]

    # TODO: how come ValuesView isn't a Sequence lol
    grouped_native_functions = list(concatMap(
        flatten_pre_group, list(pre_grouped_native_functions.values())))
    structured_native_functions = [
        g for g in grouped_native_functions
        if isinstance(g, NativeFunctionsGroup)
    ]
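    # Illustrative sketch (hypothetical data, not produced by this file): for a
    # family like add.Tensor / add_.Tensor / add.out, flatten_pre_group receives
    #   {SchemaKind.functional: <add.Tensor>,
    #    SchemaKind.inplace:    <add_.Tensor>,
    #    SchemaKind.out:        <add.out>}
    # and yields a single NativeFunctionsGroup; families that cannot be grouped
    # (e.g. ones with no out= variant) fall back to their individual
    # NativeFunctions.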
    template_dir = os.path.join(options.source_path, "templates")

    # NB: It is mandatory to NOT use os.path.join here, as the install directory
    # will eventually be ingested by cmake, which does not respect Windows style
    # path slashes. If you switch this to use os.path.join, you'll get an error
    # like:
    #
    #   Syntax error in cmake code when parsing string
    #
    #     C:/Jenkins/workspace/pytorch-builds/pytorch-win-ws2016-cuda9-cudnn7-py3-build/build/aten/src/ATen\core/TensorMethods.h
    #
    #   Invalid character escape '\c'.
    core_install_dir = f'{options.install_dir}/core'
    pathlib.Path(core_install_dir).mkdir(parents=True, exist_ok=True)

    def make_file_manager(install_dir: str) -> FileManager:
        return FileManager(install_dir=install_dir,
                           template_dir=template_dir,
                           dry_run=options.output_dependencies)

    core_fm = make_file_manager(core_install_dir)
    cpu_fm = make_file_manager(options.install_dir)
    cuda_fm = make_file_manager(options.install_dir)

    extra_cuda_headers = '''\
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDADevice.h>
#include <ATen/cuda/CUDAContext.h>'''
    if options.rocm:
        extra_cuda_headers = '''\
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/ATenHIPGeneral.h>
#include <ATen/hip/HIPDevice.h>
#include <ATen/hip/HIPContext.h>'''

    dispatch_keys = [
        DispatchKey.CPU,
        DispatchKey.SparseCPU,
        DispatchKey.SparseCsrCPU,
        DispatchKey.MkldnnCPU,
        DispatchKey.CUDA,
        DispatchKey.SparseCUDA,
        DispatchKey.SparseCsrCUDA,
        DispatchKey.QuantizedCPU,
        DispatchKey.QuantizedCUDA,
        DispatchKey.CompositeImplicitAutograd,
        DispatchKey.CompositeExplicitAutograd,
        # Meta is a magic key: it is automatically generated for structured
        # kernels
        DispatchKey.Meta,
    ]
    # Only a limited set of dispatch keys get CPUFunctions.h headers generated
    # for them; this is the set
    functions_keys = {
        DispatchKey.CPU,
        DispatchKey.CUDA,
        DispatchKey.CompositeImplicitAutograd,
        DispatchKey.CompositeExplicitAutograd,
    }
    if options.backend_whitelist:
        dispatch_keys = [
            k for k in dispatch_keys
            if is_generic_dispatch_key(k) or str(k) in options.backend_whitelist
        ]

    static_dispatch_backend: Optional[DispatchKey] = None
    if options.static_dispatch_backend:
        static_dispatch_backend = DispatchKey.parse(options.static_dispatch_backend)

    for dispatch_key in dispatch_keys:
        fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm

        fm.write_with_template(f'Register{dispatch_key}.cpp', 'RegisterDispatchKey.cpp', lambda: {
            'extra_cuda_headers': extra_cuda_headers if is_cuda_dispatch_key(dispatch_key) else '',
            'legacy_th_headers':
                '#include <ATen/LegacyTHFunctionsCPU.h>' if dispatch_key == DispatchKey.CPU else
                '#include <ATen/LegacyTHFunctionsCUDA.h>' if dispatch_key == DispatchKey.CUDA else
                '',
            'DispatchKey': dispatch_key,
            'dispatch_namespace': dispatch_key.lower(),
            'dispatch_namespaced_definitions': list(concatMap(
                dest.RegisterDispatchKey(
                    dispatch_key, Target.NAMESPACED_DEFINITION, selector, rocm=options.rocm),
                grouped_native_functions
            )),
            'dispatch_anonymous_definitions': list(concatMap(
                dest.RegisterDispatchKey(
                    dispatch_key, Target.ANONYMOUS_DEFINITION, selector, rocm=options.rocm),
                grouped_native_functions
            )),
            'dispatch_registrations': list(concatMap(
                dest.RegisterDispatchKey(
                    dispatch_key, Target.REGISTRATION, selector, rocm=options.rocm),
                grouped_native_functions
            )),
        })

        if dispatch_key in functions_keys:
            fm.write_with_template(f'{dispatch_key}Functions.h', 'DispatchKeyFunctions.h', lambda: {
                'dispatch_namespace': dispatch_key.lower(),
                'dispatch_namespaced_declarations': list(concatMap(
                    dest.RegisterDispatchKey(
                        dispatch_key, Target.NAMESPACED_DECLARATION, selector, rocm=options.rocm),
                    grouped_native_functions
                )),
            })

        del fm
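    # For example, with the default dispatch_keys above this loop emits
    # RegisterCPU.cpp, RegisterSparseCPU.cpp, ..., RegisterMeta.cpp, plus a
    # {key}Functions.h header (CPUFunctions.h, CUDAFunctions.h, ...) for each
    # key in functions_keys.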
    # BackendSelect is generated specially
    cpu_fm.write('RegisterBackendSelect.cpp', lambda: {
        'backend_select_method_definitions':
            list(mapMaybe(ComputeBackendSelect(Target.DEFINITION), native_functions)),
        'backend_select_function_registrations':
            list(mapMaybe(ComputeBackendSelect(Target.REGISTRATION), native_functions)),
    })

    cpu_fm.write('MetaFunctions.h', lambda: {
        'declarations': list(mapMaybe(
            compute_meta_function_declaration, structured_native_functions)),
    })

    schema_selector = selector
    if options.force_schema_registration:
        schema_selector = SelectiveBuilder.get_nop_selector()
    cpu_fm.write('RegisterSchema.cpp', lambda: {
        'schema_registrations': list(mapMaybe(RegisterSchema(schema_selector), native_functions)),
    })

    cpu_fm.write('Functions.h', lambda: {
        'function_declarations': list(mapMaybe(ComputeFunction(
            Target.DECLARATION,
            static_dispatch_backend=static_dispatch_backend,
            is_redispatching_fn=False), native_functions)),
    })
    cpu_fm.write('Functions.cpp', lambda: {
        'static_dispatch_extra_headers': static_dispatch_extra_headers(static_dispatch_backend),
        'function_definitions': list(mapMaybe(ComputeFunction(
            Target.DEFINITION,
            static_dispatch_backend=static_dispatch_backend,
            is_redispatching_fn=False), native_functions)),
    })
    cpu_fm.write('RedispatchFunctions.h', lambda: {
        'function_redispatch_declarations': list(mapMaybe(ComputeFunction(
            Target.DECLARATION,
            static_dispatch_backend=static_dispatch_backend,
            is_redispatching_fn=True), native_functions)),
    })
    cpu_fm.write('RedispatchFunctions.cpp', lambda: {
        'static_dispatch_extra_headers': static_dispatch_extra_headers(static_dispatch_backend),
        'function_redispatch_definitions': list(mapMaybe(ComputeFunction(
            Target.DEFINITION,
            static_dispatch_backend=static_dispatch_backend,
            is_redispatching_fn=True), native_functions)),
    })
    core_fm.write('TensorBody.h', lambda: {
        'tensor_method_declarations': list(mapMaybe(ComputeTensorMethod(
            Target.DECLARATION,
            static_dispatch_backend=static_dispatch_backend), native_functions)),
    })
    core_fm.write('TensorMethods.cpp', lambda: {
        'static_dispatch_extra_headers': static_dispatch_extra_headers(static_dispatch_backend),
        'tensor_method_definitions': list(mapMaybe(ComputeTensorMethod(
            Target.DEFINITION,
            static_dispatch_backend=static_dispatch_backend), native_functions)),
    })
    core_fm.write('ATenOpList.cpp', lambda: {
        'aten_ops': list(mapMaybe(compute_aten_op, native_functions)),
    })
    cpu_fm.write('NativeFunctions.h', lambda: {
        'native_function_declarations': list(concatMap(
            dest.compute_native_function_declaration, grouped_native_functions)),
    })

    cpu_fm.write('Declarations.yaml', lambda: format_yaml(
        [compute_declaration_yaml(f) for f in native_functions]))
    cpu_fm.write('RegistrationDeclarations.h', lambda: {
        'registration_declarations':
            [compute_registration_declarations(f) for f in native_functions],
    })

    if options.output_dependencies:
        cpu_fm.write_outputs(options.output_dependencies)
        core_fm.write_outputs(f"{options.output_dependencies}-core")
        cuda_fm.write_outputs(f"{options.output_dependencies}-cuda")
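
# Example invocation (a sketch; the module path is hypothetical and the flag
# values are the defaults/examples documented in the parser above):
#
#   python -m tools.codegen.gen \
#       --source-path aten/src/ATen \
#       --install_dir build/aten/src/ATen \
#       --backend_whitelist CPU CUDA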

def parse_backend_yaml(
        backend_yaml_path: str,
        grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
        backend_indices: Dict[DispatchKey, BackendIndex]
) -> ParsedExternalYaml:
    native_functions_map: Dict[OperatorName, NativeFunction] = {
        f.func.name: f
        for f in concatMap(
            lambda f: [f] if isinstance(f, NativeFunction) else list(f.functions()),
            grouped_native_functions)
    }

    with open(backend_yaml_path, 'r') as f:
        yaml_values = yaml.load(f, Loader=YamlLoader)
    assert isinstance(yaml_values, dict)

    valid_keys = ['backend', 'cpp_namespace', 'extra_headers', 'supported', 'autograd']

    backend = yaml_values.pop('backend', None)
    assert backend is not None, 'You must provide a value for "backend"'

    cpp_namespace = yaml_values.pop('cpp_namespace', None)
    assert cpp_namespace is not None, 'You must provide a value for "cpp_namespace"'

    supported = yaml_values.pop('supported', [])
    if supported is None:
        supported = []  # Allow an empty list of supported ops
    assert isinstance(supported, list), \
        f'expected "supported" to be a list, but got: {supported} (of type {type(supported)})'

    supported_autograd = yaml_values.pop('autograd', [])
    assert isinstance(supported_autograd, list), \
        f'expected "autograd" to be a list, but got: {supported_autograd}'

    assert len(yaml_values.keys()) == 0, \
        f'{backend_yaml_path} contains unexpected keys: {", ".join(yaml_values.keys())}. ' \
        f'Only the following keys are supported: {", ".join(valid_keys)}'
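    # Illustrative backend YAML (a hypothetical example; the field names are
    # the ones validated against valid_keys above):
    #
    #   backend: XLA
    #   cpp_namespace: torch_xla
    #   supported:
    #     - abs
    #     - add.Tensor
    #   autograd:
    #     - max_pool2d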
    def create_backend_index(backend_ops: List[str],
                             dispatch_key: DispatchKey) -> BackendIndex:
        metadata: Dict[OperatorName, BackendMetadata] = {}
        for op in backend_ops:
            op_name = OperatorName.parse(op)
            assert op_name in native_functions_map, f"Found an invalid operator name: {op_name}"
            # See Note [External Backends Follow Dispatcher API]
            kernel_name = dispatcher.name(native_functions_map[op_name].func)
            # TODO: allow structured external backends later.
            m = BackendMetadata(kernel=kernel_name, structured=False)
            metadata[op_name] = m
        # TODO: currently hardcoding the fact that XLA implements out/inplace
        # in terms of functional ops; this should eventually be toggleable
        # per-backend.
        return BackendIndex(
            dispatch_key=dispatch_key,
            use_out_as_primary=False,
            external=True,
            index=metadata)

    backend_key: Optional[DispatchKey] = None
    if len(supported) > 0:
        with context(lambda: f'The provided value for "backend" must be a valid DispatchKey, but got {backend}.'):
            backend_key = DispatchKey.parse(backend)

        backend_idx = create_backend_index(supported, backend_key)
        assert backend_key not in backend_indices
        backend_indices[backend_key] = backend_idx

    autograd_key: Optional[DispatchKey] = None
    if len(supported_autograd) > 0:
        with context(lambda: f'The "autograd" key was specified, which indicates that you would like to override '
                             f'the behavior of autograd for some operators on your backend. However '
                             f'"Autograd{backend}" is not a valid DispatchKey.'):
            autograd_key = DispatchKey.parse(f'Autograd{backend}')

        autograd_idx = create_backend_index(supported_autograd, autograd_key)
        assert autograd_key not in backend_indices
        backend_indices[autograd_key] = autograd_idx

    for g in grouped_native_functions:
        if isinstance(g, NativeFunction):
            forward_kernels = [] if backend_key is None else \
                [m for m in [backend_indices[backend_key].get_kernel(g)] if m is not None]
            backward_kernels = [] if autograd_key is None else \
                [m for m in [backend_indices[autograd_key].get_kernel(g)] if m is not None]
        else:
            forward_kernels = [] if backend_key is None else [
                m for m in [backend_indices[backend_key].get_kernel(f) for f in g.functions()]
                if m is not None
            ]
            backward_kernels = [] if autograd_key is None else [
                m for m in [backend_indices[autograd_key].get_kernel(f) for f in g.functions()]
                if m is not None
            ]

        forward_kernels = [f for f in forward_kernels if f is not None]
        backward_kernels = [f for f in backward_kernels if f is not None]
        assert len(forward_kernels) == 0 or len(backward_kernels) == 0, \
            f'Currently, all variants of an op must either be registered to a backend key, or to a backend\'s ' \
            f'autograd key. They cannot be mixed and matched. If this is something you need, feel free to create an issue! ' \
            f'{forward_kernels[0].kernel} is listed under "supported", but {backward_kernels[0].kernel} is listed under "autograd".'

    return ParsedExternalYaml(backend_key, autograd_key, cpp_namespace, backend_indices)
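
# Minimal usage sketch (hypothetical path and setup; in practice the caller
# may pre-populate backend_indices before this call):
#
#   backend_indices: Dict[DispatchKey, BackendIndex] = {}
#   parsed = parse_backend_yaml('xla_native_functions.yaml',
#                               grouped_native_functions, backend_indices)
#   # parsed.backend_key / parsed.autograd_key are DispatchKeys (or None),
#   # and backend_indices now holds a BackendIndex for each of them.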