def gen_trace_type(out: str, native_functions: List[NativeFunction], template_path: str) -> None:
    """Generate the sharded TraceType.cpp sources for the given operators."""
    # NOTE: see Note [Sharded File] at the top of the VariableType.cpp
    # template regarding sharding of the generated files.
    fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
    # Operators with hand-written tracing logic (MANUAL_TRACER) are excluded
    # from code generation.
    traced_fns = [
        fn for fn in native_functions if cpp.name(fn.func) not in MANUAL_TRACER
    ]
    fm.write_sharded(
        "TraceType.cpp",
        traced_fns,
        key_fn=lambda fn: fn.root_name,
        base_env={
            "generated_comment": f"@generated from {template_path}/TraceType.cpp",
        },
        env_callable=gen_trace_type_func,
        num_shards=5,
        sharded_keys={
            "ops_headers",
            "trace_method_definitions",
            "trace_wrapper_registrations",
        },
    )
def gen_inplace_or_view_type(
    out: str,
    native_yaml_path: str,
    tags_yaml_path: str,
    fns_with_infos: List[NativeFunctionWithDifferentiabilityInfo],
    template_path: str,
) -> None:
    """Generate the sharded ADInplaceOrViewType.cpp sources.

    Only functions for which ``use_derived`` holds are emitted.
    """
    # NOTE: see Note [Sharded File] at the top of the VariableType.cpp
    # template regarding sharding of the generated files.
    num_shards = 2
    fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
    fm.write_sharded(
        "ADInplaceOrViewType.cpp",
        [fn for fn in fns_with_infos if use_derived(fn)],
        key_fn=lambda fn: fn.func.root_name,
        base_env={
            "generated_comment": f"@generated from {template_path}/ADInplaceOrViewType.cpp",
        },
        env_callable=gen_inplace_or_view_type_env,
        # Fix: was hard-coded as the literal 2, leaving the num_shards local
        # unused and risking drift if the shard count is ever changed.
        num_shards=num_shards,
        sharded_keys={
            "ops_headers",
            "inplace_or_view_method_definitions",
            "inplace_or_view_wrapper_registrations",
        },
    )
def gen_autograd_functions_python(
    out: str,
    differentiability_infos: Sequence[DifferentiabilityInfo],
    template_path: str,
) -> None:
    """Generate python_functions.h and the sharded python_functions.cpp."""
    fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
    num_shards = 5

    # Header: forward-declares and calls one initializer per shard.
    fm.write(
        "python_functions.h",
        lambda: {
            "generated_comment": f"@generated from {fm.template_dir}/python_functions.h",
            "shard_forward_declare": [
                f"void initialize_autogenerated_functions_{i}();"
                for i in range(num_shards)
            ],
            "shard_call": [
                f"initialize_autogenerated_functions_{i}();"
                for i in range(num_shards)
            ],
        },
    )

    # Only infos that actually have differentiable args get a Function class.
    infos = [info for info in differentiability_infos if info.args_with_derivatives]

    def make_env(info: DifferentiabilityInfo) -> Dict[str, List[str]]:
        return {
            "py_function_initializers": [
                process_function(info, PY_FUNCTION_DEFINITION)
            ],
            "py_function_props_and_getters": [
                process_function(info, PY_FUNCTION_PROPS_AND_GETTERS)
            ],
        }

    fm.write_sharded(
        "python_functions.cpp",
        infos,
        key_fn=lambda info: info.name,
        base_env={
            "generated_comment": f"@generated from {fm.template_dir}/python_functions.cpp",
        },
        env_callable=make_env,
        num_shards=num_shards,
        sharded_keys={"py_function_initializers", "py_function_props_and_getters"},
    )
def create_python_bindings_sharded(
    fm: FileManager,
    pairs: Sequence[PythonSignatureNativeFunctionPair],
    pred: Callable[[NativeFunction], bool],
    module: Optional[str],
    filename: str,
    *,
    method: bool,
    num_shards: int,
) -> None:
    """Generates Python bindings to ATen functions"""
    grouped = group_filter_overloads(pairs, pred)

    def shard_key(
        kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]
    ) -> str:
        # Shard by the overload-free base operator name.
        return kv[0].base

    def shard_env(
        kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]
    ) -> Dict[str, List[str]]:
        op_name, overloads = kv
        forwards = list(forward_decls(op_name, overloads, method=method))
        impl = method_impl(op_name, module, overloads, method=method)
        defn = method_def(op_name, module, overloads, method=method)
        return {
            "ops_headers": [f"#include <ATen/ops/{op_name.base}.h>"],
            "py_forwards": forwards,
            "py_methods": [impl],
            "py_method_defs": [defn],
        }

    fm.write_sharded(
        filename,
        grouped.items(),
        # "@" is concatenated separately so this generator source is not
        # itself mistaken for a generated file.
        base_env={
            "generated_comment": "@" + f"generated from {fm.template_dir}/(unknown)",
        },
        key_fn=shard_key,
        env_callable=shard_env,
        num_shards=num_shards,
        sharded_keys={"ops_headers", "py_forwards", "py_methods", "py_method_defs"},
    )
def gen_autograd_functions_python(
    out: str,
    differentiability_infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]],
    template_path: str,
) -> None:
    """Generate python_functions.h and the sharded python_functions.cpp."""
    fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
    num_shards = 5

    # Header: one forward declaration and one call per shard initializer.
    fm.write(
        "python_functions.h",
        lambda: {
            "generated_comment": f"@generated from {fm.template_dir}/python_functions.h",
            "shard_forward_declare": [
                f"void initialize_autogenerated_functions_{i}();"
                for i in range(num_shards)
            ],
            "shard_call": [
                f"initialize_autogenerated_functions_{i}();"
                for i in range(num_shards)
            ],
        },
    )

    # get a 1D list of diffinfos, we do not need them to be per FunctionSchema/DispatchKey here
    # infos with the diff dispatchkeys but the same name will still be in the same shard.
    flat_infos = get_infos_with_derivatives_list(differentiability_infos)

    def make_env(info: DifferentiabilityInfo) -> Dict[str, List[str]]:
        return {
            "py_function_initializers": [
                process_function(info, PY_FUNCTION_DEFINITION)
            ],
            "py_function_props_and_getters": [
                process_function(info, PY_FUNCTION_PROPS_AND_GETTERS)
            ],
        }

    fm.write_sharded(
        "python_functions.cpp",
        flat_infos,
        key_fn=lambda info: info.name,
        base_env={
            "generated_comment": f"@generated from {fm.template_dir}/python_functions.cpp",
        },
        env_callable=make_env,
        num_shards=num_shards,
        sharded_keys={"py_function_initializers", "py_function_props_and_getters"},
    )
def gen_unboxing(
    *,
    native_functions: Sequence[NativeFunction],
    cpu_fm: FileManager,
    selector: SelectiveBuilder,
) -> None:
    """Generate unboxing functions and their kernel registrations."""

    def shard_key(fn: Union[NativeFunction, NativeFunctionsGroup]) -> str:
        return fn.root_name

    # a best practice threshold of operators to enable sharding
    sharding_threshold: int = 100
    selected_op_num: int = len(selector.operators)
    sharding_enabled = selected_op_num >= sharding_threshold

    cpu_fm.write_sharded(
        "UnboxingFunctions.cpp",
        native_functions,
        key_fn=shard_key,
        env_callable=lambda fn: {
            "definitions": [ComputeUnboxingFunctions(Target.DEFINITION, selector)(fn)]
        },
        num_shards=5 if sharding_enabled else 1,
        sharded_keys={"definitions"},
    )
    cpu_fm.write(
        "UnboxingFunctions.h",
        lambda: {
            "declarations": list(
                mapMaybe(
                    ComputeUnboxingFunctions(Target.DECLARATION, selector),
                    native_functions,
                )
            ),
        },
    )
    cpu_fm.write_sharded(
        "RegisterCodegenUnboxedKernels.cpp",
        native_functions,
        key_fn=shard_key,
        env_callable=lambda fn: {
            "unboxed_ops": [ComputeCodegenUnboxedKernels(selector)(fn)]
        },
        num_shards=10 if sharding_enabled else 1,
        sharded_keys={"unboxed_ops"},
    )