def parse_native_yaml(path: str) -> ParsedYaml:
    global _GLOBAL_PARSE_NATIVE_YAML_CACHE
    if path not in _GLOBAL_PARSE_NATIVE_YAML_CACHE:
        with open(path, 'r') as f:
            es = yaml.load(f, Loader=LineLoader)
        assert isinstance(es, list)
        rs: List[NativeFunction] = []
        bs: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]] = defaultdict(dict)
        for e in es:
            assert isinstance(e.get('__line__'), int), e
            loc = Location(path, e['__line__'])
            funcs = e.get('func')
            with context(lambda: f'in {loc}:\n  {funcs}'):
                func, m = NativeFunction.from_yaml(e, loc)
                rs.append(func)
                BackendIndex.grow_index(bs, m)
        error_check_native_functions(rs)
        # Default dict is to prevent the codegen from barfing when we have a dispatch key that has no kernels yet.
        indices: Dict[DispatchKey, BackendIndex] = defaultdict(lambda: BackendIndex(
            dispatch_key=DispatchKey.Undefined,
            use_out_as_primary=True,
            external=False,
            index={}))
        for k, v in bs.items():
            # All structured in-tree operators are implemented in terms of their out operator.
            indices[k] = BackendIndex(
                dispatch_key=k,
                use_out_as_primary=True,
                external=False,
                index=v)
        _GLOBAL_PARSE_NATIVE_YAML_CACHE[path] = ParsedYaml(rs, indices)

    return _GLOBAL_PARSE_NATIVE_YAML_CACHE[path]
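
# A minimal usage sketch of the cached parser above. The example path and the
# ParsedYaml field names (`native_functions`, `backend_indices`) are assumptions
# for illustration and may differ from the actual definitions in this repo.
#
#     parsed = parse_native_yaml('aten/src/ATen/native/native_functions.yaml')
#     native_functions = parsed.native_functions    # List[NativeFunction]
#     backend_indices = parsed.backend_indices      # Dict[DispatchKey, BackendIndex]
#
#     # A second call with the same path hits _GLOBAL_PARSE_NATIVE_YAML_CACHE and
#     # returns the same ParsedYaml object instead of re-reading the file.
#     assert parse_native_yaml('aten/src/ATen/native/native_functions.yaml') is parsed
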
def native_function_manager(g: Union[NativeFunctionsGroup, NativeFunction]) -> Iterator[None]:
    if isinstance(g, NativeFunctionsGroup):
        # By default, we associate all errors with structured native functions
        # with the out variant.  In some cases, it might be better to have
        # a more specific place to hang things; if so, use
        # native_function_manager again on the inside
        f = g.out
    else:
        f = g
    with context(f'in {f.loc}:\n  {f.func}'):
        with local.parametrize():
            yield
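
# A minimal usage sketch of the manager above, assuming it is wrapped with
# contextlib.contextmanager (suggested by the Iterator[None] return type and the
# bare yield). `native_functions` is a hypothetical List[NativeFunction] here.
#
#     for f in native_functions:
#         with native_function_manager(f):
#             # Any error raised in this block is reported with the location and
#             # schema of `f`, courtesy of the surrounding context() call.
#             ...
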
def parse_native_yaml(path: str) -> List[NativeFunction]:
    with open(path, 'r') as f:
        es = yaml.load(f, Loader=LineLoader)
    assert isinstance(es, list)
    rs: List[NativeFunction] = []
    for e in es:
        assert isinstance(e.get('__line__'), int), e
        loc = Location(path, e['__line__'])
        funcs = e.get('func')
        with context(f'in {loc}:\n  {funcs}'):
            rs.append(NativeFunction.from_yaml(e, loc))
    error_check_native_functions(rs)
    return rs
def native_function_manager(
    g: Union[NativeFunctionsGroup, NativeFunction, ExternalBackendFunction, ExternalBackendFunctionsGroup]
) -> Iterator[None]:
    if isinstance(g, ExternalBackendFunctionsGroup):
        f = g.primary.native_function
    elif isinstance(g, ExternalBackendFunction):
        f = g.native_function
    elif isinstance(g, NativeFunctionsGroup):
        # By default, we associate all errors with structured native functions
        # with the out variant.  In some cases, it might be better to have
        # a more specific place to hang things; if so, use
        # native_function_manager again on the inside
        f = g.out
    else:
        f = g
    with context(f'in native_functions.yaml line {f.loc}:\n  {f.func}'):
        with local.parametrize(use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors):
            yield
def parse_backend_yaml(
        backend_yaml_path: str,
        grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
        backend_indices: Dict[DispatchKey, BackendIndex]
) -> ParsedExternalYaml:
    native_functions_map: Dict[OperatorName, NativeFunction] = {
        f.func.name: f
        for f in concatMap(
            lambda f: [f] if isinstance(f, NativeFunction) else list(f.functions()),
            grouped_native_functions)
    }

    with open(backend_yaml_path, 'r') as f:
        yaml_values = yaml.load(f, Loader=YamlLoader)
    assert isinstance(yaml_values, dict)

    valid_keys = ['backend', 'cpp_namespace', 'extra_headers', 'supported', 'autograd']

    backend = yaml_values.pop('backend', None)
    assert backend is not None, 'You must provide a value for "backend"'

    cpp_namespace = yaml_values.pop('cpp_namespace', None)
    assert cpp_namespace is not None, 'You must provide a value for "cpp_namespace"'

    supported = yaml_values.pop('supported', [])
    if supported is None:
        supported = []  # Allow an empty list of supported ops
    assert isinstance(supported, list), \
        f'expected "supported" to be a list, but got: {supported} (of type {type(supported)})'

    supported_autograd = yaml_values.pop('autograd', [])
    assert isinstance(supported_autograd, list), \
        f'expected "autograd" to be a list, but got: {supported_autograd}'

    assert len(yaml_values.keys()) == 0, \
        f'{backend_yaml_path} contains unexpected keys: {", ".join(yaml_values.keys())}. \
Only the following keys are supported: {", ".join(valid_keys)}'

    def create_backend_index(backend_ops: List[str], dispatch_key: DispatchKey) -> BackendIndex:
        metadata: Dict[OperatorName, BackendMetadata] = {}
        for op in backend_ops:
            op_name = OperatorName.parse(op)
            assert op_name in native_functions_map, f"Found an invalid operator name: {op_name}"
            # See Note [External Backends Follow Dispatcher API]
            kernel_name = dispatcher.name(native_functions_map[op_name].func)
            # TODO: allow structured external backends later.
            m = BackendMetadata(kernel=kernel_name, structured=False)
            metadata[op_name] = m
        # TODO: currently hardcoding the fact that XLA implements out/inplace in terms of functional ops,
        # this should eventually be toggleable per-backend.
        return BackendIndex(
            dispatch_key=dispatch_key,
            use_out_as_primary=False,
            external=True,
            index=metadata)

    backend_key: Optional[DispatchKey] = None
    if len(supported) > 0:
        with context(lambda: f'The provided value for "backend" must be a valid DispatchKey, but got {backend}.'):
            backend_key = DispatchKey.parse(backend)

        backend_idx = create_backend_index(supported, backend_key)
        assert backend_key not in backend_indices
        backend_indices[backend_key] = backend_idx

    autograd_key: Optional[DispatchKey] = None
    if len(supported_autograd) > 0:
        with context(lambda: f'The "autograd" key was specified, which indicates that you would like to override \
the behavior of autograd for some operators on your backend. However "Autograd{backend}" is not a valid DispatchKey.'):
            autograd_key = DispatchKey.parse(f'Autograd{backend}')

        autograd_idx = create_backend_index(supported_autograd, autograd_key)
        assert autograd_key not in backend_indices
        backend_indices[autograd_key] = autograd_idx

    for g in grouped_native_functions:
        if isinstance(g, NativeFunction):
            forward_kernels = [] if backend_key is None else \
                [m for m in [backend_indices[backend_key].get_kernel(g)] if m is not None]
            backward_kernels = [] if autograd_key is None else \
                [m for m in [backend_indices[autograd_key].get_kernel(g)] if m is not None]
        else:
            forward_kernels = [] if backend_key is None else [
                m for m in [backend_indices[backend_key].get_kernel(f) for f in g.functions()]
                if m is not None
            ]
            backward_kernels = [] if autograd_key is None else [
                m for m in [backend_indices[autograd_key].get_kernel(f) for f in g.functions()]
                if m is not None
            ]

        forward_kernels = [f for f in forward_kernels if f is not None]
        backward_kernels = [f for f in backward_kernels if f is not None]
        assert len(forward_kernels) == 0 or len(backward_kernels) == 0, \
            f'Currently, all variants of an op must either be registered to a backend key, or to a backend\'s \
autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! \
{forward_kernels[0].kernel} is listed under "supported", but {backward_kernels[0].kernel} is listed under "autograd".'

    return ParsedExternalYaml(backend_key, autograd_key, cpp_namespace, backend_indices)
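
# A sketch of the kind of YAML file parse_backend_yaml consumes, based only on the
# keys validated above ('backend', 'cpp_namespace', 'supported', 'autograd'). The
# backend name, namespace, and operator names below are invented for illustration.
#
#   backend: XLA
#   cpp_namespace: torch_xla
#   supported:
#     - abs
#     - add.Tensor
#   autograd:
#     - max_pool2d
#
# Hypothetical call site; grouped_native_functions and backend_indices would come
# from parse_native_yaml and the grouping step. The ParsedExternalYaml field names
# are assumed from the constructor arguments in the return statement above.
#
#     parsed_backend = parse_backend_yaml(
#         'xla_native_functions.yaml', grouped_native_functions, backend_indices)
#     print(parsed_backend.backend_key, parsed_backend.autograd_key, parsed_backend.cpp_namespace)
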