def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> CType:
    if str(t) == 'Tensor?':
        tensor_type: OptionalCType = OptionalCType(BaseCType('Tensor', binds))
        if mutable:
            return MutRefCType(tensor_type)
        else:
            return ConstRefCType(tensor_type)
    elif str(t) == 'Tensor?[]':
        return ConstRefCType(BaseCType("c10::List<c10::optional<Tensor>>", binds))
    elif str(t) == 'Scalar':
        return ConstRefCType(BaseCType('Scalar', binds))
    elif str(t) == 'Scalar?':
        return ConstRefCType(OptionalCType(BaseCType('Scalar', binds)))
    return cpp.argumenttype_type(t, mutable=mutable, binds=binds)
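# Illustrative example (not from the original file): with the special cases above,
# the native-API lowering of these schema types is roughly
#
#   'Tensor?'   -> const c10::optional<Tensor>&   (c10::optional<Tensor>& when mutable)
#   'Tensor?[]' -> const c10::List<c10::optional<Tensor>>&
#   'Scalar'    -> const Scalar&
#   'Scalar?'   -> const c10::optional<Scalar>&
#
# and every other type falls through to cpp.argumenttype_type.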
def returntype_type(t: Type, *, mutable: bool) -> CType:
    # placeholder is ignored
    r = valuetype_type(t, binds="__placeholder__")
    if r is not None:
        return r.type

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            if mutable:
                if local.use_const_ref_for_mutable_tensors():
                    return ConstRefCType(BaseCType(tensorT))
                else:
                    return MutRefCType(BaseCType(tensorT))
            else:
                # Note [Tensor Copy Returns]
                # Currently, we use "Argument.is_write" to determine
                # whether or not Tensor return types should be copies or references.
                # If that ever changes, take a look at other locations of this note!
                return BaseCType(tensorT)
        elif t.name == BaseTy.Scalar:
            return BaseCType(scalarT)
    elif isinstance(t, ListType):
        elem = returntype_type(t.elem, mutable=mutable)
        assert t.size is None, f"fixed size list returns not supported: {t}"
        return VectorCType(elem)

    raise AssertionError(f"unrecognized return type {t}")
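# Illustrative example (not from the original file), assuming the cpp_type()
# rendering of the CType helpers above:
#
#   returntype_type(Type.parse('Tensor'),   mutable=False).cpp_type()  # roughly 'at::Tensor' (by value, see Note [Tensor Copy Returns])
#   returntype_type(Type.parse('Tensor'),   mutable=True).cpp_type()   # 'at::Tensor &' (or 'const at::Tensor &' under use_const_ref_for_mutable_tensors)
#   returntype_type(Type.parse('Tensor[]'), mutable=False).cpp_type()  # roughly '::std::vector<at::Tensor>'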
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
    if str(t) == 'Tensor?':
        tensor_type: OptionalCType = OptionalCType(BaseCType(tensorT))
        if mutable and not local.use_const_ref_for_mutable_tensors():
            return NamedCType(binds, MutRefCType(tensor_type))
        else:
            return NamedCType(binds, ConstRefCType(tensor_type))
    elif str(t) == 'Tensor?[]':
        return NamedCType(binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT)))))
    elif str(t) == 'Scalar':
        return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
    elif str(t) == 'Scalar?':
        return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
    return cpp.argumenttype_type(t, mutable=mutable, binds=binds)
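# Illustrative example (not from the original file): compared with the older
# variant above, the only behavioral difference is the
# local.use_const_ref_for_mutable_tensors() check, so a mutable 'Tensor?' argument
# bound as 'out' lowers roughly as
#
#   flag off: c10::optional<Tensor> & out
#   flag on : const c10::optional<Tensor> & out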
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
    # If it's a value type, do the value type translation
    r = valuetype_type(t, binds=binds)
    if r is not None:
        return r

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            if mutable and not local.use_const_ref_for_mutable_tensors():
                return NamedCType(binds, MutRefCType(BaseCType(tensorT)))
            else:
                return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
        elif t.name == BaseTy.Scalar:
            return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
        else:
            raise AssertionError(f"base type should have been value type {t}")
    elif isinstance(t, OptionalType):
        if str(t.elem) == 'Tensor':
            if mutable and not local.use_const_ref_for_mutable_tensors():
                return NamedCType(binds, MutRefCType(BaseCType(tensorT)))  # TODO: fix this discrepancy
            else:
                return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(tensorT))))
        elif str(t.elem) == 'Scalar':
            return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        # TODO: remove these special cases, ArrayRef fallthrough works fine
        if str(t.elem) == 'int':
            return NamedCType(binds, BaseCType(intArrayRefT))
        elif str(t.elem) == 'Tensor':
            return NamedCType(binds, BaseCType(tensorListT))
        elif str(t.elem) == 'Scalar':
            return NamedCType(binds, ArrayRefCType(BaseCType(scalarT)))
        elif str(t.elem) == 'Dimname':
            return NamedCType(binds, BaseCType(dimnameListT))
        elif str(t.elem) == 'Tensor?':
            return NamedCType(binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT)))))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, ArrayRefCType(elem.type))
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
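# Illustrative example (not from the original file): for the public C++ API, the
# list special cases above lower roughly as
#
#   'int[]'     -> at::IntArrayRef
#   'Tensor[]'  -> at::TensorList
#   'Scalar[]'  -> at::ArrayRef<at::Scalar>
#   'Dimname[]' -> at::DimnameList
#   'Tensor?[]' -> const c10::List<c10::optional<Tensor>>&
#
# and any other element type falls through to ArrayRef of the element's C++ type.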
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> CType:
    # If it's a value type, do the value type translation
    r = valuetype_type(t, binds=binds)
    if r is not None:
        return r

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            if mutable:
                return MutRefCType(BaseCType('Tensor', binds))
            else:
                return ConstRefCType(BaseCType('Tensor', binds))
        elif t.name == BaseTy.Scalar:
            return ConstRefCType(BaseCType('Scalar', binds))
        else:
            raise AssertionError(f"base type should have been value type {t}")
    elif isinstance(t, OptionalType):
        if str(t.elem) == 'Tensor':
            if mutable:
                return MutRefCType(BaseCType('Tensor', binds))  # TODO: fix this discrepancy
            else:
                return ConstRefCType(OptionalCType(BaseCType('Tensor', binds)))
        elif str(t.elem) == 'Scalar':
            return ConstRefCType(OptionalCType(BaseCType('Scalar', binds)))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return OptionalCType(elem)
    elif isinstance(t, ListType):
        # TODO: remove these special cases, ArrayRef fallthrough works fine
        # NB: CType throws away ArrayRef structure because it is not currently
        # relevant in translation.  When it becomes relevant, need to add back
        if str(t.elem) == 'int':
            return BaseCType("IntArrayRef", binds)
        elif str(t.elem) == 'Tensor':
            return BaseCType("TensorList", binds)
        elif str(t.elem) == 'Scalar':
            return BaseCType("ArrayRef<Scalar>", binds)
        elif str(t.elem) == 'Dimname':
            return BaseCType("DimnameList", binds)
        elif str(t.elem) == 'Tensor?':
            return ConstRefCType(BaseCType("c10::List<c10::optional<Tensor>>", binds))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        # TODO: explicitly qualify namespace here
        return BaseCType(f"ArrayRef<{elem.cpp_type()}>", binds)
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
    # If it's a value type, do the value type translation
    r = cpp.valuetype_type(t, binds=binds)
    if r is not None:
        return r

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
        elif t.name == BaseTy.Scalar:
            return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
        else:
            raise AssertionError(f"base type should have been value type {t}")
    elif isinstance(t, OptionalType):
        if t.elem == BaseType(BaseTy.Tensor):
            raise AssertionError(
                "optional tensor not supported by structured yet; to implement this "
                "add OptionalTensor c.f. https://github.com/pytorch/pytorch/issues/51456")
        elif t.elem == BaseType(BaseTy.Scalar):
            raise AssertionError(
                "optional scalar not supported by structured yet")
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        if t.elem == BaseType(BaseTy.Tensor):
            raise AssertionError(
                "list of tensor not supported by structured yet; to implement this "
                "resolve torch::List issue, see "
                "https://fb.workplace.com/groups/894363187646754/permalink/1149276442155426")
        # TODO: delete these special cases; see tools.codegen.api.cpp--these
        # must be changed in tandem, but there are problems; see
        # https://github.com/pytorch/pytorch/pull/51485
        elif str(t.elem) == 'int':
            return NamedCType(binds, BaseCType(intArrayRefT))
        elif str(t.elem) == 'Dimname':
            return NamedCType(binds, BaseCType(dimnameListT))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, ArrayRefCType(elem.type))
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
def dispatchstub_type(t: Type, *, binds: ArgName) -> Optional[NamedCType]:
    r = cpp.valuetype_type(t, binds=binds)
    if r is not None:
        return r

    if t == BaseType(BaseTy.Scalar):
        return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
    elif t == BaseType(BaseTy.Tensor):
        return None
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
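# Illustrative example (not from the original file): a 'Tensor' argument produces
# no binding at all for the dispatch stub (the stub receives its tensors
# separately, e.g. via TensorIterator -- an assumption about the surrounding
# codegen), while a 'Scalar' argument is still threaded through as
# 'const at::Scalar &'.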
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
    # If it's a value type, do the value type translation
    r = cpp.valuetype_type(t, binds=binds)
    if r is not None:
        return r

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
        elif t.name == BaseTy.Scalar:
            return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
        else:
            raise AssertionError(f"base type should have been value type {t}")
    elif isinstance(t, OptionalType):
        if t.elem == BaseType(BaseTy.Tensor):
            return NamedCType(binds, BaseCType(optionalTensorRefT))
        elif t.elem == BaseType(BaseTy.Scalar):
            return NamedCType(binds, BaseCType(optionalScalarRefT))
        elif isinstance(t.elem, ListType) and str(t.elem.elem) == 'int':
            return NamedCType(binds, BaseCType(optionalIntArrayRefT))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        if t.elem == BaseType(BaseTy.Tensor):
            return NamedCType(binds, BaseCType(iTensorListRefT))
        # TODO: delete these special cases; see tools.codegen.api.cpp--these
        # must be changed in tandem, but there are problems; see
        # https://github.com/pytorch/pytorch/pull/51485
        elif str(t.elem) == 'int':
            return NamedCType(binds, BaseCType(intArrayRefT))
        elif str(t.elem) == 'Dimname':
            return NamedCType(binds, BaseCType(dimnameListT))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, ArrayRefCType(elem.type))
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
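# Illustrative example (not from the original file): for structured kernels the
# optional/list special cases above lower roughly as
#
#   'Tensor?'  -> at::OptionalTensorRef
#   'Scalar?'  -> at::OptionalScalarRef
#   'int[]?'   -> at::OptionalIntArrayRef
#   'Tensor[]' -> at::ITensorListRef
#
# i.e. borrowing reference wrappers rather than c10::optional/materialized copies.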
def gen_one(self, f: NativeFunction) -> Optional[str]:
    assert not f.manual_kernel_registration

    if self.target is Target.REGISTRATION and not self.selector.is_native_function_selected(f):
        return None

    # TODO: Now, there is something interesting going on here.  In the code below,
    # we generate CompositeExplicitAutograd implementations of functional and inplace
    # based on the out implementation.  But in fact, out is definable by
    # functional too (just not very efficiently), and this is honestly the
    # MORE likely situation for a backend implementor.  How do we pick?
    # Well, taking a page from Haskell type classes and default methods,
    # we could conceivably register a circular definition (out in terms
    # of functional, and functional in terms of out) and just require
    # someone to implement one or the other.  We'd have to do a little bit
    # of work to not register one of these "weak" definitions unless there
    # is a strong definition somewhere in the DAG!  So it's not implemented yet.
    if self.backend_index.dispatch_key == DispatchKey.CompositeExplicitAutograd and f.func.kind() is SchemaKind.out:
        # Never generate a default implementation for out, that's what you
        # have to define as a backend implementor
        return None

    # Note [Direct dispatch bindings]
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Signature of the non-dispatched function we'll expose in a header
    # (e.g., at::cpu::add).  We don't generate methods (TODO: do this
    # when CPUTensor class is a thing); nor do we generate fallback
    # bindings for manual_cpp_binding functions.
    cpp_sig_group = CppSignatureGroup.from_native_function(f, method=False, fallback_binding=False)

    # Signature of the wrapper function we'll register to the dispatcher
    sig = NativeSignature(f.func, prefix="wrapper_")

    if self.target is Target.NAMESPACED_DECLARATION:
        result = f"TORCH_API {cpp_sig_group.signature.decl()};\n"
        if cpp_sig_group.faithful_signature is not None:
            result += f"TORCH_API {cpp_sig_group.faithful_signature.decl()};\n"
        return result

    elif self.target is Target.NAMESPACED_DEFINITION:
        def generate_defn(cpp_sig: CppSignature) -> str:
            return f"""
{cpp_sig.defn()} {{
return {sig.name()}({', '.join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
}}
"""
        result = generate_defn(cpp_sig_group.signature)
        if cpp_sig_group.faithful_signature is not None:
            result += generate_defn(cpp_sig_group.faithful_signature)
        return result

    elif self.target is Target.ANONYMOUS_DEFINITION:
        k = f.func.kind()

        # Construct the body of the wrapper function with signature sig
        sig_body = []
        # We'll use context to keep track of any variables we've brought
        # into scope while generating code
        context: List[Union[Binding, Expr]] = list(sig.arguments())

        # Initialize the class corresponding to this structured
        # operator; feeding it the output argument(s) if it is known
        if self.backend_index.dispatch_key is DispatchKey.Meta:
            class_name = f"structured_{meta.name(self.g)}_meta_{k.name}"
            parent_class = f"at::meta::structured_{meta.name(self.g)}"
        elif self.backend_index.dispatch_key is DispatchKey.CompositeExplicitAutograd:
            # TODO: dedup this branch
            class_name = f"structured_{meta.name(self.g)}_default_backend_{k.name}"
            parent_class = f"at::meta::structured_{meta.name(self.g)}"
        else:
            metadata = self.backend_index.get_kernel(self.g)
            assert metadata is not None
            class_name = f"structured_{metadata.kernel}_{k.name}"
            parent_class = f"{self.cpp_namespace}::structured_{metadata.kernel}"

        if is_cuda_dispatch_key(self.backend_index.dispatch_key):
            device_check_args = itertools.chain(
                f.func.arguments.out,
                f.func.arguments.flat_positional)
            sig_body.append(
                RegisterDispatchKey.gen_device_check(
                    f.device_check, list(device_check_args), sig.name()))

        if k is SchemaKind.functional:
            sig_body.append(f"{class_name} op;")
        elif k is SchemaKind.inplace:
            sig_body.append(f"{class_name} op(self);")
        elif k is SchemaKind.out:
            out_args_str = ', '.join(a.name for a in f.func.arguments.out)
            sig_body.append(f"{class_name} op({out_args_str});")

        # Translate the input native arguments into structured
        # arguments for the meta call
        meta_exprs = ', '.join(
            e.expr for e in translate(context, structured.meta_arguments(self.g), method=False))

        if self.g.out.precomputed:
            # If this function group has precomputed elements, the meta function
            # returns a struct containing them which must be saved so that it
            # can be unpacked when generating code to call the impl.
            sig_body.append(f"auto precompute = op.meta({meta_exprs});")

            # Put all of the contents of the precompute struct into the context
            # so that translate will be able to return the correct args for the
            # call to the impl.
            for precomputed_elems in self.g.out.precomputed.replace.values():
                for arg in precomputed_elems:
                    context.append(Expr(
                        expr=f"precompute.{arg.name}",
                        type=structured.argument_type(arg, binds=arg.name),
                    ))

            # Add a use of the precompute struct so FB internal compilers don't
            # complain that there is an unused variable.
            sig_body.append("(void)precompute;")
        else:
            sig_body.append(f"op.meta({meta_exprs});")

        # After running meta, op.outputs_ is guaranteed to be valid;
        # add it to the context
        out_args = structured.out_arguments(self.g)
        maybe_star = '*' if k is SchemaKind.functional else ''
        for i, out_arg in enumerate(out_args):
            assert ConstRefCType(BaseCType(tensorT)) == out_arg.nctype.type
            context.append(Expr(
                expr=f"{maybe_star}op.outputs_[{i}]",
                # TODO: Stop hardcoding that the output type is a Tensor.  Note
                # that for the codegen here this is fine because outputs_ is
                # hardcoded to be tensor already
                type=NamedCType(out_arg.nctype.name, MutRefCType(BaseCType(tensorT)))))

        # With the expanded context, do the impl call (if not a meta
        # function)
        if self.backend_index.dispatch_key == DispatchKey.CompositeExplicitAutograd:
            # TODO: https://github.com/pytorch/pytorch/issues/53023
            out_sig_group = CppSignatureGroup.from_native_function(
                self.g.out, method=False, fallback_binding=f.manual_cpp_binding)
            out_sig = out_sig_group.most_faithful_signature()
            api_name = out_sig.name()
            out_exprs = ', '.join(
                e.expr for e in translate(context, out_sig.arguments(), method=False))
            # TODO: I think this means structured won't work with method
            # only functions (but maybe you're saved by faithful? iunno.)
            # NB: Originally I wrote this as an at::redispatch call, but
            # I got in trouble because that meant I needed a DispatchKeySet
            # in the wrapper function, which meant I needed a DispatchKeySet
            # in the DispatchKeyFunctions declarations, but the defined API
            # there does NOT permit a dispatch key set.  I think you can
            # probably unwind this by calling some function to do the TLS
            # fetch and get the DispatchKeySet when you don't have it, but
            # I didn't do it for this version
            sig_body.append(f"at::{api_name}({out_exprs});")
        elif self.backend_index.dispatch_key != DispatchKey.Meta:
            impl_exprs = ', '.join(
                e.expr for e in translate(context, structured.impl_arguments(self.g), method=False))
            sig_body.append(f"op.impl({impl_exprs});")

        # Destructively return the final tensors
        # TODO: Do this in translate instead
        if k is SchemaKind.functional:
            if len(f.func.returns) == 1:
                ret_expr = "std::move(op.outputs_[0]).take()"  # small optimization
            else:
                moved = ', '.join(f"std::move(op.outputs_[{i}]).take()" for i in range(len(f.func.returns)))
                ret_expr = f"std::make_tuple({moved})"
        elif k is SchemaKind.inplace:
            ret_expr = "self"
        elif k is SchemaKind.out:
            if len(f.func.returns) == 1:
                ret_expr = f.func.arguments.out[0].name
            else:
                refs = ', '.join(a.name for a in f.func.arguments.out)
                ret_expr = f"std::forward_as_tuple({refs})"
        sig_body.append(f"return {ret_expr};")

        sig_body_str = "\n".join(sig_body)

        # For an overview of what this template code looks like, see
        # https://github.com/pytorch/rfcs/pull/9
        return f"""\
{self.gen_class(
    f, k, class_name=class_name, parent_class=parent_class,
    generate_super=self.g.out.structured_inherits is not None
)}

{sig.defn()} {{
{sig_body_str}
}}
"""

    elif self.target is Target.REGISTRATION:
        return f'm.impl("{f.func.name}", TORCH_FN({sig.name()}));'
    else:
        assert_never(self.target)
        # Silence mypy's "Missing return statement" error
        return None
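# Illustrative sketch (not from the original file) of the C++ that the
# ANONYMOUS_DEFINITION branch emits for a hypothetical functional structured op on
# a backend key; all names are made up, only the shape follows the sig_body logic
# above:
#
#   at::Tensor wrapper_add_Tensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
#     structured_add_out_functional op;        // class emitted by gen_class()
#     op.meta(self, other, alpha);             // meta() allocates op.outputs_
#     op.impl(self, other, alpha, *op.outputs_[0]);
#     return std::move(op.outputs_[0]).take();
#   }
#
# and the REGISTRATION branch then produces roughly
#
#   m.impl("add.Tensor", TORCH_FN(wrapper_add_Tensor));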
def translate(
    bindings: Sequence[Union[Expr, Binding]],
    goals: Sequence[Union[NamedCType, Binding]],
    *, method: bool = False,
    allow_expensive_conversions: bool = False
) -> List[Expr]:

    binding_exprs: List[Expr] = []
    for b in bindings:
        if isinstance(b, Binding):
            binding_exprs.append(Expr(
                expr=b.name,
                type=b.nctype,
            ))
        else:
            binding_exprs.append(b)

    goal_ctypes: List[NamedCType] = []
    for g in goals:
        if isinstance(g, Binding):
            goal_ctypes.append(g.nctype)
        else:
            goal_ctypes.append(g)

    # Add all the bindings to the context
    ctx: Dict[NamedCType, str] = {}
    for b in binding_exprs:
        ctx[b.type] = b.expr

        # While we're at it, do some simple forward inference, looking through
        # constructors.
        #
        # NB: When should you do forward inference versus backward inference?
        # The general idea:
        #
        #   - Backward inference WHEN the goal gets smaller
        #   - Forward inference WHEN the hypothesis gets smaller
        #
        # This helps ensure termination: backward inference starts with a goal
        # and tries to make it simpler and simpler until it's trivial; if the
        # goal can grow in size, we blow up to a really huge goal size.
        # Similarly, with forward inference we take hypotheses and decompose
        # them into simpler hypotheses; if hypotheses could expand in size,
        # we also have potential nontermination.  (In the code below, forward
        # inference is only ever carried out at a single step, but you could
        # imagine repeated application of forward inference being profitable.)
        #
        # A good starting point in the literature for exploring more about proof
        # search are these lecture notes
        # https://www.cs.cmu.edu/~fp/courses/oregon-m10/04-focusing.pdf
        #
        # TODO: My kingdom for a pattern matcher
        # https://www.python.org/dev/peps/pep-0634/
        #
        # TODO: This could get us in recomputation trouble if b.expr is nontrivial.
        # Fix this by implementing some sort of sharing so that if multiple
        # goals share the same expression, we only compute it once.  This seems
        # to matter in practice as compiler is often unwilling to CSE nontrivial
        # expressions like scalar.to<scalar_t>()
        t = b.type
        if isinstance(t.type, ConstRefCType) and isinstance(t.type.elem, OptionalCType) and \
                isinstance(t.type.elem.elem, BaseCType) and str(t.type.elem.elem.type) == 'at::Tensor':
            ctx[NamedCType(t.name, ConstRefCType(BaseCType(tensorT)))] = \
                f'({b.expr}.has_value() ? *{b.expr} : at::Tensor())'
        if t.type == ConstRefCType(OptionalCType(BaseCType(tensorT))):
            ctx[NamedCType(t.name, BaseCType(optionalTensorRefT))] = \
                f'(({b.expr}.has_value() && (*{b.expr}).defined()) ? at::OptionalTensorRef(*{b.expr}) : at::OptionalTensorRef())'
        if t.type == ConstRefCType(BaseCType(scalarT)):
            ctx[NamedCType(t.name, BaseCType(opmath_t))] = f'({b.expr}).to<opmath_t>()'
        if t.type == ConstRefCType(OptionalCType(BaseCType(scalarT))):
            ctx[NamedCType(t.name, BaseCType(optionalScalarRefT))] = \
                f'({b.expr}.has_value() ? at::OptionalScalarRef(&({b.expr}.value())) : at::OptionalScalarRef())'
        if t.type == BaseCType(scalar_t):
            ctx[NamedCType(t.name, BaseCType(opmath_t))] = f'static_cast<opmath_t>({b.expr})'

    # Add implicit bindings if the generated code is inside a Tensor method
    if method:
        ctx[NamedCType("self", MutRefCType(BaseCType(tensorT)))] = "const_cast<Tensor&>(*this)"
        ctx[NamedCType("self", ConstRefCType(BaseCType(tensorT)))] = "const_cast<Tensor&>(*this)"
        # This is better!  Byte-for-byte compat
        # ctx[NamedCType("self", ConstRefCType(BaseCType(tensorT)))] = "*this"

    def unsat(goal: NamedCType) -> NoReturn:
        ctx_desc = '\n'.join(f"  {t.cpp_type()} {t.name}; // {e}" for t, e in ctx.items())
        raise UnsatError(f'''
Failed to synthesize the expression "{goal.cpp_type()} {goal.name}".
When I failed, the following bindings were available in the context:

{ctx_desc}

This probably means there is a missing rule in the rules of tools.codegen.api.translate.
Check this module for more information.
''')

    # A shitty backtracking search implementation.  It's shitty because it
    # does backtracking via stack (bad idea!) and for the most part tries to
    # avoid backtracking.  In particular, if direct=True, we won't try to do
    # any fancy synthesis, just trivial conversions (e.g., "T a" is OK for
    # "const T& a").  So all of the existing rules in this function simply
    # try to solve immediately, and bail if things don't work out.
    def solve(goal: NamedCType, *, direct: bool) -> str:
        def direct_solve(goal: NamedCType) -> str:
            return solve(goal, direct=True)

        if goal in ctx:
            # Trivial
            return ctx[goal]

        # const & is satisfied with mutable &
        if isinstance(goal.type, ConstRefCType):
            try:
                # WARNING: not strictly decreasing; be careful not
                # to add a direct conversion that satisfies
                # mutable& with const&
                return solve(NamedCType(goal.name, MutRefCType(goal.type.elem)), direct=direct)
            except UnsatError:
                pass

        # mutable & is satisfied with value
        if isinstance(goal.type, MutRefCType):
            try:
                return solve(NamedCType(goal.name, goal.type.elem), direct=direct)
            except UnsatError:
                pass

        if direct:
            unsat(goal)

        # For now, all of these rules are mutually exclusive.
        if goal == NamedCType("memory_format", OptionalCType(BaseCType(memoryFormatT))):
            memory_format = direct_solve(
                NamedCType(SpecialArgName.possibly_redundant_memory_format,
                           OptionalCType(BaseCType(memoryFormatT)))
            )
            # No need to join "memory_format" and "options" if the target API takes "options" directly.
            # Otherwise it will cause the redundant memory_format error.
            if options_ctype in goal_ctypes:
                return memory_format
            try:
                options = direct_solve(options_ctype)
                return f"c10::impl::check_tensor_options_and_extract_memory_format({options}, {memory_format})"
            except UnsatError:
                return memory_format
        elif goal == NamedCType("options", BaseCType(tensorOptionsT)):
            dtype = direct_solve(NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))))
            pin_memory = direct_solve(NamedCType("pin_memory", OptionalCType(BaseCType(boolT))))
            device = direct_solve(NamedCType("device", OptionalCType(BaseCType(deviceT))))
            layout = direct_solve(NamedCType("layout", OptionalCType(BaseCType(layoutT))))
            return f'TensorOptions().dtype({dtype}).layout({layout}).device({device}).pinned_memory({pin_memory})'
        elif goal == NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))):
            options = direct_solve(options_ctype)
            return f'optTypeMetaToScalarType({options}.dtype_opt())'
        elif goal == NamedCType("layout", OptionalCType(BaseCType(layoutT))):
            options = direct_solve(options_ctype)
            return f'{options}.layout_opt()'
        elif goal == NamedCType("device", OptionalCType(BaseCType(deviceT))):
            options = direct_solve(options_ctype)
            return f'{options}.device_opt()'
        elif goal == NamedCType("pin_memory", OptionalCType(BaseCType(boolT))):
            options = direct_solve(options_ctype)
            return f'{options}.pinned_memory_opt()'
        # We can always do translations from value types to reference types, like vector<int> -> IntArrayRef
        elif goal.type == BaseCType(intArrayRefT):
            return direct_solve(NamedCType(goal.name, longVec_ctype))
        elif goal.type == BaseCType(optionalIntArrayRefT):
            return direct_solve(NamedCType(goal.name, optionalLongVec_ctype))
        elif goal.type == BaseCType(optionalScalarRefT):
            return direct_solve(NamedCType(goal.name, optionalScalar_ctype))
        elif goal.type == BaseCType(optionalTensorRefT):
            return direct_solve(NamedCType(goal.name, optionalTensor_ctype))

        # Note [translation from C++ reference to value types]
        # The below cases are all for when we have an argument with a reference type,
        # and a corresponding goal with a value type.
        # These are needed when we populate the inputs to a lambda capture and we need
        # to guarantee the lifetime of each captured argument.
        # We guard it with an explicit kwarg because converting to a value type is expensive
        # (O(n) to convert from IntArrayRef to vector<int>),
        # so the caller of translate() should be explicit that they need it.
        if allow_expensive_conversions:
            if goal.type == VectorCType(BaseCType(longT)):
                intArrayRef_ctype = NamedCType(goal.name, BaseCType(intArrayRefT))
                argname = direct_solve(intArrayRef_ctype)
                return f'{argname}.vec()'
            elif goal.type == OptionalCType(VectorCType(BaseCType(longT))):
                optionalIntArrayRef_ctype = NamedCType(goal.name, BaseCType(optionalIntArrayRefT))
                argname = direct_solve(optionalIntArrayRef_ctype)
                return f'{argname}.has_value() ? c10::make_optional({argname}->vec()) : c10::nullopt'
            elif goal.type == OptionalCType(BaseCType(scalarT)):
                optionalScalarRef_ctype = NamedCType(goal.name, BaseCType(optionalScalarRefT))
                argname = direct_solve(optionalScalarRef_ctype)
                return f'{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt'
            elif goal.type == OptionalCType(BaseCType(tensorT)):
                optionalTensorRef_ctype = NamedCType(goal.name, BaseCType(optionalTensorRefT))
                argname = direct_solve(optionalTensorRef_ctype)
                return f'{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt'
            # Technically, we also need to handle cases of C++ containers holding reference types.
            # But there currently aren't any ops that require lambda capture codegen
            # with arguments like std::vector<IntArrayRef>.
            # If that changes, we'll have to add the translation here.

        unsat(goal)

    return [Expr(solve(g, direct=False), g) for g in goal_ctypes]
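# Illustrative usage sketch (not from the original file): given a binding for the
# full TensorOptions and a goal asking only for the dtype, translate() gathers it
# out of options via the rule above:
#
#   bindings = [Expr('options', NamedCType('options', ConstRefCType(BaseCType(tensorOptionsT))))]
#   goals    = [NamedCType('dtype', OptionalCType(BaseCType(scalarTypeT)))]
#   translate(bindings, goals)[0].expr
#     # == 'optTypeMetaToScalarType(options.dtype_opt())'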
# other scope); others are more nontrivial and may require packing/unpacking.
# Some examples of non-trivial action:
#
#   - Need the "dtype" binding?  Well, maybe "dtype" isn't available
#     in the context, instead, "options" is, and you need to extract
#     it from there.  (Gather)
#
#   - Need the "context" binding?  Well, maybe "context" isn't available
#     in the context, and you need to construct it from "dtype", "device",
#     etc.  (Scatter)
#
#   - Need the "memory_format" binding?  Well, actually, it's available
#     from both "memory_format" and "options", so you had better make sure
#     they are consistent.  (Join)

options_ctype = NamedCType("options", ConstRefCType(BaseCType(tensorOptionsT)))

longVec_ctype = VectorCType(BaseCType(longT))
optionalLongVec_ctype = OptionalCType(VectorCType(BaseCType(longT)))
optionalScalar_ctype = OptionalCType(BaseCType(scalarT))
optionalTensor_ctype = OptionalCType(BaseCType(tensorT))


class UnsatError(RuntimeError):
    pass


# Given a set of in-scope bindings and a set of target bindings, synthesize
# a list of expressions that uses only the in-scope bindings (bindings) that
# have all of the types of goals.  You may want to use this function if
# you're generating code for a function like:
#
#   void f({args}) {
#     g({exprs});  // g is a different API
#   }
#
# and you need to generate "exprs".
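# Illustrative examples of the three cases above (not from the original file), in
# terms of the expressions the solve() rules synthesize:
#
#   Gather:  goal "dtype" with only "options" in scope
#            -> optTypeMetaToScalarType(options.dtype_opt())
#   Scatter: goal "options" with "dtype"/"layout"/"device"/"pin_memory" in scope
#            -> TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory)
#   Join:    goal "memory_format" with both "memory_format" and "options" in scope
#            -> c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)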
# These API's mostly follow the dispatcher API, with a few quirks:
# - The lambda capture has to convert reference types to value types
# - While the forward lambda just directly calls into the at::_ops API
#   (following the dispatcher convention), the logic here for the reverse lambda
#   is responsible for generating both the call-site, and the declarations
#   (which are implemented manually in the at::functionalization::impl namespace).

# The lambdas generated for each view op in the functionalization pass are of the form
# [capture_arguments](outer_arguments) -> returns_type {
#     return name(inner_arguments);
# }

# Define some specific lambda input arguments.
base_binding = Binding(
    name='base',
    nctype=NamedCType(name='base', type=ConstRefCType(BaseCType(tensorT))),
    argument=Argument(name='base', type=BaseType(BaseTy.Tensor), default=None, annotation=None),
    default=None)
mutated_view_binding = Binding(
    name='mutated_view',
    nctype=NamedCType(name='mutated_view', type=ConstRefCType(BaseCType(tensorT))),
    argument=Argument(name='base', type=BaseType(BaseTy.Tensor), default=None, annotation=None),
    default=None)
mutated_view_idx_binding = Binding(
    name='mutated_view_idx',
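# Illustrative sketch (not from the original file) of the lambda shape described
# above for a hypothetical view op; the names and the exact at::_ops call are
# assumptions, but the capture converting a reference type (IntArrayRef) into an
# owning value type so the closure can outlive the call is the point:
#
#   [size = size.vec()](const at::Tensor & base) -> at::Tensor {
#     return at::_ops::view::call(base, size);
#   }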
def translate(
    bindings: Sequence[Union[Expr, Binding]],
    goals: Sequence[Union[NamedCType, Binding]],
    *, method: bool = False
) -> List[Expr]:

    binding_exprs: List[Expr] = []
    for b in bindings:
        if isinstance(b, Binding):
            binding_exprs.append(Expr(
                expr=b.name,
                type=b.nctype,
            ))
        else:
            binding_exprs.append(b)

    goal_ctypes: List[NamedCType] = []
    for g in goals:
        if isinstance(g, Binding):
            goal_ctypes.append(g.nctype)
        else:
            goal_ctypes.append(g)

    # Add all the bindings to the context
    ctx: Dict[NamedCType, str] = {}
    for b in binding_exprs:
        ctx[b.type] = b.expr

        # While we're at it, do some simple forward inference, looking through
        # constructors.
        # TODO: My kingdom for a pattern matcher
        # https://www.python.org/dev/peps/pep-0634/
        # TODO: This could get us in recomputation trouble if b.expr is nontrivial
        t = b.type
        if isinstance(t.type, ConstRefCType) and isinstance(t.type.elem, OptionalCType) and \
                isinstance(t.type.elem.elem, BaseCType) and str(t.type.elem.elem.type) == 'at::Tensor':
            ctx[NamedCType(t.name, ConstRefCType(BaseCType(tensorT)))] = \
                f'({b.expr}.has_value() ? *{b.expr} : at::Tensor())'

    # Add implicit bindings if the generated code is inside a Tensor method
    if method:
        ctx[NamedCType("self", MutRefCType(BaseCType(tensorT)))] = "const_cast<Tensor&>(*this)"
        ctx[NamedCType("self", ConstRefCType(BaseCType(tensorT)))] = "const_cast<Tensor&>(*this)"
        # This is better!  Byte-for-byte compat
        # ctx[NamedCType("self", ConstRefCType(BaseCType(tensorT)))] = "*this"

    def unsat(goal: NamedCType) -> NoReturn:
        ctx_desc = '\n'.join(f"  {t.cpp_type()} {t.name}; // {e}" for t, e in ctx.items())
        raise UnsatError(f'''
Failed to synthesize the expression "{goal.cpp_type()} {goal.name}".
When I failed, the following bindings were available in the context:

{ctx_desc}

This probably means there is a missing rule in the rules of tools.codegen.api.translate.
Check this module for more information.
''')

    # A shitty backtracking search implementation.  It's shitty because it
    # doesn't actually do backtracing or search.  In particular, if
    # direct=True, we won't try to do any fancy synthesis, just trivial
    # conversions (e.g., "T a" is OK for "const T& a").  So all of the
    # existing rules in this function simply try to solve immediately,
    # and bail if things don't work out.
    def solve(goal: NamedCType, *, direct: bool) -> str:
        def direct_solve(goal: NamedCType) -> str:
            return solve(goal, direct=True)

        if goal in ctx:
            # Trivial
            return ctx[goal]

        # const & is satisfied with mutable &
        if isinstance(goal.type, ConstRefCType):
            try:
                # WARNING: not strictly decreasing; be careful not
                # to add a direct conversion that satisfies
                # mutable& with const&
                return solve(NamedCType(goal.name, MutRefCType(goal.type.elem)), direct=direct)
            except UnsatError:
                pass

        # mutable & is satisfied with value
        if isinstance(goal.type, MutRefCType):
            try:
                return solve(NamedCType(goal.name, goal.type.elem), direct=direct)
            except UnsatError:
                pass

        if direct:
            unsat(goal)

        # For now, all of these rules are mutually exclusive.
        if goal == NamedCType("memory_format", OptionalCType(BaseCType(memoryFormatT))):
            memory_format = direct_solve(
                NamedCType(SpecialArgName.possibly_redundant_memory_format,
                           OptionalCType(BaseCType(memoryFormatT)))
            )
            # No need to join "memory_format" and "options" if the target API takes "options" directly.
            # Otherwise it will cause the redundant memory_format error.
            if options_ctype in goal_ctypes:
                return memory_format
            try:
                options = direct_solve(options_ctype)
                return f"c10::impl::check_tensor_options_and_extract_memory_format({options}, {memory_format})"
            except UnsatError:
                return memory_format
        elif goal == NamedCType("options", BaseCType(tensorOptionsT)):
            dtype = direct_solve(NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))))
            pin_memory = direct_solve(NamedCType("pin_memory", OptionalCType(BaseCType(boolT))))
            device = direct_solve(NamedCType("device", OptionalCType(BaseCType(deviceT))))
            layout = direct_solve(NamedCType("layout", OptionalCType(BaseCType(layoutT))))
            return f'TensorOptions().dtype({dtype}).layout({layout}).device({device}).pinned_memory({pin_memory})'
        elif goal == NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))):
            options = direct_solve(options_ctype)
            return f'optTypeMetaToScalarType({options}.dtype_opt())'
        elif goal == NamedCType("layout", OptionalCType(BaseCType(layoutT))):
            options = direct_solve(options_ctype)
            return f'{options}.layout_opt()'
        elif goal == NamedCType("device", OptionalCType(BaseCType(deviceT))):
            options = direct_solve(options_ctype)
            return f'{options}.device_opt()'
        elif goal == NamedCType("pin_memory", OptionalCType(BaseCType(boolT))):
            options = direct_solve(options_ctype)
            return f'{options}.pinned_memory_opt()'

        unsat(goal)

    return [Expr(solve(g, direct=False), g) for g in goal_ctypes]
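# Illustrative note (not from the original file): with method=True, a goal of
# 'const Tensor & self' is satisfied from the implicit binding added above, so the
# synthesized expression is 'const_cast<Tensor&>(*this)' rather than an error.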
# other scope); others are more nontrivial and may require packing/unpacking.
# Some examples of non-trivial action:
#
#   - Need the "dtype" binding?  Well, maybe "dtype" isn't available
#     in the context, instead, "options" is, and you need to extract
#     it from there.  (Gather)
#
#   - Need the "context" binding?  Well, maybe "context" isn't available
#     in the context, and you need to construct it from "dtype", "device",
#     etc.  (Scatter)
#
#   - Need the "memory_format" binding?  Well, actually, it's available
#     from both "memory_format" and "options", so you had better make sure
#     they are consistent.  (Join)

options_ctype = ConstRefCType(BaseCType("TensorOptions", "options"))


class UnsatError(RuntimeError):
    pass


# Given a set of in-scope bindings and a set of target bindings, synthesize
# a list of expressions that uses only the in-scope bindings (bindings) that
# have all of the types of goals.  You may want to use this function if
# you're generating code for a function like:
#
#   void f({args}) {
#     g({exprs});  // g is a different API
#   }
#
# and you need to generate "exprs".
#