def is_smaller(s1: PythonSignature, s2: PythonSignature) -> bool:
    """Return True iff s1 is strictly smaller than s2 in the partial order.

    Two signatures are comparable only when they have the same number of
    (non-output) arguments; s1 < s2 when every argument type of s1 is
    smaller-or-equal to the corresponding type of s2 and at least one pair
    differs.
    """
    lhs = s1.arguments(skip_outputs=True)
    rhs = s2.arguments(skip_outputs=True)

    # Different arities are incomparable.
    if len(lhs) != len(rhs):
        return False

    # TODO: should use some canonical form instead of 'str(arg.type)' - see
    # comments above. The old codegen used the deprecated
    # 'dynamic_type(arg.type)', which ignores the optional annotation,
    # i.e. 'Scalar' and 'Scalar?'.
    pairs = list(zip(lhs, rhs))

    # Every position must be smaller-or-equal (string-equal types count as
    # equal here, matching the old dynamic_type-based comparison).
    for left, right in pairs:
        if str(left.type) != str(right.type) and not is_arg_smaller(left.type, right.type):
            return False

    # Strictly smaller requires at least one genuinely different type.
    for left, right in pairs:
        if left.type != right.type:
            return True
    return False
def dispatch_lambda_exprs(ps: PythonSignature, f: NativeFunction) -> DispatchLambdaArgumentExprs:
    """Produce the C++ expressions that feed the dispatch lambda's arguments.

    Binds 'arg_parser_outputs' (PythonArgParser results) to 'lambda_args'
    (the dispatch lambda's parameters) by producing 'inits' (C++ statements
    emitted before the lambda call) and 'lambda_args_exprs' (one C++
    expression per lambda argument).
    """
    # This method is to bind 'arg_parser_outputs' and 'lambda_args' by producing
    # 'inits' and 'lambda_args_exprs' for each lambda argument using arg parser
    # outputs.
    arg_parser_outputs = arg_parser_output_exprs(ps, f)
    lambda_args = dispatch_lambda_args(ps, f)
    inits: List[str] = []                  # C++ statements to emit before the lambda call
    lambda_args_exprs: Dict[str, str] = {}  # lambda arg name -> C++ expression

    has_toptions = has_tensor_options(f)

    # 1. special inits/unpacking to provide binding exprs for lambda arguments.
    for a in ps.arguments(skip_tensor_options=True):
        name = a.name
        arg_parser_expr = arg_parser_outputs[a.name].expr

        if has_toptions and name == 'self':
            # TODO: why this needs to be special case?
            inits.extend([
                f'auto self = {arg_parser_expr};',
            ])
            lambda_args_exprs[name] = name
        elif isinstance(a, PythonOutArgument) and len(a.outputs) > 1 and f.func.is_out_fn():
            # Multi-output out= function: unpack the parsed tuple into 'out'
            # and index each output argument out of it.
            inits.extend([
                f'auto out = {arg_parser_expr};',
            ])
            for i, out_arg in enumerate(a.outputs):
                lambda_args_exprs[out_arg.name] = f'out[{i}]'
        elif str(a.type) == 'Dimname[]?':
            # Optional dimname list: the parser output needs re-wrapping into
            # c10::optional<DimnameList>. The backslash continues the f-string
            # so the generated C++ statement stays on a single line.
            inits.extend([
                f'auto __{name} = {arg_parser_expr};',
                f'c10::optional<DimnameList> {name} = \
__{name} ? c10::make_optional(DimnameList(__{name}.value())) : c10::nullopt;',
            ])
            lambda_args_exprs[name] = name
        else:
            # default case - directly using PythonArgParser output expr
            lambda_args_exprs[name] = arg_parser_expr

    # method's self is passed directly to python binding, rather than parsed
    if ps.method:
        lambda_args_exprs['self'] = 'self'

    # 2. special packing/checking for TensorOptions.
    tensor_options_args_names = list(map(lambda a: a.name, ps.tensor_options_args))
    if has_toptions:
        if f.func.is_out_fn():
            raise RuntimeError(f'{f.func}: tensor options with output arg')
        # Every tensor-options arg must be a known field with the expected type.
        for a in ps.tensor_options_args:
            if a.name not in TENSOR_OPTIONS_FIELDS:
                raise RuntimeError(
                    f'{f.func}: unrecognized tensor options field \'{a.name}\' in python binding arguments')
            if str(a.type) != TENSOR_OPTIONS_FIELDS.get(a.name):
                raise RuntimeError(
                    f'{f.func}: unrecognized type \'{str(a.type)}\' for tensor options field \'{a.name}\'')
        # ...and conversely, every known field must be present in the binding.
        if not all(map(lambda a: a in tensor_options_args_names, TENSOR_OPTIONS_FIELDS.keys())):
            raise RuntimeError(
                f'{f.func}: incomplete tensor options args: {tensor_options_args_names}')

        # Pack the parsed fields into a single TensorOptions init.
        # NOTE(review): the '.device'/'.layout' setters and the
        # maybe_initialize_cuda call are commented out (C++ '//') in this
        # variant — the generated code ignores those parsed values; confirm
        # this is intentional. The snippet's internal line layout was
        # reconstructed from a whitespace-mangled source and is emitted
        # verbatim into the generated C++.
        inits.append(f'''\
const auto options = TensorOptions()
    .dtype({arg_parser_outputs['dtype'].expr})
    //.device({arg_parser_outputs['device'].expr})
    //.layout({arg_parser_outputs['layout'].expr})
    .requires_grad({arg_parser_outputs['requires_grad'].expr})
    .pinned_memory({arg_parser_outputs['pin_memory'].expr});
// torch::utils::maybe_initialize_cuda(options);
''')
        lambda_args_exprs['options'] = 'options'

    # Emit the lambda-arg expressions in the lambda's own argument order.
    return DispatchLambdaArgumentExprs(
        exprs=tuple(map(lambda a: lambda_args_exprs[a.name], lambda_args)),
        inits=inits,
    )