Example 1
def cpparguments_exprs(func: FunctionSchema, *, method: bool,
                       api_is_faithful: bool) -> Sequence[DispatcherExpr]:
    # Translate the C++ API's grouped arguments into dispatcher expressions.
    dispatcher_calling_convention_is_faithful = (
        local.use_c10_dispatcher().dispatcher_uses_new_style())
    arguments = cpp.group_arguments(
        func,
        method=method,
        faithful=dispatcher_calling_convention_is_faithful)

    if api_is_faithful:
        argument_packs = tuple(cpp.argument_faithful(a) for a in arguments)
    else:
        argument_packs = tuple(cpp.argument(a) for a in arguments)

    return _cpparguments_exprs(argument_packs)
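What "grouping" buys Example 1: when the faithful convention is off, the schema's scattered dtype/layout/device/pin_memory arguments are collapsed into a single TensorOptions pack. A minimal, self-contained sketch of that idea (Arg, TensorOptionsPack, and group_arguments_sketch are hypothetical stand-ins, not the real codegen types):

from dataclasses import dataclass
from typing import List, Union

@dataclass
class Arg:
    name: str

@dataclass
class TensorOptionsPack:
    # The scattered tensor-options fields, regrouped into one logical argument.
    fields: List[Arg]

TENSOR_OPTIONS_FIELDS = ('dtype', 'layout', 'device', 'pin_memory')

def group_arguments_sketch(args: List[Arg],
                           faithful: bool) -> List[Union[Arg, TensorOptionsPack]]:
    if faithful:
        # Faithful convention: keep every argument scattered, as in the schema.
        return list(args)
    grouped: List[Union[Arg, TensorOptionsPack]] = [
        a for a in args if a.name not in TENSOR_OPTIONS_FIELDS]
    options = [a for a in args if a.name in TENSOR_OPTIONS_FIELDS]
    if options:
        # Non-faithful convention: collapse the fields into one gathered pack.
        grouped.append(TensorOptionsPack(options))
    return grouped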
Example 2
def has_tensor_options(f: NativeFunction) -> bool:
    # True iff grouping produced a TensorOptions pack for this function.
    return any(
        isinstance(a, TensorOptionsArguments)
        for a in cpp.group_arguments(f.func, method=False))
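A hypothetical usage sketch, where empty_fn and add_fn stand for NativeFunction instances parsed from native_functions.yaml (a factory op such as empty carries the TensorOptions-style kwargs; a pointwise op such as add does not):

assert has_tensor_options(empty_fn)      # dtype/layout/device/pin_memory kwargs
assert not has_tensor_options(add_fn)    # plain Tensor arguments only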
Example 3
def signature(f: NativeFunction, *, method: bool = False) -> PythonSignature:
    # Use the cpp api to gather TensorOptions fields from kwargs.
    # Skip ThisArgument if this is a method signature.
    # Skip TensorOptionsArguments in the C++ signature. Python-side TensorOptions
    # arguments are created based on different rules - see below.
    args = tuple(a for a in cpp.group_arguments(f.func, method=method)
                 if isinstance(a, Argument))

    input_arg_set = {a.name for a in f.func.arguments.positional}
    kwarg_only_set = {a.name for a in f.func.arguments.kwarg_only}
    out_arg_set = {a.name for a in f.func.arguments.out}

    input_args = tuple(
        argument(a) for a in args if a.name in input_arg_set)
    input_kwargs = tuple(
        argument(a) for a in args if a.name in kwarg_only_set)
    outputs = tuple(
        argument(a) for a in args if a.name in out_arg_set)

    # Reintroduce the scattered fields of TensorOptions for Python.
    # Compared to the cpp counterpart, the python arguments have a new property
    # (default_init) and a new argument 'requires_grad', which require some
    # special handling.
    # [old codegen] TODO: because these aren't guaranteed to be 100% faithful
    # to the original versions in the yaml, this recreation is a potential
    # source of drift between eager and JIT. Pull this logic out to a shared place.

    has_tensor_input_arg = any(
        a.type.is_tensor_like() for a in itertools.chain(
            f.func.arguments.positional, f.func.arguments.kwarg_only))
    if any(a.name == 'requires_grad' for a in f.func.schema_order_arguments()):
        raise ValueError(
            'argument named requires_grad is reserved, it should not be '
            'explicitly added to the schema')

    # [old codegen] this probably won't work if one of the returns is not a tensor,
    # but it will produce a compile-time error that is obvious.
    has_tensor_return = any(r.type.is_tensor_like() for r in f.func.returns)

    name: str = cpp.name(f.func)
    is_factory_function = f.category_override == 'factory' or (
        has_tensor_return and not has_tensor_input_arg)
    is_like_or_new_function = f.category_override in (
        'new', 'like') or name.startswith('new_') or name.endswith('_like')

    tensor_options_args: List[PythonArgument] = []
    if is_factory_function or is_like_or_new_function:
        tensor_options_args.append(
            PythonArgument(
                name='dtype',
                type=BaseType(BaseTy.ScalarType),
                default=_dtype_default_type_hack(name),
                default_init='self.scalar_type()'
                if is_like_or_new_function else None,
            ))
        tensor_options_args.append(
            PythonArgument(
                name='layout',
                type=OptionalType(BaseType(BaseTy.Layout)),
                default='torch.strided',
                default_init='layout_from_backend(self.options().backend())'
                if is_like_or_new_function else None,
            ))
        tensor_options_args.append(
            PythonArgument(
                name='device',
                type=BaseType(BaseTy.Device),
                default='None',
                default_init='self.device()'
                if is_like_or_new_function else None,
            ))
        tensor_options_args.append(
            PythonArgument(
                name='pin_memory',
                type=BaseType(BaseTy.bool),
                default='False',
                default_init=None,
            ))
        tensor_options_args.append(
            PythonArgument(
                name='requires_grad',
                type=BaseType(BaseTy.bool),
                default='False',
                default_init=None,
            ))

    return PythonSignature(
        name=str(f.func.name.name),
        input_args=input_args,
        input_kwargs=input_kwargs,
        output_args=PythonOutArgument.from_outputs(outputs),
        tensor_options_args=tuple(tensor_options_args),
        method=method,
    )
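An illustrative check of what Example 3 assembles for a *_like operator: the five tensor_options_args above are appended in order, and for like/new functions the dtype/layout/device defaults are initialized from self. Here empty_like_fn is a hypothetical stand-in for the parsed NativeFunction:

sig = signature(empty_like_fn, method=False)
assert [a.name for a in sig.tensor_options_args] == [
    'dtype', 'layout', 'device', 'pin_memory', 'requires_grad']
assert sig.tensor_options_args[0].default_init == 'self.scalar_type()'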
Example 4
def arguments(func: FunctionSchema) -> Tuple[NativeArgument, ...]:
    return tuple(i for arg in cpp.group_arguments(func, method=False)
                 for i in argument(arg))
Example 5
def arguments(func: FunctionSchema) -> Tuple[NativeArgument, ...]:
    args = cpp.group_arguments(
        func,
        method=False,
        faithful=local.use_c10_dispatcher() is UseC10Dispatcher.full)
    return tuple(i for arg in args for i in argument(arg))
Example 6
def arguments(func: FunctionSchema) -> Sequence[LegacyDispatcherArgument]:
    return list(map(argument, cpp.group_arguments(func)))
Example 7
def arguments(func: FunctionSchema) -> Tuple[NativeArgument, ...]:
    return tuple(map(argument, cpp.group_arguments(func, method=False)))
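Examples 4 through 7 are per-API variants of one pattern: group the schema arguments with cpp.group_arguments, then translate each group with a local argument() helper. In Examples 4 and 5 that helper may expand a TensorOptions pack back into several native arguments, hence the nested comprehension. A generic sketch of that flatten step (flatten_groups and expand are illustrative names, not codegen APIs):

from itertools import chain
from typing import Callable, Iterable, Sequence, Tuple, TypeVar

G = TypeVar('G')  # a grouped argument (plain, or a TensorOptions pack)
R = TypeVar('R')  # one translated native argument

def flatten_groups(groups: Iterable[G],
                   expand: Callable[[G], Sequence[R]]) -> Tuple[R, ...]:
    # Each group may expand into one or several results; flatten them in order.
    return tuple(chain.from_iterable(expand(g) for g in groups))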