Example 1
  def prepare(self, test_fn, namespace, recursive=True):
    namespace['ConversionOptions'] = converter.ConversionOptions

    future_features = ('print_function', 'division')
    node, source = parser.parse_entity(test_fn, future_features=future_features)
    namer = naming.Namer(namespace)
    program_ctx = converter.ProgramContext(
        options=converter.ConversionOptions(recursive=recursive),
        autograph_module=None)
    entity_info = transformer.EntityInfo(
        name=test_fn.__name__,
        source_code=source,
        source_file='<fragment>',
        future_features=future_features,
        namespace=namespace)
    ctx = transformer.Context(entity_info, namer, program_ctx)
    origin_info.resolve_entity(node, source, test_fn)

    graphs = cfg.build(node)
    node = qual_names.resolve(node)
    node = activity.resolve(node, ctx, None)
    node = reaching_definitions.resolve(node, ctx, graphs)
    anno.dup(
        node,
        {
            anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
        },
    )

    return node, ctx
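
The `EntityInfo` constructed above only records metadata about the entity being converted. A minimal stdlib-only sketch of the same idea, using a hypothetical `sample` function and no AutoGraph imports:

import inspect

def sample(x):
  return x + 1

# Roughly the fields EntityInfo captures: the entity's source code, the file
# it came from, and the namespace it was defined in.
source = inspect.getsource(sample)
source_file = inspect.getsourcefile(sample)
namespace = sample.__globals__
print(source_file)
print(source.splitlines()[0])  # -> 'def sample(x):'
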
Example 2
  def _simple_program_ctx(self):
    return converter.ProgramContext(
        recursive=True,
        autograph_decorators=(),
        partial_types=(),
        autograph_module=api,
        uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
Example 3
    def prepare(self,
                test_fn,
                namespace,
                namer=None,
                arg_types=None,
                owner_type=None,
                recursive=True,
                strip_decorators=()):
        namespace['ConversionOptions'] = converter.ConversionOptions

        node, source = parser.parse_entity(test_fn)
        node = node.body[0]
        if namer is None:
            namer = FakeNamer()
        program_ctx = converter.ProgramContext(
            options=converter.ConversionOptions(
                recursive=recursive, strip_decorators=strip_decorators),
            partial_types=None,
            autograph_module=None,
            uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
        entity_info = transformer.EntityInfo(source_code=source,
                                             source_file='<fragment>',
                                             namespace=namespace,
                                             arg_values=None,
                                             arg_types=arg_types,
                                             owner_type=owner_type)
        ctx = converter.EntityContext(namer, entity_info, program_ctx)
        node = converter.standard_analysis(node, ctx, is_initial=True)
        return node, ctx
Example 4
    def transform(self,
                  f,
                  converter_module,
                  include_ast=False,
                  ag_overrides=None):
        program_ctx = converter.ProgramContext(
            options=converter.ConversionOptions(recursive=True),
            autograph_module=api)

        conversion.create_custom_vars(program_ctx)
        custom_vars = dict(conversion.custom_vars)

        if ag_overrides:
            modified_ag = imp.new_module('fake_autograph')
            modified_ag.__dict__.update(custom_vars['ag__'].__dict__)
            modified_ag.__dict__.update(ag_overrides)
            custom_vars['ag__'] = modified_ag

        tr = TestingTranspiler(converter_module)
        transformed, _, _ = tr.transform_function(f, program_ctx.options,
                                                  program_ctx, custom_vars)

        if include_ast:
            return transformed, tr.transformed_ast, tr.transform_ctx

        return transformed
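
The `ag_overrides` branch above clones the generated `ag__` module and shadows selected symbols in the copy. A stdlib-only sketch of that cloning pattern, using `types.ModuleType` (the non-deprecated equivalent of `imp.new_module`) and `math` as a stand-in module:

import math
import types

overrides = {'pi': 3}  # stand-in for ag_overrides

fake = types.ModuleType('fake_math')
fake.__dict__.update(math.__dict__)
fake.__dict__.update(overrides)

assert fake.sqrt(4.0) == 2.0  # untouched attributes still resolve
assert fake.pi == 3           # overridden attribute wins
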
Example 5
  def transform(
      self, f, converter_module, include_ast=False, ag_overrides=None):
    program_ctx = converter.ProgramContext(
        options=converter.ConversionOptions(recursive=True),
        autograph_module=api)

    tr = TestingTranspiler(converter_module, ag_overrides)
    transformed, _, _ = tr.transform_function(f, program_ctx)

    if include_ast:
      return transformed, tr.transformed_ast, tr.transform_ctx

    return transformed
Example 6
def to_code(entity,
            recursive=True,
            arg_values=None,
            arg_types=None,
            indentation='  ',
            experimental_optional_features=converter.Feature.ALL,
            experimental_partial_types=None):
    """Similar to `to_graph`, but returns Python source code as a string.

  Also see: `tf.autograph.to_graph`.

  `to_code` returns the Python source code that can be used to generate a
  TensorFlow graph that is functionally identical to the input Python code.

  Args:
    entity: Python callable or class to convert.
    recursive: Whether to recursively convert any functions that the
      converted function may call.
    arg_values: Optional dict of value hints for symbols including
      function arguments mapping string names to actual values. For example,
      `arg_values={'a': 1}` will map the variable `a` to the value `1`.
    arg_types: Optional dict of type hints for symbols including function
      arguments. Type hints allow specifying just the type of a variable, rather
      than a specific value.
    indentation: The string to use for indenting. Typically two or four spaces,
      or just the tab character.
    experimental_optional_features: `None`, a tuple of, or a single
      `tf.autograph.experimental.Feature` value. Controls the use of
      optional features in the conversion process.
    experimental_partial_types: A `set` of `type` values, reserved for internal
      use.

  Returns:
    The converted code as string.
  """
    program_ctx = converter.ProgramContext(
        options=converter.ConversionOptions(
            recursive=recursive,
            verbose=converter.Verbosity.BRIEF,
            strip_decorators=(convert, do_not_convert, converted_call),
            optional_features=experimental_optional_features),
        partial_types=experimental_partial_types,
        autograph_module=tf_inspect.getmodule(to_graph),
        uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
    conversion.entity_to_graph(entity, program_ctx, arg_values, arg_types)

    code = '\n'.join(
        compiler.ast_to_source(program_ctx.dependency_cache[dep], indentation)
        for dep in reversed(program_ctx.conversion_order))

    return program_ctx.required_imports + '\n\n' + code
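
A hedged usage sketch for `to_code` through the public API; it assumes a TensorFlow installation, and since the keyword arguments in the signature above changed between the versions collected here, only the one-argument form is shown:

import tensorflow as tf

def f(x):
  if x > 0:
    x = x * x
  return x

# Returns the generated Python source as a string.
print(tf.autograph.to_code(f))
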
Example 7
def _parse_and_analyze(f):
    """Performs preliminary analyses and transformations.

  The goal is to massage the source program into a form on which
  the `_AutoBatchingTransformer` below will be successful.

  Args:
    f: Function to analyze

  Returns:
    node: A Python AST node representing the function, suitable for
      passing to `_AutoBatchingTransformer.visit`
    entity_info: An AutoGraph `EntityInfo` object, with some information
      about `f`.  Required for initializing `_AutoBatchingTransformer`.
  """
    namespace = {}

    # Get the AST of the function
    future_features = inspect_utils.getfutureimports(f)
    node, _ = parser.parse_entity(f, future_features=future_features)

    # Boilerplate for AutoGraph transforms
    entity_info = transformer.EntityInfo(source_code='',
                                         source_file=None,
                                         future_features=future_features,
                                         namespace=namespace)
    program_ctx = converter.ProgramContext(
        options=converter.ConversionOptions(recursive=True),
        autograph_module=None)
    ctx = converter.EntityContext(namer=naming.Namer(namespace),
                                  entity_info=entity_info,
                                  program_ctx=program_ctx)

    # Canonicalize away break statements
    node = converter.standard_analysis(node, ctx, is_initial=True)
    node = break_statements.transform(node, ctx)

    # Canonicalize away continue statements
    node = converter.standard_analysis(node, ctx, is_initial=False)
    node = continue_statements.transform(node, ctx)

    # Force single returns
    node = converter.standard_analysis(node, ctx, is_initial=False)
    node = return_statements.transform(node, ctx, default_to_null_return=False)

    # Transform into ANF
    node = anf.transform(node, ctx)
    node = converter.standard_analysis(node, ctx, is_initial=False)

    return node, ctx
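
The first step above, `parser.parse_entity`, simply turns the live function back into an AST. A stdlib-only analogue of that step, with no AutoGraph dependencies:

import ast
import inspect
import textwrap

def g(x):
  while x > 3:
    x -= 1
  return x

source = textwrap.dedent(inspect.getsource(g))
node = ast.parse(source).body[0]  # the FunctionDef node for `g`
print(type(node).__name__, node.name)  # -> FunctionDef g
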
Example 8
def to_code(entity,
            recursive=True,
            arg_values=None,
            arg_types=None,
            indentation='  ',
            experimental_optional_features=converter.Feature.ALL,
            experimental_partial_types=None):
    """Similar to `to_graph`, but returns Python source code as a string.

  Also see: `tf.autograph.to_graph`.

  `to_code` returns the Python source code that can be used to generate a
  TensorFlow graph that is functionally identical to the input Python code.

  Args:
    entity: Python callable or class to convert.
    recursive: Whether to recursively convert any functions that the
      converted function may call.
    arg_values: Optional dict of value hints for symbols including
      function arguments mapping string names to actual values. For example,
      `arg_values={'a': 1}` will map the variable `a` to the value `1`.
    arg_types: Optional dict of type hints for symbols including function
      arguments. Type hints allow specifying just the type of a variable, rather
      than a specific value.
    indentation: The string to use for indenting. Typically two or four spaces,
      or just the tab character.
    experimental_optional_features: `None`, a tuple of, or a single
      `tf.autograph.experimental.Feature` value. Controls the use of
      optional features in the conversion process.
    experimental_partial_types:  Deprecated, unused.

  Returns:
    The converted code as string.
  """
    del experimental_partial_types

    program_ctx = converter.ProgramContext(
        options=converter.ConversionOptions(
            recursive=recursive,
            optional_features=experimental_optional_features),
        autograph_module=tf_inspect.getmodule(to_graph))
    nodes, _, _ = conversion.entity_to_graph(entity, program_ctx, arg_values,
                                             arg_types)

    code = compiler.ast_to_source(nodes, indentation)

    return program_ctx.required_imports + '\n\n' + code
Example 9
    def prepare(self, test_fn, namespace, arg_types=None, recursive=True):
        namespace['ConversionOptions'] = converter.ConversionOptions

        node, source, _ = parser.parse_entity(test_fn)
        namer = naming.Namer(namespace)
        program_ctx = converter.ProgramContext(
            options=converter.ConversionOptions(recursive=recursive),
            autograph_module=None)
        entity_info = transformer.EntityInfo(source_code=source,
                                             source_file='<fragment>',
                                             namespace=namespace,
                                             arg_values=None,
                                             arg_types=arg_types)
        ctx = converter.EntityContext(namer, entity_info, program_ctx)
        origin_info.resolve(node, source, test_fn)
        node = converter.standard_analysis(node, ctx, is_initial=True)
        return node, ctx
Example 10
    def prepare(self, test_fn, namespace, recursive=True):
        namespace['ConversionOptions'] = converter.ConversionOptions

        future_features = ('print_function', 'division')
        node, source = parser.parse_entity(test_fn,
                                           future_features=future_features)
        namer = naming.Namer(namespace)
        program_ctx = converter.ProgramContext(
            options=converter.ConversionOptions(recursive=recursive),
            autograph_module=None)
        entity_info = transformer.EntityInfo(source_code=source,
                                             source_file='<fragment>',
                                             future_features=future_features,
                                             namespace=namespace)
        ctx = converter.EntityContext(namer, entity_info, program_ctx)
        node = converter.standard_analysis(node, ctx, is_initial=True)
        return node, ctx
Example 11
def to_code(e,
            recursive=True,
            arg_values=None,
            arg_types=None,
            partial_types=None,
            indentation='  '):
    """Returns the equivalent code that uses TensorFlow ops.

  Also see: `to_graph`, `convert`

  Args:
    e: Union[Callable, Type], the Python entity to convert.
    recursive: bool, whether to recursively convert any functions that the
      converted function may call.
    arg_values: Optional[Dict[Text, Any]], value hints for symbols including
      function arguments.
    arg_types: Optional[Dict[Text, Type]], type hints for symbols including
      function arguments.
    partial_types: Set[Type], reserved for internal use.
    indentation: Text, what to use for each level of indentation.

  Returns:
    Text, the converted code.
  """
    program_ctx = converter.ProgramContext(
        options=converter.ConversionOptions(recursive=recursive,
                                            strip_decorators=(convert,
                                                              do_not_convert,
                                                              converted_call)),
        partial_types=partial_types,
        autograph_module=tf_inspect.getmodule(to_graph),
        uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
    conversion.entity_to_graph(e, program_ctx, arg_values, arg_types)

    code = '\n'.join(
        compiler.ast_to_source(program_ctx.dependency_cache[dep], indentation)
        for dep in reversed(program_ctx.conversion_order))

    return program_ctx.required_imports + '\n\n' + code
Example 12
    def test_to_ast(self):
        opts = converter.ConversionOptions()

        namer = converter_testing.FakeNamer()
        program_ctx = converter.ProgramContext(options=opts,
                                               partial_types=None,
                                               autograph_module=None,
                                               uncompiled_modules=())
        entity_info = transformer.EntityInfo(source_code='',
                                             source_file='<fragment>',
                                             namespace={},
                                             arg_values=None,
                                             arg_types={},
                                             owner_type=None)
        ctx = converter.EntityContext(namer, entity_info, program_ctx)
        opts_ast = opts.to_ast(ctx)

        template = '''
    def test_fn():
      return opts_ast
    '''
        opts_packed = templates.replace(template, opts_ast=opts_ast)

        reparsed, _ = compiler.ast_to_object(opts_packed)
        reparsed.__dict__['ag__'] = self.make_fake_mod(
            'fake_ag', converter.ConversionOptions, converter.Feature)

        reparsed_opts = reparsed.test_fn()

        self.assertEqual(opts.recursive, reparsed_opts.recursive)
        self.assertEqual(opts.verbose, reparsed_opts.verbose)
        self.assertEqual(opts.force_conversion, reparsed_opts.force_conversion)
        self.assertEqual(opts.internal_convert_user_code,
                         reparsed_opts.internal_convert_user_code)
        self.assertEqual(opts.optional_features,
                         reparsed_opts.optional_features)
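
The test above round-trips `ConversionOptions` through an AST: build a node for the options, splice it into a code template, compile the result, and compare fields. A simplified stdlib-only analogue of that round trip, with a dict literal standing in for the options node and an `ast.NodeTransformer` standing in for `templates.replace`:

import ast

opts_node = ast.parse("{'recursive': True}", mode='eval').body

template = ast.parse('def test_fn():\n  return OPTS')

class Splice(ast.NodeTransformer):
  def visit_Name(self, node):
    return opts_node if node.id == 'OPTS' else node

module = ast.fix_missing_locations(Splice().visit(template))
ns = {}
exec(compile(module, '<fragment>', 'exec'), ns)
assert ns['test_fn']() == {'recursive': True}
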
Example 13
def to_graph(entity, recursive=True, experimental_optional_features=None):
    """Converts a Python entity into a TensorFlow graph.

  Also see: `tf.autograph.to_code`, `tf.function`.

  Unlike `tf.function`, `to_graph` is a low-level transpiler that converts
  Python code to TensorFlow graph code. It does not implement any caching,
  variable management or create any actual ops, and is best used where greater
  control over the generated TensorFlow graph is desired. Another difference
  from `tf.function` is that `to_graph` will not wrap the graph into a
  TensorFlow function or a Python callable. Internally, `tf.function` uses
  `to_graph`.

  _Example Usage_

  ```python
    def foo(x):
      if x > 0:
        y = x * x
      else:
        y = -x
      return y

    converted_foo = to_graph(foo)

    x = tf.constant(1)
    y = converted_foo(x)  # converted_foo is a TensorFlow Op-like.
    assert is_tensor(y)
  ```

  Supported Python entities include:
    * functions
    * classes
    * object methods

  Functions are converted into new functions with converted code.

  Classes are converted by generating a new class whose methods use converted
  code.

  Methods are converted into unbound functions that have an additional first
  argument called `self`.

  Args:
    entity: Python callable or class to convert.
    recursive: Whether to recursively convert any functions that the converted
      function may call.
    experimental_optional_features: `None`, a tuple of, or a single
      `tf.autograph.experimental.Feature` value. Controls the use of optional
      features in the conversion process.

  Returns:
    Same as `entity`, the converted Python function or class.

  Raises:
    ValueError: If the entity could not be converted.
  """
    try:
        program_ctx = converter.ProgramContext(
            options=converter.ConversionOptions(
                recursive=recursive,
                optional_features=experimental_optional_features),
            autograph_module=tf_inspect.getmodule(to_graph))
        return conversion.convert(entity, program_ctx)
    except (ValueError, AttributeError, KeyError, NameError,
            AssertionError) as e:
        logging.error(1, 'Error converting %s', entity, exc_info=True)
        raise ConversionError('converting {}: {}: {}'.format(
            entity, e.__class__.__name__, str(e)))
Example 14
def to_graph(entity,
             recursive=True,
             arg_values=None,
             arg_types=None,
             experimental_optional_features=converter.Feature.ALL,
             experimental_strip_decorators=None,
             experimental_verbose=converter.Verbosity.BRIEF,
             experimental_partial_types=None):
    """Converts a Python entity into a TensorFlow graph.

  Also see: `tf.autograph.to_code`, `tf.function`.

  Unlike `tf.function`, `to_graph` is a low-level transpiler that converts
  Python code to TensorFlow graph code. It does not implement any caching,
  variable management or create any actual ops, and is best used where greater
  control over the generated TensorFlow graph is desired. Another difference
  from `tf.function` is that `to_graph` will not wrap the graph into a
  TensorFlow function or a Python callable. Internally, `tf.function` uses
  `to_graph`.

  _Example Usage_

  ```python
    def foo(x):
      if x > 0:
        y = x * x
      else:
        y = -x
      return y

    converted_foo = to_graph(foo)

    x = tf.constant(1)
    y = converted_foo(x)  # converted_foo is a TensorFlow Op-like.
    assert is_tensor(y)
  ```

  Supported Python entities include:
    * functions
    * classes
    * object methods

  Functions are converted into new functions with converted code.

  Classes are converted by generating a new class whose methods use converted
  code.

  Methods are converted into unbound functions that have an additional first
  argument called `self`.

  Args:
    entity: Python callable or class to convert.
    recursive: Whether to recursively convert any functions that the
      converted function may call.
    arg_values: Optional dict of value hints for symbols including
      function arguments mapping string names to actual values. For example,
      `arg_values={'a': 1}` will map the variable `a` to the value `1`.
    arg_types: Optional dict of type hints for symbols including function
      arguments. Type hints allow specifying just the type of a variable, rather
      than a specific value.
    experimental_optional_features: `None`, a tuple of, or a single
      `tf.autograph.experimental.Feature` value. Controls the use of
      optional features in the conversion process.
    experimental_strip_decorators: A tuple specifying decorators that should be
      excluded from the compiled output. By default, when a function is
      converted before its decorators are applied, the compiled output will
      include those decorators.
    experimental_verbose: The level of printing verbosity to use, as a
      `tf.autograph.experimental.Verbosity` value.
    experimental_partial_types: A `set` of `type` values, reserved for internal
      use.

  Returns:
    Same as `entity`, the converted Python function or class.

  Raises:
    ValueError: If the entity could not be converted.
  """
    if experimental_strip_decorators is None:
        experimental_strip_decorators = ()
    experimental_strip_decorators += (convert, do_not_convert, converted_call)

    program_ctx = converter.ProgramContext(
        options=converter.ConversionOptions(
            recursive=recursive,
            verbose=experimental_verbose,
            strip_decorators=experimental_strip_decorators,
            optional_features=experimental_optional_features),
        partial_types=experimental_partial_types,
        autograph_module=tf_inspect.getmodule(to_graph),
        uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
    _, name, namespace = conversion.entity_to_graph(entity, program_ctx,
                                                    arg_values, arg_types)

    nodes = []
    for dep in reversed(program_ctx.conversion_order):
        nodes.extend(program_ctx.dependency_cache[dep])

    compiled_module, _ = compiler.ast_to_object(
        nodes,
        source_prefix=program_ctx.required_imports,
        include_source_map=True)

    # The compiled code should see everything the entry entity saw.
    # TODO(mdan): This might not work well if the call tree spans modules?
    for key, val in namespace.items():
        # Avoid overwriting entities that have been transformed.
        if key not in compiled_module.__dict__:
            compiled_module.__dict__[key] = val
    for key, val in program_ctx.additional_symbols.items():
        if key not in compiled_module.__dict__:
            compiled_module.__dict__[key] = val
    compiled = getattr(compiled_module, name)

    if tf_inspect.isfunction(entity):
        compiled.__defaults__ = entity.__defaults__

    if hasattr(compiled, '__globals__'):
        # Remove self to avoid circular references. This will probably only work
        # so long as the function is not reentrant.
        del compiled.__globals__[name]

    # Need this so the source_mapping attribute is available for the context
    # manager to access for runtime errors.
    #
    # Note that compiler.ast_to_object attaches the source map 'ag_source_map__'
    # symbol to the compiled module.
    # TODO(mdan): Record this statically in the generated code.
    # TODO(mdan): Rename this attribute to 'autograph_info__'
    source_map_attribute_name = 'ag_source_map'
    if getattr(compiled, source_map_attribute_name, None) is not None:
        raise ValueError('cannot convert %s because it has an attribute '
                         '"%s", which is reserved for AutoGraph.' %
                         (compiled, source_map_attribute_name))
    setattr(compiled, source_map_attribute_name,
            compiled_module.__dict__['ag_source_map__'])

    return compiled
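
The `compiled.__defaults__ = entity.__defaults__` line above works because default argument values are ordinary, assignable function metadata. A small stdlib-only illustration:

def original(x, y=10):
  return x + y

def regenerated(x, y=None):
  return x + y

# Carry the original defaults over to the regenerated function.
regenerated.__defaults__ = original.__defaults__
print(regenerated(1))  # -> 11
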
Example 15
def converted_call(f, args, kwargs, caller_fn_scope=None, options=None):
    """Converts a function call inline.

  For internal use only.

  Note: The argument list is optimized for readability of generated code, which
  may look like this:

    ag__.converted_call(f, (arg1, arg2), None, fscope)
    ag__.converted_call(f, (), dict(arg1=val1, **kwargs), fscope)
    ag__.converted_call(f, (arg1, arg2) + varargs, dict(**kwargs), lscope)

  Args:
    f: The function to convert.
    args: Tuple, the original positional arguments of f
    kwargs: Optional[Dict], the original keyword arguments of f
    caller_fn_scope: Optional[function_wrappers.FunctionScope], the function
      scope of the converted function in which this call was originally made.
    options: Optional[converter.ConversionOptions], conversion options. If not
      specified, the value of caller_fn_scope.callopts is used. Either options
      or caller_fn_scope must be present.

  Returns:
    Any, the result of executing a possibly-converted `f` with the given
      arguments.
  """
    logging.log(1, 'Converted call: %s\n    args: %s\n    kwargs: %s\n', f,
                args, kwargs)

    if options is None:
        if caller_fn_scope is None:
            raise ValueError(
                'either caller_fn_scope or options must have a value')
        options = caller_fn_scope.callopts

    if conversion.is_in_allowlist_cache(f, options):
        logging.log(2, 'Allowlisted %s: from cache', f)
        return _call_unconverted(f, args, kwargs, options, False)

    if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
        logging.log(2, 'Allowlisted: %s: AutoGraph is disabled in context', f)
        return _call_unconverted(f, args, kwargs, options, False)

    if is_autograph_artifact(f):
        logging.log(2, 'Permanently allowed: %s: AutoGraph artifact', f)
        return _call_unconverted(f, args, kwargs, options)

    # If this is a partial, unwrap it and redo all the checks.
    if isinstance(f, functools.partial):
        new_kwargs = {}
        if f.keywords is not None:
            # Use copy to avoid mutating the underlying keywords.
            new_kwargs = f.keywords.copy()
        if kwargs is not None:
            new_kwargs.update(kwargs)
        new_args = f.args + args
        logging.log(3, 'Forwarding call of partial %s with\n%s\n%s\n', f,
                    new_args, new_kwargs)
        return converted_call(f.func,
                              new_args,
                              new_kwargs,
                              caller_fn_scope=caller_fn_scope,
                              options=options)

    if inspect_utils.isbuiltin(f):
        if f is eval:
            return py_builtins.eval_in_original_context(
                f, args, caller_fn_scope)
        if f is super:
            return py_builtins.super_in_original_context(
                f, args, caller_fn_scope)
        if f is globals:
            return py_builtins.globals_in_original_context(caller_fn_scope)
        if f is locals:
            return py_builtins.locals_in_original_context(caller_fn_scope)
        if kwargs:
            return py_builtins.overload_of(f)(*args, **kwargs)
        else:
            return py_builtins.overload_of(f)(*args)

    if conversion.is_unsupported(f):
        return _call_unconverted(f, args, kwargs, options)

    if not options.user_requested and conversion.is_allowlisted(f):
        return _call_unconverted(f, args, kwargs, options)

    # internal_convert_user_code is for example turned off when issuing a dynamic
    # call conversion from generated code while in nonrecursive mode. In that
    # case we evidently don't want to recurse, but we still have to convert
    # things like builtins.
    if not options.internal_convert_user_code:
        return _call_unconverted(f, args, kwargs, options)

    try:
        if inspect.ismethod(f) or inspect.isfunction(f):
            target_entity = f
            effective_args = args

            f_self = getattr(f, '__self__', None)
            if f_self is not None:
                if isinstance(f_self, function.TfMethodTarget):
                    f_self = f_self.target
                effective_args = (f_self, ) + effective_args

        elif hasattr(f, '__class__') and hasattr(f.__class__, '__call__'):
            # Callable objects. Dunder methods have special lookup rules, see:
            # https://docs.python.org/3/reference/datamodel.html#specialnames
            # TODO(mdan): Recurse into converted_call to simplify other verifications.
            # This should be handled in the same way as partials.
            target_entity = f.__class__.__call__
            effective_args = (f, ) + args

        else:
            target_entity = f
            raise NotImplementedError('unknown callable type "%s"' % type(f))

    except Exception as e:  # pylint:disable=broad-except
        logging.log(1,
                    'Error transforming entity %s',
                    target_entity,
                    exc_info=True)
        if is_autograph_strict_conversion_mode():
            raise
        return _fall_back_unconverted(f, args, kwargs, options, e)

    if not hasattr(target_entity, '__code__'):
        logging.log(2, 'Permanently allowed: %s: native binding',
                    target_entity)
        return _call_unconverted(f, args, kwargs, options)
    elif (hasattr(target_entity.__code__, 'co_filename')
          and target_entity.__code__.co_filename == '<string>'):
        # TODO(mdan): __globals__['txt'] might work in Py3.
        logging.log(2, 'Permanently allowed: %s: dynamic code (exec?)',
                    target_entity)
        return _call_unconverted(f, args, kwargs, options)

    try:
        program_ctx = converter.ProgramContext(options=options)
        converted_f = _convert_actual(target_entity, program_ctx)
        if logging.has_verbosity(2):
            _log_callargs(converted_f, effective_args, kwargs)
    except Exception as e:  # pylint:disable=broad-except
        logging.log(1,
                    'Error transforming entity %s',
                    target_entity,
                    exc_info=True)
        if is_autograph_strict_conversion_mode():
            raise
        return _fall_back_unconverted(f, args, kwargs, options, e)

    with StackTraceMapper(converted_f), tf_stack.CurrentModuleFilter():
        try:
            if kwargs is not None:
                result = converted_f(*effective_args, **kwargs)
            else:
                result = converted_f(*effective_args)
        except Exception as e:
            _attach_error_metadata(e, converted_f)
            raise

    return result
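
The `functools.partial` branch above merges the partial's bound arguments with the call-site arguments before recursing. A stdlib-only check of that merge order (call-site keywords override the partial's keywords, and call-site positionals are appended after the bound ones):

import functools

def f(a, b, c=0, d=0):
  return (a, b, c, d)

p = functools.partial(f, 1, c=5)

# The same merge the snippet performs before recursing into converted_call:
new_kwargs = dict(p.keywords)         # {'c': 5}
new_kwargs.update({'c': 7, 'd': 9})   # call-site kwargs win
new_args = p.args + (2,)              # bound positionals come first

assert f(*new_args, **new_kwargs) == p(2, c=7, d=9) == (1, 2, 7, 9)
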
Example 16
def converted_call(f,
                   args,
                   kwargs,
                   caller_fn_scope=None,
                   options=None):
  """Compiles a function call inline.

  For internal use only.

  Note: The argument list is optimized for readability of generated code, which
  may look like this:

    ag__.converted_call(f, (arg1, arg2), None, fscope)
    ag__.converted_call(f, (), dict(arg1=val1, **kwargs), fscope)
    ag__.converted_call(f, (arg1, arg2) + varargs, dict(**kwargs), lscope)

  Args:
    f: The function to convert.
    args: Tuple, the original positional arguments of f
    kwargs: Optional[Dict], the original keyword arguments of f
    caller_fn_scope: Optional[function_wrappers.FunctionScope], the function
      scope of the converted function in which this call was originally made.
    options: Optional[converter.ConversionOptions], conversion options. If not
      specified, the value of caller_fn_scope.callopts is used. Either options
      or caller_fn_scope must be present.

  Returns:
    Any, the result of executing a possibly-converted `f` with the given
      arguments.
  """
  logging.log(1, 'Converted call: %s\n    args: %s\n    kwargs: %s\n', f, args,
              kwargs)

  if options is None:
    if caller_fn_scope is None:
      raise ValueError('either caller_fn_scope or options must have a value')
    options = caller_fn_scope.callopts

  if conversion.is_in_whitelist_cache(f, options):
    logging.log(2, 'Whitelisted %s: from cache', f)
    return _call_unconverted(f, args, kwargs, options, False)

  if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
    logging.log(2, 'Whitelisted: %s: AutoGraph is disabled in context', f)
    return _call_unconverted(f, args, kwargs, options, False)

  if is_autograph_artifact(f):
    logging.log(2, 'Permanently whitelisted: %s: AutoGraph artifact', f)
    return _call_unconverted(f, args, kwargs, options)

  # If this is a partial, unwrap it and redo all the checks.
  if isinstance(f, functools.partial):
    new_kwargs = {}
    if f.keywords is not None:
      # Use copy to avoid mutating the underlying keywords.
      new_kwargs = f.keywords.copy()
    if kwargs is not None:
      new_kwargs.update(kwargs)
    new_args = f.args + args
    logging.log(3, 'Forwarding call of partial %s with\n%s\n%s\n', f, new_args,
                new_kwargs)
    return converted_call(
        f.func,
        new_args,
        new_kwargs,
        caller_fn_scope=caller_fn_scope,
        options=options)

  if inspect_utils.isbuiltin(f):
    if f is eval:
      return py_builtins.eval_in_original_context(f, args, caller_fn_scope)
    if f is super:
      return py_builtins.super_in_original_context(f, args, caller_fn_scope)
    if kwargs:
      return py_builtins.overload_of(f)(*args, **kwargs)
    else:
      return py_builtins.overload_of(f)(*args)

  # TODO(b/122265385): Remove this bypass.
  if (_is_known_loaded_type(f, 'wrapt', 'FunctionWrapper') or
      _is_known_loaded_type(f, 'wrapt', 'BoundFunctionWrapper')):
    logging.warn(
        '{} appears to be decorated by wrapt, which is not yet supported'
        ' by AutoGraph. The function will run as-is.'
        ' You may still apply AutoGraph before the wrapt decorator.'.format(f))
    logging.log(2, 'Permanently whitelisted: %s: wrapt decorated', f)
    return _call_unconverted(f, args, kwargs, options)

  if _is_known_loaded_type(f, 'functools', '_lru_cache_wrapper'):
    logging.log(2, 'Permanently whitelisted: %s: lru_cache', f)
    return _call_unconverted(f, args, kwargs, options)

  # Constructors are permanently whitelisted.
  # TODO(mdan): Toggle as experimental feature instead.
  # TODO(b/124016764): Remove this limitation.
  if inspect_utils.isconstructor(f):
    logging.log(2, 'Permanently whitelisted: %s: constructor', f)
    return _call_unconverted(f, args, kwargs, options)

  # Other built-in modules are permanently whitelisted.
  # TODO(mdan): Figure out how to do this consistently for all stdlib modules.
  if any(
      f in m.__dict__.values() for m in (collections, pdb, copy, inspect, re)):
    logging.log(2, 'Permanently whitelisted: %s: part of builtin module', f)
    return _call_unconverted(f, args, kwargs, options)

  # Custom ops and kernels are also permanently whitelisted.
  # See tensorflow.framework.load_library.
  if (hasattr(f, '__module__') and
      hasattr(f.__module__, '_IS_TENSORFLOW_PLUGIN')):
    logging.log(2, 'Permanently whitelisted: %s: TensorFlow plugin', f)
    return _call_unconverted(f, args, kwargs, options)

  if not options.user_requested and conversion.is_whitelisted(f):
    return _call_unconverted(f, args, kwargs, options)

  # internal_convert_user_code is for example turned off when issuing a dynamic
  # call conversion from generated code while in nonrecursive mode. In that
  # case we evidently don't want to recurse, but we still have to convert
  # things like builtins.
  if not options.internal_convert_user_code:
    return _call_unconverted(f, args, kwargs, options)

  try:
    if inspect.ismethod(f) or inspect.isfunction(f):
      target_entity = f
      effective_args = args

      f_self = getattr(f, '__self__', None)
      if f_self is not None:
        if isinstance(f_self, function.TfMethodTarget):
          f_self = f_self.target
        effective_args = (f_self,) + effective_args

    elif hasattr(f, '__class__') and hasattr(f.__class__, '__call__'):
      # Callable objects. Dunder methods have special lookup rules, see:
      # https://docs.python.org/3/reference/datamodel.html#specialnames
      # TODO(mdan): Recurse into converted_call to simplify other verifications.
      # This should be handled in the same way as partials.
      target_entity = f.__class__.__call__
      effective_args = (f,) + args

    else:
      target_entity = f
      raise NotImplementedError('unknown callable type "%s"' % type(f))

  except Exception as e:  # pylint:disable=broad-except
    logging.log(1, 'Error transforming entity %s', target_entity, exc_info=True)
    if is_autograph_strict_conversion_mode():
      raise
    return _fall_back_unconverted(f, args, kwargs, options, e)

  if not hasattr(target_entity, '__code__'):
    logging.log(2, 'Permanently whitelisted: %s: native binding',
                target_entity)
    return _call_unconverted(f, args, kwargs, options)
  elif (hasattr(target_entity.__code__, 'co_filename') and
        target_entity.__code__.co_filename == '<string>'):
    # TODO(mdan): __globals__['txt'] might work in Py3.
    logging.log(2, 'Permanently whitelisted: %s: dynamic code (exec?)',
                target_entity)
    return _call_unconverted(f, args, kwargs, options)

  try:
    program_ctx = converter.ProgramContext(
        options=options, autograph_module=tf_inspect.getmodule(converted_call))
    converted_f = conversion.convert(target_entity, program_ctx)
    if logging.has_verbosity(2):
      _log_callargs(converted_f, effective_args, kwargs)
  except Exception as e:  # pylint:disable=broad-except
    logging.log(1, 'Error transforming entity %s', target_entity, exc_info=True)
    if is_autograph_strict_conversion_mode():
      raise
    return _fall_back_unconverted(f, args, kwargs, options, e)

  with StackTraceMapper(converted_f), tf_stack.CurrentModuleFilter():
    try:
      if kwargs is not None:
        result = converted_f(*effective_args, **kwargs)
      else:
        result = converted_f(*effective_args)
    except Exception as e:
      _attach_metadata(e, converted_f)
      raise

  return result
Example 17
def to_graph(entity, recursive=True, experimental_optional_features=None):
  """Converts a Python entity into a TensorFlow graph.

  Also see: `tf.autograph.to_code`, `tf.function`.

  Unlike `tf.function`, `to_graph` is a low-level transpiler that converts
  Python code to TensorFlow graph code. It does not implement any caching,
  variable management or create any actual ops, and is best used where greater
  control over the generated TensorFlow graph is desired. Another difference
  from `tf.function` is that `to_graph` will not wrap the graph into a
  TensorFlow function or a Python callable. Internally, `tf.function` uses
  `to_graph`.

  Example usage:

  >>> def f(x):
  ...   if x > 0:
  ...     y = x * x
  ...   else:
  ...     y = -x
  ...   return y
  ...
  >>> converted_f = to_graph(f)
  >>> x = tf.constant(2)
  >>> converted_f(x)  # converted_f is like a TensorFlow Op.
  <tf.Tensor: shape=(), dtype=int32, numpy=4>

  Supported Python entities include:
    * functions
    * classes
    * object methods

  Functions are converted into new functions with converted code.

  Classes are converted by generating a new class whose methods use converted
  code.

  Methods are converted into unbound functions that have an additional first
  argument called `self`.

  For a tutorial, see the
  [tf.function and AutoGraph guide](https://www.tensorflow.org/guide/function).
  For more detailed information, see the
  [AutoGraph reference documentation](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/index.md).

  Args:
    entity: Python callable or class to convert.
    recursive: Whether to recursively convert any functions that the converted
      function may call.
    experimental_optional_features: `None`, a tuple of, or a single
      `tf.autograph.experimental.Feature` value.

  Returns:
    Same as `entity`, the converted Python function or class.

  Raises:
    ValueError: If the entity could not be converted.
  """
  try:
    program_ctx = converter.ProgramContext(
        options=converter.ConversionOptions(
            recursive=recursive,
            user_requested=True,
            optional_features=experimental_optional_features),
        autograph_module=tf_inspect.getmodule(to_graph))
    return autograph_artifact(conversion.convert(entity, program_ctx))
  except (ValueError, AttributeError, KeyError, NameError, AssertionError) as e:
    logging.error(1, 'Error converting %s', entity, exc_info=True)
    raise ConversionError('converting {}: {}: {}'.format(
        entity, e.__class__.__name__, str(e)))
Example 18
def to_graph(entity,
             recursive=True,
             arg_values=None,
             arg_types=None,
             experimental_optional_features=converter.Feature.ALL):
    """Converts a Python entity into a TensorFlow graph.

  Also see: `tf.autograph.to_code`, `tf.function`.

  Unlike `tf.function`, `to_graph` is a low-level transpiler that converts
  Python code to TensorFlow graph code. It does not implement any caching,
  variable management or create any actual ops, and is best used where greater
  control over the generated TensorFlow graph is desired. Another difference
  from `tf.function` is that `to_graph` will not wrap the graph into a
  TensorFlow function or a Python callable. Internally, `tf.function` uses
  `to_graph`.

  _Example Usage_

  ```python
    def foo(x):
      if x > 0:
        y = x * x
      else:
        y = -x
      return y

    converted_foo = to_graph(foo)

    x = tf.constant(1)
    y = converted_foo(x)  # converted_foo is a TensorFlow Op-like.
    assert is_tensor(y)
  ```

  Supported Python entities include:
    * functions
    * classes
    * object methods

  Functions are converted into new functions with converted code.

  Classes are converted by generating a new class whose methods use converted
  code.

  Methods are converted into unbound functions that have an additional first
  argument called `self`.

  Args:
    entity: Python callable or class to convert.
    recursive: Whether to recursively convert any functions that the
      converted function may call.
    arg_values: Optional dict of value hints for symbols including
      function arguments mapping string names to actual values. For example,
      `arg_values={'a': 1}` will map the variable `a` to the value `1`.
    arg_types: Optional dict of type hints for symbols including function
      arguments. Type hints allow specifying just the type of a variable, rather
      than a specific value.
    experimental_optional_features: `None`, a tuple of, or a single
      `tf.autograph.experimental.Feature` value. Controls the use of
      optional features in the conversion process.

  Returns:
    Same as `entity`, the converted Python function or class.

  Raises:
    ValueError: If the entity could not be converted.
  """
    try:
        program_ctx = converter.ProgramContext(
            options=converter.ConversionOptions(
                recursive=recursive,
                optional_features=experimental_optional_features),
            autograph_module=tf_inspect.getmodule(to_graph))
        nodes, name, namespace = conversion.entity_to_graph(
            entity, program_ctx, arg_values, arg_types)

        compiled_module, _ = compiler.ast_to_object(
            nodes,
            source_prefix=program_ctx.required_imports,
            include_source_map=True)

        # The compiled code should see everything the entry entity saw.
        # TODO(mdan): This might not work well if the call tree spans modules?
        for key, val in namespace.items():
            # Avoid overwriting entities that have been transformed.
            if key not in compiled_module.__dict__:
                compiled_module.__dict__[key] = val
        compiled = getattr(compiled_module, name)

        if hasattr(entity, '__defaults__'):
            logging.log(3, 'Default args mapping: %s has: %s', entity,
                        entity.__defaults__)
            compiled.__defaults__ = entity.__defaults__
        else:
            logging.log(3, 'Default args mapping: %s has no __defaults__',
                        entity)

        logging.log(3, 'Namespace of %s includes: %s', compiled,
                    compiled_module.__dict__.keys())

        if hasattr(compiled, '__globals__'):
            # Remove self to avoid circular references. This will probably only work
            # so long as the function is not reentrant.
            del compiled.__globals__[name]

        # Need this so the source_mapping attribute is available for the context
        # manager to access for runtime errors.
        #
        # Note that compiler.ast_to_object attaches the source map 'ag_source_map__'
        # symbol to the compiled module.
        # TODO(mdan): Record this statically in the generated code.
        # TODO(mdan): Rename this attribute to 'autograph_info__'
        source_map_attribute_name = 'ag_source_map'
        if getattr(compiled, source_map_attribute_name, None) is not None:
            # TODO(znado): change input problem errors into TransformError
            raise ValueError('cannot convert %s because it has an attribute '
                             '"%s", which is reserved for AutoGraph.' %
                             (compiled, source_map_attribute_name))
        setattr(compiled, source_map_attribute_name,
                compiled_module.__dict__['ag_source_map__'])

        return compiled
    except (ValueError, AttributeError, KeyError, NameError,
            AssertionError) as e:
        errors.report_internal_error(entity, e)
Example 19
def converted_call(f, options, args, kwargs, caller_fn_scope=None):
    """Compiles a function call inline.

  For internal use only.

  Args:
    f: The function to convert.
    options: converter.ConversionOptions
    args: Tuple, the original positional arguments of f
    kwargs: Dict, the original keyword arguments of f
    caller_fn_scope: Optional[function_wrappers.FunctionScope], the function
      scope of the converted function in which this call was originally made.

  Returns:
    Any, the result of executing a possibly-converted `f` with the given
      arguments.
  """
    logging.log(1, 'Converted call: %s\n    args: %s\n    kwargs: %s\n', f,
                args, kwargs)

    if conversion.check_cached_unconverted(f, options):
        return _call_unconverted(f, args, kwargs, options, False)

    if inspect_utils.isbuiltin(f):
        if f is eval:
            return py_builtins.eval_in_original_context(
                f, args, caller_fn_scope)
        if f is super:
            return py_builtins.super_in_original_context(
                f, args, caller_fn_scope)
        if kwargs:
            return py_builtins.overload_of(f)(*args, **kwargs)
        else:
            return py_builtins.overload_of(f)(*args)

    # TODO(mdan): Clean up the naming inconsistency.
    if hasattr(f, 'autograph_info__') or hasattr(f, '__ag_compiled'):
        logging.log(2, 'Permanently whitelisted: %s: already converted', f)
        return _call_unconverted(f, args, kwargs, options)

    # TODO(b/122265385): Remove this bypass.
    if (_is_known_loaded_type(f, 'wrapt', 'FunctionWrapper')
            or _is_known_loaded_type(f, 'wrapt', 'BoundFunctionWrapper')):
        logging.warn(
            'Entity {} appears to be decorated by wrapt, which is not yet supported'
            ' by AutoGraph. The function will be called without transformation.'
            ' You may however apply AutoGraph before the decorator.'.format(f))
        logging.log(2, 'Permanently whitelisted: %s: wrapt decorated', f)
        return _call_unconverted(f, args, kwargs, options)

    if _is_known_loaded_type(f, 'functools', '_lru_cache_wrapper'):
        logging.log(2, 'Permanently whitelisted: %s: lru_cache', f)
        return _call_unconverted(f, args, kwargs, options)

    # Constructors are permanently whitelisted.
    # TODO(mdan): Toggle as experimental feature instead.
    # TODO(b/124016764): Remove this limitation.
    if tf_inspect.isclass(f):
        logging.log(2, 'Permanently whitelisted: %s: constructor', f)
        return _call_unconverted(f, args, kwargs, options)

    # Other built-in modules are permanently whitelisted.
    # TODO(mdan): Figure out how to do this consistently for all stdlib modules.
    if any(f in m.__dict__.values()
           for m in (collections, pdb, copy, inspect, re)):
        logging.log(2, 'Permanently whitelisted: %s: part of builtin module',
                    f)
        return _call_unconverted(f, args, kwargs, options)

    # Custom ops and kernels are also permanently whitelisted.
    # See tensorflow.framework.load_library.
    if (hasattr(f, '__module__')
            and hasattr(f.__module__, '_IS_TENSORFLOW_PLUGIN')):
        logging.log(2, 'Permanently whitelisted: %s: TensorFlow plugin', f)
        return _call_unconverted(f, args, kwargs, options)

    if not options.user_requested and conversion.is_whitelisted_for_graph(f):
        return _call_unconverted(f, args, kwargs, options)

    # internal_convert_user_code is for example turned off when issuing a dynamic
    # call conversion from generated code while in nonrecursive mode. In that
    # case we evidently don't want to recurse, but we still have to convert
    # things like builtins.
    if not options.internal_convert_user_code:
        return _call_unconverted(f, args, kwargs, options)

    # TODO(mdan): Move this entire block inside to_graph.
    try:  # Begin of transformation error guards

        # Unwrap functools.partial objects
        # TODO(mdan): Consider sharing unwrapping logic with tf_inspect.
        # TODO(b/120224672): This unwrapping should be done before the checks above.
        while isinstance(f, functools.partial):
            args = f.args + args
            new_kwargs = {}
            if f.keywords is not None:
                new_kwargs.update(f.keywords)
            if kwargs is not None:
                new_kwargs.update(kwargs)
            kwargs = new_kwargs
            f = f.func

        if tf_inspect.isfunction(f) or tf_inspect.ismethod(f):
            # Regular functions
            target_entity = f
            f_self = inspect_utils.getmethodself(f)

            # TODO(b/119246461): This may be more elegantly handled using __get__?
            if f_self is not None:
                effective_args = (f_self, ) + args
            else:
                effective_args = args

        elif hasattr(f, '__call__') and hasattr(f, '__class__'):
            # Callable objects
            target_entity = f.__call__
            effective_args = (f, ) + args

        elif tf_inspect.isclass(f):
            # Constructors
            # Note: Until we support class constructors, and enable whole-class
            # conversion with an experimental flag, this branch is dead code.
            # TODO(mdan): Consider removing unless there is a compelling use case.
            target_entity = f
            effective_args = args

        else:
            target_entity = f
            raise NotImplementedError('unknown callable type "%s"' % type(f))

        if not tf_inspect.isclass(target_entity):
            if not hasattr(target_entity, '__code__'):
                logging.log(2, 'Permanently whitelisted: %s: native binding',
                            target_entity)
                return _call_unconverted(f, args, kwargs, options)
            elif (hasattr(target_entity.__code__, 'co_filename')
                  and target_entity.__code__.co_filename == '<string>'):
                # TODO(mdan): __globals__['txt'] might work in Py3.
                logging.log(
                    2, 'Permanently whitelisted: %s: dynamic code (exec?)',
                    target_entity)
                return _call_unconverted(f, args, kwargs, options)

        program_ctx = converter.ProgramContext(
            options=options,
            autograph_module=tf_inspect.getmodule(converted_call))
        converted_f = conversion.convert(target_entity, program_ctx)

        if logging.has_verbosity(2):
            logging.log(2, 'Defaults of %s : %s', converted_f,
                        converted_f.__defaults__)
            if six.PY3:
                logging.log(2, 'KW defaults of %s : %s', converted_f,
                            converted_f.__kwdefaults__)

            if kwargs is not None:
                callargs = tf_inspect.getcallargs(converted_f, *effective_args,
                                                  **kwargs)
            else:
                callargs = tf_inspect.getcallargs(converted_f, *effective_args)

            formatted_callargs = '\n'.join('    {}: {}'.format(k, v)
                                           for k, v in callargs.items())
            logging.log(2, 'Calling %s with\n%s\n', converted_f,
                        formatted_callargs)

    except Exception as e:  # pylint:disable=broad-except
        logging.log(1,
                    'Error transforming entity %s',
                    target_entity,
                    exc_info=True)
        if is_autograph_strict_conversion_mode():
            raise
        logging.warn(
            'Entity %s could not be transformed and will be executed as-is.'
            ' Please report this to the AutoGraph team. When filing the bug, set'
            ' the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and'
            ' attach the full output. Cause: %s', target_entity, e)
        return _call_unconverted(f, args, kwargs, options)

    with StackTraceMapper(converted_f), tf_stack.CurrentModuleFilter():
        try:
            if kwargs is not None:
                result = converted_f(*effective_args, **kwargs)
            else:
                result = converted_f(*effective_args)
        except Exception as e:
            _attach_metadata(e, converted_f, True)
            raise

    return result
Example 20
def to_graph(entity,
             recursive=True,
             arg_values=None,
             arg_types=None,
             experimental_optional_features=converter.Feature.ALL):
    """Converts a Python entity into a TensorFlow graph.

  Also see: `tf.autograph.to_code`, `tf.function`.

  Unlike `tf.function`, `to_graph` is a low-level transpiler that converts
  Python code to TensorFlow graph code. It does not implement any caching,
  variable management or create any actual ops, and is best used where greater
  control over the generated TensorFlow graph is desired. Another difference
  from `tf.function` is that `to_graph` will not wrap the graph into a
  TensorFlow function or a Python callable. Internally, `tf.function` uses
  `to_graph`.

  _Example Usage_

  ```python
    def foo(x):
      if x > 0:
        y = x * x
      else:
        y = -x
      return y

    converted_foo = to_graph(foo)

    x = tf.constant(1)
    y = converted_foo(x)  # converted_foo is a TensorFlow Op-like.
    assert is_tensor(y)
  ```

  Supported Python entities include:
    * functions
    * classes
    * object methods

  Functions are converted into new functions with converted code.

  Classes are converted by generating a new class whose methods use converted
  code.

  Methods are converted into unbound functions that have an additional first
  argument called `self`.

  Args:
    entity: Python callable or class to convert.
    recursive: Whether to recursively convert any functions that the
      converted function may call.
    arg_values: Optional dict of value hints for symbols including
      function arguments mapping string names to actual values. For example,
      `arg_values={'a': 1}` will map the variable `a` to the value `1`.
    arg_types: Optional dict of type hints for symbols including function
      arguments. Type hints allow specifying just the type of a variable, rather
      than a specific value.
    experimental_optional_features: `None`, a tuple of, or a single
      `tf.autograph.experimental.Feature` value. Controls the use of
      optional features in the conversion process.

  Returns:
    Same as `entity`, the converted Python function or class.

  Raises:
    ValueError: If the entity could not be converted.
  """
    try:
        # TODO(b/129431421): Remove these args.
        del arg_values
        del arg_types
        program_ctx = converter.ProgramContext(
            options=converter.ConversionOptions(
                recursive=recursive,
                optional_features=experimental_optional_features),
            autograph_module=tf_inspect.getmodule(to_graph))
        return conversion.convert(entity, program_ctx)
    except (ValueError, AttributeError, KeyError, NameError,
            AssertionError) as e:
        errors.report_internal_error(entity, e)
Esempio n. 21
def _simple_program_ctx(self):
    return converter.ProgramContext(
        options=converter.ConversionOptions(recursive=True),
        autograph_module=api)
Esempio n. 22
def _parse_and_analyze(f, autobatch_functions):
    """Performs preliminary analyses and transformations.

  The goal is to massage the source program into a form on which
  the `_AutoBatchingTransformer` below will be successful.

  Args:
    f: Function to analyze
    autobatch_functions: List of Python `str` names of autobatched functions.
      Arguments to these functions will be canonicalized to variable references;
      arguments to other functions will not be.

  Returns:
    node: A Python AST node representing the function, suitable for
      passing to `_AutoBatchingTransformer.visit`
    ctx: An AutoGraph `EntityContext` object, with some information about
      `f`.  Required for initializing `_AutoBatchingTransformer`.
  """
    namespace = {}

    # Get the AST of the function
    future_features = inspect_utils.getfutureimports(f)
    node, _ = parser.parse_entity(f, future_features=future_features)

    # Boilerplate for AutoGraph transforms
    entity_info = transformer.EntityInfo(source_code='',
                                         source_file=None,
                                         future_features=future_features,
                                         namespace=namespace)
    program_ctx = converter.ProgramContext(
        options=converter.ConversionOptions(recursive=True),
        autograph_module=None)
    ctx = converter.EntityContext(namer=naming.Namer(namespace),
                                  entity_info=entity_info,
                                  program_ctx=program_ctx)

    # Canonicalize away break statements
    node = converter.standard_analysis(node, ctx, is_initial=True)
    node = break_statements.transform(node, ctx)

    # Canonicalize away continue statements
    node = converter.standard_analysis(node, ctx, is_initial=False)
    node = continue_statements.transform(node, ctx)

    # Force single returns
    node = converter.standard_analysis(node, ctx, is_initial=False)
    node = return_statements.transform(node, ctx, default_to_null_return=False)

    # Transform into ANF
    # Replacing if tests and autobatched function call arguments because
    # that's where divergence can happen.
    # Replacing all function calls because the downstream transformation
    # expects calls to lead directly to assignments.
    def maybe_replace_function_argument(parent, field_name, child):
        del field_name, child
        if not anno.hasanno(parent.func, anno.Basic.QN):
            return False
        func_name = anno.getanno(parent.func, anno.Basic.QN)
        if str(func_name) in autobatch_functions:
            return True
        return False

    anf_config = [
        (anf.ASTEdgePattern(gast.If, 'test', anf.ANY), anf.REPLACE),
        (anf.ASTEdgePattern(anf.ANY, anf.ANY, gast.Call), anf.REPLACE),
        (anf.ASTEdgePattern(gast.Call, 'args',
                            anf.ANY), maybe_replace_function_argument),
    ]
    node = anf.transform(node, ctx, config=anf_config)
    node = converter.standard_analysis(node, ctx, is_initial=False)

    return node, ctx
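A hedged sketch of how a caller might use `_parse_and_analyze`; `fib` and the call below are invented for illustration and assume the snippet's imports are available in the same module.

def fib(n):
    if n <= 1:
        return n
    return fib(n - 1) + fib(n - 2)

# Treat `fib` itself as autobatched: its call arguments, every `if` test and
# every call site get pulled out into temporary assignments (ANF).
node, ctx = _parse_and_analyze(fib, autobatch_functions=['fib'])
# `node` can then be handed to the `_AutoBatchingTransformer` mentioned in the
# docstring, with `ctx` supplying the naming and annotation context.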
Esempio n. 23
def to_graph(e,
             recursive=True,
             verbose=False,
             arg_values=None,
             arg_types=None,
             partial_types=None,
             strip_decorators=None):
    """Converts a Python entity into equivalent code that uses TensorFlow ops.

  Supported Python entities include:
    * functions
    * classes

  Classes are converted by converting all their methods into a new class.

  Args:
    e: Union[Callable, Type], the Python entity to convert.
    recursive: bool, whether to recursively convert any functions that the
        converted function may call.
    verbose: bool, whether to output the compiled code in the logs.
    arg_values: Optional[Dict[Text, Any]], value hints for symbols including
        function arguments.
    arg_types: Optional[Dict[Text, Type]], type hints for symbols including
        function arguments.
    partial_types: Set[Type], reserved for internal use.
    strip_decorators: Tuple[Callable], same as
        ConversionOptions.strip_decorators.

  Returns:
    Union[Callable, Type], the converted entity, which is the same kind as e
    (that is, a function if e is a function, a class if e is a class, etc.), but
    its code has been converted to use TF ops.

  Raises:
    ValueError: If the entity could not be converted.
  """
    if strip_decorators is None:
        strip_decorators = ()
    strip_decorators += (convert, do_not_convert, converted_call)

    program_ctx = converter.ProgramContext(
        recursive=recursive,
        autograph_decorators=strip_decorators,
        partial_types=partial_types,
        autograph_module=tf_inspect.getmodule(to_graph),
        uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
    _, name, namespace = conversion.entity_to_graph(e, program_ctx, arg_values,
                                                    arg_types)

    nodes = []
    for dep in reversed(program_ctx.conversion_order):
        nodes.extend(program_ctx.dependency_cache[dep])

    compiled_module, compiled_src = compiler.ast_to_object(
        nodes,
        source_prefix=program_ctx.required_imports,
        include_source_map=True)

    # The compiled code should see everything the entry entity saw.
    # TODO(mdan): This might not work well if the call tree spans modules?
    for key, val in namespace.items():
        # Avoid overwriting entities that have been transformed.
        if key not in compiled_module.__dict__:
            compiled_module.__dict__[key] = val
    compiled = getattr(compiled_module, name)

    # Need this so the source_mapping attribute is available for the context
    # manager to access for runtime errors.
    #
    # Note that compiler.ast_to_object attaches the source map 'ag_source_map__'
    # symbol to the compiled module.
    # TODO(mdan): Record this statically in the generated code.
    # TODO(mdan): Rename this attribute to 'autograph_info__'
    source_map_attribute_name = 'ag_source_map'
    if getattr(compiled, source_map_attribute_name, None) is not None:
        raise ValueError('cannot convert %s because it has an attribute '
                         '"%s", which is reserved for AutoGraph.' %
                         (compiled, source_map_attribute_name))
    setattr(compiled, source_map_attribute_name,
            compiled_module.__dict__['ag_source_map__'])

    if verbose:
        logging.info('Compiled output of %s:\n\n%s\n', e, compiled_src)

    return compiled
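A hedged sketch of calling this older, TF 1.x-era `to_graph`; `triple_if_small` is an invented function and the graph/session boilerplate is only illustrative.

import tensorflow as tf

def triple_if_small(x):
    if x < 10:
        x = x * 3
    return x

compiled = to_graph(triple_if_small, recursive=True, verbose=True)

with tf.Graph().as_default():
    y = compiled(tf.constant(4))  # a graph-mode Tensor
    with tf.Session() as sess:    # TF 1.x-style execution
        print(sess.run(y))        # 12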
Esempio n. 24
def converted_call(f, args, kwargs, caller_fn_scope=None, options=None):
    """Compiles a function call inline.

  For internal use only.

  Note: The argument list is optimized for readability of generated code, which
  may look like this:

    ag__.converted_call(f, (arg1, arg2), None, fscope)
    ag__.converted_call(f, (), dict(arg1=val1, **kwargs), fscope)
    ag__.converted_call(f, (arg1, arg2) + varargs, dict(**kwargs), lscope)

  Args:
    f: The function to convert.
    args: Tuple, the original positional arguments of f.
    kwargs: Optional[Dict], the original keyword arguments of f.
    caller_fn_scope: Optional[function_wrappers.FunctionScope], the function
      scope of the converted function in which this call was originally made.
    options: Optional[converter.ConversionOptions], conversion options. If not
      specified, the value of caller_fn_scope.callopts is used. Either options
      or caller_fn_scope must be present.

  Returns:
    Any, the result of executing a possibly-converted `f` with the given
      arguments.
  """
    logging.log(1, 'Converted call: %s\n    args: %s\n    kwargs: %s\n', f,
                args, kwargs)

    if options is None:
        if caller_fn_scope is None:
            raise ValueError(
                'either caller_fn_scope or options must have a value')
        options = caller_fn_scope.callopts

    if conversion.is_in_whitelist_cache(f, options):
        logging.log(2, 'Whitelisted %s: from cache', f)
        return _call_unconverted(f, args, kwargs, options, False)

    if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
        logging.log(2, 'Whitelisted: %s: AutoGraph is disabled in context', f)
        return _call_unconverted(f, args, kwargs, options, False)

    if inspect_utils.isbuiltin(f):
        if f is eval:
            return py_builtins.eval_in_original_context(
                f, args, caller_fn_scope)
        if f is super:
            return py_builtins.super_in_original_context(
                f, args, caller_fn_scope)
        if kwargs:
            return py_builtins.overload_of(f)(*args, **kwargs)
        else:
            return py_builtins.overload_of(f)(*args)

    if is_autograph_artifact(f):
        logging.log(2, 'Permanently whitelisted: %s: AutoGraph artifact', f)
        return _call_unconverted(f, args, kwargs, options)

    # TODO(b/122265385): Remove this bypass.
    if (_is_known_loaded_type(f, 'wrapt', 'FunctionWrapper')
            or _is_known_loaded_type(f, 'wrapt', 'BoundFunctionWrapper')):
        logging.warn(
            '{} appears to be decorated by wrapt, which is not yet supported'
            ' by AutoGraph. The function will run as-is.'
            ' You may still apply AutoGraph before the wrapt decorator.'.
            format(f))
        logging.log(2, 'Permanently whitelisted: %s: wrapt decorated', f)
        return _call_unconverted(f, args, kwargs, options)

    if _is_known_loaded_type(f, 'functools', '_lru_cache_wrapper'):
        logging.log(2, 'Permanently whitelisted: %s: lru_cache', f)
        return _call_unconverted(f, args, kwargs, options)

    # Constructors are permanently whitelisted.
    # TODO(mdan): Toggle as experimental feature instead.
    # TODO(b/124016764): Remove this limitation.
    if tf_inspect.isclass(f):
        logging.log(2, 'Permanently whitelisted: %s: constructor', f)
        return _call_unconverted(f, args, kwargs, options)

    # Other built-in modules are permanently whitelisted.
    # TODO(mdan): Figure out how to do this consistently for all stdlib modules.
    if any(f in m.__dict__.values()
           for m in (collections, pdb, copy, inspect, re)):
        logging.log(2, 'Permanently whitelisted: %s: part of builtin module',
                    f)
        return _call_unconverted(f, args, kwargs, options)

    # Custom ops and kernels are also permanently whitelisted.
    # See tensorflow.framework.load_library.
    if (hasattr(f, '__module__')
            and hasattr(f.__module__, '_IS_TENSORFLOW_PLUGIN')):
        logging.log(2, 'Permanently whitelisted: %s: TensorFlow plugin', f)
        return _call_unconverted(f, args, kwargs, options)

    if not options.user_requested and conversion.is_whitelisted(f):
        return _call_unconverted(f, args, kwargs, options)

    # internal_convert_user_code is for example turned off when issuing a dynamic
    # call conversion from generated code while in nonrecursive mode. In that
    # case we evidently don't want to recurse, but we still have to convert
    # things like builtins.
    if not options.internal_convert_user_code:
        return _call_unconverted(f, args, kwargs, options)

    # TODO(mdan): Move this entire block inside to_graph.
    try:  # Begin of transformation error guards

        # Unwrap functools.partial objects
        # TODO(mdan): Consider sharing unwrapping logic with tf_inspect.
        # TODO(b/120224672): This unwrapping should be done before the checks above.
        while isinstance(f, functools.partial):
            args = f.args + args
            new_kwargs = {}
            if f.keywords is not None:
                new_kwargs.update(f.keywords)
            if kwargs is not None:
                new_kwargs.update(kwargs)
            kwargs = new_kwargs
            f = f.func

        if tf_inspect.isfunction(f) or tf_inspect.ismethod(f):
            # Regular functions
            target_entity = f
            f_self = inspect_utils.getmethodself(f)

            # TODO(b/119246461): This may be more elegantly handled using __get__?
            if f_self is not None:
                effective_args = (f_self, ) + args
            else:
                effective_args = args

        elif hasattr(f, '__class__') and hasattr(f.__class__, '__call__'):
            # Callable objects. Dunder methods have special lookup rules, see:
            # https://docs.python.org/3/reference/datamodel.html#specialnames
            target_entity = f.__class__.__call__
            effective_args = (f, ) + args

        else:
            target_entity = f
            raise NotImplementedError('unknown callable type "%s"' % type(f))

        if not tf_inspect.isclass(target_entity):
            if not hasattr(target_entity, '__code__'):
                logging.log(2, 'Permanently whitelisted: %s: native binding',
                            target_entity)
                return _call_unconverted(f, args, kwargs, options)
            elif (hasattr(target_entity.__code__, 'co_filename')
                  and target_entity.__code__.co_filename == '<string>'):
                # TODO(mdan): __globals__['txt'] might work in Py3.
                logging.log(
                    2, 'Permanently whitelisted: %s: dynamic code (exec?)',
                    target_entity)
                return _call_unconverted(f, args, kwargs, options)

        program_ctx = converter.ProgramContext(
            options=options,
            autograph_module=tf_inspect.getmodule(converted_call))
        converted_f = conversion.convert(target_entity, program_ctx)

        if logging.has_verbosity(2):
            logging.log(2, 'Defaults of %s : %s', converted_f,
                        converted_f.__defaults__)
            if six.PY3:
                logging.log(2, 'KW defaults of %s : %s', converted_f,
                            converted_f.__kwdefaults__)

            if kwargs is not None:
                callargs = tf_inspect.getcallargs(converted_f, *effective_args,
                                                  **kwargs)
            else:
                callargs = tf_inspect.getcallargs(converted_f, *effective_args)

            formatted_callargs = '\n'.join('    {}: {}'.format(k, v)
                                           for k, v in callargs.items())
            logging.log(2, 'Calling %s with\n%s\n', converted_f,
                        formatted_callargs)

    except Exception as e:  # pylint:disable=broad-except
        logging.log(1,
                    'Error transforming entity %s',
                    target_entity,
                    exc_info=True)
        if is_autograph_strict_conversion_mode():
            raise

        if isinstance(e, errors.UnsupportedLanguageElementError):
            # Repeating the check made upon function entry because the state might
            # have updated in the meantime.
            if not conversion.is_in_whitelist_cache(f, options):
                logging.warn(
                    'AutoGraph could not transform %s and will run it as-is.\n'
                    'Cause: %s', target_entity, e)
        else:
            logging.warn(
                'AutoGraph could not transform %s and will run it as-is.\n'
                'Please report this to the TensorFlow team. When filing the bug, set'
                ' the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and'
                ' attach the full output.\n'
                'Cause: %s', target_entity, e)

        return _call_unconverted(f, args, kwargs, options)

    with StackTraceMapper(converted_f), tf_stack.CurrentModuleFilter():
        try:
            if kwargs is not None:
                result = converted_f(*effective_args, **kwargs)
            else:
                result = converted_f(*effective_args)
        except Exception as e:
            _attach_metadata(e, converted_f, True)
            raise

    return result
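For completeness, a hedged sketch of invoking `converted_call` directly rather than from generated code; `double_positive` is invented, and the explicit `ConversionOptions` arguments are assumptions based on the attributes this snippet reads (`recursive`, `user_requested`).

import tensorflow as tf

def double_positive(x):
    if x > 0:
        return x * 2
    return x

# `user_requested` is assumed to be a constructor argument; the snippet above
# only reads it as an attribute of `options`.
opts = converter.ConversionOptions(recursive=True, user_requested=True)
result = converted_call(double_positive, (tf.constant(3),), None, options=opts)
# Generated code would spell the same call roughly as:
#   ag__.converted_call(double_positive, (x,), None, fscope)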