Example #1
def node_to_graph(node, ctx, nocompile_decorators):
    """Convert Python code to equivalent TF graph mode code.

  Args:
    node: A Python AST node representing the code to convert.
    ctx: An EntityContext object.
    nocompile_decorators: A tuple containing decorators to be stripped from
        functions during conversion.

  Returns:
    A Python AST node representing the converted code.
  """
    # TODO(mdan): Verify arguments for correctness.

    # TODO(mdan): Factor out common elements.
    # These include:
    #   * code move between blocks
    #   * visiting blocks in transformers

    # Certain steps, especially canonicalization, insert new symbols into the
    # tree, which must be accounted for. Although less efficient, it is most
    # robust to re-run the analysis.

    node = _static_analysis_pass(node, ctx)
    # Past this point, line numbers are no longer accurate so we ignore the
    # source.
    # TODO(mdan): Is it feasible to reconstruct intermediate source code?
    ctx.source_code = None
    node = decorators.transform(node, nocompile_decorators)
    node = break_statements.transform(node, ctx)
    node = asserts.transform(node, ctx)

    # Note: sequencing continue canonicalization before for loop one avoids
    # dealing with the extra loop increment operation that the for
    # canonicalization creates.
    node = continue_statements.transform(node, ctx)
    ctx.namespace['len'] = len

    node = _static_analysis_pass(node, ctx)
    node = for_loops.transform(node, ctx)
    # for_loops may insert new global references.
    node = builtin_functions.transform(node, ctx)
    # TODO(mdan): Kept for CL consistency. Remove.
    # builtin_functions may insert new global references.
    ctx.namespace['print'] = print

    node = _static_analysis_pass(node, ctx)
    node = call_trees.transform(node, ctx, config.DEFAULT_UNCOMPILED_MODULES,
                                nocompile_decorators)
    node = control_flow.transform(node, ctx)

    # control_flow may create new symbols and change scopes.
    node = _static_analysis_pass(node, ctx)
    node = logical_expressions.transform(node)
    node = side_effect_guards.transform(node, ctx)

    return node
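
The comments in node_to_graph above describe a recurring pattern: run a static analysis pass, apply a transform, and re-run the analysis whenever a transform may have introduced new symbols into the tree. Below is a minimal stand-alone sketch of that pattern using only the standard ast module; static_analysis_pass and InlineDouble here are hypothetical illustrations, not part of the converter.

import ast

def static_analysis_pass(tree):
    # Hypothetical stand-in for _static_analysis_pass: record which names
    # currently appear in the tree (a real pass would attach scope info).
    tree.symbol_names = {n.id for n in ast.walk(tree) if isinstance(n, ast.Name)}
    return tree

class InlineDouble(ast.NodeTransformer):
    # Hypothetical canonicalization: rewrite double(x) as x + x. Like the
    # real passes, it changes the set of symbols present in the tree.
    def visit_Call(self, node):
        self.generic_visit(node)
        if isinstance(node.func, ast.Name) and node.func.id == 'double':
            return ast.BinOp(left=node.args[0], op=ast.Add(), right=node.args[0])
        return node

tree = static_analysis_pass(ast.parse("y = double(x)"))
print(tree.symbol_names)               # e.g. {'y', 'double', 'x'}
tree = ast.fix_missing_locations(InlineDouble().visit(tree))
tree = static_analysis_pass(tree)      # re-analyze: the old annotation is stale
print(tree.symbol_names)               # e.g. {'y', 'x'}
print(ast.unparse(tree))               # y = x + x  (Python 3.9+ for unparse)
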
Example #2
def node_to_graph(node, ctx, nocompile_decorators):
  """Convert Python code to equivalent TF graph mode code.

  Args:
    node: A Python AST node representing the code to convert.
    ctx: An EntityContext object.
    nocompile_decorators: A tuple containing decorators to be stripped from
        functions during conversion.

  Returns:
    A Python AST node representing the converted code.
  """
  # TODO(mdan): Verify arguments for correctness.

  # TODO(mdan): Factor out common elements.
  # These include:
  #   * code move between blocks
  #   * visiting blocks in transformers

  # Certain steps, especially canonicalization, insert new symbols into the
  # tree, which must be accounted for. Although less efficient, it is most
  # robust to re-run the analysis.

  node = _static_analysis_pass(node, ctx)
  # Past this point, line numbers are no longer accurate so we ignore the
  # source.
  # TODO(mdan): Is it feasible to reconstruct intermediate source code?
  ctx.source_code = None
  node = decorators.transform(node, nocompile_decorators)
  node = break_canonicalization.transform(node, ctx)
  node = asserts.transform(node, ctx)

  # Note: sequencing continue canonicalization before for loop one avoids
  # dealing with the extra loop increment operation that the for
  # canonicalization creates.
  node = continue_canonicalization.transform(node, ctx)
  ctx.namespace['len'] = len

  node = _static_analysis_pass(node, ctx)
  node = for_canonicalization.transform(node, ctx)
  # for_canonicalization may insert new global references.
  node = builtin_functions.transform(node, ctx)
  # builtin_functions may insert new global references.
  ctx.namespace['print'] = print

  node = _static_analysis_pass(node, ctx)
  node = call_trees.transform(node, ctx, config.DEFAULT_UNCOMPILED_MODULES,
                              nocompile_decorators)
  node = control_flow.transform(node, ctx)

  # control_flow may create new symbols and change scopes.
  node = _static_analysis_pass(node, ctx)
  node = logical_expressions.transform(node)
  node = side_effect_guards.transform(node, ctx)

  return node
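
node_to_graph operates on an already-parsed AST; in the tests further below, that AST is obtained from a live Python function via parse_and_analyze. A rough stand-alone approximation of that first step, using only inspect, textwrap and ast (the real helper also builds the EntityContext and runs the static analysis, which is omitted here):

import ast
import inspect
import textwrap

def function_to_ast(fn):
    # Grab the function's source, dedent it (it may be nested inside a test
    # method), and parse it into a module whose first statement is the
    # FunctionDef for fn.
    source = textwrap.dedent(inspect.getsource(fn))
    return ast.parse(source)

def test_fn(a):
    return len(a)

node = function_to_ast(test_fn)
print(type(node.body[0]).__name__)     # FunctionDef
print(ast.unparse(node))               # reconstructed source of test_fn
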
Example #3
def node_to_graph(node, ctx, nocompile_decorators):
  """Convert Python code to equivalent TF graph mode code.

  Args:
    node: A Python AST node representing the code to convert.
    ctx: An EntityContext object.
    nocompile_decorators: A tuple containing decorators to be stripped from
        functions during conversion.

  Returns:
    A Python AST node representing the converted code.
  """
  # TODO(mdan): Verify arguments for correctness.

  # TODO(mdan): Factor out common elements.
  # These include:
  #   * keeping track of symbols that have been created
  #   * marking nodes (e.g. py_func wrappers) to suppress further processing
  #   * code move between blocks
  #   * insertion of new global references
  #   * visiting blocks in transformers

  # Certain steps, especially canonicalization, insert new symbols into the
  # tree, which must be accounted for. Although less efficient, it is most
  # robust to re-run the analysis.

  node = _static_analysis_pass(node, ctx)
  node = decorators.transform(node, nocompile_decorators)
  node = break_canonicalization.transform(node, ctx.namer)

  # Note: sequencing continue canonicalization before for loop one avoids
  # dealing with the extra loop increment operation that the for
  # canonicalization creates.
  node = continue_canonicalization.transform(node, ctx.namer)
  ctx.namespace['len'] = len

  node = _static_analysis_pass(node, ctx)
  node = for_canonicalization.transform(node, ctx.namer)
  # for_canonicalization may insert new global references.
  node = builtin_functions.transform(node)
  # builtin_functions may insert new global references.
  ctx.namespace['print'] = print

  node = _static_analysis_pass(node, ctx)
  node = print_functions.transform(node)
  node = call_trees.transform(node, ctx.namer, ctx.namespace,
                              config.DEFAULT_UNCOMPILED_MODULES,
                              nocompile_decorators)
  node = control_flow.transform(node, ctx.namer)
  node = logical_expressions.transform(node)
  node = side_effect_guards.transform(node, ctx.namer)

  return node
Example #4
    def test_print(self):
        def test_fn(a):
            print(a)

        node = self.parse_and_analyze(test_fn, {'print': print})
        node = builtin_functions.transform(node, self.ctx)
        result = compiler.ast_to_object(node)

        result.test_fn('a')
        self.assertTrue(isinstance(node.body[0].body[0].value, gast.Call))
Example #5
  def test_print(self):

    def test_fn(a):
      print(a)

    node = self.parse_and_analyze(test_fn, {'print': print})
    node = builtin_functions.transform(node, self.ctx)
    result = compiler.ast_to_object(node)

    result.test_fn('a')
    self.assertTrue(isinstance(node.body[0].body[0].value, gast.Call))
Example #6
    def test_len(self):
        def test_fn(a):
            return len(a)

        node = self.parse_and_analyze(test_fn, {'len': len})
        node = builtin_functions.transform(node, self.ctx)

        with self.compiled(node, array_ops.shape) as result:
            with self.test_session() as sess:
                self.assertEqual(
                    3, sess.run(result.test_fn(constant_op.constant([0, 0,
                                                                     0]))))
Example #7
    def test_len(self):
        def test_fn(a):
            return len(a)

        node = self.parse_and_analyze(test_fn, {'len': len})
        node = builtin_functions.transform(node)
        result = compiler.ast_to_object(node)
        setattr(result, 'tf', array_ops)

        with self.test_session() as sess:
            self.assertEqual(
                3, sess.run(result.test_fn(constant_op.constant([0, 0, 0]))))
Example #8
  def test_len(self):

    def test_fn(a):
      return len(a)

    node = self.parse_and_analyze(test_fn, {'len': len})
    node = builtin_functions.transform(node, self.ctx)

    with self.compiled(node, array_ops.shape) as result:
      with self.test_session() as sess:
        self.assertEqual(3,
                         sess.run(
                             result.test_fn(constant_op.constant([0, 0, 0]))))
Example #9
  def test_len(self):

    def test_fn(a):
      return len(a)

    node = self.parse_and_analyze(test_fn, {'len': len})
    node = builtin_functions.transform(node, self.ctx)
    result = compiler.ast_to_object(node)
    setattr(result, 'tf', array_ops)

    with self.test_session() as sess:
      self.assertEqual(3,
                       sess.run(
                           result.test_fn(constant_op.constant([0, 0, 0]))))
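
The test_len cases above verify that, after builtin_functions.transform, running the compiled test_fn on a 3-element tensor yields 3, i.e. that len is rewritten into a shape-based op. A minimal sketch of that kind of rewrite with a plain ast.NodeTransformer; the tf.shape(x)[0] target is an illustration of the idea, not necessarily the exact code the converter emits.

import ast

class LenToShape(ast.NodeTransformer):
    # Illustrative rewrite: len(x) -> tf.shape(x)[0].
    def visit_Call(self, node):
        self.generic_visit(node)
        if isinstance(node.func, ast.Name) and node.func.id == 'len':
            shape_call = ast.Call(
                func=ast.Attribute(value=ast.Name(id='tf', ctx=ast.Load()),
                                   attr='shape', ctx=ast.Load()),
                args=node.args, keywords=[])
            return ast.Subscript(value=shape_call,
                                 slice=ast.Constant(value=0),
                                 ctx=ast.Load())
        return node

tree = ast.parse("def test_fn(a):\n    return len(a)\n")
tree = ast.fix_missing_locations(LenToShape().visit(tree))
print(ast.unparse(tree))               # return len(a) becomes return tf.shape(a)[0]

The rewritten module then only needs some name tf bound to an object that provides shape, which is what setattr(result, 'tf', array_ops) arranges in Examples #7 and #9.
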
Example #10
  def test_print(self):

    def test_fn(a):
      print(a)

    node = self.parse_and_analyze(test_fn, {'print': print})
    node = builtin_functions.transform(node, self.ctx)

    with self.compiled(node) as result:
      try:
        out_capturer = six.StringIO()
        sys.stdout = out_capturer
        result.test_fn('a')
        self.assertEqual(out_capturer.getvalue(), 'a\n')
      finally:
        sys.stdout = sys.__stdout__
Example #11
    def test_print_with_py_func(self):
        def test_fn(a, b, c):
            print(a, b, c)

        node = self.parse_and_analyze(test_fn, {'print': print})
        node = builtin_functions.transform(node, self.ctx)

        # Note: it's relevant not to include logging_ops.Print here, to verify
        # that py_func is used.
        with self.compiled(node, script_ops.py_func) as result:
            with self.test_session() as sess:
                try:
                    out_capturer = six.StringIO()
                    sys.stdout = out_capturer
                    result.test_fn('a', 1, [2, 3])
                    sess.run(sess.graph.get_operations())
                    self.assertEqual(out_capturer.getvalue(), 'a 1 [2, 3]\n')
                finally:
                    sys.stdout = sys.__stdout__
Example #12
  def test_print_with_py_func(self):

    def test_fn(a, b, c):
      print(a, b, c)

    node = self.parse_and_analyze(test_fn, {'print': print})
    node = builtin_functions.transform(node, self.ctx)

    # Note: it's relevant not to include logging_ops.Print here, to verify
    # that py_func is used.
    with self.compiled(node, script_ops.py_func) as result:
      with self.test_session() as sess:
        try:
          out_capturer = six.StringIO()
          sys.stdout = out_capturer
          result.test_fn('a', 1, [2, 3])
          sess.run(sess.graph.get_operations())
          self.assertEqual(out_capturer.getvalue(), 'a 1 [2, 3]\n')
        finally:
          sys.stdout = sys.__stdout__
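
Examples #11 and #12 compile against script_ops.py_func only, to verify that the multi-argument print is routed through a py_func wrapper rather than a logging op. Here is a sketch of what that kind of rewrite can look like at the AST level; the bare py_func name and the empty output-type list are illustrative assumptions, not the converter's exact output.

import ast

class PrintToPyFunc(ast.NodeTransformer):
    # Illustrative rewrite: print(a, b, c) -> py_func(print, (a, b, c), []),
    # so the Python-side print runs when the wrapping op is executed.
    def visit_Call(self, node):
        self.generic_visit(node)
        if isinstance(node.func, ast.Name) and node.func.id == 'print':
            return ast.Call(
                func=ast.Name(id='py_func', ctx=ast.Load()),
                args=[ast.Name(id='print', ctx=ast.Load()),
                      ast.Tuple(elts=node.args, ctx=ast.Load()),
                      ast.List(elts=[], ctx=ast.Load())],
                keywords=[])
        return node

tree = ast.parse("def test_fn(a, b, c):\n    print(a, b, c)\n")
tree = ast.fix_missing_locations(PrintToPyFunc().visit(tree))
print(ast.unparse(tree))               # print(a, b, c) becomes py_func(print, (a, b, c), [])
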
Example #13
  def test_print_tuple(self):

    def test_fn(a, b, c):
      print(a, b, c)

    node = self.parse_and_analyze(test_fn, {'print': print})
    node = builtin_functions.transform(node, self.ctx)

    with self.compiled(node) as result:
      try:
        out_capturer = six.StringIO()
        sys.stdout = out_capturer
        result.test_fn('a', 1, [2, 3])
        # It appears that the print output looks odd only under Python 2.
        if six.PY2:
          self.assertEqual(out_capturer.getvalue(), "('a', 1, [2, 3])\n")
        else:
          self.assertEqual(out_capturer.getvalue(), 'a 1 [2, 3]\n')
      finally:
        sys.stdout = sys.__stdout__