def test_list_pop(self):
    """pop() on a typed list returns the element and the shortened list."""

    def test_fn():
        l = [1, 2, 3]
        utils.set_element_type(l, dtypes.int32, ())
        s = l.pop()
        return s, l

    # Type analysis is required so the list transformer can see the
    # element-type annotation made by utils.set_element_type.
    namespace = {'utils': utils, 'dtypes': dtypes}
    ast_node = self.parse_and_analyze(
        test_fn, namespace, include_type_analysis=True)
    ast_node = lists.transform(ast_node, self.ctx)

    with self.compiled(ast_node) as result:
        result.utils = utils
        result.dtypes = dtypes
        with self.test_session() as sess:
            popped, remainder = result.test_fn()
            stacked = list_ops.tensor_list_stack(remainder, dtypes.int32)
            self.assertAllEqual(sess.run(stacked), [1, 2])
            self.assertAllEqual(sess.run(popped), 3)
# Example #2 (score: 0)
  def test_list_pop(self):
    """pop() on a typed list returns the element and the shortened list."""

    def test_fn():
      l = [1, 2, 3]
      utils.set_element_type(l, dtypes.int32, ())
      s = l.pop()
      return s, l

    # Type analysis is required so the list transformer can see the
    # element-type annotation made by utils.set_element_type.
    namespace = {'utils': utils, 'dtypes': dtypes}
    ast_node = self.parse_and_analyze(
        test_fn, namespace, include_type_analysis=True)
    ast_node = lists.transform(ast_node, self.ctx)

    with self.compiled(ast_node) as result:
      result.utils = utils
      result.dtypes = dtypes
      with self.test_session() as sess:
        popped, remainder = result.test_fn()
        stacked = list_ops.tensor_list_stack(remainder, dtypes.int32)
        self.assertAllEqual(sess.run(stacked), [1, 2])
        self.assertAllEqual(sess.run(popped), 3)
    def test_empty_list(self):
        """An empty literal list is lowered to an empty TensorList tensor."""

        def test_fn():
            return []

        ast_node = lists.transform(self.parse_and_analyze(test_fn, {}),
                                   self.ctx)

        with self.compiled(ast_node) as result:
            empty_list = result.test_fn()
            # An empty tensor list cannot be evaluated or stacked, so only
            # inspect the wrapper tensor itself.
            self.assertIsInstance(empty_list, ops.Tensor)
            self.assertEqual(dtypes.variant, empty_list.dtype)
    def test_initialized_list(self):
        """A non-empty literal list becomes a stackable TensorList."""

        def test_fn():
            return [1, 2, 3]

        ast_node = lists.transform(self.parse_and_analyze(test_fn, {}),
                                   self.ctx)

        with self.compiled(ast_node) as result:
            with self.test_session() as sess:
                stacked = list_ops.tensor_list_stack(result.test_fn(),
                                                     dtypes.int32)
                self.assertAllEqual(sess.run(stacked), [1, 2, 3])
# Example #5 (score: 0)
  def test_initialized_list(self):
    """A non-empty literal list becomes a stackable TensorList."""

    def test_fn():
      return [1, 2, 3]

    ast_node = lists.transform(self.parse_and_analyze(test_fn, {}), self.ctx)

    with self.compiled(ast_node) as result:
      with self.test_session() as sess:
        stacked = list_ops.tensor_list_stack(result.test_fn(), dtypes.int32)
        self.assertAllEqual(sess.run(stacked), [1, 2, 3])
# Example #6 (score: 0)
  def test_empty_list(self):
    """An empty literal list is lowered to an empty TensorList tensor."""

    def test_fn():
      return []

    ast_node = lists.transform(self.parse_and_analyze(test_fn, {}), self.ctx)

    with self.compiled(ast_node) as result:
      empty_list = result.test_fn()
      # An empty tensor list cannot be evaluated or stacked, so only inspect
      # the wrapper tensor itself.
      self.assertIsInstance(empty_list, ops.Tensor)
      self.assertEqual(dtypes.variant, empty_list.dtype)
    def test_double_list_pop(self):
        """Chained pop() calls must be sequenced left-to-right."""

        def test_fn(l):
            s = l.pop().pop()
            return s

        ast_node = lists.transform(self.parse_and_analyze(test_fn, {}),
                                   self.ctx)

        with self.compiled(ast_node) as result:
            # TODO(mdan): Pass a list of lists of tensor when we fully support that.
            # For now, we just pass a regular Python list of lists just to verify that
            # the two pop calls are sequenced properly.
            self.assertAllEqual(result.test_fn([1, 2, [1, 2, 3]]), 3)
    def test_list_append(self):
        """Successive append() calls accumulate into a single TensorList."""

        def test_fn():
            l = [1]
            l.append(2)
            l.append(3)
            return l

        ast_node = lists.transform(self.parse_and_analyze(test_fn, {}),
                                   self.ctx)

        with self.compiled(ast_node) as result:
            with self.test_session() as sess:
                stacked = list_ops.tensor_list_stack(result.test_fn(),
                                                     dtypes.int32)
                self.assertAllEqual(sess.run(stacked), [1, 2, 3])
# Example #9 (score: 0)
  def test_double_list_pop(self):
    """Chained pop() calls must be sequenced left-to-right."""

    def test_fn(l):
      s = l.pop().pop()
      return s

    ast_node = lists.transform(self.parse_and_analyze(test_fn, {}), self.ctx)

    with self.compiled(ast_node) as result:
      # TODO(mdan): Pass a list of lists of tensor when we fully support that.
      # For now, we just pass a regular Python list of lists just to verify that
      # the two pop calls are sequenced properly.
      self.assertAllEqual(result.test_fn([1, 2, [1, 2, 3]]), 3)
# Example #10 (score: 0)
    def test_list_stack(self):
        """tf.stack on a directive-annotated list lowers to array_ops.stack."""

        def test_fn():
            l = [1, 2, 3]
            return tf.stack(l)

        ast_node, ctx = self.prepare(test_fn, {})
        # Attach the element-type directive directly to the definition of `l`
        # (the target of the first assignment inside test_fn).
        definition, = anno.getanno(ast_node.body[0].body[0].targets[0],
                                   anno.Static.ORIG_DEFINITIONS)
        definition.directives[directives.set_element_type] = {
            'dtype': parser.parse_expression('tf.int32')
        }
        ast_node = lists.transform(ast_node, ctx)

        with self.compiled(ast_node, {}, array_ops.stack,
                           dtypes.int32) as result:
            with self.test_session() as sess:
                self.assertAllEqual(sess.run(result.test_fn()), [1, 2, 3])
# Example #11 (score: 0)
  def test_list_append(self):
    """Successive append() calls accumulate into a single TensorList."""

    def test_fn():
      l = [1]
      l.append(2)
      l.append(3)
      return l

    ast_node = lists.transform(self.parse_and_analyze(test_fn, {}), self.ctx)

    with self.compiled(ast_node) as result:
      with self.test_session() as sess:
        stacked = list_ops.tensor_list_stack(result.test_fn(), dtypes.int32)
        self.assertAllEqual(sess.run(stacked), [1, 2, 3])
# Example #12 (score: 0)
  def test_list_stack(self):
    """tf.stack on a directive-annotated list lowers to array_ops.stack."""

    def test_fn():
      l = [1, 2, 3]
      return tf.stack(l)

    ast_node, ctx = self.prepare(test_fn, {})
    # Attach the element-type directive to the definition of `l` (the target
    # of the first assignment in the prepared function body).
    definition, = anno.getanno(ast_node.body[0].targets[0],
                               anno.Static.ORIG_DEFINITIONS)
    definition.directives[directives.set_element_type] = {
        'dtype': parser.parse_expression('tf.int32')
    }
    ast_node = lists.transform(ast_node, ctx)

    with self.compiled(ast_node, {}, array_ops.stack, dtypes.int32) as result:
      with self.test_session() as sess:
        self.assertAllEqual(sess.run(result.test_fn()), [1, 2, 3])
# Example #13 (score: 0)
  def test_empty_annotated_list(self):
    """An empty list with an element-type annotation becomes a TensorArray."""

    def test_fn():
      l = []
      utils.set_element_type(l, dtypes.int32)
      l.append(1)
      return l

    ast_node = self.parse_and_analyze(test_fn,
                                      {'dtypes': dtypes, 'utils': utils})
    ast_node = lists.transform(ast_node, self.ctx)

    with self.compiled(ast_node, tensor_array_ops.TensorArray,
                       dtypes.int32) as result:
      # TODO(mdan): Attach these additional modules automatically.
      result.utils = utils
      result.dtypes = dtypes
      with self.test_session() as sess:
        self.assertAllEqual([1], sess.run(result.test_fn().stack()))
# Example #14 (score: 0)
  def test_empty_annotated_list(self):
    """An empty list with an element-type annotation becomes a TensorArray.

    The compiled function's result must match what the plain Python
    function computes.
    """

    def test_fn():
      l = []
      utils.set_element_type(l, dtypes.int32)
      l.append(1)
      return l

    node = self.parse_and_analyze(test_fn, {'dtypes': dtypes, 'utils': utils})
    node = lists.transform(node, self.ctx)

    with self.compiled(node, tensor_array_ops.TensorArray,
                       dtypes.int32) as result:
      # TODO(mdan): Attach these additional modules automatically.
      result.utils = utils
      result.dtypes = dtypes
      with self.test_session() as sess:
        # assertAllEqual, not assertEqual: sess.run returns an ndarray, and
        # comparing an ndarray to a Python list with == does not produce a
        # single boolean. This also matches the sibling tests' convention.
        self.assertAllEqual(test_fn(), sess.run(result.test_fn().stack()))
# Example #15 (score: 0)
    def test_list_pop(self):
        """pop() on a directive-annotated list returns element and remainder."""

        def test_fn():
            l = [1, 2, 3]
            s = l.pop()
            return s, l

        ast_node, ctx = self.prepare(test_fn, {})
        # Annotate the definition of `l` with element dtype and shape so the
        # transformer can emit a typed TensorList pop.
        definition, = anno.getanno(ast_node.body[0].body[0].targets[0],
                                   anno.Static.ORIG_DEFINITIONS)
        definition.directives[directives.set_element_type] = {
            'dtype': parser.parse_expression('tf.int32'),
            'shape': parser.parse_expression('()'),
        }
        ast_node = lists.transform(ast_node, ctx)

        with self.compiled(ast_node, {}, dtypes.int32) as result:
            with self.test_session() as sess:
                popped, remainder = result.test_fn()
                stacked = list_ops.tensor_list_stack(remainder, dtypes.int32)
                self.assertAllEqual(sess.run(stacked), [1, 2])
                self.assertAllEqual(sess.run(popped), 3)
# Example #16 (score: 0)
  def test_empty_annotated_lists_list_unpacked(self):
    """Element-type annotations survive list-style unpacking assignment.

    Each unpacked list must get its own TensorArray and accumulate
    independently of the other.
    """

    def test_fn():
      [l, m] = [], []
      utils.set_element_type(l, dtypes.int32)
      utils.set_element_type(m, dtypes.int32)
      l.append(1)
      m.append(2)
      return l, m

    node = self.parse_and_analyze(test_fn, {'dtypes': dtypes, 'utils': utils})
    node = lists.transform(node, self.ctx)

    with self.compiled(node, tensor_array_ops.TensorArray,
                       dtypes.int32) as result:
      result.utils = utils
      result.dtypes = dtypes
      with self.test_session() as sess:
        res_l, res_m = result.test_fn()
        # assertAllEqual, not assertEqual: sess.run returns ndarrays, and
        # comparing an ndarray to a Python list with == does not produce a
        # single boolean. This also matches the sibling tests' convention.
        self.assertAllEqual([1], sess.run(res_l.stack()))
        self.assertAllEqual([2], sess.run(res_m.stack()))
# Example #17 (score: 0)
  def test_list_pop(self):
    """pop() on a directive-annotated list returns element and remainder."""

    def test_fn():
      l = [1, 2, 3]
      s = l.pop()
      return s, l

    ast_node, ctx = self.prepare(test_fn, {})
    # Annotate the definition of `l` with element dtype and shape so the
    # transformer can emit a typed TensorList pop.
    definition, = anno.getanno(ast_node.body[0].body[0].targets[0],
                               anno.Static.ORIG_DEFINITIONS)
    definition.directives[directives.set_element_type] = {
        'dtype': parser.parse_expression('tf.int32'),
        'shape': parser.parse_expression('()'),
    }
    ast_node = lists.transform(ast_node, ctx)

    with self.compiled(ast_node, {}, dtypes.int32) as result:
      with self.test_session() as sess:
        popped, remainder = result.test_fn()
        stacked = list_ops.tensor_list_stack(remainder, dtypes.int32)
        self.assertAllEqual(sess.run(stacked), [1, 2])
        self.assertAllEqual(sess.run(popped), 3)
# Example #18 (score: 0)
  def test_list_stack(self):
    """tf.stack on a utils-annotated list lowers to array_ops.stack."""

    tf = None  # Will be replaced with a mock.

    def test_fn():
      l = [1, 2, 3]
      utils.set_element_type(l, dtypes.int32)
      return tf.stack(l)

    # Type analysis is required so the transformer can see the element-type
    # annotation made by utils.set_element_type.
    namespace = {'utils': utils, 'dtypes': dtypes}
    ast_node = self.parse_and_analyze(
        test_fn, namespace, include_type_analysis=True)
    ast_node = lists.transform(ast_node, self.ctx)

    with self.compiled(ast_node, array_ops.stack, dtypes.int32) as result:
      result.utils = utils
      result.dtypes = dtypes
      with self.test_session() as sess:
        self.assertAllEqual(sess.run(result.test_fn()), [1, 2, 3])
# Example #19 (score: 0)
    def test_list_stack(self):
        """tf.stack on a utils-annotated list lowers to array_ops.stack."""

        tf = None  # Will be replaced with a mock.

        def test_fn():
            l = [1, 2, 3]
            utils.set_element_type(l, dtypes.int32)
            return tf.stack(l)

        # Type analysis is required so the transformer can see the
        # element-type annotation made by utils.set_element_type.
        namespace = {'utils': utils, 'dtypes': dtypes}
        ast_node = self.parse_and_analyze(
            test_fn, namespace, include_type_analysis=True)
        ast_node = lists.transform(ast_node, self.ctx)

        with self.compiled(ast_node, array_ops.stack, dtypes.int32) as result:
            result.utils = utils
            result.dtypes = dtypes
            with self.test_session() as sess:
                self.assertAllEqual(sess.run(result.test_fn()), [1, 2, 3])
# Example #20 (score: 0)
def node_to_graph(node, ctx, nocompile_decorators):
  """Convert Python code to equivalent TF graph mode code.

  Applies a fixed sequence of AST transformation passes. The ordering of the
  passes is significant (see inline notes); static analysis is re-run between
  groups of passes because transformations can introduce new symbols.

  Args:
    node: A Python AST node representing the code to convert.
    ctx: An EntityContext object.
    nocompile_decorators: A tuple containing decorators to be stripped from
        functions during conversion.

  Returns:
    A tuple (node, deps):
        * node: A Python ast node, representing the converted code.
        * deps: A set of strings, the fully qualified names of entity
            dependencies that this node has.
  """
  # TODO(mdan): Verify arguments for correctness.

  # TODO(mdan): Factor out common elements.
  # These include:
  #   * code move between blocks
  #   * visiting blocks in transformers

  # Certain steps, especially canonicalization, insert new symbols into the
  # tree, which must be accounted. Although less efficient, it is most robust
  # to re-run the analysis.

  node = _static_analysis_pass(node, ctx)

  # TODO(mdan): Clean this up.
  # Some intermediate analyses are not required, and some comments got orphaned.

  # Past this point, line numbers are no longer accurate so we ignore the
  # source.
  # TODO(mdan): Is it feasible to reconstruct intermediate source code?
  ctx.source_code = None
  node = ifexp.transform(node, ctx)
  node, deps = decorators.transform(node, nocompile_decorators)
  node = break_statements.transform(node, ctx)
  node = asserts.transform(node, ctx)

  # Note: sequencing continue canonicalization before for loop one avoids
  # dealing with the extra loop increment operation that the for
  # canonicalization creates.
  node = continue_statements.transform(node, ctx)
  # Make the builtin `len` resolvable from generated code; presumably needed
  # by the lists/builtin_functions passes below -- confirm before removing.
  ctx.namespace['len'] = len

  # Re-analyze: the canonicalization passes above may have added symbols.
  node = _static_analysis_pass(node, ctx)
  node = single_return.transform(node, ctx)

  # Re-analyze again before the passes that consume type/definition info.
  node = _static_analysis_pass(node, ctx)
  node = lists.transform(node, ctx)
  node = builtin_functions.transform(node, ctx)

  node = _static_analysis_pass(node, ctx)
  node = call_trees.transform(node, ctx, config.DEFAULT_UNCOMPILED_MODULES,
                              nocompile_decorators)
  node = control_flow.transform(node, ctx)

  # control_flow may create new symbols and change scopes.
  node = _static_analysis_pass(node, ctx)
  node = logical_expressions.transform(node, ctx)
  node = side_effect_guards.transform(node, ctx)
  node = name_scopes.transform(node, ctx)

  return node, deps
def node_to_graph(node, ctx, nocompile_decorators):
    """Convert Python code to equivalent TF graph mode code.

    Applies a fixed sequence of AST transformation passes. The ordering of
    the passes is significant (see inline notes); static analysis is re-run
    between groups of passes because transformations can introduce new
    symbols.

    Args:
      node: A Python AST node representing the code to convert.
      ctx: An EntityContext object.
      nocompile_decorators: A tuple containing decorators to be stripped from
          functions during conversion.

    Returns:
      A tuple (node, deps):
          * node: A Python ast node, representing the converted code.
          * deps: A set of strings, the fully qualified names of entity
              dependencies that this node has.
    """
    # TODO(mdan): Verify arguments for correctness.

    # TODO(mdan): Factor out common elements.
    # These include:
    #   * code move between blocks
    #   * visiting blocks in transformers

    # Certain steps, especially canonicalization, insert new symbols into the
    # tree, which must be accounted. Although less efficient, it is most robust
    # to re-run the analysis.

    node = _static_analysis_pass(node, ctx)

    # TODO(mdan): Clean this up.
    # Some intermediate analyses are not required, and some comments got orphaned.

    # Past this point, line numbers are no longer accurate so we ignore the
    # source.
    # TODO(mdan): Is it feasible to reconstruct intermediate source code?
    ctx.source_code = None
    node = ifexp.transform(node, ctx)
    node, deps = decorators.transform(node, nocompile_decorators)
    node = break_statements.transform(node, ctx)
    node = asserts.transform(node, ctx)

    # Note: sequencing continue canonicalization before for loop one avoids
    # dealing with the extra loop increment operation that the for
    # canonicalization creates.
    node = continue_statements.transform(node, ctx)
    # Make the builtin `len` resolvable from generated code; presumably needed
    # by the lists/builtin_functions passes below -- confirm before removing.
    ctx.namespace['len'] = len

    # Re-analyze: the canonicalization passes above may have added symbols.
    node = _static_analysis_pass(node, ctx)
    node = single_return.transform(node, ctx)

    # Re-analyze again before the passes that consume type/definition info.
    node = _static_analysis_pass(node, ctx)
    node = lists.transform(node, ctx)
    node = builtin_functions.transform(node, ctx)

    node = _static_analysis_pass(node, ctx)
    node = call_trees.transform(node, ctx, config.DEFAULT_UNCOMPILED_MODULES,
                                nocompile_decorators)
    node = control_flow.transform(node, ctx)

    # control_flow may create new symbols and change scopes.
    node = _static_analysis_pass(node, ctx)
    node = logical_expressions.transform(node, ctx)
    node = side_effect_guards.transform(node, ctx)
    node = name_scopes.transform(node, ctx)

    return node, deps