def node_to_graph(node, ctx, nocompile_decorators):
  """Convert Python code to equivalent TF graph mode code.

  Runs the conversion passes in a fixed, order-sensitive sequence, re-running
  static analysis whenever a preceding pass may have inserted new symbols or
  changed scopes.

  Args:
    node: A Python AST node representing the code to convert.
    ctx: An EntityContext object.
    nocompile_decorators: A tuple containing decorators to be stripped from
        functions during conversion.

  Returns:
    A Python AST node representing the converted code.
  """
  # TODO(mdan): Verify arguments for correctness.

  # TODO(mdan): Factor out common elements.
  # These include:
  #   * code move between blocks
  #   * visiting blocks in transformers

  # Certain steps, especially canonicalization, insert new symbols into the
  # tree, which must be accounted. Although less efficient, it is most robust
  # to re-run the analysis.

  node = _static_analysis_pass(node, ctx)

  # Past this point, line numbers are no longer accurate so we ignore the
  # source.
  # TODO(mdan): Is it feasible to reconstruct intermediate source code?
  ctx.source_code = None
  node = decorators.transform(node, nocompile_decorators)
  node = break_statements.transform(node, ctx)
  node = asserts.transform(node, ctx)

  # Note: sequencing continue canonicalization before for loop one avoids
  # dealing with the extra loop increment operation that the for
  # canonicalization creates.
  node = continue_statements.transform(node, ctx)
  ctx.namespace['len'] = len

  node = _static_analysis_pass(node, ctx)
  node = for_loops.transform(node, ctx)
  # for_loops may insert new global references.
  node = builtin_functions.transform(node, ctx)
  # TODO(mdan): Kept for CL consistency. Remove.
  # builtin_functions may insert new global references.
  ctx.namespace['print'] = print

  node = _static_analysis_pass(node, ctx)
  node = call_trees.transform(node, ctx, config.DEFAULT_UNCOMPILED_MODULES,
                              nocompile_decorators)
  node = control_flow.transform(node, ctx)

  # control_flow may create new symbols and change scopes.
  node = _static_analysis_pass(node, ctx)
  node = logical_expressions.transform(node)
  node = side_effect_guards.transform(node, ctx)
  return node
def node_to_graph(node, ctx, nocompile_decorators):
  """Convert Python code to equivalent TF graph mode code.

  Runs the conversion passes in a fixed, order-sensitive sequence, re-running
  static analysis whenever a preceding pass may have inserted new symbols or
  changed scopes.

  Args:
    node: A Python AST node representing the code to convert.
    ctx: An EntityContext object.
    nocompile_decorators: A tuple containing decorators to be stripped from
        functions during conversion.

  Returns:
    A Python AST node representing the converted code.
  """
  # TODO(mdan): Verify arguments for correctness.

  # TODO(mdan): Factor out common elements.
  # These include:
  #   * code move between blocks
  #   * visiting blocks in transformers

  # Certain steps, especially canonicalization, insert new symbols into the
  # tree, which must be accounted. Although less efficient, it is most robust
  # to re-run the analysis.

  node = _static_analysis_pass(node, ctx)

  # Past this point, line numbers are no longer accurate so we ignore the
  # source.
  # TODO(mdan): Is it feasible to reconstruct intermediate source code?
  ctx.source_code = None
  node = decorators.transform(node, nocompile_decorators)
  node = break_canonicalization.transform(node, ctx)
  node = asserts.transform(node, ctx)

  # Note: sequencing continue canonicalization before for loop one avoids
  # dealing with the extra loop increment operation that the for
  # canonicalization creates.
  node = continue_canonicalization.transform(node, ctx)
  ctx.namespace['len'] = len

  node = _static_analysis_pass(node, ctx)
  node = for_canonicalization.transform(node, ctx)
  # for_canonicalization may insert new global references.
  node = builtin_functions.transform(node, ctx)
  # builtin_functions may insert new global references.
  ctx.namespace['print'] = print

  node = _static_analysis_pass(node, ctx)
  node = call_trees.transform(node, ctx, config.DEFAULT_UNCOMPILED_MODULES,
                              nocompile_decorators)
  node = control_flow.transform(node, ctx)

  # control_flow may create new symbols and change scopes.
  node = _static_analysis_pass(node, ctx)
  node = logical_expressions.transform(node)
  node = side_effect_guards.transform(node, ctx)
  return node
def node_to_graph(node, ctx, nocompile_decorators):
  """Convert Python code to equivalent TF graph mode code.

  Runs the conversion passes in a fixed, order-sensitive sequence, re-running
  static analysis whenever a preceding pass may have inserted new symbols.

  Args:
    node: A Python AST node representing the code to convert.
    ctx: An EntityContext object.
    nocompile_decorators: A tuple containing decorators to be stripped from
        functions during conversion.

  Returns:
    A Python AST node representing the converted code.
  """
  # TODO(mdan): Verify arguments for correctness.

  # TODO(mdan): Factor out common elements.
  # These include:
  #   * keeping track of symbols that have been created
  #   * marking nodes (e.g. py_func wrappers) to suppress further processing
  #   * code move between blocks
  #   * insertion of new global references
  #   * visiting blocks in transformers

  # Certain steps, especially canonicalization, insert new symbols into the
  # tree, which must be accounted. Although less efficient, it is most robust
  # to re-run the analysis.

  node = _static_analysis_pass(node, ctx)
  node = decorators.transform(node, nocompile_decorators)
  node = break_canonicalization.transform(node, ctx.namer)

  # Note: sequencing continue canonicalization before for loop one avoids
  # dealing with the extra loop increment operation that the for
  # canonicalization creates.
  node = continue_canonicalization.transform(node, ctx.namer)
  ctx.namespace['len'] = len

  node = _static_analysis_pass(node, ctx)
  node = for_canonicalization.transform(node, ctx.namer)
  # for_canonicalization may insert new global references.
  node = builtin_functions.transform(node)
  # builtin_functions may insert new global references.
  ctx.namespace['print'] = print

  node = _static_analysis_pass(node, ctx)
  node = print_functions.transform(node)
  node = call_trees.transform(node, ctx.namer, ctx.namespace,
                              config.DEFAULT_UNCOMPILED_MODULES,
                              nocompile_decorators)
  node = control_flow.transform(node, ctx.namer)
  node = logical_expressions.transform(node)
  node = side_effect_guards.transform(node, ctx.namer)
  return node
def test_if_single_var(self):
  """The converted single-variable conditional negates a positive input."""

  def test_fn(n):
    if n > 0:
      n = -n
    return n

  tree = self.parse_and_analyze(test_fn, {})
  tree = control_flow.transform(tree, self.ctx)

  with self.compiled(tree, control_flow_ops.cond) as module:
    with self.test_session() as session:
      output = session.run(module.test_fn(constant_op.constant(1)))
      self.assertEqual(-1, output)
def test_while_single_var(self):
  """A single-variable while loop counts down to zero after conversion."""

  def test_fn(n):
    while n > 0:
      n -= 1
    return n

  tree = self.parse_and_analyze(test_fn, {})
  tree = control_flow.transform(tree, self.ctx)

  with self.compiled(tree, control_flow_ops.while_loop) as module:
    with self.test_session() as session:
      output = session.run(module.test_fn(constant_op.constant(5)))
      self.assertEqual(0, output)
def test_while_single_var(self):
  # Convert the loop, then check the compiled graph counts 5 down to 0.

  def test_fn(n):
    while n > 0:
      n -= 1
    return n

  converted = control_flow.transform(
      self.parse_and_analyze(test_fn, {}), self.ctx)

  with self.compiled(converted, control_flow_ops.while_loop) as mod:
    with self.test_session() as sess:
      self.assertEqual(0, sess.run(mod.test_fn(constant_op.constant(5))))
def test_if_single_var(self):
  # A positive scalar is negated by the converted conditional.

  def test_fn(n):
    if n > 0:
      n = -n
    return n

  converted = control_flow.transform(
      self.parse_and_analyze(test_fn, {}), self.ctx)

  with self.compiled(converted, control_flow_ops.cond) as mod:
    with self.test_session() as sess:
      self.assertEqual(-1, sess.run(mod.test_fn(constant_op.constant(1))))
def test_while_single_var(self):
  """The transformed while loop runs under a TF session and returns zero."""

  def test_fn(n):
    while n > 0:
      n -= 1
    return n

  tree = self.parse_and_analyze(test_fn, {})
  tree = control_flow.transform(tree, TestNamer())
  module = compiler.ast_to_object(tree)
  # The generated code refers to TF ops through a module-level `tf` alias.
  module.tf = control_flow_ops

  with self.test_session() as session:
    self.assertEqual(0, session.run(module.test_fn(constant_op.constant(5))))
def test_if_single_var(self):
  """The converted conditional negates positive inputs."""

  def test_fn(n):
    if n > 0:
      n = -n
    return n

  tree = self.parse_and_analyze(test_fn, {}, namer=TestNamer())
  tree = control_flow.transform(tree, self.ctx)
  module = compiler.ast_to_object(tree)
  module.tf = control_flow_ops  # Alias referenced by the generated code.

  with self.test_session() as session:
    self.assertEqual(-1, session.run(module.test_fn(constant_op.constant(1))))
def test_simple_while(self):
  """An accumulating while loop yields (sum, counter, bound) = (10, 5, 5)."""

  def test_fn(n):
    i = 0
    s = 0
    while i < n:
      s += i
      i += 1
    return s, i, n

  tree = self.parse_and_analyze(test_fn, {})
  tree = control_flow.transform(tree, TestNamer())
  module = compiler.ast_to_object(tree)
  module.tf = control_flow_ops  # Alias referenced by the generated code.

  with self.test_session() as session:
    outputs = session.run(module.test_fn(constant_op.constant(5)))
    self.assertEqual((10, 5, 5), outputs)
def test_simple_while(self):
  # The loop sums 0..n-1 while counting up; expect (10, 5, 5) for n=5.

  def test_fn(n):
    i = 0
    s = 0
    while i < n:
      s += i
      i += 1
    return s, i, n

  tree = self.parse_and_analyze(test_fn, {})
  tree = control_flow.transform(tree, self.ctx)

  with self.compiled(tree, control_flow_ops.while_loop) as module:
    with self.test_session() as session:
      outputs = session.run(module.test_fn(constant_op.constant(5)))
      self.assertEqual((10, 5, 5), outputs)
def test_simple_while(self):
  # Summing while loop: for n=5 expect s=10, i=5, n=5.

  def test_fn(n):
    i = 0
    s = 0
    while i < n:
      s += i
      i += 1
    return s, i, n

  converted = control_flow.transform(
      self.parse_and_analyze(test_fn, {}), self.ctx)

  with self.compiled(converted, control_flow_ops.while_loop) as mod:
    with self.test_session() as sess:
      self.assertEqual((10, 5, 5),
                       sess.run(mod.test_fn(constant_op.constant(5))))
def test_simple_while(self):
  """A multi-variable while loop compiles and runs under a TF session."""

  def test_fn(n):
    i = 0
    s = 0
    while i < n:
      s += i
      i += 1
    return s, i, n

  tree = self.parse_and_analyze(test_fn, {}, namer=TestNamer())
  tree = control_flow.transform(tree, self.ctx)
  module = compiler.ast_to_object(tree)
  module.tf = control_flow_ops  # Alias referenced by the generated code.

  with self.test_session() as session:
    self.assertEqual((10, 5, 5),
                     session.run(module.test_fn(constant_op.constant(5))))
def test_simple_if(self):
  """Both branches of the converted conditional are exercised."""

  def test_fn(n):
    a = 0
    b = 0
    if n > 0:
      a = -n
    else:
      b = 2 * n
    return a, b

  tree = self.parse_and_analyze(test_fn, {})
  tree = control_flow.transform(tree, self.ctx)

  with self.compiled(tree, control_flow_ops.cond) as module:
    with self.test_session() as session:
      # Positive input takes the true branch, negative the false branch.
      self.assertEqual((-1, 0),
                       session.run(module.test_fn(constant_op.constant(1))))
      self.assertEqual((0, -2),
                       session.run(module.test_fn(constant_op.constant(-1))))
def test_simple_if(self):
  # Checks the true branch (n=1) and the false branch (n=-1).

  def test_fn(n):
    a = 0
    b = 0
    if n > 0:
      a = -n
    else:
      b = 2 * n
    return a, b

  converted = control_flow.transform(
      self.parse_and_analyze(test_fn, {}), self.ctx)

  with self.compiled(converted, control_flow_ops.cond) as mod:
    with self.test_session() as sess:
      self.assertEqual((-1, 0),
                       sess.run(mod.test_fn(constant_op.constant(1))))
      self.assertEqual((0, -2),
                       sess.run(mod.test_fn(constant_op.constant(-1))))
def test_simple_if(self):
  """If/else over two variables evaluates correctly for both input signs."""

  def test_fn(n):
    a = 0
    b = 0
    if n > 0:
      a = -n
    else:
      b = 2 * n
    return a, b

  tree = self.parse_and_analyze(test_fn, {})
  tree = control_flow.transform(tree, TestNamer())
  module = compiler.ast_to_object(tree)
  module.tf = control_flow_ops  # Alias referenced by the generated code.

  with self.test_session() as session:
    positive = session.run(module.test_fn(constant_op.constant(1)))
    negative = session.run(module.test_fn(constant_op.constant(-1)))
    self.assertEqual((-1, 0), positive)
    self.assertEqual((0, -2), negative)
def test_simple_if(self):
  """A two-branch conditional compiled via ast_to_object behaves correctly."""

  def test_fn(n):
    a = 0
    b = 0
    if n > 0:
      a = -n
    else:
      b = 2 * n
    return a, b

  tree = self.parse_and_analyze(test_fn, {}, namer=TestNamer())
  tree = control_flow.transform(tree, self.ctx)
  module = compiler.ast_to_object(tree)
  module.tf = control_flow_ops  # Alias referenced by the generated code.

  with self.test_session() as session:
    self.assertEqual((-1, 0),
                     session.run(module.test_fn(constant_op.constant(1))))
    self.assertEqual((0, -2),
                     session.run(module.test_fn(constant_op.constant(-1))))