Example #1
def transform_ast(self, node, ctx):
    # Standard pyct static-analysis pipeline: resolve qualified names and
    # activity (symbol read/write) information first, then run the
    # CFG-based analyses that type inference depends on.
    node = qual_names.resolve(node)
    node = activity.resolve(node, ctx)
    graphs = cfg.build(node)
    node = reaching_definitions.resolve(node, ctx, graphs)
    node = reaching_fndefs.resolve(node, ctx, graphs)
    node = type_inference.resolve(node, ctx, graphs, self.resolver)
    return node
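The enclosing class and `self.resolver` are not shown here; a minimal driver sketch, assuming the same pyct setup as Example #5 (the `run_type_inference` helper is hypothetical):

def run_type_inference(self, test_fn):
    # Hypothetical driver, mirroring Example #5: parse test_fn, build a
    # transformer.Context, then feed the AST through transform_ast above.
    node, source = parser.parse_entity(test_fn, future_features=())
    entity_info = transformer.EntityInfo(name=test_fn.__name__,
                                         source_code=source,
                                         source_file=None,
                                         future_features=(),
                                         namespace={})
    ctx = transformer.Context(entity_info, naming.Namer({}), None)
    return self.transform_ast(node, ctx)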
Example #2
def transform(node, ctx):
    graphs = cfg.build(node)
    node = qual_names.resolve(node)
    node = activity.resolve(node, ctx, None)
    node = reaching_definitions.resolve(node, ctx, graphs)
    node = reaching_fndefs.resolve(node, ctx, graphs)
    # The liveness results drive the control-flow lowering below.
    node = liveness.resolve(node, ctx, graphs)

    node = ControlFlowTransformer(ctx).visit(node)
    return node
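For context, the liveness annotations tell the control-flow lowering which variables must be threaded through loops and conditionals; a hypothetical input that exercises this path (`loop_fn` is illustrative only):

def loop_fn(n):
    # `s` is live across iterations and live out of the loop, so the
    # annotated AST lets ControlFlowTransformer thread it through the
    # lowered loop construct.
    s = 0
    for i in range(n):
        s = s + i
    return s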
Example #3
def convert4():
    node, ctx = get_node_and_ctx(f4)

    node = qual_names.resolve(node)
    cfgs = cfg.build(node)
    node = activity.resolve(node, ctx)
    node = reaching_definitions.resolve(node, ctx, cfgs)
    node = reaching_fndefs.resolve(node, ctx, cfgs)
    node = liveness.resolve(node, ctx, cfgs)

    # Liveness annotations are attached to the statement nodes themselves.
    print('live into `b = a + 1`:',
          anno.getanno(node.body[0], anno.Static.LIVE_VARS_IN))
    print('live into `return b`:',
          anno.getanno(node.body[1], anno.Static.LIVE_VARS_IN))
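`f4` and `get_node_and_ctx` are not shown. Judging by the printed statements, `f4` presumably looks like the first function below, and the helper can be sketched after the pattern in Example #5 (both are assumptions):

def f4(a):
    b = a + 1
    return b

def get_node_and_ctx(fn):
    # Hypothetical helper, mirroring Example #5: parse `fn` and wrap its
    # EntityInfo in a transformer.Context.
    node, source = parser.parse_entity(fn, future_features=())
    entity_info = transformer.EntityInfo(name=fn.__name__,
                                         source_code=source,
                                         source_file=None,
                                         future_features=(),
                                         namespace={})
    ctx = transformer.Context(entity_info, naming.Namer({}), None)
    return node, ctx

With that setup, the first print should report `a` as live into `b = a + 1`, and the second should report `b` as live into `return b`.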
Example #4
def mlir_gen_internal(node, entity_info):
  """Returns mlir module for unprocessed node `node`."""
  namer = naming.Namer({})
  graphs = cfg.build(node)
  ctx = transformer.Context(entity_info, namer, None)
  node = qual_names.resolve(node)
  node = activity.resolve(node, ctx)
  node = reaching_definitions.resolve(node, ctx, graphs)
  node = reaching_fndefs.resolve(node, ctx, graphs)
  node = liveness.resolve(node, ctx, graphs)
  mlir_generator = MLIRGen(ctx)
  mlir_generator.visit(node)
  return mlir_generator.prog
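A hedged sketch of how `mlir_gen_internal` might be driven; the `mlir_gen` wrapper is an assumption that only restates the parsing pattern of Examples #5 and #7:

def mlir_gen(fn):
  # Hypothetical entry point: parse `fn`, then hand the raw AST and its
  # EntityInfo to mlir_gen_internal above.
  node, source = parser.parse_entity(fn, future_features=())
  entity_info = transformer.EntityInfo(
      name=fn.__name__,
      source_code=source,
      source_file=None,
      future_features=(),
      namespace=sys.modules[fn.__module__].__dict__)
  return mlir_gen_internal(node, entity_info)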
Example #5
def _parse_and_analyze(self, test_fn):
    # TODO(mdan): Use a custom FunctionTransformer here.
    node, source = parser.parse_entity(test_fn, future_features=())
    entity_info = transformer.EntityInfo(name=test_fn.__name__,
                                         source_code=source,
                                         source_file=None,
                                         future_features=(),
                                         namespace={})
    node = qual_names.resolve(node)
    namer = naming.Namer({})
    ctx = transformer.Context(entity_info, namer, None)
    node = activity.resolve(node, ctx)
    graphs = cfg.build(node)
    node = reaching_fndefs.resolve(node, ctx, graphs)
    node = liveness.resolve(node, ctx, graphs)
    return node
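The returned node carries the liveness annotations, which can be read back as in Example #3 (a minimal sketch; `test_fn` is hypothetical and the call assumes the enclosing test class):

def test_fn(a):
    b = a + 1
    return b

node = self._parse_and_analyze(test_fn)
print(anno.getanno(node.body[0], anno.Static.LIVE_VARS_IN))  # expect {a}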
Example #6
  def transform_ast(self, node, ctx):
    node = _apply_py_to_tf_passes(node, ctx)
    # TODO(mdan): Enable this.
    # node = anf.transform(node, ctx)

    graphs = cfg.build(node)
    node = qual_names.resolve(node)
    node = activity.resolve(node, ctx)
    node = reaching_definitions.resolve(node, ctx, graphs)
    node = reaching_fndefs.resolve(node, ctx, graphs)
    node = type_inference.resolve(node, ctx, graphs,
                                  TFRTypeResolver(self._op_defs))

    mlir_generator = TFRGen(ctx, self._op_defs)
    mlir_generator.visit(node)
    return mlir_generator.code_buffer
Example #7
def _live_tensors(f, attr_name="inputs"):
    """Returns the indices of the used inputs.

    Note: This currently only handles direct index accesses, e.g.
    op.inputs[1]. If the function applies slicing or a list comprehension
    to attr_name, this returns _ALL. That ensures the result is correct,
    even if inefficient.

    Args:
      f: A grad function, taking the op as first argument.
      attr_name: op attr to track. "inputs" or "outputs".

    Returns:
      One of:
        * a set of integers representing individual indices of inputs used;
        * the value _ALL, if indices are used but cannot be determined;
        * an empty set, if no inputs are used.
    """
    node, _ = parser.parse_entity(f, ())
    entity_info = transformer.EntityInfo(
        name=f.__name__,
        source_code=None,
        source_file=None,
        future_features=(),
        namespace=sys.modules[f.__module__].__dict__)
    ctx = transformer.Context(entity_info, None, None)

    graphs = cfg.build(node)
    node = qual_names.resolve(node)
    node = activity.resolve(node, ctx, None)
    node = reaching_fndefs.resolve(node, ctx, graphs)
    node = liveness.resolve(node, ctx, graphs)

    op_arg_name = anno.getanno(node.args.args[0], anno.Basic.QN)
    op_inputs_outputs_name = qual_names.QN(op_arg_name, attr=attr_name)

    special_tracker = _SubscriptUseTracker(ctx, (op_inputs_outputs_name, ))
    node = special_tracker.visit(node)

    live_vars_in = anno.getanno(node.body[0], anno.Static.LIVE_VARS_IN)
    inputs_outputs_used_qns = set()
    for v in special_tracker.complex_reads:
        # Complicated patterns like op.inputs[:3]. Could be smarter about them
        # if they matter much.
        if v == op_inputs_outputs_name:
            return _ALL
    for v in live_vars_in:
        if v in special_tracker.reads:
            if (v.has_subscript() and v.parent == op_inputs_outputs_name):
                inputs_outputs_used_qns.add(v)
            elif v == op_inputs_outputs_name:
                # When op.{attr_name} is used directly, assume all tensors are
                # used for now. In that case, no point digging further.
                # TODO(mdan): We can descend into tuple expansions.
                return _ALL

    function_calls_tracker = _FunctionCallsTracker(ctx, op_arg_name)
    node = function_calls_tracker.visit(node)

    input_output_indices = set()

    for called_f in function_calls_tracker.calls:
        child_indices = _live_tensors(called_f, attr_name=attr_name)
        if child_indices is _ALL:
            return _ALL
        input_output_indices |= child_indices

    for v in inputs_outputs_used_qns:
        assert v.has_subscript()
        _, subscript = v.qn
        if not subscript.is_simple():
            # Not a number, assuming it can be anything.
            return _ALL
        subscript_val, = subscript.qn
        if (not isinstance(subscript_val, qual_names.Literal)
                or not isinstance(subscript_val.value, int)):
            # Not a numeric literal, so assume it can be anything.
            return _ALL
        input_output_indices.add(subscript_val.value)
    return input_output_indices
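A hedged usage sketch; the gradient functions are hypothetical and the noted results are expectations, not captured output:

def _mul_grad(op, grad):
    # Reads op.inputs[0] and op.inputs[1] by direct index.
    return grad * op.inputs[1], grad * op.inputs[0]

def _relu_grad(op, grad):
    # Reads only op.outputs[0].
    return grad * (op.outputs[0] > 0)

_live_tensors(_mul_grad)                         # expected: {0, 1}
_live_tensors(_relu_grad, attr_name="outputs")   # expected: {0}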