Code example #1 (score: 0)
def _optimize_1(graph, live_out, alloc):
  """Deduce variable allocation strategies for a single CFG.

  Walks every block and instruction of `graph`, recording for each variable
  the cheapest storage strategy consistent with how it is used (read,
  carried across a block boundary, or carried across a function call).

  Args:
    graph: `ControlFlowGraph` to traverse.
    live_out: Set of `str` variable names that are live out of this graph
      (i.e., returned by the function this graph represents).
    alloc: Dictionary of allocation strategy deductions made so far.
      Mutated in place; deductions only ever move a variable to a more
      expensive strategy, never a cheaper one.
  """
  liveness_map = liveness.liveness_analysis(graph, set(live_out))
  if graph.exit_index() > 0:
    # Any non-empty graph carries the program counter between blocks.
    _variable_crosses_block_boundary(inst.pc_var, alloc)
  for block_idx in range(graph.exit_index()):
    block = graph.block(block_idx)
    block_liveness = liveness_map[block]
    per_op_pairs = zip(block.instructions, block_liveness.live_out_instructions)
    for op, live_after_op in per_op_pairs:
      for read_var in inst.pattern_traverse(_vars_read_by(op)):
        _variable_is_read(read_var, alloc)
      if isinstance(op, inst.FunctionCallOp):
        written_here = set(inst.pattern_flatten(op.vars_out))
        for surviving_var in live_after_op - written_here:
          # TODO(axch): Technically a variable only needs the conservative
          # storage strategy if it crosses a call to some function that writes
          # it (e.g., a recursive self-call).  Checking for that here would
          # require traversing the call graph.
          _variable_crosses_function_call_boundary(surviving_var, alloc)
        _variable_crosses_function_call_boundary(inst.pc_var, alloc)
    if isinstance(block.terminator, inst.BranchOp):
      # TODO(axch): Actually, being read by BranchOp only implies
      # _variable_is_read.  However, the downstream VM doesn't know how to pop a
      # condition variable that is not needed after the BranchOp, so for now we
      # have to allocate a register for it.
      _variable_crosses_block_boundary(block.terminator.cond_var, alloc)
    for crossing_var in block_liveness.live_out_of_block:
      _variable_crosses_block_boundary(crossing_var, alloc)
Code example #2 (score: 0)
File: lowering.py — Project: soon0698/probability
def _lower_function_calls_1(
    builder, graph, defined_in, live_out, function=True):
  """Lowers one function body, destructively.

  Mutates the given `ControlFlowGraphBuilder`, inserting `Block`s
  representing the new body.  Some of these may be the same as some
  `Block`s in the input `graph`, mutated; others may be newly
  allocated.

  Args:
    builder: `ControlFlowGraphBuilder` constructing the answer.
    graph: The `ControlFlowGraph` to lower.
    defined_in: A Python list of `str`.  The set of variables that
      are defined on entry to this `graph`.
    live_out: A Python list of `str`.  The set of variables that are
      live on exit from this `graph`.
    function: Python `bool`.  If `True` (the default), assume this is
      a `Function` body and convert an "exit" transfer into
      `IndirectGotoOp`; otherwise leave it as (`Program`) "exit".

  Raises:
    ValueError: If an invalid instruction is encountered, if a live
      variable is undefined, if different paths into a `Block` cause
      different sets of variables to be defined, or if trying to lower
      function calls in a program that already has `IndirectGotoOp`
      instructions (they confuse the liveness analysis).
  """
  # Liveness (which variables are still needed) and definedness (which
  # variables have been assigned) together decide where pops go.
  liveness_map = liveness.liveness_analysis(graph, set(live_out))
  defined_map = _definedness_analysis(graph, defined_in, liveness_map)
  for i in range(graph.exit_index()):
    block = graph.block(i)
    old_instructions = block.instructions
    # Resetting block.instructions here because we will build up the
    # list of new ones in place (via the `builder`).
    block.instructions = []
    builder.append_block(block)
    # Pop any variable that is defined coming into this block but no
    # longer live in it.
    builder.maybe_add_pop(
        defined_map[block].defined_into_block,
        liveness_map[block].live_into_block)
    # NOTE: the loop variable `live_out` shadows the parameter of the same
    # name; the parameter is not used again after the liveness call above.
    # The three zipped sequences are parallel: one (defined, live) pair per
    # old instruction.
    for op_i, (op, defined_out, live_out) in enumerate(zip(
        old_instructions,
        defined_map[block].defined_out_instructions,
        liveness_map[block].live_out_instructions)):
      if isinstance(op, inst.PrimOp):
        for name in inst.pattern_traverse(op.vars_in):
          if name in inst.pattern_flatten(op.vars_out):
            # Why not?  Because the stack discipline we are trying to
            # implement calls for popping variables as soon as they
            # become dead.  Now, if a PrimOp writes to the same
            # variable as it reads, the old version of that variable
            # dies.  Where to put the PopOp?  Before the PrimOp is no
            # good -- it still needs to be read.  After the PrimOp is
            # no good either -- it will pop the output, not the input.
            # Various solutions to this problem are possible, such as
            # adding a "drop the second-top element of this stack"
            # instruction, or orchestrating the pushes and pops
            # directly in the interpreter, but for now the simplest
            # thing is to just forbid this situation.
            msg = 'Cannot lower PrimOp that writes to its own input {}.'
            raise ValueError(msg.format(name))
        builder.append_instruction(op)
        builder.maybe_add_pop(defined_out, live_out)
      elif isinstance(op, inst.FunctionCallOp):
        names_pushed_here = inst.pattern_flatten(op.vars_out)
        for name in inst.pattern_traverse(op.vars_in):
          if name in names_pushed_here:
            # For the same reason as above.
            msg = 'Cannot lower FunctionCallOp that writes to its own input {}.'
            raise ValueError(msg.format(name))
        builder.append_instruction(
            # Some of the pushees (op.function.vars_in) may be in scope if this
            # is a self-call.  They may therefore overlap with op.vars_in.  If
            # so, those values will be copied and/or duplicated.  Insofar as
            # op.vars_in become dead, some of this will be undone by the
            # following pop.  This is wasteful but sound.
            inst.push_op(op.vars_in, op.function.vars_in))
        builder.maybe_add_pop(
            # Pop names defined on entry now, to save a stack frame in the
            # function call.  Can't pop names defined by this call
            # because they haven't been pushed yet.
            defined_out.difference(names_pushed_here), live_out)
        # A call in tail position (last instruction of the block) that
        # qualifies may jump straight to the callee's entry block.
        if (op_i == len(old_instructions) - 1
            and _optimizable_tail_call(op, builder.cur_block())):
          builder.end_block_with_tail_call(op.function.graph.block(0))
          # The check that the tail call is optimizable is equivalent to
          # checking that the push-pop pair below would do nothing.
        else:
          builder.split_block(op.function.graph.block(0))
          builder.append_instruction(
              # These extra levels of list protect me (I hope) from the
              # auto-unpacking in the implementation of push_op, in the case of
              # a function returning exactly one Tensor.
              inst.push_op([op.function.vars_out], [op.vars_out]))
          builder.append_instruction(
              inst.PopOp(inst.pattern_flatten(op.function.vars_out)))
          # The only way this would actually add a pop is if some name written
          # by this call was a dummy variable.
          builder.maybe_add_pop(frozenset(names_pushed_here), live_out)
      elif isinstance(op, (inst.PopOp)):
        # Presumably, lowering is applied before any `PopOp`s are present.  That
        # said, simply propagating them is sound.  (But see the `PopOp` case in
        # `liveness_analysis`.)
        builder.append_instruction(op)
      else:
        # NOTE(review): the message interpolates the op itself into a slot
        # worded as a block identifier; consider including the block index.
        raise ValueError('Invalid instruction in block {}.'.format(op))
    # For a `Function` body, rewrite an "exit" transfer into an
    # `IndirectGotoOp` (return); `Program` bodies keep the plain exit.
    if function:
      builder.maybe_adjust_terminator()