Code example #1
from tensorflow.contrib import graph_editor as ge

def my_add_control_inputs(wait_to_do_ops, inputs_to_do_before):
    """Make every op in wait_to_do_ops run only after inputs_to_do_before."""
    for op in wait_to_do_ops:
        # Filter out ops that are already control inputs, since
        # ge.add_control_inputs raises ValueError on duplicates.
        ci = [
            i for i in inputs_to_do_before
            if op.control_inputs is None or i not in op.control_inputs
        ]
        ge.add_control_inputs(op, ci)
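A minimal usage sketch for the helper above (assuming TensorFlow 1.x, where tensorflow.contrib.graph_editor is available; the graph and op names are hypothetical):

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    a = tf.constant(1.0, name="a")
    b = tf.constant(2.0, name="b")
    c = tf.identity(a, name="c")

# Force c to wait for b even though there is no data edge between them.
my_add_control_inputs(wait_to_do_ops=[c.op], inputs_to_do_before=[b.op])
assert b.op in c.op.control_inputs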
Code example #2
from tensorflow.contrib import graph_editor as ge

def my_add_control_inputs(wait_to_do_ops, inputs_to_do_before):
    """Add control inputs."""
    for tf_op in wait_to_do_ops:
        # Only add ops that are not already control inputs of tf_op.
        ctl_inp = [
            i for i in inputs_to_do_before
            if tf_op.control_inputs is None or i not in tf_op.control_inputs
        ]
        ge.add_control_inputs(tf_op, ctl_inp)
Code example #3
from tensorflow.contrib import graph_editor as ge

def add_control_dependency(all_ops, swapin_op, target_op):
    for tensor in target_op.inputs:
        if "_grad" in tensor.name:
            # Find which gradient operation produces this tensor as an output.
            for op in all_ops:
                for out in op.outputs:
                    if out.name == tensor.name and "_grad" in op.name:
                        print("swapin_op:", swapin_op, "op:", op)
                        ge.add_control_inputs(swapin_op, op)
Code example #4
from tensorflow.contrib import graph_editor as ge

added_control = False

def add_control_dependency(all_ops, swapin_op, target_op):
    global added_control
    for tensor in target_op.inputs:
        if "_grad" in tensor.name:
            # Find which operation produces this tensor as an output.
            for op in all_ops:
                for out in op.outputs:
                    if out.name == tensor.name:  # and ("shape" not in op.name)
                        added_control = True
                        print("swapin_op:", swapin_op,
                              "control dependency added on op:", op)
                        ge.add_control_inputs(swapin_op, op)
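Unlike examples #1 and #2, both variants of add_control_dependency above never check for already-present control inputs, and ge.add_control_inputs raises a ValueError when asked to add a duplicate. A guard one could wrap around the call (a hypothetical helper, not part of the original code):

def add_control_input_once(op, cop):
    """Add cop as a control input of op unless it is already there."""
    if cop not in op.control_inputs:
        ge.add_control_inputs(op, [cop])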
Code example #5
    def _add_control_dependency(self, fw_op, bw_op, swapin_op):
        """Find and add a control dependency to the graph.

        This method does an in-place modification to the graph.

        Args:
          fw_op: a `tf.Operation`.
          bw_op: a `tf.Operation`.
          swapin_op: a `tf.Operation`.
        """
        # if lb is out of range, reset it to make sure
        # that a control dependency op will be found
        lb = self._lb
        if (self._topo_sort.get_order(bw_op) - lb <=
                self._topo_sort.get_order(fw_op)):
            lb = 1
        if fw_op in self._grad_ops:
            re = self._do_direct_order(fw_op, bw_op, lb, self._ub)
        elif self._ctrld_strategy is CTRLD_Strategy.CHAIN_RULE:
            re = self._do_chain_rule(fw_op, bw_op, lb, self._ub)
        elif self._ctrld_strategy is CTRLD_Strategy.DIRECT_ORDER:
            re = self._do_direct_order(fw_op, bw_op, lb, self._ub)
        else:
            re = self._do_chain_rule(fw_op, bw_op, lb, self._ub)

        ctrld_op, ctrld_order = re
        if ctrld_op:
            ge.add_control_inputs(swapin_op, ctrld_op)
            self._log_info(
                "Control dependency op {},  order: {}".format(
                    ctrld_op.name, ctrld_order), 1)
        else:
            self._log_info(
                "No control dependency op needed for swap in of op {}.".format(
                    fw_op.name), 1)
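The method above assumes a precomputed topological order over the graph (self._topo_sort). For reference, a minimal sketch of computing such per-op levels for a tf.Graph; this is an illustrative stand-in, not the class's actual implementation:

def topological_levels(graph):
    """Map each tf.Operation to 1 + the maximum level of its dependencies."""
    levels = {}

    def level(op):
        if op not in levels:
            # An op depends on the producers of its inputs and on its
            # explicit control inputs; a tf.Graph is a DAG, so this recursion
            # terminates.
            deps = [t.op for t in op.inputs] + list(op.control_inputs)
            levels[op] = 1 + max((level(d) for d in deps), default=0)
        return levels[op]

    for op in graph.get_operations():
        level(op)
    return levels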
Code example #6
File: quantize.py Project: SylChan/tensorflow
def Quantize(graph,
             weight_bits=8,
             weight_narrow_range=False,
             activation_bits=8,
             ema_decay=0.999,
             quant_delay=None,
             vars_collection=ops.GraphKeys.MOVING_AVERAGE_VARIABLES,
             is_training=True,
             quantize_folded_weights_use_ema=False):
  """Updates graph with quantization operations.

  Args:
    graph: Graph to modify.
    weight_bits: Number of bits to use for quantizing weights.
    weight_narrow_range: Whether to use a more efficient narrow range for
      weights quantization.  With weight_narrow_range true, the range is
      [1; 2^weight_bits - 1], with it false [0; 2^weight_bits - 1].
    activation_bits: Number of bits to use for quantizing activations.
    ema_decay: (Optional) Float, EMA decay parameter.  EMA is used to update
      quantization intervals for quantizing activations (see here about EMA:
      https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
    quant_delay: (Optional, default None) Int, count of global steps for which
      to delay quantization.  This helps weights stabilize at the start of
      training.
    vars_collection: (Optional) Collection where to store the variables for
      quantization interval ends.
    is_training: (Optional) Whether quantizing training graph or eval graph.
    quantize_folded_weights_use_ema: (Optional, default False) Whether to
      quantize weights after batchnorm-folding with exponential average
      quantization.
  Raises:
    ValueError: When quantization fails.
  """
  context = _QuantizeContext(graph, weight_bits, weight_narrow_range,
                             activation_bits, ema_decay, quant_delay,
                             vars_collection, is_training,
                             quantize_folded_weights_use_ema)

  graph_ops = graph.get_operations()

  # Filter out backprop and summary related operations, leave only interesting
  # op types.
  def _IsInterestingOpWithWeights(op):
    return (op.type in _QUANTIZABLE_TYPES and
            not op.name.startswith(common.SKIPPED_PREFIXES))

  for op in (op for op in graph_ops if _IsInterestingOpWithWeights(op)):
    if op.name.endswith('/depthwise'):
      # Separable convolution may consist of 2 convolution nodes. If so, skip
      # .../depthwise and only quantize the top one.
      separable_conv = context.GetOperationByNameDontThrow(
          op.name[:-len('/depthwise')])
      if separable_conv and separable_conv.type == 'Conv2D':
        continue
    # Quantize add ops that come after Conv2D or DepthwiseConv2dNative.
    if op.type in ['Conv2D', 'DepthwiseConv2dNative']:
      add_context_re = re.search(r'^(.*)/[^/]+/', op.name)
      if add_context_re is not None:
        context.add_contexts.add(add_context_re.group(1))
    if not op.name.endswith('_Fold'):
      folded_op = context.GetOperationByNameDontThrow(op.name + '_Fold')
      # Do nothing if found, it will be quantized when it is iterated over.
      if not folded_op:
        context.QuantizeOpWithWeights(op, folded=False)
    else:
      context.QuantizeOpWithWeights(op, folded=True)

  context.QuantizeAddContexts()

  # Once all quantization ops have been inserted in the graph, collect update
  # ops for their variables and modify the TF Slim update barrier (see
  # https://www.tensorflow.org/code/tensorflow/contrib/slim/python/slim/learning.py)
  # to depend on them.
  try:
    update_barrier = graph.get_operation_by_name('update_barrier')
  except KeyError:
    # In evaluation graph, this barrier may not exist.
    return None
  update_quant_ops = graph.get_collection_ref(_UPDATE_QUANT_OPS)
  graph_editor.add_control_inputs(update_barrier, update_quant_ops)
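The closing lines of Quantize show a common pattern: make an existing barrier op depend on newly inserted update ops. The same pattern in isolation (a sketch with hypothetical op names, assuming TF 1.x):

import tensorflow as tf
from tensorflow.contrib import graph_editor

g = tf.Graph()
with g.as_default():
    counter = tf.Variable(0, name="counter")
    update_op = tf.assign_add(counter, 1).op
    barrier = tf.no_op(name="update_barrier")

# The barrier now refuses to run until the update op has run.
graph_editor.add_control_inputs(barrier, [update_op])
assert update_op in barrier.control_inputs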
Code example #7
def run_after(a_tensor, b_tensor):
    """Force the op named `a_tensor` to run after the op named `b_tensor`."""
    # `graph` is assumed to be a tf.Graph defined in the enclosing scope.
    a = graph.get_operation_by_name(a_tensor)
    b = graph.get_operation_by_name(b_tensor)

    ge.add_control_inputs(a, b)
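A usage sketch, assuming run_after and a module-level graph as in the snippet above (op names are hypothetical):

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    a = tf.identity(tf.constant(1.0), name="a")
    b = tf.identity(tf.constant(2.0), name="b")

run_after("a", "b")
assert (graph.get_operation_by_name("b")
        in graph.get_operation_by_name("a").control_inputs)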