Example #1
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
  """Accumulate histograms in new variables."""
  with variable_scope.variable_op_scope(
      [hist_true, hist_false], None, 'hist_accumulate'):
    # Holds running total histogram of scores for records labeled True.
    hist_true_acc = variable_scope.get_variable(
        'hist_true_acc',
        initializer=array_ops.zeros_initializer(
            [nbins],
            dtype=hist_true.dtype),
        collections=collections,
        trainable=False)
    # Holds running total histogram of scores for records labeled False.
    hist_false_acc = variable_scope.get_variable(
        'hist_false_acc',
        initializer=array_ops.zeros_initializer(
            [nbins],
            dtype=hist_false.dtype),
        collections=collections,
        trainable=False)

    update_op = control_flow_ops.group(
        hist_true_acc.assign_add(hist_true),
        hist_false_acc.assign_add(hist_false),
        name='update_op')

    return hist_true_acc, hist_false_acc, update_op
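Stripped of the TensorFlow variable machinery, the pattern above is just two
running-total buffers plus a single update step that advances both together. A
minimal NumPy sketch of the same bookkeeping (the helper name and shapes are
made up for illustration, not part of either example):

```python
import numpy as np

def make_hist_accumulator(nbins):
    """Running per-class histogram totals, updated one batch at a time."""
    hist_true_acc = np.zeros(nbins, dtype=np.int64)
    hist_false_acc = np.zeros(nbins, dtype=np.int64)

    def update(hist_true, hist_false):
        # Mirrors the grouped update_op: both totals advance in one step.
        hist_true_acc[:] = hist_true_acc + hist_true
        hist_false_acc[:] = hist_false_acc + hist_false
        return hist_true_acc, hist_false_acc

    return update

update = make_hist_accumulator(nbins=4)
update([1, 0, 2, 0], [0, 3, 0, 1])
hist_true, hist_false = update([1, 1, 0, 0], [0, 1, 1, 0])
# hist_true  -> [2, 1, 2, 0], hist_false -> [0, 4, 1, 1]
```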
Example #2
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
    """Accumulate histograms in new variables."""
    with variable_scope.variable_scope(None, 'hist_accumulate',
                                       [hist_true, hist_false]):
        # Holds running total histogram of scores for records labeled True.
        hist_true_acc = variable_scope.get_variable(
            'hist_true_acc',
            initializer=array_ops.zeros_initializer([nbins],
                                                    dtype=hist_true.dtype),
            collections=collections,
            trainable=False)
        # Holds running total histogram of scores for records labeled False.
        hist_false_acc = variable_scope.get_variable(
            'hist_false_acc',
            initializer=array_ops.zeros_initializer([nbins],
                                                    dtype=hist_false.dtype),
            collections=collections,
            trainable=False)

        update_op = control_flow_ops.group(
            hist_true_acc.assign_add(hist_true),
            hist_false_acc.assign_add(hist_false),
            name='update_op')

        return hist_true_acc, hist_false_acc, update_op
Example #3
def weighted_moving_average(value,
                            decay,
                            weight,
                            truediv=True,
                            name="WeightedMovingAvg"):
    """Compute the weighted moving average of `value`.

    Conceptually, the weighted moving average is:
      moving_average(value * weight) / moving_average(weight),
    where a moving average updates by the rule
      new_value = decay * old_value + (1 - decay) * update

    Args:
      value: A tensor.
      decay: A float Tensor or float value.  The moving average decay.
      weight:  A tensor that holds the current value of a weight.  Its shape
        must be able to multiply (i.e. broadcast against) `value`.
      truediv:  Boolean.  If True, division by moving_average(weight) is
        floating point division.  If False, use the division implied by the
        dtypes.
      name: Optional name of the returned operation.

    Returns:
      An Operation that updates the weighted moving average.
    """
    # Unlike assign_moving_average, the weighted moving average doesn't modify
    # user-visible variables. It is the ratio of two internal variables, which
    # are moving averages of the updates.  Thus, the signature of this function
    # is quite different from assign_moving_average.
    with variable_scope.variable_op_scope([value, weight, decay], name,
                                          name) as scope:
        value_variable = variable_scope.get_variable(
            "value",
            initializer=array_ops.zeros_initializer(value.get_shape(),
                                                    dtype=value.dtype),
            trainable=False)
        weight_variable = variable_scope.get_variable(
            "weight",
            initializer=array_ops.zeros_initializer(weight.get_shape(),
                                                    dtype=weight.dtype),
            trainable=False)
        numerator = assign_moving_average(value_variable, value * weight,
                                          decay)
        denominator = assign_moving_average(weight_variable, weight, decay)

        if truediv:
            return math_ops.truediv(numerator, denominator, name=scope.name)
        else:
            return math_ops.div(numerator, denominator, name=scope.name)
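Before Example #4 repeats the same function, a quick numeric trace of the
docstring's update rule in plain Python (the constants are made up); it also
shows why the result is the ratio of two internal averages: the
zero-initialization bias is identical in numerator and denominator and cancels
in the quotient.

```python
# new_value = decay * old_value + (1 - decay) * update, applied separately to
# the numerator moving_average(value * weight) and the denominator
# moving_average(weight).
decay, value, weight = 0.9, 2.0, 0.5

num = den = 0.0                  # both internal variables start at zero
for _ in range(3):
    num = decay * num + (1 - decay) * (value * weight)
    den = decay * den + (1 - decay) * weight

print(num / den)                 # -> 2.0; the startup bias cancels in the
                                 # ratio, recovering `value` exactly here
```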
Example #4
def weighted_moving_average(
    value, decay, weight, truediv=True, name="WeightedMovingAvg"):
  """Compute the weighted moving average of `value`.

  Conceptually, the weighted moving average is:
    moving_average(value * weight) / moving_average(weight),
  where a moving average updates by the rule
    new_value = decay * old_value + (1 - decay) * update

  Args:
    value: A tensor.
    decay: A float Tensor or float value.  The moving average decay.
    weight:  A tensor that holds the current value of a weight.  Its shape
      must be able to multiply (i.e. broadcast against) `value`.
    truediv:  Boolean.  If True, division by moving_average(weight) is
      floating point division.  If False, use the division implied by the
      dtypes.
    name: Optional name of the returned operation.

  Returns:
    An Operation that updates the weighted moving average.
  """
  # Unlike assign_moving_average, the weighted moving average doesn't modify
  # user-visible variables. It is the ratio of two internal variables, which
  # are moving averages of the updates.  Thus, the signature of this function
  # is quite different from assign_moving_average.
  with variable_scope.variable_op_scope(
      [value, weight, decay], name, name) as scope:
    value_variable = variable_scope.get_variable(
        "value",
        initializer=array_ops.zeros_initializer(
            value.get_shape(), dtype=value.dtype),
        trainable=False
    )
    weight_variable = variable_scope.get_variable(
        "weight",
        initializer=array_ops.zeros_initializer(
            weight.get_shape(), dtype=weight.dtype),
        trainable=False
    )
    numerator = assign_moving_average(value_variable, value * weight, decay)
    denominator = assign_moving_average(weight_variable, weight, decay)

    if truediv:
      return math_ops.truediv(numerator, denominator, name=scope.name)
    else:
      return math_ops.div(numerator, denominator, name=scope.name)
Example #5
def histogram_fixed_width(values, value_range, nbins=100, use_locking=True, dtype=dtypes.int32, name=None):
    """Return histogram of values.

  Given the tensor `values`, this operation returns a rank 1 histogram counting
  the number of entries in `values` that fell into every bin.  The bins are
  equal width and determined by the arguments `value_range` and `nbins`.

  Args:
    values:  Numeric `Tensor`.
    value_range:  Shape [2] `Tensor`.  new_values <= value_range[0] will be
      mapped to hist[0], values >= value_range[1] will be mapped to hist[-1].
      Must be same dtype as new_values.
    nbins:  Integer number of bins in this histogram.
    use_locking:  Boolean.
      If `True`, use locking during the operation (optional).
    dtype:  dtype for returned histogram.
    name:  A name for this operation (defaults to 'histogram_fixed_width').

  Returns:
    A `Variable` holding histogram of values.

  Examples:
  ```python
  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
  nbins = 5
  value_range = [0.0, 5.0]
  new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]

  with tf.default_session() as sess:
    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
    variables.initialize_all_variables().run()
    sess.run(hist) => [2, 1, 1, 0, 2]
  ```
  """
    with variable_scope.variable_op_scope([values, value_range], name, "histogram_fixed_width") as scope:
        values = ops.convert_to_tensor(values, name="values")
        values = array_ops.reshape(values, [-1])
        value_range = ops.convert_to_tensor(value_range, name="value_range")

        # Map tensor values that fall within value_range to [0, 1].
        scaled_values = math_ops.truediv(values - value_range[0], value_range[1] - value_range[0], name="scaled_values")

        # Map tensor values within the open interval value_range to
        # {0, .., nbins - 1}; values outside the open interval will be
        # zero or less, or nbins or more.
        indices = math_ops.floor(nbins * scaled_values, name="indices")

        # Clip edge cases (e.g. value = value_range[1]) or "outliers."
        indices = math_ops.cast(clip_ops.clip_by_value(indices, 0, nbins - 1), dtypes.int32)

        # Dummy vector to scatter.
        # TODO(langmore) Replace non-ideal creation of large dummy vector once an
        # alternative to scatter is available.
        updates = array_ops.ones_like(indices, dtype=dtype)

        hist = variable_scope.get_variable(
            "hist", initializer=array_ops.zeros_initializer([nbins], dtype=dtype), trainable=False
        )
        hist_assign_zero = hist.assign(array_ops.zeros_like(hist))

        with ops.control_dependencies([hist_assign_zero]):
            return state_ops.scatter_add(hist, indices, updates, use_locking=use_locking, name=scope.name)
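The scale, floor, clip, scatter-add pipeline above is easier to follow outside
the graph. A standalone NumPy sketch of the same arithmetic (an analogue, not
the TensorFlow implementation) reproduces the docstring's expected output:

```python
import numpy as np

nbins = 5
value_range = np.array([0.0, 5.0])
new_values = np.array([-1.0, 0.0, 1.5, 2.0, 5.0, 15.0])

# Map values into [0, 1] relative to value_range, then floor into bin indices.
scaled = (new_values - value_range[0]) / (value_range[1] - value_range[0])
# Clip so that outliers land in the first and last bins.
indices = np.clip(np.floor(nbins * scaled), 0, nbins - 1).astype(np.int64)

hist = np.zeros(nbins, dtype=np.int64)
np.add.at(hist, indices, 1)      # the scatter_add step
print(hist)                      # -> [2 1 1 0 2], matching the docstring
```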
Example #6
def histogram_fixed_width(values,
                          value_range,
                          nbins=100,
                          use_locking=True,
                          dtype=dtypes.int32,
                          name=None):
    """Return histogram of values.

  Given the tensor `values`, this operation returns a rank 1 histogram counting
  the number of entries in `values` that fell into every bin.  The bins are
  equal width and determined by the arguments `value_range` and `nbins`.

  Args:
    values:  Numeric `Tensor`.
    value_range:  Shape [2] `Tensor`.  new_values <= value_range[0] will be
      mapped to hist[0], values >= value_range[1] will be mapped to hist[-1].
      Must be same dtype as new_values.
    nbins:  Integer number of bins in this histogram.
    use_locking:  Boolean.
      If `True`, use locking during the operation (optional).
    dtype:  dtype for returned histogram.
    name:  A name for this operation (defaults to 'histogram_fixed_width').

  Returns:
    A `Variable` holding histogram of values.

  Examples:
  ```python
  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
  nbins = 5
  value_range = [0.0, 5.0]
  new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]

  with tf.default_session() as sess:
    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
    variables.initialize_all_variables().run()
    sess.run(hist) => [2, 1, 1, 0, 2]
  ```
  """
    with variable_scope.variable_op_scope([values, value_range], name,
                                          'histogram_fixed_width') as scope:
        values = ops.convert_to_tensor(values, name='values')
        values = array_ops.reshape(values, [-1])
        value_range = ops.convert_to_tensor(value_range, name='value_range')

        # Map tensor values that fall within value_range to [0, 1].
        scaled_values = math_ops.truediv(values - value_range[0],
                                         value_range[1] - value_range[0],
                                         name='scaled_values')

        # Map tensor values within the open interval value_range to
        # {0, .., nbins - 1}; values outside the open interval will be
        # zero or less, or nbins or more.
        indices = math_ops.floor(nbins * scaled_values, name='indices')

        # Clip edge cases (e.g. value = value_range[1]) or "outliers."
        indices = math_ops.cast(clip_ops.clip_by_value(indices, 0, nbins - 1),
                                dtypes.int32)

        # Dummy vector to scatter.
        # TODO(langmore) Replace non-ideal creation of large dummy vector once an
        # alternative to scatter is available.
        updates = array_ops.ones_like(indices, dtype=dtype)

        hist = variable_scope.get_variable(
            'hist',
            initializer=array_ops.zeros_initializer([nbins], dtype=dtype),
            trainable=False)
        hist_assign_zero = hist.assign(array_ops.zeros_like(hist))

        with ops.control_dependencies([hist_assign_zero]):
            return state_ops.scatter_add(hist,
                                         indices,
                                         updates,
                                         use_locking=use_locking,
                                         name=scope.name)