Example #1
  def testSqueezablePredictionsExpectedRankDiffMinus1(self):
    label_values = np.ones(shape=(2, 3, 5))
    prediction_values = np.zeros(shape=(2, 3, 1))
    static_labels, static_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values, expected_rank_diff=-1))

    labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    dynamic_labels, dynamic_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder, expected_rank_diff=-1))

    expected_prediction_values = np.reshape(prediction_values, newshape=(2, 3))
    with self.cached_session():
      self.assertAllEqual(label_values, self.evaluate(static_labels))
      self.assertAllEqual(expected_prediction_values,
                          self.evaluate(static_predictions))
      feed_dict = {
          labels_placeholder: label_values,
          predictions_placeholder: prediction_values
      }
      self.assertAllEqual(
          label_values, dynamic_labels.eval(feed_dict=feed_dict))
      self.assertAllEqual(
          expected_prediction_values,
          dynamic_predictions.eval(feed_dict=feed_dict))
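
The `expected_rank_diff=-1` behavior exercised above can be restated as a standalone sketch. This is illustrative only and assumes the TF 1.x internal module path `tensorflow.python.ops.confusion_matrix`, where the function under test lives:

import tensorflow as tf
from tensorflow.python.ops import confusion_matrix

# expected_rank_diff=-1 declares that rank(predictions) should be one less
# than rank(labels). Here the ranks are equal and `predictions` ends in a
# size-1 dimension, so that dimension is squeezed away.
labels = tf.ones((2, 3, 5))
predictions = tf.zeros((2, 3, 1))
labels_out, predictions_out = confusion_matrix.remove_squeezable_dimensions(
    labels, predictions, expected_rank_diff=-1)
print(labels_out.shape, predictions_out.shape)  # (2, 3, 5) (2, 3)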
Example #2
    def testSameShape(self):
        label_values = np.ones(shape=(2, 3, 1))
        prediction_values = np.zeros_like(label_values)
        static_labels, static_predictions = (
            confusion_matrix.remove_squeezable_dimensions(
                label_values, prediction_values))

        labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
        predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
        dynamic_labels, dynamic_predictions = (
            confusion_matrix.remove_squeezable_dimensions(
                labels_placeholder, predictions_placeholder))

        with self.cached_session():
            self.assertAllEqual(label_values, self.evaluate(static_labels))
            self.assertAllEqual(prediction_values,
                                self.evaluate(static_predictions))
            feed_dict = {
                labels_placeholder: label_values,
                predictions_placeholder: prediction_values
            }
            self.assertAllEqual(label_values,
                                dynamic_labels.eval(feed_dict=feed_dict))
            self.assertAllEqual(prediction_values,
                                dynamic_predictions.eval(feed_dict=feed_dict))
Example #3
  def testSqueezablePredictionsExpectedRankDiffMinus1(self):
    label_values = np.ones(shape=(2, 3, 5))
    prediction_values = np.zeros(shape=(2, 3, 1))
    static_labels, static_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values, expected_rank_diff=-1))

    labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    dynamic_labels, dynamic_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder, expected_rank_diff=-1))

    expected_prediction_values = np.reshape(prediction_values, newshape=(2, 3))
    with self.cached_session():
      self.assertAllEqual(label_values, static_labels.eval())
      self.assertAllEqual(expected_prediction_values, static_predictions.eval())
      feed_dict = {
          labels_placeholder: label_values,
          predictions_placeholder: prediction_values
      }
      self.assertAllEqual(
          label_values, dynamic_labels.eval(feed_dict=feed_dict))
      self.assertAllEqual(
          expected_prediction_values,
          dynamic_predictions.eval(feed_dict=feed_dict))
Example #4
    def testBothScalarShape(self):
        label_values = 1.0
        prediction_values = 0.0
        static_labels, static_predictions = (
            confusion_matrix.remove_squeezable_dimensions(
                label_values, prediction_values))

        labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
        predictions_placeholder = array_ops.placeholder(dtype=dtypes.float32)
        dynamic_labels, dynamic_predictions = (
            confusion_matrix.remove_squeezable_dimensions(
                labels_placeholder, predictions_placeholder))

        with self.cached_session():
            self.assertAllEqual(label_values, self.evaluate(static_labels))
            self.assertAllEqual(prediction_values,
                                self.evaluate(static_predictions))
            feed_dict = {
                labels_placeholder: label_values,
                predictions_placeholder: prediction_values
            }
            self.assertAllEqual(label_values,
                                dynamic_labels.eval(feed_dict=feed_dict))
            self.assertAllEqual(prediction_values,
                                dynamic_predictions.eval(feed_dict=feed_dict))
Example #5
  def testSqueezableLabels(self):
    label_values = np.ones(shape=(2, 3, 1))
    prediction_values = np.zeros(shape=(2, 3))
    static_labels, static_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values))

    labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    dynamic_labels, dynamic_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder))

    expected_label_values = np.reshape(label_values, newshape=(2, 3))
    with self.test_session():
      self.assertAllEqual(expected_label_values, static_labels.eval())
      self.assertAllEqual(prediction_values, static_predictions.eval())
      feed_dict = {
          labels_placeholder: label_values,
          predictions_placeholder: prediction_values
      }
      self.assertAllEqual(
          expected_label_values, dynamic_labels.eval(feed_dict=feed_dict))
      self.assertAllEqual(
          prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
Example #6
    def testUnsqueezablePredictions(self):
        label_values = np.ones(shape=(2, 3))
        prediction_values = np.zeros(shape=(2, 3, 2))
        with self.assertRaisesRegexp(ValueError, r"Can not squeeze dim\[2\]"):
            confusion_matrix.remove_squeezable_dimensions(label_values, prediction_values)

        labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
        predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
        dynamic_labels, dynamic_predictions = confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder
        )

        with self.test_session():
            feed_dict = {labels_placeholder: label_values, predictions_placeholder: prediction_values}
            self.assertAllEqual(label_values, dynamic_labels.eval(feed_dict=feed_dict))
            with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "Tried to explicitly squeeze dimension 2"):
                dynamic_predictions.eval(feed_dict=feed_dict)
Example #7
    def testSameShapeExpectedRankDiff0(self):
        label_values = np.ones(shape=(2, 3, 1))
        prediction_values = np.zeros_like(label_values)
        static_labels, static_predictions = confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values, expected_rank_diff=0
        )

        labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
        predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
        dynamic_labels, dynamic_predictions = confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder, expected_rank_diff=0
        )

        with self.test_session():
            self.assertAllEqual(label_values, static_labels.eval())
            self.assertAllEqual(prediction_values, static_predictions.eval())
            feed_dict = {labels_placeholder: label_values, predictions_placeholder: prediction_values}
            self.assertAllEqual(label_values, dynamic_labels.eval(feed_dict=feed_dict))
            self.assertAllEqual(prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
Example #8
    def testBothScalarShape(self):
        label_values = 1.0
        prediction_values = 0.0
        static_labels, static_predictions = confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values
        )

        labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
        predictions_placeholder = array_ops.placeholder(dtype=dtypes.float32)
        dynamic_labels, dynamic_predictions = confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder
        )

        with self.test_session():
            self.assertAllEqual(label_values, static_labels.eval())
            self.assertAllEqual(prediction_values, static_predictions.eval())
            feed_dict = {labels_placeholder: label_values, predictions_placeholder: prediction_values}
            self.assertAllEqual(label_values, dynamic_labels.eval(feed_dict=feed_dict))
            self.assertAllEqual(prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
Example #9
    def testSqueezableLabels(self):
        label_values = np.ones(shape=(2, 3, 1))
        prediction_values = np.zeros(shape=(2, 3))
        static_labels, static_predictions = confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values
        )

        labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
        predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
        dynamic_labels, dynamic_predictions = confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder
        )

        expected_label_values = np.reshape(label_values, newshape=(2, 3))
        with self.test_session():
            self.assertAllEqual(expected_label_values, static_labels.eval())
            self.assertAllEqual(prediction_values, static_predictions.eval())
            feed_dict = {labels_placeholder: label_values, predictions_placeholder: prediction_values}
            self.assertAllEqual(expected_label_values, dynamic_labels.eval(feed_dict=feed_dict))
            self.assertAllEqual(prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
Example #10
def auc_using_histogram(boolean_labels,
                        scores,
                        score_range,
                        nbins=100,
                        collections=None,
                        check_shape=True,
                        name=None):
    """AUC computed by maintaining histograms.

  Rather than computing AUC directly, this Op maintains Variables containing
  histograms of the scores associated with `True` and `False` labels.  By
  comparing these the AUC is generated, with some discretization error.
  See: "Efficient AUC Learning Curve Calculation" by Bouckaert.

  This AUC Op updates in `O(batch_size + nbins)` time and works well even with
  large class imbalance.  The accuracy is limited by discretization error due
  to the finite number of bins.  If scores are concentrated in a few bins,
  accuracy is lower.  If this is a concern, we recommend trying different
  numbers of bins and comparing results.

  Args:
    boolean_labels:  1-D boolean `Tensor`.  Entry is `True` if the
      corresponding record is in the positive class.
    scores:  1-D numeric `Tensor`, same shape as boolean_labels.
    score_range:  `Tensor` of shape `[2]`, same dtype as `scores`.  The min/max
      values of score that we expect.  Scores outside range will be clipped.
    nbins:  Integer number of bins to use.  Accuracy strictly increases as the
      number of bins increases.
    collections: List of graph collections keys. Internal histogram Variables
      are added to these collections. Defaults to `[GraphKeys.LOCAL_VARIABLES]`.
    check_shape:  Boolean.  If `True`, do a runtime shape check on the scores
      and labels.
    name:  A name for this Op.  Defaults to "auc_using_histogram".

  Returns:
    auc:  `float32` scalar `Tensor`.  Fetching this converts the internal
      histograms to an AUC value.
    update_op:  `Op`, when run, updates internal histograms.
  """
    if collections is None:
        collections = [ops.GraphKeys.LOCAL_VARIABLES]
    with variable_scope.variable_scope(name, 'auc_using_histogram',
                                       [boolean_labels, scores, score_range]):
        scores, boolean_labels = cm.remove_squeezable_dimensions(
            scores, boolean_labels)
        score_range = ops.convert_to_tensor(score_range, name='score_range')
        boolean_labels, scores = _check_labels_and_scores(
            boolean_labels, scores, check_shape)
        hist_true, hist_false = _make_auc_histograms(boolean_labels, scores,
                                                     score_range, nbins)
        hist_true_acc, hist_false_acc, update_op = _auc_hist_accumulate(
            hist_true, hist_false, nbins, collections)
        auc = _auc_convert_hist_to_auc(hist_true_acc, hist_false_acc, nbins)
        return auc, update_op
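
A minimal usage sketch for the streaming pattern this docstring describes, assuming TensorFlow 1.x and the `tf.contrib` module path below; the placeholder names are illustrative:

import numpy as np
import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import histogram_ops

labels = tf.placeholder(dtype=tf.bool, shape=[None])
scores = tf.placeholder(dtype=tf.float32, shape=[None])
auc, update_op = histogram_ops.auc_using_histogram(
    labels, scores, score_range=[0.0, 1.0], nbins=100)

with tf.Session() as sess:
    # The internal histograms are local variables by default.
    sess.run(tf.local_variables_initializer())
    for _ in range(10):  # accumulate histograms over batches
        sess.run(update_op, feed_dict={
            labels: np.random.rand(64) > 0.5,
            scores: np.random.rand(64).astype(np.float32),
        })
    print(sess.run(auc))  # fetch the discretized AUC estimate once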
Example #11
def _remove_squeezable_dimensions(predictions, labels, weights):
    predictions = ops.convert_to_tensor(predictions)
    if labels is not None:
        labels, predictions = confusion_matrix.remove_squeezable_dimensions(
            labels, predictions)
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())

    if weights is None:
        return predictions, labels, None

    weights = ops.convert_to_tensor(weights)
    weights_shape = weights.get_shape()
    weights_rank = weights_shape.ndims
    if weights_rank == 0:
        return predictions, labels, weights

    predictions_shape = predictions.get_shape()
    predictions_rank = predictions_shape.ndims
    if (predictions_rank is not None) and (weights_rank is not None):
        # Use static rank.
        if weights_rank - predictions_rank == 1:
            weights = array_ops.squeeze(weights, [-1])
        elif predictions_rank - weights_rank == 1:
            weights = array_ops.expand_dims(weights, [-1])
    else:
        # Use dynamic rank.
        weights_rank_tensor = array_ops.rank(weights)
        rank_diff = weights_rank_tensor - array_ops.rank(predictions)

        def _maybe_expand_weights():
            return control_flow_ops.cond(
                math_ops.equal(rank_diff, -1),
                lambda: array_ops.expand_dims(weights, [-1]), lambda: weights)

        # Don't attempt squeeze if it will fail based on static check.
        if (weights_rank is not None) and (
                not weights_shape.dims[-1].is_compatible_with(1)):
            maybe_squeeze_weights = lambda: weights
        else:
            maybe_squeeze_weights = lambda: array_ops.squeeze(weights, [-1])

        def _maybe_adjust_weights():
            return control_flow_ops.cond(math_ops.equal(rank_diff, 1),
                                         maybe_squeeze_weights,
                                         _maybe_expand_weights)

        # If weights are scalar, do nothing. Otherwise, try to add or remove a
        # dimension to match predictions.
        weights = control_flow_ops.cond(math_ops.equal(weights_rank_tensor, 0),
                                        lambda: weights, _maybe_adjust_weights)
    return predictions, labels, weights
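
A shape walk-through of the static-rank branch above (a sketch, assuming the helper and its TF 1.x imports are in scope). Labels with a trailing size-1 dimension are squeezed to match predictions, and weights of shape (2, 3, 1) are squeezed the same way; scalar weights would pass through untouched:

import tensorflow as tf

predictions = tf.ones(shape=(2, 3))
labels = tf.zeros(shape=(2, 3, 1))
weights = tf.ones(shape=(2, 3, 1))

# All ranks are static here, so no cond() ops are added to the graph.
p, l, w = _remove_squeezable_dimensions(predictions, labels, weights)
print(p.shape, l.shape, w.shape)  # (2, 3) (2, 3) (2, 3)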
Example #12
    def testUnsqueezablePredictions(self):
        label_values = np.ones(shape=(2, 3))
        prediction_values = np.zeros(shape=(2, 3, 2))
        with self.assertRaisesRegexp(ValueError, r"Can not squeeze dim\[2\]"):
            confusion_matrix.remove_squeezable_dimensions(
                label_values, prediction_values)

        labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
        predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
        dynamic_labels, dynamic_predictions = (
            confusion_matrix.remove_squeezable_dimensions(
                labels_placeholder, predictions_placeholder))

        with self.cached_session():
            feed_dict = {
                labels_placeholder: label_values,
                predictions_placeholder: prediction_values
            }
            self.assertAllEqual(label_values,
                                dynamic_labels.eval(feed_dict=feed_dict))
            with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                         "Can not squeeze dim\[2\]"):
                dynamic_predictions.eval(feed_dict=feed_dict)
Example #13
def _remove_squeezable_dimensions(labels,
                                  predictions,
                                  weights=None,
                                  expected_rank_diff=0):
    """Internal version of _remove_squeezable_dimensions which handles weights.

  Squeezes `predictions` and `labels` if their ranks differ from expected by
  exactly 1.
  Squeezes `weights` if its rank is 1 more than the new rank of `predictions`

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    weights: Optional weight `Tensor`. It will be squeezed if it's not scalar,
      and its rank is 1 more than the new rank of `labels`.
    expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.

  Returns:
    Tuple of `labels`, `predictions` and `weights`, possibly with the last
    dimension squeezed.
  """
    labels, predictions = confusion_matrix.remove_squeezable_dimensions(
        labels, predictions, expected_rank_diff=expected_rank_diff)

    if weights is not None:
        weights = ops.convert_to_tensor(weights)
        labels_rank = labels.get_shape().ndims
        weights_shape = weights.get_shape()
        weights_rank = weights_shape.ndims

        if (labels_rank is not None) and (weights_rank is not None):
            # Use static rank.
            rank_diff = weights_rank - labels_rank
            if rank_diff == 1:
                weights = array_ops.squeeze(weights, [-1])
            return labels, predictions, weights

        # Use dynamic rank.
        rank_diff = array_ops.rank(weights) - array_ops.rank(labels)
        if (weights_rank is
                None) or (weights_rank > 0
                          and weights_shape.dims[-1].is_compatible_with(1)):
            weights = control_flow_ops.cond(
                math_ops.equal(1, rank_diff),
                lambda: array_ops.squeeze(weights, [-1]), lambda: weights)

    return labels, predictions, weights
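
An illustrative call for the `expected_rank_diff` case (a sketch, assuming the helper above and its TF 1.x imports are in scope). With sparse labels and per-class logits, `expected_rank_diff=1` leaves labels and predictions intact, while weights with a trailing size-1 dimension are squeezed to the new rank of labels:

import tensorflow as tf

labels = tf.zeros(shape=(2, 3), dtype=tf.int32)  # sparse class ids
predictions = tf.ones(shape=(2, 3, 5))           # per-class logits
weights = tf.ones(shape=(2, 3, 1))

l, p, w = _remove_squeezable_dimensions(
    labels, predictions, weights, expected_rank_diff=1)
print(l.shape, p.shape, w.shape)  # (2, 3) (2, 3, 5) (2, 3)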
Example #14
  def testSameShape(self):
    label_values = np.ones(shape=(2, 3, 1))
    prediction_values = np.zeros_like(label_values)
    static_labels, static_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values))

    labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    dynamic_labels, dynamic_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder))

    with self.cached_session():
      self.assertAllEqual(label_values, self.evaluate(static_labels))
      self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
      feed_dict = {
          labels_placeholder: label_values,
          predictions_placeholder: prediction_values
      }
      self.assertAllEqual(
          label_values, dynamic_labels.eval(feed_dict=feed_dict))
      self.assertAllEqual(
          prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
Example #15
def _remove_squeezable_dimensions(
    labels, predictions, weights=None, expected_rank_diff=0):
  """Internal version of _remove_squeezable_dimensions which handles weights.

  Squeezes `predictions` and `labels` if their ranks differ from expected by
  exactly 1.
  Squeezes `weights` if its rank is 1 more than the new rank of `predictions`

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    weights: Optional weight `Tensor`. It will be squeezed if it's not scalar,
      and its rank is 1 more than the new rank of `labels`.
    expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.

  Returns:
    Tuple of `labels`, `predictions` and `weights`, possibly with the last
    dimension squeezed.
  """
  labels, predictions = confusion_matrix.remove_squeezable_dimensions(
      labels, predictions, expected_rank_diff=expected_rank_diff)

  if weights is not None:
    weights = ops.convert_to_tensor(weights)
    labels_rank = labels.get_shape().ndims
    weights_shape = weights.get_shape()
    weights_rank = weights_shape.ndims

    if (labels_rank is not None) and (weights_rank is not None):
      # Use static rank.
      rank_diff = weights_rank - labels_rank
      if rank_diff == 1:
        weights = array_ops.squeeze(weights, [-1])
      return labels, predictions, weights

    # Use dynamic rank.
    rank_diff = array_ops.rank(weights) - array_ops.rank(labels)
    if (weights_rank is None) or (
        weights_rank > 0 and weights_shape.dims[-1].is_compatible_with(1)):
      weights = control_flow_ops.cond(
          math_ops.equal(1, rank_diff),
          lambda: array_ops.squeeze(weights, [-1]),
          lambda: weights)

  return labels, predictions, weights
Example #16
    def testUnsqueezablePredictions(self):
        label_values = np.ones(shape=(2, 3))
        prediction_values = np.zeros(shape=(2, 3, 2))

        labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
        predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
        dynamic_labels, _ = (confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder))

        with self.cached_session():
            feed_dict = {
                labels_placeholder: label_values,
                predictions_placeholder: prediction_values
            }
            self.assertAllEqual(label_values,
                                dynamic_labels.eval(feed_dict=feed_dict))
Example #17
def _remove_squeezable_dimensions(predictions, labels, weights):
    labels, predictions = confusion_matrix.remove_squeezable_dimensions(
        labels, predictions)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())

    if weights is not None:
        weights = ops.convert_to_tensor(weights)
        predictions_shape = predictions.get_shape()
        predictions_rank = predictions_shape.ndims
        weights_shape = weights.get_shape()
        weights_rank = weights_shape.ndims

        if (predictions_rank is not None) and (weights_rank is not None):
            # Use static rank.
            if weights_rank - predictions_rank == 1:
                weights = array_ops.squeeze(weights, [-1])
        elif (weights_rank is
              None) or (weights_shape.dims[-1].is_compatible_with(1)):
            # Use dynamic rank
            weights = control_flow_ops.cond(
                math_ops.equal(array_ops.rank(weights),
                               math_ops.add(array_ops.rank(predictions), 1)),
                lambda: array_ops.squeeze(weights, [-1]), lambda: weights)
    return predictions, labels, weights
Example #18
def squeeze_or_expand_dimensions(y_pred, y_true=None, sample_weight=None):
    """Squeeze or expand last dimension if needed.

  1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1
  (using `confusion_matrix.remove_squeezable_dimensions`).
  2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1
  from the new rank of `y_pred`.
  If `sample_weight` is scalar, it is kept scalar.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
    y_true: Optional label `Tensor` whose dimensions match `y_pred`.
    sample_weight: Optional weight scalar or `Tensor` whose dimensions match
      `y_pred`.

  Returns:
    Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
    the last dimension squeezed; `sample_weight` could be extended by one
    dimension. If `sample_weight` is None, `(y_pred, y_true)` is returned.
  """
    y_pred_shape = y_pred.shape
    y_pred_rank = y_pred_shape.ndims
    if y_true is not None:

        # If sparse labels are provided as `y_true`, the last dimension in
        # `y_pred` may be > 1. E.g. y_true = [0, 1, 2] (shape=(3,)),
        # y_pred = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]] (shape=(3, 3)).
        # In this case, we should not try to remove the squeezable dimension.
        y_true_shape = y_true.shape
        y_true_rank = y_true_shape.ndims
        if (y_true_rank is not None) and (y_pred_rank is not None):
            # Use static rank for `y_true` and `y_pred`.
            if (y_pred_rank - y_true_rank != 1) or y_pred_shape[-1] == 1:
                y_true, y_pred = confusion_matrix.remove_squeezable_dimensions(
                    y_true, y_pred)
        else:
            # Use dynamic rank.
            rank_diff = array_ops.rank(y_pred) - array_ops.rank(y_true)
            squeeze_dims = lambda: confusion_matrix.remove_squeezable_dimensions(  # pylint: disable=g-long-lambda
                y_true, y_pred)
            is_last_dim_1 = math_ops.equal(1, array_ops.shape(y_pred)[-1])
            maybe_squeeze_dims = lambda: control_flow_ops.cond(  # pylint: disable=g-long-lambda
                is_last_dim_1, squeeze_dims, lambda: (y_true, y_pred))
            y_true, y_pred = control_flow_ops.cond(
                math_ops.equal(1, rank_diff), maybe_squeeze_dims, squeeze_dims)

    if sample_weight is None:
        return y_pred, y_true

    sample_weight = ops.convert_to_tensor(sample_weight)
    weights_shape = sample_weight.shape
    weights_rank = weights_shape.ndims
    if weights_rank == 0:  # If weights is scalar, do nothing.
        return y_pred, y_true, sample_weight

    if (y_pred_rank is not None) and (weights_rank is not None):
        # Use static rank.
        if weights_rank - y_pred_rank == 1:
            sample_weight = array_ops.squeeze(sample_weight, [-1])
        elif y_pred_rank - weights_rank == 1:
            sample_weight = array_ops.expand_dims(sample_weight, [-1])
        return y_pred, y_true, sample_weight

    # Use dynamic rank.
    weights_rank_tensor = array_ops.rank(sample_weight)
    rank_diff = weights_rank_tensor - array_ops.rank(y_pred)
    maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1])

    def _maybe_expand_weights():
        expand_weights = lambda: array_ops.expand_dims(sample_weight, [-1])
        return control_flow_ops.cond(math_ops.equal(rank_diff, -1),
                                     expand_weights, lambda: sample_weight)

    def _maybe_adjust_weights():
        return control_flow_ops.cond(math_ops.equal(rank_diff,
                                                    1), maybe_squeeze_weights,
                                     _maybe_expand_weights)

    # squeeze or expand last dim of `sample_weight` if its rank differs by 1
    # from the new rank of `y_pred`.
    sample_weight = control_flow_ops.cond(
        math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight,
        _maybe_adjust_weights)
    return y_pred, y_true, sample_weight
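
The sparse-label guard above can be seen with the shapes from its own comment (a sketch, assuming the function and its TF 1.x imports are in scope). Although the ranks differ by 1, the last dimension of `y_pred` holds class probabilities and must not be squeezed:

import tensorflow as tf

y_true = tf.constant([0, 1, 2])             # shape (3,)
y_pred = tf.constant([[.9, .05, .05],
                      [.5, .89, .6],
                      [.05, .01, .94]])     # shape (3, 3)

# sample_weight is None, so only (y_pred, y_true) is returned.
y_pred_out, y_true_out = squeeze_or_expand_dimensions(y_pred, y_true)
print(y_pred_out.shape, y_true_out.shape)   # (3, 3) (3,)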
Example #19
def squeeze_or_expand_dimensions(y_pred, y_true, sample_weight):
  """Squeeze or expand last dimension if needed.

  1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1
  (using `confusion_matrix.remove_squeezable_dimensions`).
  2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1
  from the new rank of `y_pred`.
  If `sample_weight` is scalar, it is kept scalar.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
    y_true: Optional label `Tensor` whose dimensions match `y_pred`.
    sample_weight: Optional weight scalar or `Tensor` whose dimensions match
      `y_pred`.

  Returns:
    Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
    the last dimension squeezed; `sample_weight` could be extended by one
    dimension.
  """
  if y_true is not None:
    # squeeze last dim of `y_pred` or `y_true` if their rank differs by 1
    y_true, y_pred = confusion_matrix.remove_squeezable_dimensions(
        y_true, y_pred)

  if sample_weight is None:
    return y_pred, y_true, None

  sample_weight = ops.convert_to_tensor(sample_weight)
  weights_shape = sample_weight.get_shape()
  weights_rank = weights_shape.ndims
  if weights_rank == 0:  # If weights is scalar, do nothing.
    return y_pred, y_true, sample_weight

  y_pred_shape = y_pred.get_shape()
  y_pred_rank = y_pred_shape.ndims
  if (y_pred_rank is not None) and (weights_rank is not None):
    # Use static rank.
    if weights_rank - y_pred_rank == 1:
      sample_weight = array_ops.squeeze(sample_weight, [-1])
    elif y_pred_rank - weights_rank == 1:
      sample_weight = array_ops.expand_dims(sample_weight, [-1])
    return y_pred, y_true, sample_weight

  # Use dynamic rank.
  weights_rank_tensor = array_ops.rank(sample_weight)
  rank_diff = weights_rank_tensor - array_ops.rank(y_pred)
  maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1])

  def _maybe_expand_weights():
    return control_flow_ops.cond(
        math_ops.equal(rank_diff,
                       -1), lambda: array_ops.expand_dims(sample_weight, [-1]),
        lambda: sample_weight)

  def _maybe_adjust_weights():
    return control_flow_ops.cond(
        math_ops.equal(rank_diff, 1), maybe_squeeze_weights,
        _maybe_expand_weights)

  # squeeze or expand last dim of `sample_weight` if its rank differs by 1
  # from the new rank of `y_pred`.
  sample_weight = control_flow_ops.cond(
      math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight,
      _maybe_adjust_weights)
  return y_pred, y_true, sample_weight
Example #20
    result = tf.add(tf.matmul(fc4_Drop, fc5_W), fc5_B)

    return result

filename_queue = tf.train.string_input_producer(data_dir)
onTarget, offTarget, label = create_file_reader_ops(filename_queue)
batch_onTarget, batch_offTarget, batch_label = tf.train.batch(
    [onTarget, offTarget, label],
    shapes=[[onTargetLen, 4], [offTargetLen, 4], [1]],
    batch_size=batch_size)
model_Pred = model()

loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_Pred, labels=batch_label))
adamOpt = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
train_step = adamOpt.minimize(loss)

l, p = confusion_matrix.remove_squeezable_dimensions(batch_label, model_Pred)
s = tf.square(p - l)
mean_t = tf.reduce_mean(s)
saver = tf.train.Saver()

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    tf.tables_initializer().run()

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    i = 1
    while True:
        try:
Example #21
def squeeze_or_expand_dimensions(y_pred, y_true, sample_weight):
  """Squeeze or expand last dimension if needed.

  1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1
  (using `confusion_matrix.remove_squeezable_dimensions`).
  2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1
  from the new rank of `y_pred`.
  If `sample_weight` is scalar, it is kept scalar.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
    y_true: Optional label `Tensor` whose dimensions match `y_pred`.
    sample_weight: Optional weight scalar or `Tensor` whose dimensions match
      `y_pred`.

  Returns:
    Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
    the last dimension squeezed; `sample_weight` could be extended by one
    dimension.
  """
  y_pred_shape = y_pred.shape
  y_pred_rank = y_pred_shape.ndims
  if y_true is not None:

    # If sparse labels are provided as `y_true`, the last dimension in
    # `y_pred` may be > 1. E.g. y_true = [0, 1, 2] (shape=(3,)),
    # y_pred = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]] (shape=(3, 3)).
    # In this case, we should not try to remove the squeezable dimension.
    y_true_shape = y_true.shape
    y_true_rank = y_true_shape.ndims
    if (y_true_rank is not None) and (y_pred_rank is not None):
      # Use static rank for `y_true` and `y_pred`.
      if (y_pred_rank - y_true_rank != 1) or y_pred_shape[-1] == 1:
        y_true, y_pred = confusion_matrix.remove_squeezable_dimensions(
            y_true, y_pred)
    else:
      # Use dynamic rank.
      rank_diff = array_ops.rank(y_pred) - array_ops.rank(y_true)
      squeeze_dims = lambda: confusion_matrix.remove_squeezable_dimensions(  # pylint: disable=g-long-lambda
          y_true, y_pred)
      is_last_dim_1 = math_ops.equal(1, array_ops.shape(y_pred)[-1])
      maybe_squeeze_dims = lambda: control_flow_ops.cond(  # pylint: disable=g-long-lambda
          is_last_dim_1, squeeze_dims, lambda: (y_true, y_pred))
      y_true, y_pred = control_flow_ops.cond(
          math_ops.equal(1, rank_diff), maybe_squeeze_dims, squeeze_dims)

  if sample_weight is None:
    return y_pred, y_true, None

  sample_weight = ops.convert_to_tensor(sample_weight)
  weights_shape = sample_weight.shape
  weights_rank = weights_shape.ndims
  if weights_rank == 0:  # If weights is scalar, do nothing.
    return y_pred, y_true, sample_weight

  if (y_pred_rank is not None) and (weights_rank is not None):
    # Use static rank.
    if weights_rank - y_pred_rank == 1:
      sample_weight = array_ops.squeeze(sample_weight, [-1])
    elif y_pred_rank - weights_rank == 1:
      sample_weight = array_ops.expand_dims(sample_weight, [-1])
    return y_pred, y_true, sample_weight

  # Use dynamic rank.
  weights_rank_tensor = array_ops.rank(sample_weight)
  rank_diff = weights_rank_tensor - array_ops.rank(y_pred)
  maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1])

  def _maybe_expand_weights():
    return control_flow_ops.cond(
        math_ops.equal(rank_diff,
                       -1), lambda: array_ops.expand_dims(sample_weight, [-1]),
        lambda: sample_weight)

  def _maybe_adjust_weights():
    return control_flow_ops.cond(
        math_ops.equal(rank_diff, 1), maybe_squeeze_weights,
        _maybe_expand_weights)

  # squeeze or expand last dim of `sample_weight` if its rank differs by 1
  # from the new rank of `y_pred`.
  sample_weight = control_flow_ops.cond(
      math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight,
      _maybe_adjust_weights)
  return y_pred, y_true, sample_weight
Example #22
def squeeze_or_expand_dimensions(y_pred, y_true, sample_weight):
    """Squeeze or expand last dimension if needed.

  1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1
  (using `confusion_matrix.remove_squeezable_dimensions`).
  2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1
  from the new rank of `y_pred`.
  If `sample_weight` is scalar, it is kept scalar.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
    y_true: Optional label `Tensor` whose dimensions match `y_pred`.
    sample_weight: Optional weight scalar or `Tensor` whose dimensions match
      `y_pred`.

  Returns:
    Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
    the last dimension squeezed; `sample_weight` could be extended by one
    dimension.
  """
    if y_true is not None:
        # squeeze last dim of `y_pred` or `y_true` if their rank differs by 1
        y_true, y_pred = confusion_matrix.remove_squeezable_dimensions(
            y_true, y_pred)

    if sample_weight is None:
        return y_pred, y_true, None

    sample_weight = ops.convert_to_tensor(sample_weight)
    weights_shape = sample_weight.get_shape()
    weights_rank = weights_shape.ndims
    if weights_rank == 0:  # If weights is scalar, do nothing.
        return y_pred, y_true, sample_weight

    y_pred_shape = y_pred.get_shape()
    y_pred_rank = y_pred_shape.ndims
    if (y_pred_rank is not None) and (weights_rank is not None):
        # Use static rank.
        if weights_rank - y_pred_rank == 1:
            sample_weight = array_ops.squeeze(sample_weight, [-1])
        elif y_pred_rank - weights_rank == 1:
            sample_weight = array_ops.expand_dims(sample_weight, [-1])
        return y_pred, y_true, sample_weight

    # Use dynamic rank.
    weights_rank_tensor = array_ops.rank(sample_weight)
    rank_diff = weights_rank_tensor - array_ops.rank(y_pred)
    maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1])

    def _maybe_expand_weights():
        return control_flow_ops.cond(
            math_ops.equal(rank_diff, -1),
            lambda: array_ops.expand_dims(sample_weight, [-1]),
            lambda: sample_weight)

    def _maybe_adjust_weights():
        return control_flow_ops.cond(math_ops.equal(rank_diff,
                                                    1), maybe_squeeze_weights,
                                     _maybe_expand_weights)

    # squeeze or expand last dim of `sample_weight` if its rank differs by 1
    # from the new rank of `y_pred`.
    sample_weight = control_flow_ops.cond(
        math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight,
        _maybe_adjust_weights)
    return y_pred, y_true, sample_weight
Example #23
def _remove_squeezable_dimensions(predictions, labels, weights):
    """Squeeze or expand last dim if needed.

  Squeezes last dim of `predictions` or `labels` if their rank differs by 1
  (using confusion_matrix.remove_squeezable_dimensions).
  Squeezes or expands last dim of `weights` if its rank differs by 1 from the
  new rank of `predictions`.

  If `weights` is scalar, it is kept scalar.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    labels: Optional label `Tensor` whose dimensions match `predictions`.
    weights: Optional weight scalar or `Tensor` whose dimensions match
      `predictions`.

  Returns:
    Tuple of `predictions`, `labels` and `weights`. Each of them possibly has
    the last dimension squeezed; `weights` could be extended by one dimension.
  """
    predictions = ops.convert_to_tensor(predictions)
    if labels is not None:
        labels, predictions = confusion_matrix.remove_squeezable_dimensions(
            labels, predictions)
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())

    if weights is None:
        return predictions, labels, None

    weights = ops.convert_to_tensor(weights)
    weights_shape = weights.get_shape()
    weights_rank = weights_shape.ndims
    if weights_rank == 0:
        return predictions, labels, weights

    predictions_shape = predictions.get_shape()
    predictions_rank = predictions_shape.ndims
    if (predictions_rank is not None) and (weights_rank is not None):
        # Use static rank.
        if weights_rank - predictions_rank == 1:
            weights = array_ops.squeeze(weights, [-1])
        elif predictions_rank - weights_rank == 1:
            weights = array_ops.expand_dims(weights, [-1])
    else:
        # Use dynamic rank.
        weights_rank_tensor = array_ops.rank(weights)
        rank_diff = weights_rank_tensor - array_ops.rank(predictions)

        def _maybe_expand_weights():
            return control_flow_ops.cond(
                math_ops.equal(rank_diff, -1),
                lambda: array_ops.expand_dims(weights, [-1]), lambda: weights)

        # Don't attempt squeeze if it will fail based on static check.
        if ((weights_rank is not None)
                and (not weights_shape.dims[-1].is_compatible_with(1))):
            maybe_squeeze_weights = lambda: weights
        else:
            maybe_squeeze_weights = lambda: array_ops.squeeze(weights, [-1])

        def _maybe_adjust_weights():
            return control_flow_ops.cond(math_ops.equal(rank_diff, 1),
                                         maybe_squeeze_weights,
                                         _maybe_expand_weights)

        # If weights are scalar, do nothing. Otherwise, try to add or remove a
        # dimension to match predictions.
        weights = control_flow_ops.cond(math_ops.equal(weights_rank_tensor, 0),
                                        lambda: weights, _maybe_adjust_weights)
    return predictions, labels, weights