def _compute_precision_recall(input_layer, labels, threshold,
                              per_example_weights):
  """Returns the numerator of both, the denominator of precision and recall.

  An entry of `labels` counts as relevant when it is > 0; an entry of
  `input_layer` counts as retrieved when it exceeds `threshold`.

  Args:
    input_layer: Tensor of prediction scores.
    labels: Tensor compatible in shape with `input_layer`.
    threshold: Scalar decision threshold applied to `input_layer`.
    per_example_weights: Optional per-row weights, or None. Only the sign is
      used: rows with weight > 0 are kept, all others are zeroed out.

  Returns:
    A tuple `(selected, sum_retrieved, sum_relevant)` of scalar tensors:
    the shared numerator and the denominators of precision and recall.
  """
  # To apply per_example_weights, we need to collapse each row to a scalar, but
  # we really want the sum.
  labels.get_shape().assert_is_compatible_with(input_layer.get_shape())
  relevant = tf.to_float(tf.greater(labels, 0))
  retrieved = tf.to_float(tf.greater(input_layer, threshold))
  selected = relevant * retrieved

  # Fix: the original used `if per_example_weights:`, which raises for
  # tensors/ndarrays ("ambiguous truth value") and wrongly skips weighting for
  # present-but-falsy values. A presence check must be `is not None`.
  if per_example_weights is not None:
    per_example_weights = tf.convert_to_tensor(per_example_weights,
                                               name='per_example_weights')
    if selected.get_shape().dims:
      per_example_weights.get_shape().assert_is_compatible_with(
          [selected.get_shape().dims[0]])
    else:
      per_example_weights.get_shape().assert_is_compatible_with([None])
    # Binarize the weights: only their sign selects/deselects an example.
    per_example_weights = tf.to_float(tf.greater(per_example_weights, 0))
    selected = functions.reduce_batch_sum(selected) * per_example_weights
    relevant = functions.reduce_batch_sum(relevant) * per_example_weights
    retrieved = functions.reduce_batch_sum(retrieved) * per_example_weights
  sum_relevant = tf.reduce_sum(relevant)
  sum_retrieved = tf.reduce_sum(retrieved)
  selected = tf.reduce_sum(selected)
  return selected, sum_retrieved, sum_relevant
def _batch_sum_bce(x, target, name='binary_cross_entropy',
                   per_output_weights=None):
  """Sums the weighted binary cross-entropy of `x` vs `target` per example.

  Args:
    x: Tensor of logits.
    target: Tensor of target probabilities, same shape as `x`.
    name: Name for the loss op.
    per_output_weights: Optional tensor of per-output weights multiplied into
      the loss before the batch sum, or None for unweighted loss.

  Returns:
    A tensor with the per-example sum of the (optionally weighted) loss.
  """
  # Fix: `per_output_weights` was a free variable (never defined in the
  # function), which would raise NameError at call time. It is now an explicit
  # keyword parameter defaulting to None, keeping existing callers working.
  logits = functions.binary_cross_entropy_loss_with_logits(x, target, name=name)
  if per_output_weights is not None:
    logits *= per_output_weights
  return functions.reduce_batch_sum(logits)
def _compute_precision_recall(input_, labels, threshold, per_example_weights):
  """Returns the numerator of both, the denominator of precision and recall.

  A label entry > 0 counts as relevant; a prediction entry above `threshold`
  counts as retrieved. With `per_example_weights`, each row is first collapsed
  to its sum and then masked by the sign of its weight (weight > 0 keeps the
  row, anything else drops it).
  """
  labels.get_shape().assert_is_compatible_with(input_.get_shape())

  is_relevant = tf.to_float(tf.greater(labels, 0))
  is_retrieved = tf.to_float(tf.greater(input_, threshold))
  true_positives = is_relevant * is_retrieved

  if per_example_weights is not None:
    weights = _convert_and_assert_per_example_weights_compatible(
        input_, per_example_weights, dtype=None)
    # Only the sign of a weight matters: binarize it into a keep/drop mask.
    keep_mask = tf.to_float(tf.greater(weights, 0))
    true_positives = keep_mask * functions.reduce_batch_sum(true_positives)
    is_relevant = keep_mask * functions.reduce_batch_sum(is_relevant)
    is_retrieved = keep_mask * functions.reduce_batch_sum(is_retrieved)

  return (tf.reduce_sum(true_positives),
          tf.reduce_sum(is_retrieved),
          tf.reduce_sum(is_relevant))
def _compute_precision_recall(input_, labels, threshold, per_example_weights): """Returns the numerator of both, the denominator of precision and recall.""" # To apply per_example_weights, we need to collapse each row to a scalar, but # we really want the sum. labels.get_shape().assert_is_compatible_with(input_.get_shape()) relevant = tf.to_float(tf.greater(labels, 0)) retrieved = tf.to_float(tf.greater(input_, threshold)) selected = relevant * retrieved if per_example_weights is not None: per_example_weights = _convert_and_assert_per_example_weights_compatible( input_, per_example_weights, dtype=None ) per_example_weights = tf.to_float(tf.greater(per_example_weights, 0)) selected = functions.reduce_batch_sum(selected) * per_example_weights relevant = functions.reduce_batch_sum(relevant) * per_example_weights retrieved = functions.reduce_batch_sum(retrieved) * per_example_weights sum_relevant = tf.reduce_sum(relevant) sum_retrieved = tf.reduce_sum(retrieved) selected = tf.reduce_sum(selected) return selected, sum_retrieved, sum_relevant
def _batch_sum_bce(x, target, name='binary_cross_entropy'):
  """Sums the binary cross-entropy of `x` vs `target` over each example."""
  per_output_loss = functions.binary_cross_entropy_loss_with_logits(
      x, target, name=name)
  return functions.reduce_batch_sum(per_output_loss)
def _batch_sum_bce(x, target, name="binary_cross_entropy"): logits = functions.binary_cross_entropy_loss_with_logits(x, target, name=name) if per_output_weights is not None: logits *= per_output_weights return functions.reduce_batch_sum(logits)