Example 1
def padded_sequence_accuracy(predictions,
                             labels,
                             weights_fn=common_layers.weights_nonzero):
  """Percentage of times that predictions matches labels everywhere (non-0)."""
  # If the last dimension is 1 then we're using L1/L2 loss.
  if common_layers.shape_list(predictions)[-1] == 1:
    return rounding_sequence_accuracy(
        predictions, labels, weights_fn=weights_fn)
  with tf.variable_scope(
      "padded_sequence_accuracy", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)

    # Flatten, keeping batch dim (and num_classes dim for predictions)
    # TPU argmax can only deal with a limited number of dimensions
    predictions_shape = common_layers.shape_list(padded_predictions)
    batch_size = predictions_shape[0]
    num_classes = predictions_shape[-1]
    flat_size = common_layers.list_product(
        common_layers.shape_list(padded_labels)[1:])
    padded_predictions = tf.reshape(
        padded_predictions,
        [batch_size, common_layers.list_product(predictions_shape[1:-1]),
         num_classes])
    padded_labels = tf.reshape(padded_labels, [batch_size, flat_size])
    weights = tf.reshape(weights, [batch_size, flat_size])

    outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1))
    padded_labels = tf.to_int32(padded_labels)
    not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
    axis = list(range(1, len(outputs.get_shape())))
    correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
    return correct_seq, tf.constant(1.0)
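Usage sketch (an illustration under assumptions, not part of the listing above): assuming TensorFlow 1.x and the tensor2tensor package, where this function lives in tensor2tensor.utils.metrics, the metric can be evaluated on dummy logits and labels as below. The shapes follow the [batch, length, 1, 1, vocab] / [batch, length, 1, 1] convention that T2T metrics typically receive, and 0 is the padding id; the vocabulary size and label values are made up for the example.

import numpy as np
import tensorflow as tf
from tensor2tensor.utils import metrics

# Random logits for 2 sequences of length 5 over a 10-symbol vocabulary.
logits = tf.constant(np.random.randn(2, 5, 1, 1, 10), dtype=tf.float32)
# Labels padded with 0; padded positions are masked out by weights_nonzero.
labels = tf.reshape(
    tf.constant([[1, 4, 2, 0, 0], [3, 3, 5, 7, 0]], dtype=tf.int32),
    [2, 5, 1, 1])

correct_seq, _ = metrics.padded_sequence_accuracy(logits, labels)
accuracy = tf.reduce_mean(correct_seq)  # fraction of fully correct sequences

with tf.Session() as sess:
  print(sess.run(accuracy))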
Example 2
def padded_sequence_accuracy(predictions,
                             labels,
                             weights_fn=common_layers.weights_nonzero):
    """Top-2 variant: scores each of the top-2 predictions against the labels.

    The two highest-scoring classes at every non-padded position are each
    compared to the labels, and the per-rank sequence accuracies are summed.
    """
    if common_layers.shape_list(predictions)[-1] == 1:
        return rounding_sequence_accuracy(predictions,
                                          labels,
                                          weights_fn=weights_fn)
    with tf.variable_scope("padded_sequence_accuracy",
                           values=[predictions, labels]):
        padded_predictions, padded_labels = common_layers.pad_with_zeros(
            predictions, labels)
        weights = weights_fn(padded_labels)

        # Flatten, keeping the batch dim (and the num_classes dim for
        # predictions); TPU ops handle only a limited number of dimensions.
        predictions_shape = common_layers.shape_list(padded_predictions)
        batch_size = predictions_shape[0]
        num_classes = predictions_shape[-1]
        flat_size = common_layers.list_product(
            common_layers.shape_list(padded_labels)[1:])
        padded_predictions = tf.reshape(padded_predictions, [
            batch_size,
            common_layers.list_product(predictions_shape[1:-1]), num_classes
        ])
        padded_labels = tf.reshape(padded_labels, [batch_size, flat_size])
        weights = tf.reshape(weights, [batch_size, flat_size])

        # Indices of the top-2 classes at each position: [batch, flat_size, 2].
        _, outputs = tf.nn.top_k(padded_predictions, k=2)

        # Broadcast weights and labels to [batch, flat_size, 2] so each rank
        # is compared against the labels independently.
        weights = tf.expand_dims(weights, axis=-1)
        weights += tf.zeros_like(tf.to_float(outputs))

        outputs = tf.to_int32(outputs)
        padded_labels = tf.expand_dims(padded_labels, axis=-1)
        padded_labels += tf.zeros_like(outputs)
        padded_labels = tf.to_int32(padded_labels)

        not_correct = tf.to_float(tf.not_equal(outputs,
                                               padded_labels)) * weights

        # Per-rank sequence accuracy: 1.0 iff that rank matches everywhere.
        correct_seq_single = 1.0 - tf.minimum(
            1.0, tf.reduce_sum(not_correct, axis=1))
        # Sum over the two ranks.
        correct_seq = tf.reduce_sum(correct_seq_single, axis=1)

        return correct_seq, tf.constant(1.0)
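To make the rank-wise comparison above concrete, here is a small NumPy sketch; the shapes and values are illustrative assumptions, not taken from the snippet. Each of the two ranked predictions is matched against the labels across every position, and a sequence scores 1.0 only when a single rank is correct at all of them.

import numpy as np

# One sequence of length 3 with its top-2 predicted classes per position.
labels = np.array([[2, 5, 7]])                  # [batch, length]
top2 = np.array([[[2, 4], [5, 1], [7, 9]]])     # [batch, length, k=2]

# Mirror of the TF logic: broadcast labels against each rank, count mismatches
# along the length dim, then sum the per-rank sequence accuracies.
not_correct = (top2 != labels[..., None]).astype(np.float32)      # [1, 3, 2]
per_rank = 1.0 - np.minimum(1.0, not_correct.sum(axis=1))         # [1, 2]
score = per_rank.sum(axis=1)                                      # [1]
print(score)  # [1.] -- the rank-1 predictions match the labels everywhere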