Example #1
def _class_log_loss(probabilities, targets, weights=None):
  # One-hot encode the (squeezed) integer targets to the class count of
  # `probabilities`, then stream the mean log loss across batches.
  return metric_ops.streaming_mean(
      losses.log_loss(probabilities,
                      _squeeze_and_onehot(targets,
                                          array_ops.shape(probabilities)[1])),
      weights=weights)
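
The helper `_squeeze_and_onehot` is not shown above. As a minimal NumPy sketch, assuming it squeezes `targets` to a vector of class ids and one-hot encodes it to the width of `probabilities` (and assuming the small clipping epsilon contrib's log_loss uses), the per-batch value that `streaming_mean` accumulates looks like this:

import numpy as np

def class_log_loss_np(probabilities, targets, eps=1e-7):
    # Assumed behavior of _squeeze_and_onehot: squeeze to class ids,
    # then one-hot encode to the number of columns of `probabilities`.
    num_classes = probabilities.shape[1]
    onehot = np.eye(num_classes)[np.squeeze(targets)]
    # Elementwise log loss, clipped away from log(0), then averaged.
    per_entry = -(onehot * np.log(probabilities + eps)
                  + (1.0 - onehot) * np.log(1.0 - probabilities + eps))
    return per_entry.mean()

print(class_log_loss_np(np.array([[0.9, 0.1], [0.2, 0.8]]),
                        np.array([[0], [1]])))  # ~0.16: mostly confident hits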
Example #3
import tensorflow as tf
from tensorflow.contrib import layers, losses

# Softmax output: normalizes the two perceptrons' outputs jointly, so each
# row sums to 1. The output is [0.27, 0.73], [0.27, 0.73]:
# [e^6/(e^6+e^7), e^7/(e^6+e^7)] = [1/(1+e), 1/(1+e^-1)] = [0.27, 0.73]
# [e^12/(e^12+e^13), e^13/(e^12+e^13)] = [1/(1+e), 1/(1+e^-1)] = [0.27, 0.73]
# Note how the result differs from the one a sigmoid activation gives, which
# squashes each output independently.
# `features` is not shown in the original snippet; [[1, 2], [3, 4]] is
# inferred from the pre-activation values [6, 7] and [12, 13] above.
features = tf.constant([[1.0, 2.0], [3.0, 4.0]])

nnout6 = layers.fully_connected(
    inputs=features,
    weights_initializer=tf.constant_initializer([[1.0, 1.0], [2.0, 2.0]]),
    biases_initializer=tf.constant_initializer([1.0, 2.0]),
    num_outputs=2,
    activation_fn=tf.nn.softmax)
session = tf.Session()
session.run(tf.global_variables_initializer())
session.run(nnout6)

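The arithmetic in the comments above can be checked end to end in plain NumPy; the `features` value [[1, 2], [3, 4]] is the one inferred from the pre-activation sums [6, 7] and [12, 13]:

import numpy as np

features = np.array([[1.0, 2.0], [3.0, 4.0]])
W = np.array([[1.0, 1.0], [2.0, 2.0]])
b = np.array([1.0, 2.0])

logits = features @ W + b                  # [[6, 7], [12, 13]]
exp = np.exp(logits)
softmax = exp / exp.sum(axis=1, keepdims=True)
print(softmax)                             # [[0.269, 0.731], [0.269, 0.731]]
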
outputs = tf.constant([0.0, 1.0, 0.0, 1.0])  # float, so log_loss can take logs
targets = tf.constant([1.0, 1.0, 1.0, 0.0])
sq_loss1 = losses.mean_squared_error(outputs, targets)
log_loss1 = losses.log_loss(outputs, targets)

outputs2 = tf.constant([[100.0, -100.0, -100.0], [-100.0, 100.0, -100.0],
                        [-100.0, -100.0, 100.0]])
targets2 = tf.constant([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
sq_loss2 = losses.mean_squared_error(outputs2, targets2)

session.run(sq_loss1)  # 0.75 = [(0-1)^2 + (1-1)^2 + (0-1)^2 + (1-0)^2] / 4
session.run(sq_loss2)  # 10067 = (6*100^2 + 3*101^2)/9
session.run(log_loss1)  # mean of -[t_i*log(p_i) + (1-t_i)*log(1-p_i)]
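
The three commented results can be reproduced in NumPy. The log-loss value depends on the small clipping epsilon (1e-7 in contrib's log_loss), since the hard 0/1 predictions would otherwise hit log(0):

import numpy as np

outputs = np.array([0.0, 1.0, 0.0, 1.0])
targets = np.array([1.0, 1.0, 1.0, 0.0])
print(((outputs - targets) ** 2).mean())   # 0.75

eps = 1e-7  # contrib log_loss's default clipping epsilon
log_loss = -(targets * np.log(outputs + eps)
             + (1 - targets) * np.log(1 - outputs + eps)).mean()
print(log_loss)  # ~12.1: three total misses at -log(1e-7) ~= 16.1 each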
Example #4
def _log_loss(probabilities, targets):
  # `targets` arrives without a static shape, which log_loss rejects;
  # reshape it to match `probabilities` before computing the loss.
  targets = array_ops.reshape(targets, array_ops.shape(probabilities))
  return metric_ops.streaming_mean(losses.log_loss(probabilities, targets))
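
A small NumPy mirror of the computation shows why the reshape matters: flat (3,) targets would broadcast against (3, 1) probabilities into a 3x3 matrix rather than pairing up elementwise, so the shapes are aligned first (again assuming contrib's epsilon-clipped log loss):

import numpy as np

probabilities = np.array([[0.9], [0.2], [0.7]])
targets = np.array([1, 0, 1]).reshape(probabilities.shape)  # (3,) -> (3, 1)
eps = 1e-7
loss = -(targets * np.log(probabilities + eps)
         + (1 - targets) * np.log(1 - probabilities + eps)).mean()
print(loss)  # ~0.23: per-example log loss, averaged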