Example no. 1
0
def test_cce_one_hot():
    """Sparse categorical crossentropy should keep the label tensor's shape."""
    # 3-D case: (batch, timesteps) integer labels vs. (batch, timesteps, classes) scores.
    labels = K.variable(np.random.randint(0, 7, (5, 6)))
    scores = K.variable(np.random.random((5, 6, 7)))
    loss = objectives.sparse_categorical_crossentropy(labels, scores)
    assert K.eval(loss).shape == (5, 6)

    # 2-D case: plain (batch,) labels vs. (batch, classes) scores.
    labels = K.variable(np.random.randint(0, 7, (6,)))
    scores = K.variable(np.random.random((6, 7)))
    loss = objectives.sparse_categorical_crossentropy(labels, scores)
    assert K.eval(loss).shape == (6,)
Example no. 2
0
def test_cce_one_hot():
    """The loss shape must match the integer-label shape for 2-D and 1-D labels."""
    for label_shape, score_shape in (((5, 6), (5, 6, 7)), ((6,), (6, 7))):
        y_true = K.variable(np.random.randint(0, 7, label_shape))
        y_score = K.variable(np.random.random(score_shape))
        out = objectives.sparse_categorical_crossentropy(y_true, y_score)
        assert K.eval(out).shape == label_shape
Example no. 3
0
def multi_sparse_graph_loss(y_true, y_pred):
    """Mean sparse categorical crossentropy over the non-masked rows.

    Rows whose first label is -1 are treated as padding (adding 1 makes
    ``y_true[:, 0] + 1`` zero there) and are dropped before averaging.
    """
    keep = tensor.nonzero(y_true[:, 0] + 1)[0]
    per_row = objectives.sparse_categorical_crossentropy(
        y_true[keep], y_pred[keep])
    return tensor.mean(per_row)
Example no. 4
0
 def vae_loss(self, x, x_decoded_onehot):
     """VAE loss: reconstruction crossentropy plus KL term weighted by kl_w."""
     reconstruction = objectives.sparse_categorical_crossentropy(
         x, x_decoded_onehot)
     # KL divergence between the approximate posterior and a unit Gaussian.
     kl = -0.5 * K.mean(
         1 + self.z_log_sigma - K.square(self.z_mean) - K.exp(self.z_log_sigma))
     return reconstruction + self.kl_w * kl
Example no. 5
0
 def logp(self, a):
     """Sum of per-dimension log-probabilities of action `a`.

     Each action dimension consumes the leading column of `a`; the
     leftover columns are returned alongside the merged log-prob.
     """
     logps = []
     for i, (low, high) in enumerate(
             zip(self.action_space.low, self.action_space.high)):
         head, a = a[:, :, :1], a[:, :, 1:]
         if low + 1 == high:
             # Binary dimension: Bernoulli log-likelihood against ps[i].
             logp = (head * K.log(self.ps[i]) +
                     (1 - head) * K.log(1 - self.ps[i]))[:, :, 0]
         else:
             # Discrete dimension: negative sparse crossentropy is the log-prob.
             logp = -sparse_categorical_crossentropy(head, self.ps[i])
         logps.append(logp)
     return maybe_merge(logps, mode="sum"), a
Example no. 6
0
def crf_loss(y_true, y_pred):
    """General CRF loss function depending on the learning mode.

    # Arguments
        y_true: tensor with true targets.
        y_pred: tensor with predicted targets.

    # Returns
        The negative log-likelihood when the CRF layer is being trained in
        the join mode; otherwise the (sparse) categorical crossentropy
        implemented by the underlying Keras backend.
    """
    # The CRF layer that produced y_pred is recovered from Keras history.
    crf, idx = y_pred._keras_history[:2]
    if crf.learn_mode == 'join':
        return crf_nll(y_true, y_pred)
    if crf.sparse_target:
        return sparse_categorical_crossentropy(y_true, y_pred)
    return categorical_crossentropy(y_true, y_pred)
Example no. 7
0
 def logp(self, a):
     """Log-probability of the leading action column; also returns the rest of `a`."""
     head = a[:, :, :1]
     rest = a[:, :, 1:]
     return -sparse_categorical_crossentropy(head, self.p), rest