import numpy as np
# 'mathutil' in the original project supplies in-place exp/log with numpy's
# out= semantics, so numpy itself works as a drop-in here; inner1d(a, b) is the
# row-wise inner product (shipped in numpy.core.umath_tests in older releases).
import numpy as mathutil
try:
    from numpy.core.umath_tests import inner1d
except ImportError:
    def inner1d(a, b):
        return np.einsum('ij,ij->i', a, b)

def loss_multiclass_logistic(Y, pred, weight, **kwargs):
    """The multi-class logistic regression (softmax) loss function.

    The input Y should be a 0-1 (one-hot) matrix.
    """
    # normalize the prediction to avoid overflow in exp
    prob = pred - pred.max(axis=1)[:, np.newaxis]
    mathutil.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
    # gradient w.r.t. pred: softmax(pred) - Y
    g = prob - Y
    # take the log to accumulate the cross-entropy
    mathutil.log(prob, out=prob)
    return -np.dot(prob.flat, Y.flat), g
def loss_multiclass_logistic_yvector(Y, pred, weight, gpred, cache, **kwargs):
    """The multi-class logistic regression loss function, where the input Y
    is a vector of class indices instead of a 0-1 matrix.
    """
    if len(cache) == 0:
        cache.append(np.empty_like(pred))
    cache[0].resize(pred.shape)
    prob = cache[0]
    # normalize the prediction to avoid overflow in exp
    prob[:] = pred
    prob -= pred.max(axis=1)[:, np.newaxis]
    mathutil.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
    gpred[:] = prob
    # Y holds class indices, so instead of gpred -= Y (which would need a 0-1
    # matrix) subtract 1 at the labeled entry of each row
    gpred[np.arange(len(Y)), Y] -= 1.
    mathutil.log(prob, out=prob)
    return -prob[np.arange(len(Y)), Y].sum(), gpred
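# Hypothetical usage sketch (not part of the original code): call the
# index-vector variant above and compare its loss against a plain-numpy
# softmax cross-entropy computed from the same scores.
def _check_loss_multiclass_logistic_yvector():
    rng = np.random.RandomState(0)
    pred = rng.randn(5, 3)                      # raw scores: 5 samples, 3 classes
    y = rng.randint(3, size=5)                  # labels as class indices
    gpred = np.empty_like(pred)
    loss, _ = loss_multiclass_logistic_yvector(y, pred, None, gpred, cache=[])
    # reference: -sum_i log(softmax(pred)[i, y_i])
    p = np.exp(pred - pred.max(axis=1, keepdims=True))
    p /= p.sum(axis=1, keepdims=True)
    assert np.allclose(loss, -np.log(p[np.arange(5), y]).sum())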
def loss_multiclass_logistic(Y, pred, weight, gpred, cache, **kwargs):
    """The multi-class logistic regression loss function.

    The input Y should be a 0-1 (one-hot) matrix.
    """
    if len(cache) == 0:
        cache.append(np.empty_like(pred))
    cache[0].resize(pred.shape)
    prob = cache[0]
    # normalize the prediction to avoid overflow in exp
    prob[:] = pred
    prob -= pred.max(axis=1)[:, np.newaxis]
    mathutil.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
    # gradient w.r.t. pred: softmax(pred) - Y
    gpred[:] = prob
    gpred -= Y
    # take the log to accumulate the cross-entropy
    mathutil.log(prob, out=prob)
    return -np.dot(prob.flat, Y.flat), gpred
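# Hypothetical sketch (not part of the original code): spot-check the returned
# gradient of the cached variant above against a central finite difference on
# one entry of pred.
def _check_loss_multiclass_logistic_gradient(eps=1e-5):
    rng = np.random.RandomState(1)
    pred = rng.randn(4, 3)
    Y = np.eye(3)[rng.randint(3, size=4)]       # one-hot labels
    gpred, cache = np.empty_like(pred), []
    _, g = loss_multiclass_logistic(Y, pred, None, gpred, cache)
    plus, minus = pred.copy(), pred.copy()
    plus[0, 0] += eps
    minus[0, 0] -= eps
    lp, _ = loss_multiclass_logistic(Y, plus, None, np.empty_like(pred), [])
    lm, _ = loss_multiclass_logistic(Y, minus, None, np.empty_like(pred), [])
    assert abs((lp - lm) / (2 * eps) - g[0, 0]) < 1e-4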
def loss_meu_logistic(Y, pred, weight, gpred, cache, **kwargs):
    """This loss function computes the maximum-expected-utility (MEU) based
    loss, where the input Y holds the normalized utility values.
    """
    # compute the probability; normalize the prediction to avoid overflow
    if len(cache) == 0:
        cache.append(np.empty_like(pred))
    cache[0].resize(pred.shape)
    prob = cache[0]
    prob[:] = pred
    prob -= pred.max(axis=1)[:,np.newaxis]
    mathutil.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
    # eu is the expected utility
    eu = inner1d(Y, prob)
    # numerical stability
    eu += np.finfo(np.float64).eps
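    # gradient of -log(E[u]) w.r.t. pred: prob - (Y * prob) / E[u], built in place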
    gpred[:] = Y * prob
    gpred /= - eu[:, np.newaxis]
    gpred += prob
    mathutil.log(eu, out=eu)
    return - eu.sum(), gpred
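# Hypothetical usage sketch (not part of the original code): Y here is a
# row-wise utility matrix (e.g. each row normalized to sum to one), and the
# loss is the negative log of the expected utility under softmax probabilities.
def _check_loss_meu_logistic():
    rng = np.random.RandomState(2)
    pred = rng.randn(6, 4)
    Y = rng.rand(6, 4)
    Y /= Y.sum(axis=1)[:, np.newaxis]           # normalized utilities
    gpred, cache = np.empty_like(pred), []
    loss, _ = loss_meu_logistic(Y, pred, None, gpred, cache)
    # reference: -sum_i log(sum_k Y[i, k] * softmax(pred)[i, k])
    p = np.exp(pred - pred.max(axis=1, keepdims=True))
    p /= p.sum(axis=1, keepdims=True)
    assert np.allclose(loss, -np.log((Y * p).sum(axis=1)).sum())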