Example #1
def get_predictions_logreg(X, weights):
    """Computes the softmax class probabilities of a logistic regression
    model and gathers them onto the MPI root node.
    """
    pred = mathutil.dot(X, weights[0]) + weights[1]
    # normalize the prediction to avoid overflowing in exp()
    prob = pred - pred.max(axis=1)[:, np.newaxis]
    mathutil.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
    # gather the distributed rows; only the root returns the full matrix
    prob = mpi.COMM.gather(prob)
    if mpi.is_root():
        return np.vstack(prob)
    else:
        return np.zeros(0)
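All of the snippets on this page share the same numerically stable softmax: subtracting the row-wise maximum before exponentiating leaves the normalized probabilities unchanged (the shift cancels in the ratio) but keeps exp() from overflowing. Note that `mathutil` and `mpi` are project-local modules (NumPy-style math helpers and an MPI wrapper); the following is a minimal single-process sketch of the trick in plain NumPy:

import numpy as np

def softmax_rows(pred):
    # Shift each row by its max before exp(); the shift cancels during
    # normalization, so the probabilities are unchanged but exp() is safe.
    prob = pred - pred.max(axis=1)[:, np.newaxis]
    np.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
    return prob

pred = np.array([[1000.0, 1001.0], [0.0, 1.0]])
print(softmax_rows(pred))           # no overflow, rows sum to 1
print(softmax_rows(pred + 500.0))   # identical output: shift invariance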
def loss_multiclass_logistic(Y, pred, weight, **kwargs):
    """The multiclass logistic regression loss function.

    The input Y should be a 0-1 matrix.
    """
    # normalize the prediction to avoid overflowing
    prob = pred - pred.max(axis=1)[:, np.newaxis]
    mathutil.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
    g = prob - Y
    # take the log to get the per-example log-loss
    mathutil.log(prob, out=prob)
    return -np.dot(prob.flat, Y.flat), g
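A quick finite-difference check confirms that `prob - Y` really is the gradient of this loss with respect to `pred`. The sketch below restates the function in plain NumPy (assuming `mathutil.exp`/`mathutil.log` mirror `np.exp`/`np.log` with `out=` support):

import numpy as np

def loss_ref(Y, pred):
    # plain-NumPy restatement of loss_multiclass_logistic above
    prob = pred - pred.max(axis=1)[:, np.newaxis]
    np.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
    return -np.dot(np.log(prob).ravel(), Y.ravel()), prob - Y

rng = np.random.default_rng(0)
pred = rng.standard_normal((4, 3))
Y = np.eye(3)[rng.integers(0, 3, size=4)]      # one-hot labels
f, g = loss_ref(Y, pred)
num = np.empty_like(pred)
for idx in np.ndindex(pred.shape):             # numerical gradient
    shifted = pred.copy()
    shifted[idx] += 1e-6
    num[idx] = (loss_ref(Y, shifted)[0] - f) / 1e-6
print(np.allclose(num, g, atol=1e-4))          # True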
def loss_multiclass_logistic_yvector(Y, pred, weight, gpred, cache, **kwargs):
    """The multiclass logistic regression loss function, where the
    input Y is a vector of indices instead of a 0-1 matrix.
    """
    if len(cache) == 0:
        cache.append(np.empty_like(pred))
    cache[0].resize(pred.shape)
    prob = cache[0]
    # normalize the prediction to avoid overflowing
    prob[:] = pred
    prob -= pred.max(axis=1)[:, np.newaxis]
    mathutil.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
    gpred[:] = prob
    # instead of computing gpred -= Y, subtract 1 at the labeled indices
    gpred[np.arange(len(Y)), Y] -= 1.
    mathutil.log(prob, out=prob)
    return -prob[np.arange(len(Y)), Y].sum(), gpred
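The `_yvector` variant is the same loss specialized to integer labels: `gpred[np.arange(len(Y)), Y] -= 1` subtracts the implicit 0-1 matrix without ever materializing it, and the log-loss is read off at the labeled positions. A small sketch of the equivalence (plain NumPy, hypothetical data):

import numpy as np

y = np.array([2, 0, 1])                    # labels as indices
Y = np.eye(3)[y]                           # the equivalent 0-1 matrix
prob = np.array([[0.2, 0.3, 0.5],
                 [0.6, 0.1, 0.3],
                 [0.1, 0.8, 0.1]])
g_matrix = prob - Y                        # gradient, 0-1 matrix form
g_index = prob.copy()
g_index[np.arange(len(y)), y] -= 1.0       # gradient, index form
print(np.array_equal(g_matrix, g_index))   # True
print(np.isclose(-np.dot(np.log(prob).ravel(), Y.ravel()),
                 -np.log(prob[np.arange(len(y)), y]).sum()))  # same loss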
def loss_multiclass_logistic(Y, pred, weight, gpred, cache, **kwargs):
    """The multiclass logistic regression loss function.

    The input Y should be a 0-1 matrix.
    """
    if len(cache) == 0:
        cache.append(np.empty_like(pred))
    cache[0].resize(pred.shape)
    prob = cache[0]
    # normalize the prediction to avoid overflowing
    prob[:] = pred
    prob -= pred.max(axis=1)[:, np.newaxis]
    mathutil.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
    gpred[:] = prob
    gpred -= Y
    # take the log to get the per-example log-loss
    mathutil.log(prob, out=prob)
    return -np.dot(prob.flat, Y.flat), gpred
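Both buffered variants use the same in-place calling convention: the caller owns the gradient buffer `gpred` and passes an initially empty list as `cache`, so the scratch array is allocated on the first call and merely resized afterwards. A hypothetical driver loop, with `mathutil` stubbed by NumPy so it runs alongside the function above:

import numpy as np

class mathutil:                            # stand-in for the project module
    exp = staticmethod(np.exp)
    log = staticmethod(np.log)

rng = np.random.default_rng(0)
cache = []                                 # scratch shared across calls
for _ in range(3):
    pred = rng.standard_normal((5, 4))
    Y = np.eye(4)[rng.integers(0, 4, size=5)]
    gpred = np.empty_like(pred)
    f, g = loss_multiclass_logistic(Y, pred, None, gpred, cache)
    assert g is gpred                      # the gradient is written in place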
Example #5
def loss_bnll(Y, pred, weight, **kwargs):
    """The BNLL loss: f = log(1 + exp(-y * pred)).
    """
    # expnyp is exp(-y * pred)
    expnyp = mathutil.exp(-Y * pred)
    expnyp_plus = 1.0 + expnyp
    if weight is None:
        return np.sum(np.log(expnyp_plus)), -Y * expnyp / expnyp_plus
    else:
        return np.dot(weight, np.log(expnyp_plus)).sum(), \
               -Y * weight * expnyp / expnyp_plus
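One caveat: `exp(-Y * pred)` overflows once `-y * pred` grows large (badly mis-scored examples). If that matters, the unweighted case can be rewritten with `np.logaddexp` so no intermediate ever overflows; a sketch, not part of the original code:

import numpy as np

def loss_bnll_stable(Y, pred):
    nyp = -Y * pred                    # -y * pred
    lse = np.logaddexp(0.0, nyp)       # log(1 + exp(-y*pred)), overflow-free
    g = -Y * np.exp(nyp - lse)         # == -y * exp(-y*p) / (1 + exp(-y*p))
    return lse.sum(), g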
def loss_meu_logistic(Y, pred, weight, gpred, cache, **kwargs):
    """This loss function computes the maximum expected utility based loss,
    where the input Y is the normalized utility values.
    """
    # compute the probability; normalize the prediction to avoid overflowing
    if len(cache) == 0:
        cache.append(np.empty_like(pred))
    cache[0].resize(pred.shape)
    prob = cache[0]
    prob[:] = pred
    prob -= pred.max(axis=1)[:, np.newaxis]
    mathutil.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
    # eu is the expected utility of each example
    eu = inner1d(Y, prob)
    # add eps for numerical stability before taking the log
    eu += np.finfo(np.float64).eps
    gpred[:] = Y * prob
    gpred /= -eu[:, np.newaxis]
    gpred += prob
    mathutil.log(eu, out=eu)
    return -eu.sum(), gpred
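Here `inner1d(Y, prob)` (from `numpy.core.umath_tests` in older NumPy) is just the row-wise inner product `(Y * prob).sum(axis=1)`, so the loss is -sum_i log(sum_k u_ik * p_ik) and the code builds the gradient `prob - (Y * prob) / eu` in place. A finite-difference check of that gradient in plain NumPy:

import numpy as np

def loss_meu_ref(Y, pred):
    # plain-NumPy restatement; (Y * prob).sum(axis=1) replaces inner1d
    prob = pred - pred.max(axis=1)[:, np.newaxis]
    np.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
    eu = (Y * prob).sum(axis=1) + np.finfo(np.float64).eps
    return -np.log(eu).sum(), prob - (Y * prob) / eu[:, np.newaxis]

rng = np.random.default_rng(1)
pred = rng.standard_normal((3, 4))
Y = rng.random((3, 4))
Y /= Y.sum(axis=1)[:, np.newaxis]          # normalized utilities
f, g = loss_meu_ref(Y, pred)
num = np.empty_like(pred)
for idx in np.ndindex(pred.shape):         # numerical gradient
    shifted = pred.copy()
    shifted[idx] += 1e-6
    num[idx] = (loss_meu_ref(Y, shifted)[0] - f) / 1e-6
print(np.allclose(num, g, atol=1e-4))      # True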