Example 1
def logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1 - epsilon, p)
    ll = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    ll = ll * -1.0 / len(y)
    return ll
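Nearly every snippet on this page is a variant of the same Kaggle-style binary log loss, with predictions clamped to [epsilon, 1-epsilon] so the logarithms stay finite. A minimal usage sketch with illustrative arrays; note these snippets rely on the legacy behaviour where SciPy re-exported NumPy ufuncs (maximum, minimum, log, subtract) at its top level, so on a modern stack "import numpy as sp" is the drop-in substitute:

import numpy as sp  # stands in for the legacy `import scipy as sp`

y_true = sp.array([1, 0, 1, 1])
y_prob = sp.array([0.9, 0.2, 0.8, 0.6])
print(logloss(y_prob, y_true))  # approx. 0.266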
 def run(self, clusters):
     """Input:
     - clusters: a TrainingClusters instance
     """
     n = clusters.problemSpaceDims()
     self.Ms = [None] * len(clusters.library.primitives)
     for i in range(len(clusters.library.primitives)):
         trainPos = []
         trainNeg = []
         close = clusters.clusterClose[i]
         far = clusters.clusterFar[i]
         for j in close:
             trainPos.append(
                 sp.subtract(
                     clusters.problemFeatures[j],
                     clusters.library.primitives[i].problemFeatures))
         for j in far:
             trainNeg.append(
                 sp.subtract(
                     clusters.problemFeatures[j],
                     clusters.library.primitives[i].problemFeatures))
         sigmaPos = sp.eye(n) * self.regularization
         sigmaNeg = sp.eye(n) * self.regularization
         for v in trainPos:
             sigmaPos += sp.outer(v, v)
         for v in trainNeg:
             sigmaNeg += sp.outer(v, v)
         sigmaPos = mask(sigmaPos, self.mask)
         sigmaNeg = mask(sigmaNeg, self.mask)
         M = LA.inv(sigmaPos) - LA.inv(sigmaNeg)
         self.Ms[i], numNegative = projectSemidef(M)
         print(numNegative, "negative eigenvalues")
     return
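This run method (like the pooled variant further below) hands the indefinite matrix M = inv(sigmaPos) - inv(sigmaNeg) to a projectSemidef helper that is not shown here. A plausible sketch of such a helper, assuming it projects onto the PSD cone by clipping negative eigenvalues and reports how many were clipped:

import numpy as np

def project_semidef(M):
    # Hypothetical stand-in for projectSemidef: eigendecompose the symmetric
    # matrix, zero out the negative eigenvalues, and rebuild.
    w, V = np.linalg.eigh(M)
    num_negative = int(np.sum(w < 0))
    return (V * np.maximum(w, 0)) @ V.T, num_negative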
Example 3
def evaluate_ll(y, yhat):
    epsilon = 1e-15
    yhat = sp.maximum(epsilon, yhat)
    yhat = sp.minimum(1-epsilon, yhat)
    ll = sum(y*sp.log(yhat) + sp.subtract(1,y)*sp.log(sp.subtract(1,yhat)))
    ll = ll * -1.0/len(y)
    return ll
Example 4
def llfun(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll
Example 5
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1-epsilon, p)
    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    res *= -1.0/len(y)
    return res
Example 6
def reflect1(v, u, c):
    print("Reflect by vector math variant 1:")
    c = 0
    center_ = eT(center(len(v)))
    print("center_:", center_)
    print("v:", v)
    v = scipy.subtract(v, center_)
    print("v:", v)
    print("u:", u)
    print("c:", c)
    v_dot_u = scipy.dot(v, u)
    print("v_dot_u:", v_dot_u)
    v_dot_u_minus_c = scipy.subtract(v_dot_u, c)
    print("v_dot_u_minus_c:", v_dot_u_minus_c)
    u_dot_u = scipy.dot(u, u)
    print("u_dot_u:", u_dot_u)
    quotient = scipy.divide(v_dot_u_minus_c, u_dot_u)
    print("quotient:", quotient)
    subtrahend = scipy.multiply((2 * quotient), u)
    print("subtrahend:", subtrahend)
    reflection = scipy.subtract(v, subtrahend)
    print("reflection:", reflection)
    reflection = scipy.add(reflection, center_)
    print("reflection:", reflection)
    return reflection
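Stripped of the debug printing, this is the standard reflection of v across the hyperplane {x : x.u = c}: v' = v - 2*((v.u - c)/(u.u))*u, applied after translating by the externally defined center. A self-contained sketch of the bare formula, assuming plain NumPy and no centering:

import numpy as np

def reflect(v, u, c=0.0):
    # Reflect v across the hyperplane {x : x.u = c}
    return v - 2.0 * ((v @ u - c) / (u @ u)) * u

v = np.array([2.0, 1.0])
u = np.array([1.0, 0.0])            # normal of the plane x = 0
print(reflect(v, u))                # [-2.  1.]
print(reflect(reflect(v, u), u))    # reflection is an involution: [2. 1.]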
 def run(self, clusters):
     """Input:
     - clusters: a TrainingClusters instance
     """
     n = len(clusters.problemFeatures[0])
     sigmaPos = sp.eye(n) * self.regularization
     sigmaNeg = sp.eye(n) * self.regularization
     numPos = 0
     numNeg = 0
     for i, c in enumerate(clusters.clusterClose):
         for j in c:
             v = sp.subtract(clusters.problemFeatures[j],
                             clusters.library.primitives[i].problemFeatures)
             sigmaPos += sp.outer(v, v)
             numPos += 1
     for i, c in enumerate(clusters.clusterFar):
         for j in c:
             v = sp.subtract(clusters.problemFeatures[j],
                             clusters.library.primitives[i].problemFeatures)
             sigmaNeg += sp.outer(v, v)
             numNeg += 1
     #Do we want to do covariance, or just E[xxt]?
     sigmaPos = mask(sigmaPos / numPos, self.mask)
     sigmaNeg = mask(sigmaNeg / numNeg, self.mask)
     M = LA.inv(sigmaPos) - LA.inv(sigmaNeg)
     self.M, numNegative = projectSemidef(M)
     print(numNegative, "negative eigenvalues")
     return
Example 8
 def __init__(self, fc, c_vel, alp_g, mu_los, mu_nlos, a, b, noise_var, hUAV, xUAV, yUAV, xUE, yUE):
     # Horizontal and 3D distances between the UAV and the UE
     dist = sp.sqrt( sp.add(sp.square(sp.subtract(yUAV, yUE)), sp.square(sp.subtract(xUAV, xUE))) )
     R_dist = sp.sqrt( sp.add(sp.square(dist), sp.square(hUAV)) )
     # Free-space-style path loss term in dB
     temp1 = sp.multiply(10, sp.log10(sp.power(fc*4*sp.pi*R_dist/c_vel, alp_g)))
     # Excess LoS/NLoS loss weighted by a sigmoid LoS probability of the elevation angle
     temp2 = sp.multiply(sp.subtract(mu_los, mu_nlos), sp.divide(1, (1+a*sp.exp(-b*sp.arctan(hUAV/dist)-a))))
     # Total average path loss in dB, then converted to a linear gain over noise power
     temp3 = sp.add(sp.add(temp1, temp2), mu_nlos)
     self.pathloss = sp.divide(sp.real(sp.power(10, -sp.divide(temp3, 10))), noise_var)
Example 9
def entropyloss(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    el = sum(act*sp.log10(pred) + sp.subtract(1,act)*sp.log10(sp.subtract(1,pred)))
    el = el * -1.0/len(act)
    return el
Example 10
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1 - epsilon, p)
    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    res *= -1.0 / len(y)
    return res
Example 11
def logloss(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return ll
def logloss(act, pred):
    epsilon = 1e-15
    pred = max(epsilon, pred)
    pred = min(1-epsilon, pred)
    ll = act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred))
    ll = ll * -1.0
    return ll
def logloss(Y_true, Y_pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, Y_pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(Y_true*sp.log(pred) + sp.subtract(1,Y_true)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(Y_true)
    return ll
Example 14
def logloss(act, pred):
    epsilon = 1e-4
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = -1.0/len(act) * sum(act*sp.log(pred) +
            sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    return ll
 def logloss(self, y, pred):
     epsilon = 1e-15
     pred = sp.maximum(epsilon, pred)
     pred = sp.minimum(1-epsilon, pred)
     ll = sum(y*sp.log(pred) + sp.subtract(1,y)*sp.log(sp.subtract(1,pred)))
     ll = ll * -1.0/len(y)
     return ll
Example 16
 def log_loss(self, act, pred, epsilon=1e-07):
     pred = sp.maximum(epsilon, pred)
     pred = sp.minimum(1 - epsilon, pred)
     ll = sum(act * sp.log(pred) +
              sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
     ll = ll * -1.0 / len(act)
     return ll
def logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1-epsilon, p)
    ll = sum(y*sp.log(p) + sp.subtract(1,y)*sp.log(sp.subtract(1,p)))
    ll = ll * -1.0/len(y)
    return ll
Example 18
 def logloss(actual, predict):
     epsilon = 1e-15
     predict = sp.maximum(epsilon, predict)
     predict = sp.minimum(1 - epsilon, predict)
     loss = sum(actual * sp.log(predict) + sp.subtract(1, actual) * sp.log(sp.subtract(1, predict)))
     loss = loss * -1.0 / len(actual)
     return loss
Example 19
def llfun(act, pred,idx):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred[idx])
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(act[idx]*sp.log(pred) + sp.subtract(1,act[idx])*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act[idx])
    return ll
Example 20
def report( right_list, pre_list ):
    epsilon = 1e-15
    act = right_list
    pred = sp.maximum(epsilon, pre_list)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return ll
Example 21
def log_loss(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred.astype(float)) + sp.subtract(1, act.astype(float)) * sp.log(
        sp.subtract(1, pred.astype(float))))
    ll = ll * -1.0 / len(act)
    return ll
Example 22
def logloss(act, predicted):
    predicted = sp.minimum(1-(1e-15), sp.maximum(1e-15, predicted))
    v1 = act*sp.log(predicted)
    v2 = sp.subtract(1,act)
    v3 = sp.log(sp.subtract(1,predicted))
    LogLoss = sum(v1 + v2 * v3)
    LogLoss = LogLoss * (-1.0/len(act))
    return LogLoss
Example 23
def cross_entropy(act, pred):
    #negative log-loss sklearn
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(
        1, pred))
    return -ll
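Unlike the other variants on this page, this one returns the vector of per-sample losses rather than their mean; the scalar metric used elsewhere is recovered by averaging, e.g. sp.mean(cross_entropy(act, pred)).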
Example 24
def logloss(label, prediction):
    epsilon = 1e-15
    prediction = sp.maximum(epsilon, prediction)
    prediction = sp.minimum(1 - epsilon, prediction)
    ll = sum(label * sp.log(prediction) +
             sp.subtract(1, label) * sp.log(sp.subtract(1, prediction)))
    ll = ll * -1.0 / len(label)
    print(ll)
    return ll
Example 25
 def logloss(self, act, pred):
     epsilon = 1e-15
     pred = sp.maximum(epsilon, pred)
     pred = sp.minimum(1-epsilon, pred)
     pred[pred >= 1] = 0.9999999
     ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
     ll = ll * -1.0/len(act)
     return ll
Example 26
def log_loss(act, pred):
    """ Vectorised computation of logloss """
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return ll
Example 27
def logloss(act, pred):
    epsilon = 1e-6
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    #print np.mean(pred)
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return ll
Example 28
def log_loss(y_true, y_pred, eps=1e-15):
    """ As used by Kaggle. """
    y_pred = sp.maximum(eps, y_pred)
    y_pred = sp.minimum(1 - eps, y_pred)
    ll = sum(y_true * sp.log(y_pred) +
             sp.subtract(1, y_true) * sp.log(sp.subtract(1, y_pred)))
    ll = ll * -1.0 / len(y_true)
    return ll
def logloss(real, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(real * sp.log(pred) +
             sp.subtract(1, real) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(real)
    return ll
Example 30
def llfun(act, pred):
    p_true = pred[:, 1]
    epsilon = 1e-15
    p_true = sp.maximum(epsilon, p_true)
    p_true = sp.minimum(1 - epsilon, p_true)
    ll = sum(act * sp.log(p_true) + sp.subtract(1, act) * sp.log(sp.subtract(1, p_true)))
    ll = ll * -1.0 / len(act)
    return ll
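This variant expects the full two-column probability matrix as produced by scikit-learn's predict_proba, taking column 1 as P(class == 1). A usage sketch, where clf, X_test and y_test are illustrative names rather than anything from the snippet:

probs = clf.predict_proba(X_test)   # shape (n_samples, 2)
print(llfun(y_test, probs))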
Example 31
def logloss(pred, dtrain):
    act = dtrain.get_label()
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return 'logloss', ll
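The ('logloss', value) return pair matches XGBoost's custom evaluation-metric convention, so a function with this signature can plausibly be plugged into the classic training API, e.g. xgb.train(params, dtrain, feval=logloss); newer XGBoost releases deprecate feval in favour of custom_metric.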
Example 32
def llfun(act, pred):
    p_true = pred[:, 1]
    epsilon = 1e-15
    p_true = sp.maximum(epsilon, p_true)
    p_true = sp.minimum(1 - epsilon, p_true)
    ll = sum(act * sp.log(p_true) +
             sp.subtract(1, act) * sp.log(sp.subtract(1, p_true)))
    ll = ll * -1.0 / len(act)
    return ll
Example 33
def logloss(act,pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon,pred)
    pred = sp.minimum(1-epsilon,pred)
    # This is really just the cost function from a machine-learning course:
    # sum(act*log(pred) + (1-act)*log(1-pred))
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return ll
Example 34
def logloss_1(act, pred):
    act = act.flatten()
    pred = pred.flatten()
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return ll
Example 35
def log_loss(act, pred, normalize=True):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) +
             sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    if normalize is True:
        ll = ll * -1.0 / len(act)
    return ll
Example 36
def log_loss(act, pred):
    """https://www.kaggle.com/wiki/LogarithmicLoss"""
    epsilon = 1e-15
    import scipy as sp
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0/len(act)
    return ll
# Assumes the pre-1.0 cvxpy API (NonNegative, mul_elemwise, ECOS_BB), e.g.:
# import time; import scipy as sp; from cvxpy import *
def oht_alg(d2d_to_d2d_gains_diag, uav_to_d2d_gains, d2d_to_d2d_gains_diff, eta, power_UAV, power_cir_UAV):
    theta_ini = Parameter(value=1 / 0.5)
    iter = 0
    epsilon = 1
    theta_sol = 0
    iter_phi = []
    while epsilon >= 1e-2 and iter <= 20:
        iter += 1
        if iter == 1:
            theta_ref = theta_ini.value
        else:
            theta_ref = theta_sol

        term_x = sp.divide(1,
                           sp.multiply(sp.subtract(theta_ref, 1), sp.matmul(d2d_to_d2d_gains_diag, uav_to_d2d_gains)))
        term_y = sp.add(
            sp.multiply(sp.subtract(theta_ref, 1), sp.matmul(sp.transpose(d2d_to_d2d_gains_diff), uav_to_d2d_gains)),
            sp.divide(1, eta * power_UAV))

        a_1 = sp.add(sp.divide(sp.multiply(2, sp.log(sp.add(1, sp.divide(1, sp.multiply(term_x, term_y))))), theta_ref),
                     sp.divide(2, sp.multiply(theta_ref, sp.add(sp.multiply(term_x, term_y), 1))))
        b_1 = sp.divide(1, sp.multiply(theta_ref, sp.multiply(term_x, sp.add(sp.multiply(term_x, term_y), 1))))
        c_1 = sp.divide(1, sp.multiply(theta_ref, sp.multiply(term_y, sp.add(sp.multiply(term_x, term_y), 1))))
        d_1 = sp.divide(sp.log(sp.add(1, sp.divide(1, sp.multiply(term_x, term_y)))), sp.square(theta_ref))

        theta = NonNegative(1)
        t_max = NonNegative(1)
        obj_opt = Maximize(t_max)

        constraints = [theta >= 1]
        constraints.append(
            t_max <= a_1 - sp.divide(b_1, sp.matmul(d2d_to_d2d_gains_diag, uav_to_d2d_gains)) * inv_pos(theta - 1)
            - mul_elemwise(c_1,
                           sp.matmul(sp.transpose(d2d_to_d2d_gains_diff), uav_to_d2d_gains) * (theta - 1)
                           + sp.divide(1, eta * power_UAV))
            - d_1 * theta)

        t1 = time.time()

        prob = Problem(obj_opt, constraints)
        prob.solve(solver=ECOS_BB)
        theta_sol = theta.value
        phi_n_sol = sp.multiply((theta_sol - 1) * eta * power_UAV, uav_to_d2d_gains)
        x_rate = sp.matmul(d2d_to_d2d_gains_diag, phi_n_sol)
        term_rate = sp.matmul(sp.transpose(d2d_to_d2d_gains_diff), phi_n_sol) + 1
        rate_sol_ue = sp.divide(sp.log(sp.add(1, sp.divide(x_rate, term_rate))), theta_sol)
        iter_maximin_rate = min(rate_sol_ue)
        term_pow_iter = sp.subtract(1, sp.divide(1, theta_sol)) * eta * power_UAV * sp.add(1, sp.sum(
            uav_to_d2d_gains)) + power_cir_UAV
        iter_phi.append(t_max.value)
        if iter >= 2:
            epsilon = sp.divide(sp.absolute(sp.subtract(iter_phi[iter - 1], iter_phi[iter - 2])),
                                sp.absolute(iter_phi[iter - 2]))
        iter_EE = sp.divide(sp.multiply(1e3, sp.divide(sp.sum(rate_sol_ue), term_pow_iter)), sp.log(2))

    return iter_EE, theta_sol, iter_maximin_rate
def logloss(act, pred):
    '''
    Calculate the log loss incurred for each prediction
    '''
    epsilon = 1e-15
    pred = max(epsilon, pred)
    pred = min(1-epsilon, pred)
    ll = act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred))
    ll = ll * -1.0
    return ll
Example 39
def logloss(obs, pred):
    """LogLoss function
    https://www.kaggle.com/wiki/LogarithmicLoss
    """
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(obs*sp.log(pred) + sp.subtract(1,obs)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(obs)
    return ll
Example 40
    def logloss(self):
        probs = self.predict_proba(self.x)
        # Calculating the loss
        epsilon = 1e-15
        probs = sp.maximum(epsilon, probs)
        probs = sp.minimum(1 - epsilon, probs)
        ll = sum(self.y * sp.log(probs) + sp.subtract(1, self.y) * sp.log(sp.subtract(1, probs)))
        ll = ll * -1.0 / len(self.y)

        return ll[0]
Example 41
def log_loss_fun(act, pred):
    """

    :rtype : float
    """
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return ll
Example 42
def llfun(act, pred):
    # import pdb;pdb.set_trace()
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    # ll = sum(ll)
    ll = ll * -1.0/len(act)

    print(ll)
    return ll
Example 43
def logloss(y_true, y_pred):
    """ As provided by kaggle:
    https://www.kaggle.com/wiki/LogarithmicLoss
    """
    epsilon = 1e-18
    y_pred = sp.maximum(epsilon, y_pred)
    y_pred = sp.minimum(1 - epsilon, y_pred)
    ll = (sum(y_true * sp.log(y_pred) +
              sp.subtract(1, y_true) * sp.log(sp.subtract(1, y_pred))))
    ll = ll * -1.0 / len(y_true)
    return ll
Example 44
def logloss(act, pred):
    """
    logloss function
    imported from kaggle evaluation
    """
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return ll
Example 45
    def logloss(self):
        probs = self.predict_proba(self.x)
        # Calculating the loss
        epsilon = 1e-15
        probs = sp.maximum(epsilon, probs)
        probs = sp.minimum(1 - epsilon, probs)
        ll = sum(self.y * sp.log(probs) +
                 sp.subtract(1, self.y) * sp.log(sp.subtract(1, probs)))
        ll = ll * -1.0 / len(self.y)

        return ll[0]
Example 46
def log_loss(predicted, actual):
    """ Vectorized computation of log loss """
    assert len(predicted) == len(actual)
    epsilon = 1e-15
    predicted = sp.maximum(epsilon, predicted)
    predicted = sp.minimum(1 - epsilon, predicted)

    # compute log loss function (vectorized)
    ll = sum(actual * sp.log(predicted) +
             sp.subtract(1, actual) * sp.log(sp.subtract(1, predicted)))
    ll = ll * -1.0 / len(actual)
    return ll
Example 47
def logloss(act, pred):
    """ Vectorised computation of logloss """
    #cap in official Kaggle implementation, 
    #per forums/t/1576/r-code-for-logloss
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    #compute logloss function (vectorised)
    ll = sum(   act*sp.log(pred) + 
                sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return abs(ll)
    def loss_to_pair(self,
                     pair,
                     gain=1e-3,
                     exp_factor=None,
                     pl_exp=3):
        # Default arguments are evaluated once at definition time, so draw
        # the exponential fading factor freshly on each call instead
        if exp_factor is None:
            exp_factor = sp.random.exponential(1)
        dist = sp.sqrt(
            sp.add(sp.square(sp.subtract(self.tx_x, pair.rx_x)),
                   sp.square(sp.subtract(self.tx_y, pair.rx_y))))
        loss = sp.multiply(
            gain, sp.multiply(sp.square(exp_factor), sp.power(dist, -pl_exp)))

        return loss
Example 49
File: utils.py Project: jfeigl/snn
def logloss(act, pred):
    """ Vectorised computation of logloss """

    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)

    # compute logloss function (vectorised)
    ll = sum(act * sp.log(pred) +
             sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll
Example 50
def logloss(y_true, y_pred):
    """ As provided by kaggle:
    https://www.kaggle.com/wiki/LogarithmicLoss
    """
    epsilon = 1e-18
    y_pred = sp.maximum(epsilon, y_pred)
    y_pred = sp.minimum(1 - epsilon, y_pred)
    ll = (sum(y_true * sp.log(y_pred) +
              sp.subtract(1, y_true) *
              sp.log(sp.subtract(1, y_pred)))
          )
    ll = ll * -1.0 / len(y_true)
    return ll
Example 51
def binary_logloss(act, pred):
    """
    act and pred are vectors of actual class
    and prediction probability of class 1,
    respectively
    """
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) +
             sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll
Example 52
def logloss(act, pred):
    '''
    The officially provided loss function
    :param act: 
    :param pred: 
    :return: 
    '''
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) +
             sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll
Example 53
def rbf(inputs, centroids, weights):
    if len(inputs) > 0:
        icw = np.array([[inputs[i], centroids[i], weights[i]]
                        for i in inputs.keys()])
        sw = np.absolute(np.subtract(icw[:, 0], icw[:, 1]))
        return np.exp(-10 * np.multiply(sw, icw[:, 2]).sum())  # /len(inputs))
    else:
        return 0
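A usage sketch with hypothetical dict-valued arguments (all three share the same keys, and the snippet's import numpy as np is assumed):

inputs = {0: 0.2, 1: 0.8}
centroids = {0: 0.0, 1: 1.0}
weights = {0: 1.0, 1: 1.0}
print(rbf(inputs, centroids, weights))  # exp(-10 * (0.2 + 0.2)) ~= 0.018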
Example 54
File: 3a.py Project: jpdiazp/Tarea4
def mls(p):
    # Assumed imports: from astropy.io import ascii; import scipy as sy
    data = ascii.read("datos.dat")
    x = data["col1"]
    y = data["col2"]
    z = data["col3"]
    sig = 30 * (10 ** (-6))
    Y = sy.subtract(y, 1)
    # Design matrix: powers of x up to degree p, plus an indicator column
    # that is -1 inside the [0.4, 0.7] window and 0 outside it
    A = []
    for m in x:
        pol = [m ** i for i in range(p + 1)]
        if m < 0.4 or m > 0.7:
            pol.append(0)
        else:
            pol.append(-1)
        A.append(pol)
    # Normal equations: theta = inv(A^T A) A^T Y
    theta = sy.dot(sy.linalg.inv(sy.dot(sy.transpose(A), A)), sy.dot(sy.transpose(A), Y))
    # Evaluate the fitted model at every x
    modelo = []
    for i in x:
        poli = 1
        for s in range(p + 1):
            poli += theta[s] * (i ** s)
        e = sy.random.normal(0, sig)  # drawn but never used
        if i < 0.4 or i > 0.7:
            modelo.append(poli)
        else:
            modelo.append(poli - theta[len(theta) - 1])
    # Chi-squared of the fit
    chi2 = 0
    for h in range(len(x)):
        chi2 += ((y[h] - modelo[h]) / sig) ** 2
    return modelo, theta, len(x), sig, chi2
Example 55
 def _compute_single( self, score_type, y, y_hat):
     #--------------------------------------------------------------------------
     if score_type == "accuracy":
         return metrics.accuracy_score(  y, 
                                         y_hat>0.5, 
                                         sample_weight=self.sample_weight, 
                                         normalize=True  )
     #--------------------------------------------------------------------------                                    
     elif score_type == "f1_score":
         return metrics.f1_score(    y, 
                                     y_hat>0.5, 
                                     sample_weight=self.sample_weight     )
     #--------------------------------------------------------------------------
     elif score_type == "auc":
         return metrics.roc_auc_score(   y, 
                                         y_hat, 
                                         sample_weight=self.sample_weight     )
     #--------------------------------------------------------------------------
     elif score_type == "log-loss":
         epsilon = 1e-15
         pred = sp.maximum(epsilon, y_hat)
         pred = sp.minimum(1-epsilon, pred)
         if self.sample_weight is None:
             J = np.sum(     - y*sp.log(pred) \
                             - sp.subtract(1,y)*sp.log(sp.subtract(1,pred))) \
                             /y.shape[0]
         else:
             J = np.sum(     - y*sp.log(pred)*self.sample_weight \
                             - sp.subtract(1,y)*sp.log(sp.subtract(1,pred)) \
                             *self.sample_weight)/y.shape[0]
         return J
     #--------------------------------------------------------------------------
     elif score_type == "quadratic-loss":
         if self.sample_weight is None:
             J = 0.5*np.sum((y-y_hat)**2)/y.shape[0]
         else:
             J = 0.5*np.sum((self.sample_weight*(y-y_hat))**2) \
                         /y.shape[0]
         return J
     #--------------------------------------------------------------------------
     else:
         raise ValueError('Evaluator: undefined score_type.')
Example 56
File: 2a.py Project: jpdiazp/Tarea4
def param(y):
    # x and sy (scipy) come from the enclosing script
    Y = sy.subtract(y, 1)
    A = []
    for m in x:
        A.append([1, m, m**2, m**3, m**4, m**5])
    # Normal equations: theta = inv(A^T A) A^T Y
    theta = sy.dot(sy.linalg.inv(sy.dot(sy.transpose(A), A)), sy.dot(sy.transpose(A), Y))
    return theta
    def score_game(self, year, team_1_id, team_2_id, winning_team_id):
        """
        Compute the log loss portion of this game and return.
        https://www.kaggle.com/wiki/LogarithmicLoss
        """
        if team_1_id >= team_2_id:
            raise Exception("Invalid team Ids while calculating score, team 1's id is greater than or equal to team 2. {}, {}".format(team_1_id, team_2_id))
        prediction = self.predictions[team_1_id][team_2_id]
        epsilon = 1e-15
        prediction = maximum(epsilon, prediction)
        prediction = minimum(1-epsilon, prediction)

        y_i = None
        if team_1_id == winning_team_id:
            y_i = 1
        else:
            y_i = 0

        result = y_i * log(prediction) + subtract(1, y_i) * log(subtract(1, prediction))
        return result
Example 58
File: 2d.py Project: jpdiazp/Tarea4
def param(y):
    # x and sy (scipy) come from the enclosing script
    Y = sy.subtract(y, 1)
    A = []
    for m in x:
        # Indicator column: 0 outside the [0.4, 0.7] window, -1 inside it
        if m < 0.4 or m > 0.7:
            A.append([1, m, m**2, m**3, m**4, m**5, 0])
        else:
            A.append([1, m, m**2, m**3, m**4, m**5, -1])
    theta = sy.dot(sy.linalg.inv(sy.dot(sy.transpose(A), A)), sy.dot(sy.transpose(A), Y))
    return theta
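The normal-equations fits in 2a.py, 3a.py and 2d.py above all invert A^T A explicitly, which is numerically fragile when the design matrix is ill-conditioned; a least-squares solver is the usual substitute. A minimal sketch of the 2a.py fit with NumPy, where param_lstsq is an illustrative name:

import numpy as np

def param_lstsq(x, y):
    # Same design matrix as 2a.py: powers of x from degree 0 through 5
    A = np.vander(np.asarray(x), 6, increasing=True)
    Y = np.asarray(y) - 1
    theta, *_ = np.linalg.lstsq(A, Y, rcond=None)
    return theta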