Example 1
def categorical_hinge(y_true, y_pred):
    if not isinstance(y_true, R.Tensor):
        y_true = R.Tensor(y_true)
    if not isinstance(y_pred, R.Tensor):
        y_pred = R.Tensor(y_pred)

    # neg: largest score assigned to any wrong class; pos: score of the true class
    neg = R.max(R.elemul(R.sub(R.Scalar(1), y_true), y_pred))
    pos = R.sum(R.elemul(y_true, y_pred))
    # categorical hinge: max(neg - pos + 1, 0)
    loss = R.max(R.add(R.sub(neg, pos), R.Scalar(1)), R.Scalar(0))

    return loss
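
For intuition, here is a minimal eager NumPy sketch of the same computation (the `categorical_hinge_np` name and the sample values are illustrative, not part of the library):

import numpy as np

def categorical_hinge_np(y_true, y_pred):
    # neg: best score assigned to a wrong class; pos: score of the true class
    neg = np.max((1.0 - y_true) * y_pred)
    pos = np.sum(y_true * y_pred)
    return np.maximum(neg - pos + 1.0, 0.0)

# y_true is one-hot; a confident correct prediction drives the loss toward 0
print(categorical_hinge_np(np.array([0, 1, 0]), np.array([0.05, 0.9, 0.05])))  # ~0.15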
Example 2
def log_loss(y_true, y_pred, with_logit=True):
    if with_logit:
        # sigmoid() is assumed to be defined in the same module
        y_pred = sigmoid(y_pred)

    y_pred = R.clip(y_pred, R.epsilon(), R.sub(R.Scalar(1), R.epsilon()))
    # binary cross entropy: -mean(y*log(p) + (1 - y)*log(1 - p))
    loss = R.elemul(R.Scalar(-1),
                    R.mean(R.add(R.elemul(y_true, R.natlog(y_pred)),
                                 R.elemul(R.sub(R.Scalar(1), y_true),
                                          R.natlog(R.sub(R.Scalar(1), y_pred))))))

    return loss
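
Equivalently in plain NumPy (a sketch for verification; `log_loss_np` is a hypothetical name and the epsilon value is an assumption):

import numpy as np

def log_loss_np(y_true, y_pred, eps=1e-7):
    # clip to (eps, 1 - eps) so neither log() argument can reach 0
    y_pred = np.clip(y_pred, eps, 1.0 - eps)
    return -np.mean(y_true * np.log(y_pred) + (1.0 - y_true) * np.log(1.0 - y_pred))

print(log_loss_np(np.array([1, 0, 1]), np.array([0.9, 0.1, 0.8])))  # ~0.145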
Example 3
def huber(y_true, y_pred, d):
    if not isinstance(y_true, R.Tensor):
        y_true = R.Tensor(y_true)
    if not isinstance(y_pred, R.Tensor):
        y_pred = R.Tensor(y_pred)

    d = R.Scalar(d)
    x = R.sub(y_true, y_pred)

    # quadratic region: 0.5 * x^2 when |x| <= d
    if R.abs(x) <= d:
        return R.elemul(R.Scalar(0.5), R.elemul(x, x))

    # linear region: 0.5 * d^2 + d * (|x| - d) when |x| > d
    return R.add(R.elemul(R.Scalar(0.5), R.mul(d, d)), R.elemul(d, R.sub(R.abs(x), d)))
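
The same piecewise rule sketched in NumPy, using np.where so it also works elementwise on arrays (the scalar `if` above only makes sense for a single residual); the name and sample values are illustrative:

import numpy as np

def huber_np(y_true, y_pred, d):
    x = y_true - y_pred
    quad = 0.5 * x ** 2                        # |x| <= d: quadratic region
    lin = 0.5 * d ** 2 + d * (np.abs(x) - d)   # |x| >  d: linear region
    return np.where(np.abs(x) <= d, quad, lin)

print(huber_np(np.array([1.0, 4.0]), np.array([1.5, 0.0]), d=1.0))  # [0.125 3.5]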
Example 4
def Poisson_loss(y_true, y_pred):
    if not isinstance(y_true, R.Tensor):
        y_true = R.Tensor(y_true)
    if not isinstance(y_pred, R.Tensor):
        y_pred = R.Tensor(y_pred)

    y_pred = R.clip(y_pred, R.epsilon(), R.sub(R.Scalar(1), R.epsilon()))

    return R.sub(y_pred, R.elemul(y_true, R.natlog(y_pred)))
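
A NumPy sketch of the same formula (hypothetical `poisson_loss_np`; the upper clip mirrors the snippet above, even though Poisson rates are not bounded by 1):

import numpy as np

def poisson_loss_np(y_true, y_pred, eps=1e-7):
    y_pred = np.clip(y_pred, eps, 1.0 - eps)
    # Poisson negative log-likelihood up to a constant: pred - true * log(pred)
    return y_pred - y_true * np.log(y_pred)

print(poisson_loss_np(np.array([1.0, 0.0]), np.array([0.8, 0.3])))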
Example 5
def KL_div_loss(y_true, y_pred):
    if not isinstance(y_true, R.Tensor):
        y_true = R.Tensor(y_true)
    if not isinstance(y_pred, R.Tensor):
        y_pred = R.Tensor(y_pred)

    y_pred = R.clip(y_pred, R.epsilon(), R.sub(R.Scalar(1), R.epsilon()))

    return R.elemul(y_true, R.natlog(R.div(y_true, y_pred)))
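
In eager NumPy the elementwise KL term looks like this (a sketch; it assumes y_true is a strictly positive distribution, since the snippet above only clips y_pred):

import numpy as np

def kl_div_np(y_true, y_pred, eps=1e-7):
    y_pred = np.clip(y_pred, eps, 1.0 - eps)
    # elementwise contribution true * log(true / pred); sum over classes for total KL
    return y_true * np.log(y_true / y_pred)

print(np.sum(kl_div_np(np.array([0.4, 0.6]), np.array([0.5, 0.5]))))  # ~0.02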
Example 6
def sparse_cross_entropy(y_true, y_pred, with_logit=True):
    if with_logit:
        # softmax() is assumed to be defined in the same module
        y_pred = softmax(y_pred)

    y_pred = R.clip(y_pred, R.epsilon(), R.div(R.Scalar(1), R.epsilon()))
    N = y_pred.shape[0]
    # pick each sample's predicted probability for its true class, then average the negative log
    loss = R.elemul(R.Scalar(-1), R.div(R.sum(R.natlog(y_pred[range(N), y_true])), R.Scalar(N)))

    return loss
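
For reference, a NumPy sketch of the integer-label form (hypothetical `sparse_cross_entropy_np`; the fancy indexing picks each row's true-class probability):

import numpy as np

def sparse_cross_entropy_np(y_true, y_pred, eps=1e-7):
    # y_true: integer class indices; y_pred: one row of class probabilities per sample
    y_pred = np.clip(y_pred, eps, 1.0 - eps)
    n = y_pred.shape[0]
    return -np.sum(np.log(y_pred[np.arange(n), y_true])) / n

probs = np.array([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]])
print(sparse_cross_entropy_np(np.array([0, 1]), probs))  # ~0.29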
Example 7
def one_hot_cross_entropy(y_true, y_pred, with_logit=True):
    if with_logit:
        # softmax() is assumed to be defined in the same module
        y_pred = softmax(y_pred)

    y_pred = R.clip(y_pred, R.epsilon(), R.div(R.Scalar(1), R.epsilon()))
    N = y_pred.shape[0]
    loss = R.div(R.elemul(R.Scalar(-1), R.sum(R.elemul(y_true, R.natlog(R.add(y_pred, R.Scalar(1e-9)))))), R.Scalar(N))

    return loss
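
And the one-hot form in NumPy (a sketch; the 1e-9 offset mirrors the snippet above):

import numpy as np

def one_hot_cross_entropy_np(y_true, y_pred, eps=1e-9):
    n = y_pred.shape[0]
    # only the true-class column survives the multiplication by the one-hot rows
    return -np.sum(y_true * np.log(y_pred + eps)) / n

onehot = np.array([[1, 0, 0], [0, 1, 0]])
probs = np.array([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]])
print(one_hot_cross_entropy_np(onehot, probs))  # ~0.29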
Example 8
def f1_score(confusion, average='macro'):
  # NOTE: the source was truncated before this function's header; the name and
  # signature here are reconstructed from the body below (treat them as hypothetical)
  final = []
  confusion = R.Tensor(confusion)

  if average == 'macro':

    for i in confusion:
      TP, TN, FP, FN = i[0], i[1], i[2], i[3]
      Recall = R.div(TP, R.add(TP, FN))
      Precision = R.div(TP, R.add(TP, FP))

      # skip degenerate classes where precision/recall is 0 or undefined (NaN)
      if (Precision == 0 or Recall == 0
          or np.isnan(Recall) or np.isnan(Precision)):
        final.append(0)

      else:
        F1 = R.div(R.elemul(R.Scalar(2), R.elemul(Recall, Precision)), R.add(Recall, Precision))
        final.append(F1)
        
    return R.mean(final)

  if average == 'micro':

    # pool the per-class counts into one global precision/recall pair
    TP = R.sum(confusion, axis=0)[0]
    TN = R.sum(confusion, axis=0)[1]
    FP = R.sum(confusion, axis=0)[2]
    FN = R.sum(confusion, axis=0)[3]

    Recall = R.div(TP, R.add(TP, FN))
    Precision = R.div(TP, R.add(TP, FP))
    F1 = R.div(R.elemul(R.Scalar(2), R.elemul(Recall, Precision)), R.add(Recall, Precision))

    return F1
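
A self-contained NumPy sketch of the same macro/micro logic, assuming one row of (TP, TN, FP, FN) counts per class (names and sample counts are illustrative):

import numpy as np

def f1_from_confusion_np(confusion, average='macro'):
    confusion = np.asarray(confusion, dtype=float)
    if average == 'macro':
        # one F1 per class, then an unweighted mean
        scores = []
        for tp, tn, fp, fn in confusion:
            precision = tp / (tp + fp) if tp + fp else 0.0
            recall = tp / (tp + fn) if tp + fn else 0.0
            denom = precision + recall
            scores.append(2 * precision * recall / denom if denom else 0.0)
        return np.mean(scores)
    if average == 'micro':
        # pool counts across classes before computing precision/recall
        tp, tn, fp, fn = confusion.sum(axis=0)
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        return 2 * precision * recall / (precision + recall)

conf = [[5, 80, 2, 3], [10, 70, 5, 5]]  # per-class rows: (TP, TN, FP, FN)
print(f1_from_confusion_np(conf, 'macro'), f1_from_confusion_np(conf, 'micro'))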