Example #1
File: mnist.py Project: xzzjx/dpdl
def loss_fun(y_true, y_pred):
    '''
    y_true is the teacher's prediction vector
    '''
    return logloss(y_true, y_pred)
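The excerpt does not show where `logloss` comes from; in Keras-based distillation code it is commonly an alias for categorical cross-entropy. A minimal, self-contained sketch under that assumption (the student model and the random data below are placeholders, not part of the project):

import numpy as np
from tensorflow import keras
from tensorflow.keras.losses import categorical_crossentropy as logloss

def loss_fun(y_true, y_pred):
    # same as the excerpt above: y_true carries the teacher's predicted
    # probabilities, not one-hot labels
    return logloss(y_true, y_pred)

student = keras.Sequential([
    keras.layers.Dense(32, activation='relu', input_shape=(784,)),
    keras.layers.Dense(10, activation='softmax'),
])
student.compile(optimizer='adam', loss=loss_fun)

# train against teacher probabilities instead of hard labels
x = np.random.rand(64, 784).astype('float32')
teacher_probs = np.random.dirichlet(np.ones(10), size=64).astype('float32')
student.fit(x, teacher_probs, epochs=1, verbose=0)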
Example #2
def categorical_crossentropy(y_true, y_pred):
    # keep only the first nb_classes columns of each tensor
    y_true = y_true[:, :nb_classes]
    y_pred = y_pred[:, :nb_classes]
    return logloss(y_true, y_pred)
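Slicing both tensors to the first nb_classes columns matters when the target tensor has been padded with extra columns (for example, appended teacher outputs). A small sketch of the effect, with a made-up nb_classes and row layout:

import numpy as np

nb_classes = 3
y_true = np.array([[0.0, 1.0, 0.0, 2.3, -1.1]])  # one-hot labels + 2 extra columns
print(y_true[:, :nb_classes])  # [[0. 1. 0.]] -- only the label columns survive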
Example #3
def soft_logloss(y_true, y_pred):
    # columns 256: of y_true carry the teacher's logits; soften them at T
    logits = y_true[:, 256:]
    y_soft = K.softmax(logits / temperature)
    # compare against the matching soft portion of the student's output
    y_pred_soft = y_pred[:, 256:]
    return logloss(y_soft, y_pred_soft)
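Dividing the logits by `temperature` before the softmax flattens the distribution, which is the "softening" step of distillation. A numeric sketch with NumPy standing in for keras.backend (the values are illustrative):

import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

logits = np.array([6.0, 2.0, 1.0])
print(softmax(logits))        # sharp: ~[0.976, 0.018, 0.007]
print(softmax(logits / 4.0))  # softened at T=4: ~[0.605, 0.222, 0.173]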
Example #4
def categorical_crossentropy(y_true, y_pred):
    y_true = y_true[:, :256]
    y_pred = y_pred[:, :256]
    return logloss(y_true, y_pred)
def knowledge_distillation_loss(input_distillation):
    y_pred, y_true, y_soft, y_pred_soft = input_distillation
    # weighted sum of the hard-label loss and the soft-target loss; the
    # temperature^2 factor keeps the soft-target gradients on the same
    # scale as the hard-label gradients (Hinton et al., 2015)
    return (1 - args.lambda_const) * logloss(y_true, y_pred) + \
           args.lambda_const * args.temperature * args.temperature * logloss(y_soft, y_pred_soft)
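Because this loss takes a single packed tuple rather than the usual (y_true, y_pred) pair, it can be called directly on tensors. A runnable sketch, with a SimpleNamespace standing in for the project's parsed args and random tensors in place of real model outputs:

from types import SimpleNamespace
import tensorflow as tf
from tensorflow.keras.losses import categorical_crossentropy as logloss

args = SimpleNamespace(lambda_const=0.9, temperature=4.0)

def knowledge_distillation_loss(input_distillation):
    y_pred, y_true, y_soft, y_pred_soft = input_distillation
    return (1 - args.lambda_const) * logloss(y_true, y_pred) + \
           args.lambda_const * args.temperature * args.temperature * logloss(y_soft, y_pred_soft)

y_true = tf.eye(4)                                      # hard one-hot labels
y_pred = tf.nn.softmax(tf.random.normal((4, 4)))        # student probabilities
y_soft = tf.nn.softmax(tf.random.normal((4, 4)) / 4.0)  # softened teacher targets
y_pred_soft = tf.nn.softmax(tf.random.normal((4, 4)) / 4.0)
print(knowledge_distillation_loss((y_pred, y_true, y_soft, y_pred_soft)))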
Example #6
def custom_loss(y_true, y_pred):
    # lambda_param = 0 disables the soft-label term below, leaving only
    # the hard-label loss
    lambda_param = 0
    soft_label = logloss(y_true[:, 1:], y_pred)
    hard_label = logloss(y_true[:, :1], y_pred)
    loss = lambda_param * soft_label + (1 - lambda_param) * hard_label
    return loss
def loss(y_true, y_pred):
    entropy = -K.mean(K.sum(y_pred * K.log(y_pred), 1))
    beta = 0.1
    return logloss(y_true, y_pred) - beta * entropy
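The second function implements a confidence penalty (Pereyra et al., 2017): subtracting beta * H(y_pred) from the cross-entropy lowers the total loss for higher-entropy predictions, discouraging over-confident outputs. A quick numeric check of the entropy term:

import numpy as np

def entropy(p):
    return -np.sum(p * np.log(p))

confident = np.array([0.98, 0.01, 0.01])
hedged = np.array([0.70, 0.20, 0.10])
print(entropy(confident))  # ~0.112 nats
print(entropy(hedged))     # ~0.802 nats -> bigger entropy, smaller total loss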