Example #1
# All examples assume TensorFlow 1.x (tf.log, tf.diag) and a
# project-defined MaskedSoftmaxLayer.
import tensorflow as tf


def f1_reweight_loss(logits,
                     label_ids,
                     positive_idx,
                     negative_idx,
                     correct_class_weight=None,
                     wrong_confusion_matrix=None,
                     label_size=19):
    softmax_layer = MaskedSoftmaxLayer("softmax_layer")
    probs = softmax_layer(logits)

    # Probability the model assigns to each example's gold label.
    batch_idx = tf.range(tf.shape(probs)[0])
    label_with_idx = tf.concat(
        [tf.expand_dims(t, 1) for t in [batch_idx, label_ids]], 1)
    golden_prob = tf.gather_nd(probs, label_with_idx)

    # Batch statistics: m/n count positive/negative examples; p1/p2 sum
    # their gold-label probabilities.
    m = tf.reduce_sum(positive_idx)
    n = tf.reduce_sum(negative_idx)
    p1 = tf.reduce_sum(positive_idx * golden_prob)
    p2 = tf.reduce_sum(negative_idx * golden_prob)

    # Positives keep weight 1; negatives are rescaled by
    # p1 / (beta^2 * m + n - p2), with beta2 = beta^2 = 1.
    beta2 = 1
    neg_weight = p1 / ((beta2 * m) + n - p2 + 1e-8)
    all_one = tf.ones(tf.shape(golden_prob))
    loss_weight = all_one * positive_idx + all_one * neg_weight * negative_idx

    # Weighted negative log-likelihood of the gold label.
    loss = -loss_weight * tf.log(golden_prob + 1e-8)
    return loss
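
For reference, the batch statistics above have a natural soft-F-measure reading, sketched here under the assumption that positive_idx and negative_idx are disjoint 0/1 indicator vectors (this interpretation is inferred from the code, not stated by the source):

\[
m = \sum_i \mathrm{pos}_i, \quad
n = \sum_i \mathrm{neg}_i, \quad
p_1 = \sum_i \mathrm{pos}_i \, p_i(y_i), \quad
p_2 = \sum_i \mathrm{neg}_i \, p_i(y_i),
\qquad
w_{\mathrm{neg}} = \frac{p_1}{\beta^2 m + n - p_2 + \epsilon}.
\]

Treating p_1 as soft true positives, m - p_1 as soft false negatives, and n - p_2 as soft false positives, w_neg grows as the negatives become well classified (p_2 close to n) and shrinks when positive recall p_1 / m is low, so early in training the gradient concentrates on the positive examples.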
Example #2
def f1_confusion_loss(logits, label_ids, positive_idx, negative_idx,
                      correct_class_weight, wrong_confusion_matrix,
                      label_size=19):
    softmax_layer = MaskedSoftmaxLayer("softmax_layer")
    probs = softmax_layer(logits)
    log_probs = tf.log(probs + 1e-8)
    log_one_minus_prob = tf.log(1 - probs + 1e-8)

    # Probability (and log-probability) of each example's gold label.
    batch_idx = tf.range(tf.shape(probs)[0])
    label_with_idx = tf.concat(
        [tf.expand_dims(t, 1) for t in [batch_idx, label_ids]], 1)
    golden_prob = tf.gather_nd(probs, label_with_idx)
    golden_log_prob = tf.gather_nd(log_probs, label_with_idx)

    # Confusion-aware weights: reward the gold class and explicitly
    # penalize probability on classes often confused with it.
    positive_confusion_weight = tf.gather(correct_class_weight, label_ids)
    negative_confusion_weight = tf.gather(wrong_confusion_matrix, label_ids)  # B * label_size
    positive_cost = positive_confusion_weight * golden_log_prob
    negative_cost = tf.reduce_sum(negative_confusion_weight * log_one_minus_prob, axis=1)
    cost = positive_cost + negative_cost

    # Same batch-level reweighting as f1_reweight_loss (beta^2 fixed to 1).
    m = tf.reduce_sum(positive_idx)
    n = tf.reduce_sum(negative_idx)
    p1 = tf.reduce_sum(positive_idx * golden_prob)
    p2 = tf.reduce_sum(negative_idx * golden_prob)
    neg_weight = p1 / (m + n - p2 + 1e-8)
    all_one = tf.ones(tf.shape(golden_prob))
    balanced_weight = all_one * positive_idx + all_one * neg_weight * negative_idx
    loss = -balanced_weight * cost
    return loss
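
f1_confusion_loss expects correct_class_weight (shape [label_size]) and wrong_confusion_matrix (shape [label_size, label_size]) but never constructs them. One plausible construction from a confusion matrix counted on a dev set is sketched below; build_confusion_weights and its normalization are hypothetical, not taken from the source:

import numpy as np

def build_confusion_weights(confusion_counts, label_size=19):
    # confusion_counts[i, j]: how often gold class i was predicted as
    # class j (hypothetical helper, not in the original code).
    rates = np.asarray(confusion_counts, dtype=np.float32)
    rates = rates / (rates.sum(axis=1, keepdims=True) + 1e-8)  # row-normalize
    correct_class_weight = np.diagonal(rates).copy()           # [label_size]
    off_diag = 1.0 - np.eye(label_size, dtype=np.float32)
    wrong_confusion_matrix = rates * off_diag                  # [label_size, label_size]
    return correct_class_weight, wrong_confusion_matrix

Whatever the construction, the shapes must match the tf.gather calls above. Example #3 below hard-codes the degenerate case wrong_confusion_matrix = 1 - I, spreading uniform blame over every non-gold class.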
Example #3
def f1_entropy_loss(logits, label_ids, positive_idx=None, negative_idx=None,
                    correct_class_weight=None, wrong_confusion_matrix=None,
                    label_size=19):
    # The wrong_confusion_matrix argument is ignored: it is overwritten
    # with a uniform off-diagonal mask (ones everywhere but the diagonal).
    wrong_confusion_matrix = 1.0 - tf.diag(
        tf.Variable([1.0] * label_size, dtype=tf.float32, trainable=False))
    softmax_layer = MaskedSoftmaxLayer("softmax_layer")
    probs = softmax_layer(logits)
    log_probs = tf.log(probs + 1e-8)
    negative_entropy = probs * tf.log(probs + 1e-8)  # p * log p, per class

    batch_idx = tf.range(tf.shape(probs)[0])
    label_with_idx = tf.concat(
        [tf.expand_dims(t, 1) for t in [batch_idx, label_ids]], 1)
    golden_prob = tf.gather_nd(probs, label_with_idx)
    golden_log_prob = tf.gather_nd(log_probs, label_with_idx)

    # Reward the gold log-probability plus the entropy of the non-gold
    # classes (the gathered mask zeroes out each example's gold class).
    negative_confusion_weight = tf.gather(wrong_confusion_matrix, label_ids)  # B * label_size
    positive_cost = golden_log_prob
    negative_cost = tf.reduce_sum(-negative_confusion_weight * negative_entropy, axis=1)
    cost = positive_cost + negative_cost

    # Same batch-level reweighting as f1_reweight_loss (beta^2 fixed to 1).
    m = tf.reduce_sum(positive_idx)
    n = tf.reduce_sum(negative_idx)
    p1 = tf.reduce_sum(positive_idx * golden_prob)
    p2 = tf.reduce_sum(negative_idx * golden_prob)
    neg_weight = p1 / (m + n - p2 + 1e-8)
    all_one = tf.ones(tf.shape(golden_prob))
    balanced_weight = all_one * positive_idx + all_one * neg_weight * negative_idx
    loss = -balanced_weight * cost
    return loss
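
Reading the 1 - I mask directly off the code, the per-example quantity being maximized is the gold log-probability plus the Shannon entropy of the non-gold classes:

\[
\mathrm{cost}_i = \log p_i(y_i) - \sum_{c \neq y_i} p_i(c) \log p_i(c),
\qquad
\mathcal{L}_i = -\, w_i \, \mathrm{cost}_i,
\]

where w_i is balanced_weight. Probability mass that leaks away from the gold label is thus pushed toward a flat distribution over the wrong classes rather than a single confident wrong answer.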
Example #4
def likelihood_loss(logits, label_ids, positive_idx=None, negative_idx=None,
                    correct_class_weight=None, wrong_confusion_matrix=None,
                    label_size=19):
    # Plain negative log-likelihood of the gold label; the extra arguments
    # are unused and kept only for a uniform call signature.
    softmax_layer = MaskedSoftmaxLayer("softmax_layer")
    probs = softmax_layer(logits)
    batch_idx = tf.range(tf.shape(probs)[0])
    label_with_idx = tf.concat(
        [tf.expand_dims(t, 1) for t in [batch_idx, label_ids]], 1)
    golden_prob = tf.gather_nd(probs, label_with_idx)
    loss = -tf.log(golden_prob + 1e-8)
    return loss
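
likelihood_loss is ordinary cross-entropy written by hand. Assuming MaskedSoftmaxLayer reduces to a plain, unmasked softmax (an assumption; the real layer may mask some classes), it agrees with TensorFlow's built-in op up to the 1e-8 smoothing term:

def likelihood_loss_reference(logits, label_ids):
    # Built-in equivalent under the plain-softmax assumption; returns the
    # same per-example negative log-likelihood, minus the 1e-8 smoothing.
    return tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=label_ids, logits=logits)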
Example #5
def weight_loss(logits, label_ids, positive_idx=None, negative_idx=None,
                correct_class_weight=None, wrong_confusion_matrix=None,
                label_size=19):
    # Static per-class weights: every class gets 1.0 except class 9,
    # which is down-weighted to 0.1 (presumably the dominant catch-all
    # class, given that weight).
    class_weight = [1.0] * label_size
    class_weight[9] = 0.1
    class_weight = tf.Variable(class_weight, dtype=tf.float32,
                               name="cls_weight", trainable=False)

    softmax_layer = MaskedSoftmaxLayer("softmax_layer")
    probs = softmax_layer(logits)
    batch_idx = tf.range(tf.shape(probs)[0])
    label_with_idx = tf.concat(
        [tf.expand_dims(t, 1) for t in [batch_idx, label_ids]], 1)
    golden_prob = tf.gather_nd(probs, label_with_idx)
    loss_weight = tf.gather(class_weight, label_ids)

    loss = -loss_weight * tf.log(golden_prob + 1e-8)
    return loss
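
For completeness, a minimal end-to-end sketch. The MaskedSoftmaxLayer stub and the choice of class 9 as the catch-all negative class are assumptions made purely for illustration (the latter is at least consistent with weight_loss down-weighting class 9); the real layer is defined elsewhere in the project:

class MaskedSoftmaxLayer(object):
    # Hypothetical stand-in for the project's real layer: here it just
    # applies an ordinary softmax and performs no masking.
    def __init__(self, name):
        self.name = name

    def __call__(self, logits):
        return tf.nn.softmax(logits)

# All five losses share one call signature and return per-example losses
# that still need to be reduced before training.
logits = tf.random_normal([32, 19])
label_ids = tf.random_uniform([32], maxval=19, dtype=tf.int32)
negative_idx = tf.cast(tf.equal(label_ids, 9), tf.float32)  # class 9 = negative (assumed)
positive_idx = 1.0 - negative_idx
per_example = f1_reweight_loss(logits, label_ids, positive_idx, negative_idx)
train_loss = tf.reduce_mean(per_example)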