Code example #1
def lossFunc(y_true, y_pred):
    ce_loss = losses.categorical_crossentropy(
        y_true, y_pred, label_smoothing=Label_smoothing)
    aux_loss = losses.categorical_crossentropy(
        y_true, auxiliary, label_smoothing=Label_smoothing)
    total_loss = aux_loss * aux_rate + ce_loss
    return total_loss
Code example #2
def softmax_dice_loss(y_true, y_pred, alpha=1., gumbel_temp=0.1):
    if alpha > 0:
        loss = categorical_crossentropy(y_true, y_pred, from_logits=True) + \
                alpha*gumbel_dice_loss(y_true, y_pred, temperature=gumbel_temp)
        return loss
    else:
        return categorical_crossentropy(y_true, y_pred, from_logits=True)
Code example #3
def loss_reg(y_in, x_in):
    # h is the histogram vector "one-hot encoded" (40 bins in this case), technically part of the "truth" y
    h = y_in[:, 0:NBINS]
    y = y_in[:, NBINS:]
    hpred = x_in[:, 0:NBINS]
    ypred = x_in[:, NBINS:]

    return categorical_crossentropy(y, ypred) + categorical_crossentropy(h, hpred)
Code example #4
def geom_gaussian_loss(y_true, y_pred):
    # loss fn based on eq #26 of http://arxiv.org/abs/1308.0850.
    gaussian_loss = bivariate_gaussian_loss(y_true, y_pred)
    geom_type_error = categorical_crossentropy(K.softmax(y_true[..., GEOM_TYPE_INDEX:RENDER_INDEX]),
                                               K.softmax(y_pred[..., GEOM_TYPE_INDEX:RENDER_INDEX]))
    render_error = categorical_crossentropy(K.softmax(y_true[..., RENDER_INDEX:]),
                                            K.softmax(y_pred[..., RENDER_INDEX:]))
    return gaussian_loss + geom_type_error + render_error
Code example #5
    def triplet_loss(self, inputs):
        a, p, n, true_a, true_p, true_n, prd_a, prd_p, prd_n = inputs
        p_dist = self.newcos_similarity(a, p)
        n_dist = self.newcos_similarity(a, n)

        ## softmax loss add
        cat_a = categorical_crossentropy(true_a, prd_a)
        cat_p = categorical_crossentropy(true_p, prd_p)
        cat_n = categorical_crossentropy(true_n, prd_n)

        return p_dist - n_dist + K.mean(K.mean(cat_a) + K.mean(cat_p) + K.mean(cat_n))  # p_dist*self.pos_r - n_dist*self.neg_r + self.neg_r
Code example #6
    def triplet_mix_loss(self, inputs):
        a, p, n, true_a, true_p, true_n, prd_a, prd_p, prd_n = inputs
        p_sim = self.newcos_similarity(
            a, p)  # new cosine similarity in [0, 1]; higher means more similar
        n_sim = self.newcos_similarity(a, n)

        ## softmax loss add
        cat_a = categorical_crossentropy(true_a, prd_a)
        cat_p = categorical_crossentropy(true_p, prd_p)
        cat_n = categorical_crossentropy(true_n, prd_n)
        return n_sim / p_sim + K.mean(
            cat_a / 2 + cat_p / 2 + cat_n
        )  #self.triplet_cos_sim_loss(inputs[:3])*0.2 + #p_dist*self.pos_r - n_dist*self.neg_r + self.neg_r
Code example #7
def kd_loss(y_true, y_pred, alpha):
    '''
    Labels and predictions are stacked in the following order: hard label/pred first, then soft label/pred.
    '''
    hard_label, soft_label = y_true[:, :NUM_CLASSES], y_true[:, NUM_CLASSES:]
    hard_pred, soft_pred = y_pred[:, :NUM_CLASSES], y_pred[:, NUM_CLASSES:]

    hard_loss = categorical_crossentropy(hard_label, hard_pred)
    soft_loss = categorical_crossentropy(soft_label, soft_pred)

    loss = alpha * hard_loss + soft_loss

    return loss
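The docstring above assumes the targets arrive pre-stacked as [hard | soft] and that the loss takes an extra alpha argument. A minimal sketch of how such stacked targets could be built and the loss bound to a Keras model; make_kd_targets, teacher_probs and the alpha value are illustrative assumptions, not part of the original project.

from functools import partial

import numpy as np

def make_kd_targets(hard_onehot, teacher_probs):
    # Concatenate hard one-hot labels with the teacher's soft predictions so that
    # y_true[:, :NUM_CLASSES] is the hard part and y_true[:, NUM_CLASSES:] the
    # soft part, matching the slicing done in kd_loss above.
    return np.concatenate([hard_onehot, teacher_probs], axis=-1)

# functools.partial keeps the two-argument (y_true, y_pred) signature Keras expects:
# model.compile(optimizer='adam', loss=partial(kd_loss, alpha=0.5))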
Code example #8
File: pvn.py  Project: whfuyn/AlphaFy
    def loss(self, y_true, y_pred):
        # It's the policy_loss if it has two dims.
        if y_pred.shape.as_list()[-2:] == list(BOARD_SHAPE):
            loss = categorical_crossentropy(y_true, y_pred)
        else:
            loss = mean_squared_error(y_true, y_pred)
        return loss
Code example #9
def calculate_loss(names, predictions):
    # categorical_crossentropy expects one-hot targets, so the class indices are
    # one-hot encoded before being wrapped in a backend variable.
    indices = [CATEGORIES.index(name[0].upper()) for name in names]
    y_true = K.variable(np.eye(len(CATEGORIES))[indices])
    y_pred = K.variable(np.array(predictions))
    print(y_true)
    print(y_pred)
    error = K.eval(categorical_crossentropy(y_true, y_pred))
    print(error)
    return error
Code example #10
def cce_dice_loss(y_true, y_pred):
    # Combining the dice loss with categorical crossentropy loss
    loss = categorical_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)

    # Returning the loss
    return loss
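The dice_loss referenced here is defined elsewhere in that project. A minimal sketch of a common soft Dice formulation it could correspond to, assuming 4D (batch, height, width, classes) tensors and a smoothing constant; an illustration of the idea, not the original implementation.

from tensorflow.keras import backend as K

def dice_loss(y_true, y_pred, smooth=1.0):
    # Soft Dice loss: 1 - 2*|A∩B| / (|A| + |B|), computed per image and averaged.
    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
    total = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
    dice = (2.0 * intersection + smooth) / (total + smooth)
    return 1.0 - K.mean(dice)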
Code example #11
File: metrics.py  Project: odedfis/ellipse-project
def ellipse_loss(y_true, y_pred):

    is_ellipse = y_true[..., GT_INDEX.IS_ELLIPSE]

    is_ellipse_loss = focal_loss(is_ellipse, y_pred[..., GT_INDEX.IS_ELLIPSE])

    angle_loss = losses.categorical_crossentropy(y_true[..., GT_INDEX.ANGLE_BIN_BEG:GT_INDEX.ANGLE_BIN_END],
                                                 y_pred[..., GT_INDEX.ANGLE_BIN_BEG:GT_INDEX.ANGLE_BIN_END])

    shape_l1 = K.abs(y_true[:, GT_INDEX.SHAPE_BEG:GT_INDEX.SHAPE_END] -
                     y_pred[:, GT_INDEX.SHAPE_BEG:GT_INDEX.SHAPE_END])

    shape_smooth_l1_loss = K.mean(K.switch(K.less(shape_l1, 1),
                                           0.5 * shape_l1 ** 2,
                                           shape_l1 - 0.5), axis=-1)

    # shape_smooth_l1_loss = K.print_tensor(shape_smooth_l1_loss, "shape_smooth_l1_loss: ")

    is_ellipse_bool = K.equal(is_ellipse, 1)

    zeros = K.zeros_like(is_ellipse)

    shape_loss = K.sum(K.switch(is_ellipse_bool, shape_smooth_l1_loss, zeros)) / (K.sum(is_ellipse) + K.epsilon())

    angle_loss_total = K.sum(K.switch(is_ellipse_bool, angle_loss, zeros)) / (K.sum(is_ellipse) + K.epsilon())

    return is_ellipse_loss + shape_loss + angle_loss_total
Code example #12
def getLosses(true, pred):
    
    product = categorical_crossentropy(true, pred)
    with tf.Session() as sess:
        loss = product.eval()

    return loss
Code example #13
File: fine-tune.py  Project: gayatri267/cs395t-f17
def test_loss():
    np.random.seed(1)
    y_a = K.variable(np.random.random((6, 7)))
    y_b = K.variable(np.random.random((6, 7)))

    print(K.eval(y_a).shape)
    print(K.eval(y_b).shape)

    print(K.eval(y_a))
    # print(K.eval(K.abs(K.argmax(y_a,axis = -1))).shape)
    # print (K.eval(K.abs(K.argmax(y_a,axis = -1))))
    output1 = losses.categorical_crossentropy(y_a, y_b)
    output2 = pure_mean_squared_error(y_a, y_b)
    output = categorical_crossentropy_mean_squared_error(y_a, y_b)
    # output_mse = pure_mean_squared_error(y_a, y_b)
    print('mean_L1:')
    print(K.eval(output).shape)
    print('cross:', K.eval(output1))
    print('mse: ', K.eval(output2))

    print('total: ', K.eval(output))
    # print('total_mse:',K.eval(output_mse))
    # print('cross_entropy:', K.eval(output1))
    # print('mse: ', K.eval(output2))


    assert K.eval(output).shape == (6,)
Code example #14
File: Losses.py  Project: rsyarif/DeepJet
def loss_kldiv(y_in, x):
    """
    mass sculpting penalty term using kullback_leibler_divergence
    y_in: truth [h, y]
    x: predicted NN output for y
    h: the truth mass histogram vector "one-hot encoded" (length NBINS=40)
    y: the truth categorical labels  "one-hot encoded" (length NClasses=2)
    """
    h = y_in[:, 0:NBINS]
    y = y_in[:, NBINS:NBINS + 2]

    # build mass histogram for true q events weighted by q, b prob
    h_alltag_q = K.dot(K.transpose(h), K.dot(tf.diag(y[:, 0]), x))
    # build mass histogram for true b events weighted by q, b prob
    h_alltag_b = K.dot(K.transpose(h), K.dot(tf.diag(y[:, 1]), x))

    # select mass histogram for true q events weighted by q prob; normalize
    h_qtag_q = h_alltag_q[:, 0]
    h_qtag_q = h_qtag_q / K.sum(h_qtag_q, axis=0)
    # select mass histogram for true q events weighted by b prob; normalize
    h_btag_q = h_alltag_q[:, 1]
    h_btag_q = h_btag_q / K.sum(h_btag_q, axis=0)
    # select mass histogram for true b events weighted by q prob; normalize
    h_qtag_b = h_alltag_b[:, 0]
    h_qtag_b = h_qtag_b / K.sum(h_qtag_b, axis=0)
    # select mass histogram for true b events weighted by b prob; normalize
    h_btag_b = h_alltag_b[:, 1]
    h_btag_b = h_btag_b / K.sum(h_btag_b, axis=0)

    # compute KL divergence between true q events weighted by b vs q prob (symmetrize?)
    # compute KL divergence between true b events weighted by b vs q prob (symmetrize?)
    return categorical_crossentropy(y, x) + \
        LAMBDA_ADV*kullback_leibler_divergence(h_btag_q, h_qtag_q) + \
        LAMBDA_ADV*kullback_leibler_divergence(h_btag_b, h_qtag_b)
Code example #15
File: Losses.py  Project: rsyarif/DeepJet
def loss_disc_kldiv(y_in, x):
    """
    Loss for only the discriminator part for kldiv
    """
    y = y_in[:, NBINS:NBINS + 2]

    return categorical_crossentropy(y, x)
Code example #16
File: Losses.py  Project: rsyarif/DeepJet
def loss_reg(y_in, x_in):
    """
    adversarial
    y_in: truth [h, y]
    x: predicted NN output for y
    h: the truth mass histogram vector "one-hot encoded" (length NBINS=40)
    y: the truth categorical labels  "one-hot encoded" (length NClasses=?)
    """
    h = y_in[:, 0:NBINS]
    y = y_in[:, NBINS:NBINS + NCLASSES]

    hpred = x_in[:, 0:NBINS]
    ypred = x_in[:, NBINS:NBINS + NCLASSES]

    return categorical_crossentropy(
        y, ypred) + LAMBDA_ADV * categorical_crossentropy(h, hpred)
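The DeepJet losses above all slice y_in into a mass-histogram block of length NBINS followed by the one-hot class labels. A minimal sketch of how such a stacked truth array could be assembled on the numpy side; the bin range, the make_truth name and the NCLASSES value are assumptions for illustration.

import numpy as np

NBINS = 40      # matches the docstrings above
NCLASSES = 2    # assumed number of classes

def make_truth(mass, labels_onehot, mass_min=40.0, mass_max=200.0):
    # One-hot encode each jet mass into NBINS bins and prepend it to the class
    # labels, producing the [h, y] layout that loss_kldiv / loss_reg slice apart.
    edges = np.linspace(mass_min, mass_max, NBINS + 1)
    idx = np.clip(np.digitize(mass, edges) - 1, 0, NBINS - 1)
    h = np.eye(NBINS)[idx]
    return np.concatenate([h, labels_onehot], axis=-1)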
Code example #17
def build_vae(models, inputs, outputs, z_mean, z_log_var, loss, name):

    encoder, decoder = models
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name=name)

    # VAE loss = mse_loss or xent_loss + kl_loss
    if loss == 'mse':
        reconstruction_loss = mse(inputs, outputs)
    elif loss == 'binary':
        reconstruction_loss = binary_crossentropy(inputs, outputs)
    else:
        reconstruction_loss = categorical_crossentropy(inputs, outputs)

    reconstruction_loss *= encoder.input_shape[1] * encoder.input_shape[1]
    reconstruction_loss = K.mean(
        reconstruction_loss,
        [1, 2])  # https://github.com/keras-team/keras/issues/10155
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(
        z_log_var)  # error to keep within distribution
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer='adam')
    vae.summary()
    return vae
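For reference, the kl_loss expression above is the closed-form KL divergence between the approximate posterior N(z_mean, exp(z_log_var)) and a standard normal prior,

    D_{\mathrm{KL}}\left(\mathcal{N}(\mu, \sigma^2) \,\|\, \mathcal{N}(0, I)\right) = -\tfrac{1}{2} \sum_j \left(1 + \log \sigma_j^2 - \mu_j^2 - \sigma_j^2\right)

with mu = z_mean and log sigma^2 = z_log_var, which is exactly the sum computed above before the -0.5 factor is applied.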
Code example #18
def loss(y_true, y_pred):
    """ Build the loss function given true labels and predicted probabilities.
    Weather labels are mutually exclusive so we use categorical cross entropy ("cross entropy with
    softmax" in tf terminology). Ground labels are multi-class, so we use binary cross entropy
    ("sigmoid" in ternsorflow). Weather labels always come first. """
    return losses.categorical_crossentropy(y_true[:, :N_WEATHER_LABELS], y_pred[:, :N_WEATHER_LABELS]) + \
           losses.binary_crossentropy(y_true[:, N_WEATHER_LABELS:], y_pred[:, N_WEATHER_LABELS:])
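For this loss to line up with its inputs, the model has to emit the weather probabilities (softmax) followed by the ground probabilities (sigmoid) in a single concatenated output. A minimal sketch of such an output head; the label counts and layer names are assumptions for illustration.

from tensorflow.keras import layers

N_WEATHER_LABELS = 4   # assumed number of mutually exclusive weather labels
N_GROUND_LABELS = 13   # assumed number of independent ground labels

def build_output_head(features):
    # Softmax over the mutually exclusive weather labels, sigmoid over the
    # independent ground labels, concatenated so the loss above can slice them.
    weather = layers.Dense(N_WEATHER_LABELS, activation='softmax')(features)
    ground = layers.Dense(N_GROUND_LABELS, activation='sigmoid')(features)
    return layers.Concatenate()([weather, ground])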
Code example #19
def ordinal_loss(y_true, y_pred):
    # https://github.com/JHart96/keras_ordinal_categorical_crossentropy/blob/master/ordinal_categorical_crossentropy.py
    weights = K.cast(
        K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred, axis=1)) /
        (K.int_shape(y_pred)[1] - 1),
        dtype='float32')
    return (1.0 + weights) * losses.categorical_crossentropy(y_true, y_pred)
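A quick numeric illustration of the weighting, with made-up probabilities and a 5-class setup (assuming ordinal_loss above is in scope): predicting class 4 when the true class is 1 gives a weight of |1 - 4| / (5 - 1) = 0.75, so that sample's crossentropy is scaled by 1.75, while an exact argmax match keeps the plain crossentropy.

import numpy as np
from tensorflow.keras import backend as K

y_true = K.constant(np.eye(5)[[1]])                    # true class: 1
y_pred = K.constant([[0.05, 0.05, 0.05, 0.05, 0.80]])  # predicted class: 4
print(K.eval(ordinal_loss(y_true, y_pred)))            # 1.75 * crossentropy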
Code example #20
def cce_jaccard_loss(gt,
                     pr,
                     cce_weight=1.,
                     class_weights=1.,
                     smooth=SMOOTH,
                     per_image=True):
    r"""Sum of categorical crossentropy and jaccard losses:
    
    .. math:: L(A, B) = cce_weight * categorical_crossentropy(A, B) + jaccard_loss(A, B)
    
    Args:
        gt: ground truth 4D keras tensor (B, H, W, C)
        pr: prediction 4D keras tensor (B, H, W, C)
        class_weights: 1. or list of class weights for jaccard loss, len(weights) = C
        smooth: value to avoid division by zero
        per_image: if ``True``, jaccard loss is calculated as mean over images in batch (B),
            else over whole batch

    Returns:
        loss
    
    """
    cce = categorical_crossentropy(gt, pr) * class_weights
    cce = K.mean(cce)
    return cce_weight * cce + jaccard_loss(gt,
                                           pr,
                                           smooth=smooth,
                                           class_weights=class_weights,
                                           per_image=per_image)
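The jaccard_loss this wraps comes from the same segmentation library and is not shown here. A minimal sketch of a comparable soft Jaccard (IoU) loss, assuming (B, H, W, C) tensors; an illustration of the idea, not the library's implementation.

from tensorflow.keras import backend as K

def jaccard_loss(gt, pr, class_weights=1., smooth=1.0, per_image=True):
    # Soft IoU per class: |A∩B| / (|A| + |B| - |A∩B|), smoothed to avoid 0/0.
    axes = [1, 2] if per_image else [0, 1, 2]
    intersection = K.sum(gt * pr, axis=axes)
    union = K.sum(gt, axis=axes) + K.sum(pr, axis=axes) - intersection
    iou = (intersection + smooth) / (union + smooth)
    iou = K.mean(iou * class_weights, axis=-1)  # weighted mean over classes
    return 1.0 - K.mean(iou)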
Code example #21
def kappa_loss(y_true,
               y_pred,
               y_pow=2,
               eps=1e-12,
               N=5,
               bsize=32,
               name='kappa'):
    with tf.name_scope(name):
        y_true = tf.to_float(y_true)
        repeat_op = tf.to_float(
            tf.tile(tf.reshape(tf.range(0, N), [N, 1]), [1, N]))
        repeat_op_sq = tf.square((repeat_op - tf.transpose(repeat_op)))
        weights = repeat_op_sq / tf.to_float((N - 1)**2)

        pred_ = y_pred**y_pow
        try:
            pred_norm = pred_ / (eps +
                                 tf.reshape(tf.reduce_sum(pred_, 1), [-1, 1]))
        except Exception:
            pred_norm = pred_ / (
                eps + tf.reshape(tf.reduce_sum(pred_, 1), [bsize, 1]))

        hist_rater_a = tf.reduce_sum(pred_norm, 0)
        hist_rater_b = tf.reduce_sum(y_true, 0)

        conf_mat = tf.matmul(tf.transpose(pred_norm), y_true)

        nom = tf.reduce_sum(weights * conf_mat)
        denom = tf.reduce_sum(weights *
                              tf.matmul(tf.reshape(hist_rater_a, [N, 1]),
                                        tf.reshape(hist_rater_b, [1, N])) /
                              tf.to_float(bsize))

        return nom * 0.5 / (denom + eps) + categorical_crossentropy(
            y_true, y_pred) * 0.5
Code example #22
def loss(y_true, y_pred):
    layer_tensors = [l.weights[0] for l in model_arg.layers]
    eigenvals = make_eigenval_op(
        num_cluster_eigs, num_cluster_grad_workers)(*layer_tensors)
    cluster_score = tf.reduce_sum(eigenvals)
    return (categorical_crossentropy(y_true, y_pred) +
            (cluster_lambda / num_cluster_eigs) * cluster_score)
Code example #23
File: train.py  Project: kelvincjr/myRepo
def categorical_crossentropy_loss(y_true: np.array,
                                  y_pred: np.array) -> float:
    """
    Compute the categorical crossentropy (multi-class, tensor version).
    y_true: training labels, e.g. [[1,0,0], [0,1,0], ...].
    y_pred: predicted labels, e.g. [[1,0,0], [0,1,0], ...].
    Returns the crossentropy, e.g. [0.983423].
    """

    #  # When a single label is passed in, convert it to a list of labels.
    #  if len(y_true.shape) == 1:
    #      y_true = y_true.reshape((1, y_true.shape[0]))
    #      y_pred = y_pred.reshape((1, y_pred.shape[0]))
    # Build tensors of the form [[0,1,0,...], [0,0,1,...], ...].
    y_true = tf.convert_to_tensor(y_true, dtype='float32')
    y_pred = tf.convert_to_tensor(y_pred, dtype='float32')

    # Weight each sample by the distance between the true and predicted label positions.
    i_true = K.argmax(y_true, axis=1)
    i_pred = K.argmax(y_pred, axis=1)
    distance = K.abs(i_true - i_pred)
    length = K.int_shape(y_pred)[1] - 1
    weights = K.cast(distance / length, dtype='float32')

    losses = ((1.0 + weights) *
              categorical_crossentropy(y_true, y_pred))
    loss = K.eval(K.mean(losses))
    return loss
Code example #24
def loss_a(y_true, y_pred):
    # Sum the crossentropy over 20 consecutive groups of 4 columns, each slice
    # of 4 treated as one categorical target.
    loss_sum = 0
    # print(y_pred.shape, y_true.shape)
    for i in range(0, 80, 4):
        loss_sum += categorical_crossentropy(y_true[:, i:i + 4],
                                             y_pred[:, i:i + 4])
    return loss_sum
Code example #25
def loss_kldiv(y_in,x):
    """
    mass sculpting penalty term using kullback_leibler_divergence
    y_in: truth [h, y]
    x: predicted NN output for y
    h: the truth mass histogram vector "one-hot encoded" (length NBINS=40)
    y: the truth categorical labels  "one-hot encoded" (length NClasses=2)
    """
    h = y_in[:,0:NBINS]
    y = y_in[:,NBINS:NBINS+2]
    h_all = K.dot(K.transpose(h), y)
    h_all_q = h_all[:,0]
    h_all_h = h_all[:,1]
    h_all_q = h_all_q / K.sum(h_all_q,axis=0)
    h_all_h = h_all_h / K.sum(h_all_h,axis=0)
    h_btag_anti_q = K.dot(K.transpose(h), K.dot(tf.diag(y[:,0]),x))
    h_btag_anti_h = K.dot(K.transpose(h), K.dot(tf.diag(y[:,1]),x))
    h_btag_q = h_btag_anti_q[:,1]
    h_btag_q = h_btag_q / K.sum(h_btag_q,axis=0)
    h_anti_q = h_btag_anti_q[:,0]
    h_anti_q = h_anti_q / K.sum(h_anti_q,axis=0)
    h_btag_h = h_btag_anti_h[:,1]
    h_btag_h = h_btag_h / K.sum(h_btag_h,axis=0)
    h_anti_h = h_btag_anti_h[:,0]
    h_anti_h = h_anti_h / K.sum(h_anti_h,axis=0)

    return categorical_crossentropy(y, x) + \
        LAMBDA*kullback_leibler_divergence(h_btag_q, h_anti_q) + \
        LAMBDA*kullback_leibler_divergence(h_btag_h, h_anti_h)         
Code example #26
def ctm_loss(y_true, y_pred):
    pred_list = tf.split(y_pred, 5, axis=-1)
    loss_list = []
    for pred in pred_list:
        loss_list.append(categorical_crossentropy(y_true, pred))
    return (loss_list[0] + loss_list[1] + 0.1 * loss_list[2] +
            loss_list[3] + 0.1 * loss_list[4])
Code example #27
def classcrossentropy(inputs, outputs):
    input1 = inputs[1]
    output1 = outputs[0]

    # Binary reconstruction term, written out explicitly with a small epsilon
    # to avoid log(0).
    recon_loss = -tf.reduce_sum(
        input1 * tf.log(1e-5 + output1) +
        (1 - input1) * tf.log(1e-5 + 1 - output1),
        axis=1)

    input2 = inputs[2]
    output2 = outputs[1]

    # Add the weighted classification term.
    recon_loss = recon_loss + classifierwt * categorical_crossentropy(input2, output2)

    return recon_loss
Code example #28
    def __call__(self, y_true, y_pred, sample_weight=None):
        cost_weight = get_sample_weights(y_true, y_pred, self.cost_mat)
        # cost_weight = K.print_tensor(cost_weight)
        return categorical_crossentropy(
            y_true=y_true,
            y_pred=y_pred,
        ) * cost_weight
Code example #29
    def vae_loss(train_input, output):
        pred_loss = categorical_crossentropy(train_input, output)  # reconstruction term
        # KL divergence between N(0, I) and latent params (eqn 7 in (Doersch, 2016))
        kl_diverg = 0.5 * (K.sum(K.exp(latent_cov)) + K.sum(
            K.square(latent_mean)) - self.latent - K.sum(latent_cov))
        return pred_loss + kl_diverg
Code example #30
def smooth_l1_ce_loss(y_true, y_pred):
    # Use a smooth-L1-style weight on the argmax distance to impose an ordinal
    # (age-distance) constraint on top of the crossentropy.
    true_arg_max = K.cast(K.argmax(y_true, axis=-1), "float32")
    pred_arg_max = K.cast(K.argmax(y_pred, axis=-1), "float32")
    diff = K.abs(true_arg_max - pred_arg_max)
    ls = K.switch(diff < 3, (0.3 * diff * diff), (diff - 0.3)) * categorical_crossentropy(y_true, y_pred)
    return K.mean(ls)
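A quick numeric illustration with made-up 5-bin age predictions (assuming smooth_l1_ce_loss above is in scope): a prediction 4 bins away from the truth scales the crossentropy by 4 - 0.3 = 3.7, a prediction 1 bin away scales it by 0.3 * 1 * 1 = 0.3, and an exact argmax match zeroes the loss entirely regardless of confidence.

import numpy as np
from tensorflow.keras import backend as K

y_true = K.constant(np.eye(5)[[0]])               # true age bin: 0
y_pred = K.constant([[0.1, 0.1, 0.1, 0.1, 0.6]])  # predicted bin: 4, so diff = 4
print(K.eval(smooth_l1_ce_loss(y_true, y_pred)))  # (4 - 0.3) * crossentropy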
Code example #31
File: autoencoder.py  Project: titu1994/Python-Work
def categorical_loss(y_true, y_pred):
    return alpha * categorical_crossentropy(y_true, y_pred)
Code example #32
File: train.py  Project: ikegami-yukino/misc
def perplexity(y_true, y_pred):
    loss = categorical_crossentropy(y_true, y_pred)
    ppl = K.cast(K.pow(math.e, K.mean(loss, axis=-1)), K.floatx())
    return ppl
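A quick sanity check with made-up values, assuming the perplexity function above (and its math import) is in scope: a uniform prediction over 4 classes has crossentropy ln 4 per sample, so the perplexity should come out close to 4.

import numpy as np
from tensorflow.keras import backend as K

y_true = K.constant(np.eye(4)[[0, 1]])      # two one-hot samples
y_pred = K.constant(np.full((2, 4), 0.25))  # uniform predictions
print(K.eval(perplexity(y_true, y_pred)))   # ~4.0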