Example #1
def perceptual_loss(y_true_256, y_pred_256):

    mse_loss = K.mean(mean_squared_error(y_true_256, y_pred_256))
    mae_loss = K.mean(mean_absolute_error(y_true_256, y_pred_256))  # computed but unused below
    img_nrows, img_ncols = 256, 256

    y_pred = y_pred_256  #tf.image.central_crop(y_pred_256, 0.875)
    y_true = y_true_256  #tf.image.central_crop(y_true_256, 0.875)

    # Run both images through a VGG-16 feature extractor and compare activations.
    vgg = VGG_16()
    eval_pred = y_pred
    eval_true = y_true
    for layer in vgg.layers:
        eval_pred = layer(eval_pred)
        eval_true = layer(eval_true)
    vgg_loss = K.mean(mean_squared_error(eval_true, eval_pred))

    #Total variation loss https://github.com/keras-team/keras/blob/master/examples/neural_style_transfer.py
    a = K.square(y_pred[:, :img_nrows - 1, :img_ncols - 1, :] -
                 y_pred[:, 1:, :img_ncols - 1, :])
    b = K.square(y_pred[:, :img_nrows - 1, :img_ncols - 1, :] -
                 y_pred[:, :img_nrows - 1, 1:, :])
    tv_loss = K.sum(K.pow(a + b, 1.25))

    loss = vgg_loss + 0.1 * mse_loss  # + 0.1 * tv_loss

    #perceptual_psnr = - tf.image.psnr(eval_true, eval_pred, K.max(eval_true))

    return loss
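The loss above rebuilds `VGG_16()` on every call. A minimal variant that constructs the feature extractor once and closes over it (a sketch under that assumption, not the original author's code; `VGG_16`, `K` and `mean_squared_error` are the names already used above):

# Build the VGG feature extractor once and freeze it so the loss
# never trains it.
_vgg = VGG_16()
_vgg.trainable = False

def perceptual_loss_cached(y_true, y_pred):
    eval_true, eval_pred = y_true, y_pred
    for layer in _vgg.layers:
        eval_true = layer(eval_true)
        eval_pred = layer(eval_pred)
    mse_loss = K.mean(mean_squared_error(y_true, y_pred))
    return K.mean(mean_squared_error(eval_true, eval_pred)) + 0.1 * mse_loss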
Example #2
def loss_function_ratio_regression(y_true, y_pred):
    r_loss = losses.mean_squared_error(K.exp(K.clip(y_true[:, 1], -10., 10.)),
                                       K.exp(K.clip(y_pred[:, 1], -10., 10.)))
    inverse_r_loss = losses.mean_squared_error(
        K.exp(-K.clip(y_true[:, 1], -10., 10.)),
        K.exp(-K.clip(y_pred[:, 1], -10., 10.)))
    return y_true[:, 0] * r_loss + (1. - y_true[:, 0]) * inverse_r_loss
Example #3
def custom_metric_TRP(y_true, y_pred):

    T_true = y_true[:, 0]
    R_true = y_true[:, 1]
    P_true = y_true[:, 2]

    T_pred = y_pred[:, 0]
    R_pred = y_pred[:, 1]
    P_pred = y_pred[:, 2]

    # Percentage of samples where R is predicted above P.
    mask = tf.cast(K.greater(R_pred, P_pred), tf.float32)
    non_valid = 100 * tf.reduce_sum(mask) / tf.reduce_sum(tf.ones_like(P_pred))

    # SMAPE values are computed but not used in the returned metric.
    smapeT = smape(T_true, T_pred)
    smapeR = smape(R_true, R_pred)
    smapeP = smape(P_true, P_pred)

    mseT = losses.mean_squared_error(T_true, T_pred)
    mseP = losses.mean_squared_error(P_true, P_pred)
    mseR = losses.mean_squared_error(R_true, R_pred)

    mse = losses.mean_squared_error(y_true, y_pred)  # likewise unused

    result = non_valid + mseT + mseP * 10 + mseR

    return result
Example #4
    def mse_mse(y_true, y_pred):
        # Second term compares temporal gradients: each tensor minus its
        # circular shift by one step along axis 2.
        y_true_temp = tf.concat([y_true[:, :, 1:, :], y_true[:, :, 0:1, :]], 2) - y_true
        y_pred_temp = tf.concat([y_pred[:, :, 1:, :], y_pred[:, :, 0:1, :]], 2) - y_pred

        loss = losses.mean_squared_error(y_true, y_pred) + losses.mean_squared_error(y_true_temp, y_pred_temp)

        return loss
Example #5

    def __call__(self, y_true, y_pred):
        # Target and modelled audio, shaped (batch_size, timesteps).
        self.batch_size = tf.shape(y_true)[0]  # dynamic batch size
        _, self.timesteps, _ = y_pred.shape    # static timestep count
        y_true_ = K.reshape(y_true, (self.batch_size, self.timesteps))
        y_pred_ = K.reshape(y_pred, (self.batch_size, self.timesteps))

        # Waveform loss; `Pre_Emphasis`, `esr0` and the coefficient `p`
        # come from the enclosing module.
        if self.wave_loss == "None":
            wave = K.variable(0)
        elif self.wave_loss == "MAE":
            wave = mean_absolute_error(y_true_, y_pred_)
        elif self.wave_loss == "MSE":
            wave = mean_squared_error(y_true_, y_pred_)
        elif self.wave_loss == "MSE_PE":
            y_true_ = Pre_Emphasis(y_true_, self.timesteps, self.batch_size, p)
            y_pred_ = Pre_Emphasis(y_pred_, self.timesteps, self.batch_size, p)
            wave = mean_squared_error(y_true_, y_pred_)
        elif self.wave_loss == "ESR":
            wave = esr0(y_true_, y_pred_)
        elif self.wave_loss == "ESR_PE":
            y_true_ = Pre_Emphasis(y_true_, self.timesteps, self.batch_size, p)
            y_pred_ = Pre_Emphasis(y_pred_, self.timesteps, self.batch_size, p)
            wave = esr0(y_true_, y_pred_)
        else:
            raise ValueError(f"unsupported wave_loss: {self.wave_loss}")

        return K.mean(wave)
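For orientation, a minimal error-to-signal-ratio helper consistent with how `esr0` is called above might look like this (a sketch, not the project's actual implementation):

def esr0(y_true, y_pred):
    # Error-to-signal ratio: squared error normalized by signal energy,
    # per sample; K.epsilon() guards against all-zero targets.
    return K.sum(K.square(y_true - y_pred), axis=-1) / (
        K.sum(K.square(y_true), axis=-1) + K.epsilon())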
Example #6
def loss_function_ratio_regression(y_true, y_pred):

    """
    ROLR loss function.

    :param y_true: ndarray with shape (n_samples, 2 + n_parameters) where the first column are the y_i (0 if from
                   numerator, 1 if from denominator), the second column are the true log r(x, z | theta0, theta1), and the
                   remaining columns are the true t(x, z | theta0).
    :param y_pred: ndarray with shape (n_samples, 2 + n_parameters) where the first column is the classifier decision
                   function \hat{s}_i, the second column are the estimated log \hat{r}(x | theta0, theta1), and the
                   remaining columns are the estimated \hat{t}(x | theta0).
    :return: Squared error on r (for the y=1 samples) plus squared error on 1/r (for the y=0 samples).
    """

    r_loss = losses.mean_squared_error(K.exp(K.clip(y_true[:, 1],
                                                    -settings.log_r_clip_value, settings.log_r_clip_value)),
                                       K.exp(K.clip(y_pred[:, 1],
                                                    -settings.log_r_clip_value, settings.log_r_clip_value)))
    inverse_r_loss = losses.mean_squared_error(K.exp(-K.clip(y_true[:, 1],
                                                             -settings.log_r_clip_value,
                                                             settings.log_r_clip_value)),
                                               K.exp(-K.clip(y_pred[:, 1],
                                                             -settings.log_r_clip_value,
                                                             settings.log_r_clip_value)))

    return y_true[:, 0] * r_loss + (1. - y_true[:, 0]) * inverse_r_loss
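A small sketch of how the target array described in the docstring could be assembled (`y_labels`, `log_r_true` and `t_true` are hypothetical placeholder arrays, not names from the project):

import numpy as np

# Column 0: sample origin (0 = numerator, 1 = denominator)
# Column 1: true log r(x, z | theta0, theta1)
# Columns 2+: true score t(x, z | theta0)
y_true = np.column_stack([y_labels, log_r_true, t_true]).astype('float32')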
Example #7
def main(data: np.ndarray, m: int, units: int, epochs: int, spec_n: int):
    # Split the training data, build the model, and train it
    x, y = split_data(data, m)
    model = make_model(units)
    history = model.fit(x, y, batch_size=m, epochs=epochs, verbose=0)
    # Predict and speculate over the training data, computing a loss for each
    y_pred = model.predict(x)
    y_pred_loss = K.eval(mean_squared_error(y.flatten(), y_pred.flatten()))
    y_spec = speculate(model, x[0], len(y))
    y_spec_loss = K.eval(mean_squared_error(y.flatten(), y_spec.flatten()))
    print(f'm={m} units={units} epochs={epochs} - loss: '
          f'pred={y_pred_loss:.4f} spec={y_spec_loss:.4f}')
    # Skip this run if the speculated sequence deviates too far from the ground truth
    if y_spec_loss >= 0.2:
        return
    # Save the results
    dir_name = (f'results/{m}_{units}_{epochs}_{spec_n}'
                f'-{y_pred_loss:.4f}-{y_spec_loss:.4f}')
    os.makedirs(dir_name)
    spec = speculate(model, data[-m:], spec_n)
    save_model_and_spec(dir_name, model, spec)
    data_pred = np.concatenate((x[0], y_pred))
    data_spec = np.concatenate((x[0], y_spec, spec))
    result = (data, data_pred, data_spec, m, m + len(x), len(data))
    save_history_plot(dir_name, history.history)
    save_results_plot(dir_name, result)
Example #8
    def __call__(self, y_true, y_pred):
        result = None
        if self.loss_type == LossType.mae:
            result = objectives.mean_absolute_error(y_true, y_pred)
        elif self.loss_type == LossType.mse:
            result = objectives.mean_squared_error(y_true, y_pred)
        elif self.loss_type == LossType.rmse:
            result = K.sqrt(objectives.mean_squared_error(y_true, y_pred))
        elif self.loss_type == LossType.variance:
            result = K.sqrt(objectives.mean_squared_error(
                y_true, y_pred)) - objectives.mean_absolute_error(
                    y_true, y_pred)
        elif self.loss_type == LossType.weighted_mae_mse:
            loss1 = objectives.mean_absolute_error(y_true, y_pred)
            loss2 = objectives.mean_squared_error(y_true, y_pred)
            result = self.loss_ratio * loss1 + (1.0 - self.loss_ratio) * loss2
        elif self.loss_type == LossType.weighted_mae_rmse:
            loss1 = objectives.mean_absolute_error(y_true, y_pred)
            loss2 = K.sqrt(objectives.mean_squared_error(y_true, y_pred))
            result = self.loss_ratio * loss1 + (1.0 - self.loss_ratio) * loss2
        elif self.loss_type == LossType.binary_crossentropy:
            result = objectives.binary_crossentropy(y_true, y_pred)
        elif self.loss_type == LossType.weighted_tanhmse_mse:
            loss1 = losses.mean_squared_error(
                K.tanh(self.data_input_scale * y_true),
                K.tanh(self.data_input_scale * y_pred))
            loss2 = losses.mean_squared_error(y_true, y_pred)
            result = self.loss_ratio * loss1 + (1.0 - self.loss_ratio) * loss2
        else:
            raise ValueError(f"unsupported loss type: {self.loss_type}")

        return result
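The dispatch above assumes a `LossType` enum. A plausible definition matching the branches (an assumption; the project's own definition is not shown here):

from enum import Enum

class LossType(Enum):
    mae = 'mae'
    mse = 'mse'
    rmse = 'rmse'
    variance = 'variance'
    weighted_mae_mse = 'weighted_mae_mse'
    weighted_mae_rmse = 'weighted_mae_rmse'
    binary_crossentropy = 'binary_crossentropy'
    weighted_tanhmse_mse = 'weighted_tanhmse_mse'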
Example #9
 def seq2seq_recurrent_loss(y_true, y_pred):
     # Reconstruction loss
     predicted_frames_loss = K.mean(losses.mean_squared_error(y_true, y_pred))

     # Embedding loss; y_true_z, y_pred_z and loss_weights are closed over
     # from the enclosing scope.
     predicted_emb_loss = K.mean(losses.mean_squared_error(y_true_z, y_pred_z))

     return (loss_weights['predicted_frames'] * predicted_frames_loss +
             loss_weights['predicted_emb'] * predicted_emb_loss)
Example #10
		def stft_losses_dec_d(d, _):
			# Note: despite the "rmse_" names these are plain (un-rooted) MSE terms.
			rmse_d = losses.mean_squared_error(d, d_hat_m)
			mae_d = losses.mean_absolute_error(d, d_hat_m)
			loss_d = rmse_d + mae_d
			rmse_neg_d = losses.mean_squared_error(d, x_hat_m)
			mae_neg_d = losses.mean_absolute_error(d, x_hat_m)
			loss_neg_d = rmse_neg_d + mae_neg_d

			total_loss = loss_d  # - 0.01*loss_neg_d
			return total_loss
Example #11
def _mse(y_true, y_pred):
    # keras
    cost = losses.mean_squared_error(y_true, y_pred)
    if cfg.add_xyz_sum1:
        # Regularize the predicted (ux, uy, uz) field toward unit norm;
        # the scalar 1 broadcasts against the squared-norm tensor.
        ux = y_pred[:, :, :, 0][:, :, :, np.newaxis]
        uy = y_pred[:, :, :, 1][:, :, :, np.newaxis]
        uz = y_pred[:, :, :, 2][:, :, :, np.newaxis]
        reg = losses.mean_squared_error(1, ux**2 + uy**2 + uz**2)
        cost += 0.5 * reg
    return cost
Example #12
		def stft_losses_dec_x(x, _):
			rmse_x = losses.mean_squared_error(x, x_hat_m)
			mae_x = losses.mean_absolute_error(x, x_hat_m)
			loss_x = rmse_x + mae_x
			rmse_neg_x = losses.mean_squared_error(x, d_hat_m)
			mae_neg_x = losses.mean_absolute_error(x, d_hat_m)
			loss_neg_x = rmse_neg_x + mae_neg_x

			total_loss = loss_x  # - 0.01*loss_neg_x
			return total_loss
Example #13
		def stft_losses_NMF_d(d, _):
			rmse_d = losses.mean_squared_error(d, d_NMF)
			mae_d = losses.mean_absolute_error(d, d_NMF)
			loss_d = mae_d + rmse_d
			rmse_neg_d = losses.mean_squared_error(d, x_NMF)
			mae_neg_d = losses.mean_absolute_error(d, x_NMF)
			loss_neg_d = mae_neg_d + rmse_neg_d
		
			total_loss = K.abs(loss_d - 0.01*loss_neg_d)
			return total_loss
Example #14
		def stft_losses_NMF_x(x, _):
			rmse_x = losses.mean_squared_error(x, x_NMF)
			mae_x = losses.mean_absolute_error(x, x_NMF)
			loss_x = mae_x + rmse_x
			rmse_neg_x = losses.mean_squared_error(x, d_NMF)
			mae_neg_x = losses.mean_absolute_error(x, d_NMF)
			loss_neg_x = mae_neg_x + rmse_neg_x

			total_loss = K.abs(loss_x - 0.01*loss_neg_x)
			return total_loss
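Examples #10, #12, #13 and #14 are four copies of one pattern and could be factored into a single helper (a sketch, not the project's code): an MSE + MAE term toward the matching estimate, minus a small penalty toward the competing estimate.

def stft_pos_neg_loss(target, pos_est, neg_est, neg_weight=0.01):
    pos = (losses.mean_squared_error(target, pos_est) +
           losses.mean_absolute_error(target, pos_est))
    neg = (losses.mean_squared_error(target, neg_est) +
           losses.mean_absolute_error(target, neg_est))
    return K.abs(pos - neg_weight * neg)

Example #13 then becomes stft_pos_neg_loss(d, d_NMF, x_NMF); the decoder variants correspond to neg_weight=0, where K.abs is a no-op on the nonnegative loss.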
Example #15
 def loss(y_true, y_pred):
     # `x`, `nearby_hole`, `weight_hole` and `weight_ol` are closed over
     # from the enclosing scope.
     zeronan = 0
     isMask_ol = nearby_hole(x, 6)
     isMask_ol = K.cast(isMask_ol, dtype=K.floatx())
     isMask = K.equal(x, zeronan)  # mask of the region with a hole
     isMask_square = K.cast(isMask, dtype=K.floatx())  # mask for pixels inside the hole
     isMask_out = 1 - isMask_square  # mask for pixels outside the hole
     loss_square = losses.mean_squared_error(y_true*isMask_square, y_pred*isMask_square)
     #loss_out = losses.mean_squared_error(y_true*isMask_out, y_pred*isMask_out)
     loss_ol = losses.mean_squared_error(y_true*isMask_ol, y_pred*isMask_ol)
     return loss_square*weight_hole + loss_ol*weight_ol  # weighted sum of the masked losses
Example #16
def mean_squared_loss(y_true, y_pred):
    if "activation" in y_pred.name:
        return mean_squared_error(y_true, y_pred) * 0
    # A plain Python `if` on a symbolic tensor does not branch on its
    # value, so guard the normalization with K.switch instead.
    true_mean = K.mean(y_true)
    y_true = K.switch(K.not_equal(true_mean, 0), y_true / true_mean, y_true)
    pred_mean = K.mean(y_pred)
    y_pred = K.switch(K.not_equal(pred_mean, 0), y_pred / pred_mean, y_pred)
    loss = mean_squared_error(y_true, y_pred)
    return loss
Example #17
 def seq2seq_recurrent_loss(y_true, y_pred):
     # Reconstruction loss
     predicted_frames_loss = losses.mean_squared_error(y_true, y_pred)

     # Embedding loss (computed and printed for debugging, but its weighted
     # term is commented out of the return value below)
     predicted_emb_loss = losses.mean_squared_error(y_true_z, y_pred_z)
     print('y_true:', y_true.shape, 'y_pred:', y_pred.shape)
     print('y_true_z:', y_true_z.shape, 'y_pred_z:', y_pred_z.shape)
     print('predicted_frames_loss:', predicted_frames_loss.shape, 'predicted_emb_loss:', predicted_emb_loss.shape)

     return loss_weights['predicted_frames']*predicted_frames_loss  #+ loss_weights['predicted_emb']*predicted_emb_loss
Example #18
File: models.py  Project: yanis-k/BSSS
    def pit_loss(y_true, y_pred):
        # Permutation-invariant training over two pairings; the index `x`
        # is closed over from the enclosing scope.
        cost1 = mean_squared_error(y_pred[x], y_true[x])

        def c1():
            return tf.reduce_mean(cost1)

        cost2 = mean_squared_error(y_pred[x - 1], y_true[x])

        def c2():
            return tf.reduce_mean(cost2)

        result = tf.cond(tf.less(tf.reduce_mean(cost1), tf.reduce_mean(cost2)),
                         c1, c2)
        return result
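A self-contained two-output permutation-invariant variant, without the free index, might look like this (a generic sketch, not the BSSS project's code):

def pit_loss_two_sources(y_true_1, y_true_2, y_pred_1, y_pred_2):
    # Score both assignments of predictions to targets, keep the cheaper one.
    direct = tf.reduce_mean(mean_squared_error(y_true_1, y_pred_1) +
                            mean_squared_error(y_true_2, y_pred_2))
    swapped = tf.reduce_mean(mean_squared_error(y_true_1, y_pred_2) +
                             mean_squared_error(y_true_2, y_pred_1))
    return tf.minimum(direct, swapped)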
Example #19
def ewc_loss(y_true, y_pred, prev_model_layers, curr_model_layers,
             outputTensor, lambda_const):
    fisher_reg = l2_diff(prev_model_layers, curr_model_layers, y_pred,
                         outputTensor)
    reg_term = (lambda_const / 2) * fisher_reg
    loss = losses.mean_squared_error(y_true, y_pred) + reg_term
    return loss
Example #20

 def on_epoch_end(self, epoch, logs=None):
     if epoch % self.interval == 0:
         y_pred = self.model.predict(self.model.validation_data[0],
                                     verbose=0)
         score = mean_squared_error(self.model.validation_data[1], y_pred)
         #print("mean_squared_error - epoch: {:d} - score: {:.6f}".format(epoch + 1, score))
         self.scores.append(score)
Example #21

    def train_step(self, data):
        # `data` arrives wrapped one level deep; unpack it without
        # overwriting the tuple itself (the original reassigned `data`,
        # indexed back into it, and then passed the whole tuple to the
        # encoder). Assumes fit() received a single (x, x_aux, y) tuple.
        batch = data[0]
        x_main, data_1, true = batch[0], batch[1], batch[2]

        with tf.GradientTape() as tape:
            encoder_output = encoder(x_main)
            encoder_2_output = encoder_2(data_1)
            reconstruction = decoder(
                tf.concat([encoder_output, encoder_2_output], 2))

            reconstruction_loss = tf.reduce_mean(
                mean_squared_error(true, reconstruction))
            cross_recon_loss = tf.reduce_mean(true - reconstruction)
            #trip_s_loss = encoder(data)-encoder(data_from_subject) - encoder(data)-encoder(data_other_subject) + alpha
            #trip_p_loss = encoder(data)-encoder(data_from_beattype) - encoder(data)-encoder(data_other_beattype) + alpha

            total_loss = reconstruction_loss + cross_recon_loss
            #total_loss = reconstruction_loss + trip_s_loss + trip_p_loss
        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return {
            "loss": total_loss,
            "reconstruction_loss": reconstruction_loss,
            "cross_recon_loss": cross_recon_loss
            #"trip_s_loss": trip_s_loss,
            #"trip_p_loss": trip_p_loss
        }
Example #22
    def vae_loss(x, x_dec):
        # recon_loss + kl_loss; z_mean and z_log_sigma are closed over
        # from the encoder (see the wiring sketch below).
        recon_loss = losses.mean_squared_error(x, x_dec)
        kl_loss = -0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
        vae_loss = recon_loss + kl_loss

        return vae_loss
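A minimal wiring that makes `z_mean` and `z_log_sigma` visible to the closure (hypothetical layer sizes; only the closure over the two tensors matters):

from keras import layers, Model

inputs = layers.Input(shape=(784,))
h = layers.Dense(64, activation='relu')(inputs)
z_mean = layers.Dense(2)(h)          # closed over by vae_loss
z_log_sigma = layers.Dense(2)(h)     # closed over by vae_loss

def sampling(args):
    mean, log_sigma = args
    eps = K.random_normal(shape=K.shape(mean))
    return mean + K.exp(0.5 * log_sigma) * eps

z = layers.Lambda(sampling)([z_mean, z_log_sigma])
x_dec = layers.Dense(784, activation='sigmoid')(z)
vae = Model(inputs, x_dec)
vae.compile(optimizer='adam', loss=vae_loss)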
Example #23
def masked_mse(y_true, y_pred):
    # Entries equal to nanval are treated as missing: both tensors are
    # zeroed there, so those positions contribute zero squared error
    # (but still count toward the mean over the last axis).
    nanval = -1e5
    isMask = K.equal(y_true, nanval)
    isMask = 1 - K.cast(isMask, dtype=K.floatx())
    y_true = y_true*isMask
    y_pred = y_pred*isMask
    return losses.mean_squared_error(y_true, y_pred)
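Because masked positions still count toward the per-row mean, rows with many missing entries are systematically under-weighted. A variant that averages only over valid entries (an assumption about intent, not a fix the original asks for):

def masked_mse_normalized(y_true, y_pred):
    nanval = -1e5
    valid = 1 - K.cast(K.equal(y_true, nanval), K.floatx())
    sq = K.square((y_true - y_pred) * valid)
    # Divide by the number of valid entries per row, not the row length.
    return K.sum(sq, axis=-1) / (K.sum(valid, axis=-1) + K.epsilon())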
Example #24
def primary_loss(y_true, y_pred):
    # Three loss terms gated on whether a note is played: where `played`
    # is 0 the prediction is replaced by the target, so the replay and
    # volume terms contribute no error there.
    played = y_true[:, :, :, 0]
    bce_note = losses.binary_crossentropy(y_true[:, :, :, 0], y_pred[:, :, :, 0])
    bce_replay = losses.binary_crossentropy(y_true[:, :, :, 1], tf.multiply(played, y_pred[:, :, :, 1]) + tf.multiply(1 - played, y_true[:, :, :, 1]))
    mse = losses.mean_squared_error(y_true[:, :, :, 2], tf.multiply(played, y_pred[:, :, :, 2]) + tf.multiply(1 - played, y_true[:, :, :, 2]))
    return bce_note + bce_replay + mse
Example #25
def custom_loss(g_xy, grot_xy):
    #Multiple patches
    print(g_xy.shape)
    print(grot_xy.shape)
    #return mean_squared_error(g_xy , grot_xy)
    return -K.mean(20 * K.log(K.max(g_xy))) + 10 * K.log(
        mean_squared_error(g_xy, tf.transpose(grot_xy, [1, 0, 2])))
Example #26
File: pvn.py  Project: whfuyn/AlphaFy
 def loss(self, y_true, y_pred):
     # It's the policy loss if the prediction has two board-shaped
     # trailing dimensions; otherwise the scalar value loss.
     if y_pred.shape.as_list()[-2:] == list(BOARD_SHAPE):
         loss = categorical_crossentropy(y_true, y_pred)
     else:
         loss = mean_squared_error(y_true, y_pred)
     return loss
Example #27

    def __init__(self, tensors: MDPTensors, scale=None, **kwargs):
        super().__init__(tensors, name='reward_prediction', scale=scale)

        if self.loss is None:
            state_rep = tf.keras.Input(
                shape=self.tensors.state_representation.shape[1:],
                name='state_representation_input')
            act_in = tf.keras.Input(shape=self.tensors.action.shape[1:],
                                    name='action_input')
            rewards = tf.keras.Input(shape=self.tensors.reward.shape[1:],
                                     name='rewards_input')

            if kwargs['discrete_actions']:
                act = layers.Lambda(lambda x: tf.cast(x, tf.int32))(act_in)
                act = layers.Lambda(lambda x: tf.one_hot(
                    x, depth=kwargs['n_actions'], dtype=tf.float32))(act)
            else:
                act = act_in

            merged = layers.concatenate([state_rep, act])
            merged, = self.optional_gradient_stop(merged)

            x = layers.Dense(32, activation='elu')(merged)
            pred = layers.Dense(1, activation=None)(x)
            mse = layers.Lambda(lambda x: mean_squared_error(x[0], x[1]))(
                (rewards, pred))
            mse = layers.Lambda(lambda x: backend.mean(x))(mse)
            scaled_mse = layers.Lambda(lambda x: x * self.scale)(mse)

            self.model = Model(inputs=[state_rep, act_in, rewards],
                               outputs=[scaled_mse])
            self.loss = self.model([
                self.tensors.state_representation, self.tensors.action,
                self.tensors.reward
            ])
Example #28
File: model.py  Project: lzzhaha/DeepJ
def primary_loss(y_true, y_pred):
    # Same gating as Example #24: terms are masked by whether a note is played.
    played = y_true[:, :, :, 0]
    bce_note = losses.binary_crossentropy(y_true[:, :, :, 0], y_pred[:, :, :, 0])
    bce_replay = losses.binary_crossentropy(y_true[:, :, :, 1], tf.multiply(played, y_pred[:, :, :, 1]) + tf.multiply(1 - played, y_true[:, :, :, 1]))
    mse = losses.mean_squared_error(y_true[:, :, :, 2], tf.multiply(played, y_pred[:, :, :, 2]) + tf.multiply(1 - played, y_true[:, :, :, 2]))
    return bce_note + bce_replay + mse
Example #29
    def train_step(self, data):
        # As in Example #21, unpack the wrapped tuple without overwriting
        # it (assuming fit() received a single tuple of six tensors).
        batch = data[0]
        x_main, data_1, true = batch[0], batch[1], batch[2]
        data_p, data_s, data_r = batch[3], batch[4], batch[5]

        with tf.GradientTape() as tape:
            encoder_output = encoder(x_main)
            encoder_2_output = encoder_2(data_1)
            reconstruction = decoder(
                tf.concat([encoder_output, encoder_2_output], 2))

            cross_recon_loss = tf.reduce_mean(
                mean_squared_error(true, reconstruction))
            # Triplet-style terms with margin alpha on each encoder's embeddings.
            alpha = 0.2
            trip_p_loss = tf.reduce_mean(
                abs(encoder(x_main) - encoder(data_p)) -
                abs(encoder(x_main) - encoder(data_r)) + alpha)
            trip_s_loss = tf.reduce_mean(
                abs(encoder_2(data_1) - encoder_2(data_s)) -
                abs(encoder_2(data_1) - encoder_2(data_r)) + alpha)
            total_loss = cross_recon_loss + trip_s_loss + trip_p_loss
        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return {
            "loss": total_loss,
            "cross_recon_loss": cross_recon_loss,
            "trip_s_loss": trip_s_loss,
            "trip_p_loss": trip_p_loss
        }
Example #30
def compute_benchmark():
    data, validation_data = get_data()
    mean = np.mean([y for x, y in data])
    # Baseline: predict the training mean for every sample. [mean] * len(data)
    # builds a constant prediction list; the original's `mean * len(data)`
    # produced a single scalar instead.
    benchmark_loss = K.eval(
        K.mean(mean_squared_error([y for x, y in data], [mean] * len(data))))
    print("Guessing Mean MSE:")
    print(benchmark_loss)  # 13051.012331730903
Example #31

 def get_loss(y_pred, y_true):
     # y_true = tf.cast(y_true, 'int32')
     # `src_mask` is closed over; masked positions are excluded from the average.
     loss = mean_squared_error(y_true, y_pred)
     mask = src_mask
     loss = tf.reduce_sum(loss * mask, -1) / tf.reduce_sum(mask, -1)
     loss = K.mean(loss)
     return loss
Example #32
 def objective_function_for_value(y_true, y_pred):
     return mean_squared_error(y_true, y_pred)
Example #33
def l2(y_true, y_pred):
    """ L2 metric (MSE) """
    return losses.mean_squared_error(y_true, y_pred)
Example #34
def rmse(y_true, y_pred):
    # `alpha` is a weighting constant from the enclosing scope.
    return (1. - alpha) * K.sqrt(mean_squared_error(y_true, y_pred))
Example #35

import tensorflow as tf
from keras.losses import mean_squared_error

def my_loss(y_pred, y_true, weights):
    x = y_true - y_pred
    loss = tf.square(x) * weights
    return tf.reduce_sum(loss, axis=-1)

def my_loss2(y_pred, y_true):
    x = y_true - y_pred
    loss = tf.square(x)
    return tf.reduce_sum(loss, axis=-1)


y_pred = tf.Variable([[1.1, 2.3, 3.],[1.2, 2.5, 10.]])
y_true = tf.Variable([[1., 2., 4.],[2., 2., 3.]])
weights = tf.Variable([[0.8],[0.7]])

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated

loss = mean_squared_error(y_true, y_pred)  # symmetric, but (y_true, y_pred) is the conventional order
loss2 = my_loss(y_pred, y_true, weights)
loss3 = my_loss2(y_pred, y_true)

loss_val, loss2_val, loss3_val = sess.run([loss, loss2, loss3])
print(loss_val, loss2_val, loss3_val)