Example #1
		def stft_losses_dec_x(x, _):
			# MSE + MAE between the target x and the estimate x_hat_m
			mse_x = losses.mean_squared_error(x, x_hat_m)
			mae_x = losses.mean_absolute_error(x, x_hat_m)
			loss_x = mse_x + mae_x
			# same terms against the competing estimate d_hat_m
			mse_neg_x = losses.mean_squared_error(x, d_hat_m)
			mae_neg_x = losses.mean_absolute_error(x, d_hat_m)
			loss_neg_x = mse_neg_x + mae_neg_x

			total_loss = loss_x  # - 0.01*loss_neg_x
			return total_loss
Example #2
		def stft_losses_NMF_x(x, _):
			# MSE + MAE between the target x and the NMF estimate x_NMF
			mse_x = losses.mean_squared_error(x, x_NMF)
			mae_x = losses.mean_absolute_error(x, x_NMF)
			loss_x = mae_x + mse_x
			# same terms against the competing estimate d_NMF
			mse_neg_x = losses.mean_squared_error(x, d_NMF)
			mae_neg_x = losses.mean_absolute_error(x, d_NMF)
			loss_neg_x = mae_neg_x + mse_neg_x

			total_loss = K.abs(loss_x - 0.01*loss_neg_x)
			return total_loss
Example #3
		def stft_losses_NMF_d(d, _):
			# MSE + MAE between the target d and the NMF estimate d_NMF
			mse_d = losses.mean_squared_error(d, d_NMF)
			mae_d = losses.mean_absolute_error(d, d_NMF)
			loss_d = mae_d + mse_d
			# same terms against the competing estimate x_NMF
			mse_neg_d = losses.mean_squared_error(d, x_NMF)
			mae_neg_d = losses.mean_absolute_error(d, x_NMF)
			loss_neg_d = mae_neg_d + mse_neg_d

			total_loss = K.abs(loss_d - 0.01*loss_neg_d)
			return total_loss
Example #4
		def stft_losses_dec_d(d, _):
			# MSE + MAE between the target d and the estimate d_hat_m
			mse_d = losses.mean_squared_error(d, d_hat_m)
			mae_d = losses.mean_absolute_error(d, d_hat_m)
			loss_d = mse_d + mae_d
			# same terms against the competing estimate x_hat_m
			mse_neg_d = losses.mean_squared_error(d, x_hat_m)
			mae_neg_d = losses.mean_absolute_error(d, x_hat_m)
			loss_neg_d = mse_neg_d + mae_neg_d

			total_loss = loss_d  # - 0.01*loss_neg_d
			return total_loss
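Examples #1–#4 ignore Keras's y_pred argument (the `_` parameter) and instead close over tensors (x_hat_m, d_hat_m, x_NMF, d_NMF) captured from the enclosing model-building scope, which only works in graph-mode Keras. A minimal sketch of that wiring, with hypothetical Dense outputs standing in for the real estimates:

from keras.layers import Input, Dense
from keras.models import Model
from keras import losses

spec_in = Input(shape=(257,))                    # e.g. one STFT frame
x_hat_m = Dense(257, name='x_hat')(spec_in)      # hypothetical speech estimate
d_hat_m = Dense(257, name='d_hat')(spec_in)      # hypothetical noise estimate
model = Model(spec_in, [x_hat_m, d_hat_m])

def stft_losses_dec_x(x, _):                     # _ is the unused y_pred
    return (losses.mean_squared_error(x, x_hat_m)
            + losses.mean_absolute_error(x, x_hat_m))

model.compile(optimizer='adam', loss=[stft_losses_dec_x, 'mae'])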
Example #5
def L1_loss(y_true, y_pred):
    y_fg_true = y_true[:, :, :, :3]
    y_bg_true = y_true[:, :, :, 3:]

    y_fg_pred = y_pred[:, :, :, :3]
    y_bg_pred = y_pred[:, :, :, 3:]

    l1_fg_loss = losses.mean_absolute_error(y_fg_true, y_fg_pred)
    l1_bg_loss = losses.mean_absolute_error(y_bg_true, y_bg_pred)

    total_loss = 0.8 * l1_fg_loss + 0.2 * l1_bg_loss

    return total_loss
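A minimal usage sketch, assuming a model whose output carries six channels (three foreground plus three background) to match the slicing above:

from keras.layers import Input, Conv2D
from keras.models import Model

inp = Input(shape=(64, 64, 3))
out = Conv2D(6, 3, padding='same')(inp)   # channels 0-2: fg, 3-5: bg
model = Model(inp, out)
model.compile(optimizer='adam', loss=L1_loss)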
Example #6
def iou_with_mse(y_true, y_pred):
    # split the bounding boxes into individual tensors
    xmin_true, xmax_true, ymin_true, ymax_true, zmin_true, zmax_true = tf.split(
        y_true, 6, axis=1)
    xmin_pred, xmax_pred, ymin_pred, ymax_pred, zmin_pred, zmax_pred = tf.split(
        y_pred, 6, axis=1)

    dx = K.minimum(xmax_true, xmax_pred) - K.maximum(xmin_true, xmin_pred)
    dy = K.minimum(ymax_true, ymax_pred) - K.maximum(ymin_true, ymin_pred)
    dz = K.minimum(zmax_true, zmax_pred) - K.maximum(zmin_true, zmin_pred)

    intersection = dx * dy * dz
    intersection = K.relu(intersection)

    # find the total volume and then find the union
    vol_true = tf.abs((xmax_true - xmin_true) * (ymax_true - ymin_true) *
                      (zmax_true - zmin_true))
    vol_pred = tf.abs((xmax_pred - xmin_pred) * (ymax_pred - ymin_pred) *
                      (zmax_pred - zmin_pred))

    # find the union now
    union = vol_true + vol_pred - intersection

    # epsilon guards against a zero union from degenerate boxes
    iou = intersection / (union + K.epsilon())

    return (-1000 * iou) + mean_absolute_error(y_true, y_pred)
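A quick numeric sanity check of the fixed loss on hand-made boxes, assuming the coordinate order (xmin, xmax, ymin, ymax, zmin, zmax) used by the splits above: a unit cube against the same cube shifted by 0.5 along x gives intersection 0.5, union 1.5, so IoU = 1/3.

import tensorflow as tf

box_a = tf.constant([[0., 1., 0., 1., 0., 1.]])    # unit cube
box_b = tf.constant([[0.5, 1.5, 0., 1., 0., 1.]])  # shifted 0.5 along x
# expected: -1000 * (1/3) + MAE of (0.5, 0.5, 0, 0, 0, 0) ~= -333.33 + 0.17
print(K.eval(iou_with_mse(box_a, box_b)))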
Example #7
def test_loss(y_true, y_pred):
    return (sd_weights[0] * dice_coef_loss(y_true, y_pred)
            + sd_weights[0] * seg_crossentropy_weighted(y_true, y_pred)
            + sd_weights[3] * mean_absolute_error(y_true, y_pred)
            + sd_weights[0] * vol_diff(y_true, y_pred)
            + sd_weights[4] * recall_loss(y_true, y_pred))
Example #8
 def mae(self, hr, sr):
     # crop hr so its spatial size matches sr during training
     margin = (tf.shape(hr)[1] - tf.shape(sr)[1]) // 2
     hr_crop = tf.cond(tf.equal(margin, 0), lambda: hr,
                       lambda: hr[:, margin:-margin, margin:-margin, :])
     hr = K.in_train_phase(hr_crop, hr)
     hr.uses_learning_phase = True
     return mean_absolute_error(hr, sr)
Example #9
def perceptual_loss(y_true_256, y_pred_256):

    mse_loss = K.mean(mean_squared_error(y_true_256, y_pred_256))
    mae_loss = K.mean(mean_absolute_error(y_true_256, y_pred_256))
    img_nrows, img_ncols = 256, 256

    y_pred = y_pred_256  #tf.image.central_crop(y_pred_256, 0.875)
    y_true = y_true_256  #tf.image.central_crop(y_true_256, 0.875)

    # run both images through the VGG-16 feature extractor
    e = VGG_16()
    eval_pred = y_pred
    for layer in e.layers:
        eval_pred = layer(eval_pred)
    eval_true = y_true
    for layer in e.layers:
        eval_true = layer(eval_true)
    perceptual_loss = K.mean(mean_squared_error(eval_true, eval_pred))

    #Total variation loss https://github.com/keras-team/keras/blob/master/examples/neural_style_transfer.py
    a = K.square(y_pred[:, :img_nrows - 1, :img_ncols - 1, :] -
                 y_pred[:, 1:, :img_ncols - 1, :])
    b = K.square(y_pred[:, :img_nrows - 1, :img_ncols - 1, :] -
                 y_pred[:, :img_nrows - 1, 1:, :])
    tv_loss = K.sum(K.pow(a + b, 1.25))

    loss = perceptual_loss + tf.scalar_mul(
        0.1, mse_loss)  #+ tf.scalar_mul(0.1, tv_loss)

    #perceptual_psnr = - tf.image.psnr(eval_true, eval_pred, K.max(eval_true))

    return loss
Example #10
def custom_loss_2(y_true, y_pred):
    # cosine-scaled MSE plus MAE
    cosine = losses.cosine_proximity(y_true, y_pred)
    mse = losses.mean_squared_error(y_true, y_pred)
    mae = losses.mean_absolute_error(y_true, y_pred)
    return (1 + cosine) * mse + mae
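Note that Keras 2's cosine_proximity returns the negative mean cosine similarity, so it lies in [-1, 1] with -1 for directionally aligned vectors; the (1 + cosine) factor therefore suppresses the MSE term as predictions align in direction, leaving MAE to penalize scale. A quick check:

import numpy as np

y_t = K.constant(np.array([[1., 2., 3.]]))
y_p = K.constant(np.array([[2., 4., 6.]]))        # same direction, wrong scale
print(K.eval(losses.cosine_proximity(y_t, y_p)))  # ~ -1.0
print(K.eval(custom_loss_2(y_t, y_p)))            # ~ MAE = 2.0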
Example #11
    def syn_loss(self, inputs):

        img_tgt, img_syn = inputs
        img_tgt_cropped = K.slice(img_tgt, (0, 40, 40, 0), (-1, 400, 560, -1))
        img_syn_cropped = K.slice(img_syn, (0, 40, 40, 0), (-1, 400, 560, -1))
        loss = K.mean(mean_absolute_error(img_tgt_cropped, img_syn_cropped))
        return loss
Example #12
    def __call__(self, y_true, y_pred):
        # target and modeled audio, shape (batch_size, timesteps)
        self.batch_size = tf.shape(y_true)[0]  # batch size
        _, self.timesteps, _ = y_pred.shape    # number of timesteps
        y_true_ = K.reshape(y_true, (self.batch_size, self.timesteps))
        y_pred_ = K.reshape(y_pred, (self.batch_size, self.timesteps))

        # waveform loss term
        if   self.wave_loss == "None":
            wave = K.variable(0)
        elif self.wave_loss == "MAE":
            wave = mean_absolute_error(y_true_, y_pred_)
        elif self.wave_loss == "MSE":
            wave = mean_squared_error(y_true_, y_pred_)
        elif self.wave_loss == "MSE_PE":
            y_true_ = Pre_Emphasis(y_true_, self.timesteps, self.batch_size, p)
            y_pred_ = Pre_Emphasis(y_pred_, self.timesteps, self.batch_size, p)
            wave = mean_squared_error(y_true_, y_pred_)
        elif self.wave_loss == "ESR":
            wave = esr0(y_true_, y_pred_)
        elif self.wave_loss == "ESR_PE":
            y_true_ = Pre_Emphasis(y_true_, self.timesteps, self.batch_size, p)
            y_pred_ = Pre_Emphasis(y_pred_, self.timesteps, self.batch_size, p)
            wave = esr0(y_true_, y_pred_)
        else:
            raise ValueError("wave_loss value Error")

        wave = K.mean(wave) 
                
        return wave
Example #13
    def recon_loss_combi(y_true, y_pred):

        mask_value = 0
        mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())

        return lamb * mse(y_true * mask, y_pred * mask) + (
            1 - lamb) * mean_absolute_error(y_true * mask, y_pred * mask)
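recon_loss_combi closes over a free mixing weight `lamb` and an `mse` alias; a factory makes that dependency explicit (a sketch, with `lamb` a hypothetical weight in [0, 1]):

from keras import backend as K
from keras.losses import mean_squared_error as mse, mean_absolute_error

def make_recon_loss(lamb=0.5):
    def recon_loss_combi(y_true, y_pred):
        # zero entries in y_true are treated as masked-out
        mask = K.cast(K.not_equal(y_true, 0), K.floatx())
        return (lamb * mse(y_true * mask, y_pred * mask)
                + (1 - lamb) * mean_absolute_error(y_true * mask, y_pred * mask))
    return recon_loss_combi

# model.compile(optimizer='adam', loss=make_recon_loss(0.7))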
Example #14
 def acc_loss(y_true, y_pred):
     if mean is not None:
         a = (K.mean(
             (y_pred - mean_tensor) * (y_true - mean_tensor)) / K.sqrt(
                 K.mean(K.square((y_pred - mean_tensor))) *
                 K.mean(K.square((y_true - mean_tensor)))))
     else:
         a = (K.mean(y_pred * y_true) /
              K.sqrt(K.mean(K.square(y_pred)) * K.mean(K.square(y_true))))
     if regularize_mean is not None:
         if regularize_mean == 'global':
             m = K.abs((K.mean(y_true) - K.mean(y_pred)) / K.mean(y_true))
         elif regularize_mean == 'spatial':
             m = K.mean(
                 K.abs((K.mean(y_true, axis=[-2, -1]) -
                        K.mean(y_pred, axis=[-2, -1])) /
                       K.mean(y_true, axis=[-2, -1])))
         elif regularize_mean == 'mse':
             m = mean_squared_error(y_true, y_pred)
         elif regularize_mean == 'mae':
             m = mean_absolute_error(y_true, y_pred)
         else:
             raise ValueError('unknown regularize_mean: %s' % regularize_mean)
     if reverse:
         if regularize_mean is not None:
             return m - a
         else:
             return -a
     else:
         if regularize_mean is not None:
             return a - m
         else:
             return a
Example #15
    def customized_loss(self, y_true, y_pred, alpha=0.0001, beta=3):
        """
        Linear combination of MAE and KL divergence.
        """
        loss1 = losses.mean_absolute_error(y_true, y_pred)
        loss2 = losses.kullback_leibler_divergence(y_true, y_pred)
        # note: alpha is unused here; the (alpha/2) weighting is disabled
        return loss1 + beta * loss2
Example #16
def average_precision(y_true, y_pred):
    # despite the name, this simply returns MAE
    loss = mean_absolute_error(y_true=y_true, y_pred=y_pred)
    print('size of y_true: ', K.int_shape(y_true))
    print('size of y_pred: ', K.int_shape(y_pred))
    print('size of loss: ', K.int_shape(loss))
    return loss
Example #17
 def _vae_loss(self, x, x_decoded_mean):
     # xent_loss = losses.binary_crossentropy(x, x_decoded_mean) / self._sigma
     xent_loss = losses.mean_absolute_error(x, x_decoded_mean) / self._sigma
     # xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)
     # xent_loss = self._input_dim * objectives.poisson(x, x_decoded_mean)
     kl_loss = -0.5 * K.mean(1 + self._z_log_var - K.square(self._z_mean) -
                             K.exp(self._z_log_var),
                             axis=-1)
     return xent_loss + 1.0 * kl_loss
Example #18
    def __call__(self, y_true, y_pred):
        ae_pred = y_pred
        ae_grad_pred = jacobian_with_time_layer(y_pred, is_3d=self.is_3d)

        ae_true = y_true
        ae_grad_true = jacobian_with_time_layer(y_true, is_3d=self.is_3d)

        result = 0.0
        if self.use_mse:
            result = losses.mean_squared_error(
                ae_pred, ae_true) + losses.mean_squared_error(
                    ae_grad_pred, ae_grad_true)
        else:
            result = losses.mean_absolute_error(
                ae_pred, ae_true) + losses.mean_absolute_error(
                    ae_grad_pred, ae_grad_true)

        return result
Example #19
    def __call__(self, y_true, y_pred):
        # target and modeled audio, shape (batch_size, timesteps)
        y_true_ = K.reshape(y_true, (self.batch_size, self.timesteps))
        y_pred_ = K.reshape(y_pred, (self.batch_size, self.timesteps))
        
        if set_float == "float16":
            y_true_ = K.cast(y_true_, tf.float32)
            y_pred_ = K.cast(y_pred_, tf.float32)
            
        " スペクトログラムの計算 "
        if ( self.freq_loss != None ) and ( spectrogram_type == "CQT" ):
            true_freq, _ =  self.cqt.calc_cqt(y_true_) 
            pred_freq, _ =  self.cqt.calc_cqt(y_pred_) 
            # 振幅スペクトログラム
            true_freq = K.abs(true_freq)
            pred_freq = K.abs(pred_freq)
        
        elif (self.freq_loss is not None) and (spectrogram_type == "STFT"):
            # magnitude spectrograms of the target and modeled audio
            true_freq = K.abs(tf.signal.stft(signals=y_true_, frame_length=frame_length, frame_step=frame_step, fft_length=N, pad_end=True))
            pred_freq = K.abs(tf.signal.stft(signals=y_pred_, frame_length=frame_length, frame_step=frame_step, fft_length=N, pad_end=True))
            # power spectrograms
            if power:
                true_freq = K.square(true_freq)
                pred_freq = K.square(pred_freq)
            # mel-frequency spectrograms (MFS)
            if mel:
                # mel filter bank
                linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(num_mel_bins_mel, int(N/2+1), Fs, lower_hz_mel, upper_hz_mel)
                # MFS of the target audio
                true_freq = tf.tensordot(true_freq, linear_to_mel_weight_matrix, 1)
                true_freq.set_shape(true_freq.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
                # MFS of the modeled audio
                pred_freq = tf.tensordot(pred_freq, linear_to_mel_weight_matrix, 1)
                pred_freq.set_shape(pred_freq.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
        else:
            raise ValueError("spectrogram type value Error")

        # frequency-domain loss term
        if   self.freq_loss == "None":
            freq = K.variable(0)
        elif self.freq_loss == "MAE":
            freq = mean_absolute_error(true_freq, pred_freq)
        elif self.freq_loss == "MSE":
            freq = mean_squared_error(true_freq, pred_freq)
        elif self.freq_loss == "KL":
            freq = I_divergence(true_freq, pred_freq) 
        elif self.freq_loss == "IS":
            freq = IS_divergence(true_freq, pred_freq) 
        else:
            raise ValueError("freq_loss value Error")
        freq = K.mean(freq)
        

        if set_float == "float16":
            freq = K.cast( freq, dtype=tf.float16)
        return freq 
Example #20
def customized_loss(y_true, y_pred, alpha=0.0001, beta=3):
    """
    Custom loss for the stacked AE:
    linear combination of MAE and KL divergence.
    """
    loss1 = losses.mean_absolute_error(y_true, y_pred)
    loss2 = losses.kullback_leibler_divergence(y_true, y_pred)
    # adjust the weights between the loss components
    return (alpha / 2) * loss1 + beta * loss2
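Keras invokes a loss as loss(y_true, y_pred), so the extra alpha/beta arguments keep their defaults unless bound beforehand; functools.partial is one way (the values here are illustrative):

from functools import partial

weighted_loss = partial(customized_loss, alpha=0.0002, beta=5)
# model.compile(optimizer='adam', loss=weighted_loss)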
Example #21
def nn_stats(clf, trainX, trainY, multi_input=False, name="Train"):

    if multi_input:
        train_images = trainX.return_images()
        train_fc = trainX.return_fc()
        y_pred = clf.predict([train_fc, train_images])
    else:
        y_pred = clf.predict(trainX)

    train_mae = K.eval(mean_absolute_error(trainY, y_pred)).mean()
    print("{} performance average MAE: {}".format(name, round(train_mae, 2)))
Example #22
def dssim_loss(y_true, y_pred):
    """Structural dissimilarity loss + L1 loss
    DSSIM is defined as (1-SSIM)/2
    https://en.wikipedia.org/wiki/Structural_similarity

    :param tensor y_true: Labeled ground truth
    :param tensor y_pred: Predicted labels, potentially non-binary
    :return float: 0.8 * DSSIM + 0.2 * L1
    """
    mae = mean_absolute_error(y_true, y_pred)
    return 0.8 * ((1.0 - metrics.ssim(y_true, y_pred)) / 2.0) + 0.2 * mae
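metrics.ssim is an external helper not shown here; a plausible stand-in (an assumption, not the original implementation) wraps tf.image.ssim for images scaled to [0, 1]:

import tensorflow as tf

def ssim(y_true, y_pred, max_val=1.0):
    # per-image SSIM; inputs assumed in [0, max_val]
    return tf.image.ssim(y_true, y_pred, max_val=max_val)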
Example #23
    def mae_mse_combined_loss(y_true, y_pred):

        y_true_myo = relu(y_true - 1.0 / 3.0) + 1.0 / 3.0
        y_pred_myo = relu(y_pred - 1.0 / 3.0) + 1.0 / 3.0
        y_true_myi = relu(y_true - 2.0 / 3.0) + 2.0 / 3.0
        y_pred_myi = relu(y_pred - 2.0 / 3.0) + 2.0 / 3.0

        # myo_error = mean_squared_error(y_true_myo,y_pred_myo)
        # myi_error = mean_absolute_error(y_true_myi,y_pred_myi)

        loss_types = loss_type.split('+')
        if loss_types[0] == 'mse':
            loss1 = mean_squared_error(y_true_myo, y_pred_myo)
        elif loss_types[0] == 'mae':
            loss1 = mean_absolute_error(y_true_myo, y_pred_myo)
        else:
            raise ValueError('unknown loss type: %s' % loss_types[0])

        if loss_types[1] == 'mse':
            loss2 = mean_squared_error(y_true_myi, y_pred_myi)
        elif loss_types[1] == 'mae':
            loss2 = mean_absolute_error(y_true_myi, y_pred_myi)
        else:
            raise ValueError('unknown loss type: %s' % loss_types[1])
        return (loss1 + loss2 * infarction_weight) / restrict_chn
Example #24
def getGradientsPerEpisode(model, samples, targets):
    gradients_per_episode = []
    rewards = getRewardsWithBaselinePerEpisode(samples, targets)
    for i in range(samples.shape[0]):
        loss = losses.mean_absolute_error(targets, samples[i])
        gradients = K.gradients(loss, model.trainable_weights)
        rewardedGradients = []  # was referenced before assignment
        for g in gradients:
            with tf.Session() as sess:
                print(g, rewards[i].eval())
            rewardedGradients.append(tf.multiply(g, rewards[i][0]))
        gradients_per_episode.append(rewardedGradients)
    return gradients_per_episode
Example #25
 def bump_mse(heatmap_true, spikes_pred):
     # generate the heatmap corresponding to the predicted spikes
     heatmap_pred = K.conv2d(spikes_pred,
                             gfilter,
                             strides=(1, 1),
                             padding='same')
     # heatmaps MSE
     loss_heatmaps = losses.mean_squared_error(heatmap_true, heatmap_pred)
     # l1 on the predicted spikes
     loss_spikes = losses.mean_absolute_error(spikes_pred,
                                              tf.zeros(input_shape))
     return loss_heatmaps + loss_spikes
Example #26
def donut_model(input_shape, intermediate_dim, z_dim, x_dim):
    input = Input(input_shape, name='encoder_input')

    # note: dense_z1/dense_z2 feed the decoder path below; z_mean/z_log_var/z form the encoder head
    dense_z1 = Dense(intermediate_dim,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.001))
    dense_z2 = Dense(intermediate_dim,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.001))

    z_mean = Dense(z_dim, name='z_mean')
    z_log_var = Dense(z_dim, activation='softplus', name='z_log_var')

    z = Lambda(sampling, output_shape=(z_dim, ), name='z')
    # note: dense_x1/dense_x2 feed the encoder path below; zx_mean/zx_log_var/z_x form the decoder head
    dense_x1 = Dense(intermediate_dim,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.001))
    dense_x2 = Dense(intermediate_dim,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.001))

    zx_mean = Dense(x_dim, name='zx_mean')
    zx_log_var = Dense(x_dim, activation='softplus', name='zx_log_var')

    z_x = Lambda(sampling, output_shape=(x_dim, ), name='z_x')

    # build model
    denx1 = dense_x1(input)
    denx2 = dense_x2(denx1)
    z_mean_ = z_mean(denx2)
    z_log_var_ = z_log_var(denx2)
    z_out = z([z_mean_, z_log_var_])

    denxz1 = dense_z1(z_out)
    denxz2 = dense_z2(denxz1)
    zx_mean_d = zx_mean(denxz2)
    zx_log_var_d = zx_log_var(denxz2)
    z_x_d = z_x([zx_mean_d, zx_log_var_d])

    model = Model(input, z_x_d, name='vae_donut')

    reconstruction_loss = mean_absolute_error(input, z_x_d)
    reconstruction_loss *= x_dim
    kl_loss = 1 + z_log_var_ - K.square(z_mean_) - K.exp(z_log_var_)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    model.add_loss(vae_loss)
    # add_loss supplies the objective, so no external loss or metrics are passed
    model.compile(optimizer='sgd')

    return model
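A usage sketch on toy windows (shapes are illustrative; this assumes the usual `sampling` reparameterization helper is in scope). Since add_loss supplies the objective, fit needs no targets:

import numpy as np

x = np.random.rand(256, 120).astype('float32')   # 256 windows of length 120
vae = donut_model(input_shape=(120,), intermediate_dim=64, z_dim=8, x_dim=120)
vae.fit(x, epochs=2, batch_size=32)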
Example #27
    def __call__(self, y_true, y_pred):
        ae_pred = y_pred
        ae_grad_pred, _ = jacobian_layer(y_pred, is_3d=self.is_3d)

        ae_true = y_true
        ae_grad_true, _ = jacobian_layer(y_true, is_3d=self.is_3d)

        velo_dim = 3 if self.is_3d else 2

        # MAE on velo
        result = losses.mean_absolute_error(ae_pred[..., 0:velo_dim],
                                            ae_true[..., 0:velo_dim])
        # Gradient Loss
        result += losses.mean_absolute_error(ae_grad_pred, ae_grad_true)
        if self.sqrd_diff_loss:
            result += self.sqrd_diff_loss_op(ae_true[..., 0:velo_dim],
                                             ae_pred[..., 0:velo_dim])
        if self.density:
            result += losses.mean_squared_error(
                ae_pred[..., velo_dim:velo_dim + 1],
                ae_true[..., velo_dim:velo_dim + 1])
        return result
Example #28
def calc_MAPE_of_liu(lane, df_liu_results):
    ground_truth = df_liu_results.loc[:, (lane, 'ground-truth')]
    liu_estimations = df_liu_results.loc[:, (lane, 'estimated hybrid')]
    ground_truth = np.reshape(ground_truth.values, (ground_truth.values.shape[0]))
    liu_estimations = np.reshape(liu_estimations.values, (liu_estimations.values.shape[0]))
    
    MAPE_liu = K.eval(mean_absolute_percentage_error(ground_truth, liu_estimations))
    print('MAPE liu:', MAPE_liu)
    
    MAE_liu = K.eval(mean_absolute_error(ground_truth, liu_estimations))
    print('MAE liu:', MAE_liu)
    
    return MAPE_liu, MAE_liu
Example #29
 def vae_loss(self, x_input, x_decoded):
     #per sample
     reconstruction_loss = original_dim * losses.mean_absolute_error(x_input, x_decoded)
     kl_loss = - 0.5 * K.sum(1 + z_log_var_encoded - K.square(z_mean_encoded) - 
                             K.exp(z_log_var_encoded), axis=-1)
     
     # per data point (alternative):
     # reconstruction_loss = losses.mean_absolute_error(x_input, x_decoded)
     # kl_loss = - 0.5 * K.sum(1 + z_log_var_encoded - K.square(z_mean_encoded) -
     #                         K.exp(z_log_var_encoded), axis=-1) / latent_dim

     # alternative weighting: K.mean(reconstruction_loss + K.get_value(beta) * kl_loss)
     return K.mean(reconstruction_loss + alpha * kl_loss)
Example #30
def calc_MAPE_of_predictions(lane, df_predictions):
    ground_truth_queue = df_predictions.loc[:, (lane, 'ground-truth queue')]
    prediction_queue = df_predictions.loc[:, (lane, 'prediction queue')]    
    ground_truth_queue = np.reshape(ground_truth_queue.values, (ground_truth_queue.values.shape[0]))
    prediction_queue = np.reshape(prediction_queue.values, (prediction_queue.values.shape[0]))
    
    MAPE_queue = K.eval(mean_absolute_percentage_error(ground_truth_queue, prediction_queue))
    print('MAPE queue:', MAPE_queue)
    MAE_queue = K.eval(mean_absolute_error(ground_truth_queue, prediction_queue))
    print('MAE queue:', MAE_queue)
    
    ground_truth_nVehSeen = df_predictions.loc[:, (lane, 'ground-truth nVehSeen')]
    prediction_nVehSeen = df_predictions.loc[:, (lane, 'prediction nVehSeen')]  
    ground_truth_nVehSeen = np.reshape(ground_truth_nVehSeen.values, (ground_truth_nVehSeen.values.shape[0]))
    prediction_nVehSeen = np.reshape(prediction_nVehSeen.values, (prediction_nVehSeen.values.shape[0]))
    
    MAPE_nVehSeen = K.eval(mean_absolute_percentage_error(ground_truth_nVehSeen, prediction_nVehSeen))
    print('MAPE nVehSeen:', MAPE_nVehSeen)
    
    MAE_nVehSeen = K.eval(mean_absolute_error(ground_truth_nVehSeen, prediction_nVehSeen))
    print('MAE nVehSeen:', MAE_nVehSeen)
    
    return MAPE_queue, MAE_queue, MAPE_nVehSeen, MAE_nVehSeen
Example #31
def l1(y_true, y_pred):
    """ L1 metric (MAE) """
    return losses.mean_absolute_error(y_true, y_pred)