def penalized_loss(y_true, y_pred):
    """MAPE loss plus a penalty whenever the forecast leaves the band
    [input_D_min, input_D_max] of the day's actual demand.

    `input_D_min` / `input_D_max` are expected to be in scope at the
    module level (per-day demand bounds).
    """
    penalty_weight = 0.5
    # Base error term: standard mean absolute percentage error.
    mape_term = mean_absolute_percentage_error(y_true, y_pred)
    # Penalty for forecasts whose per-sample maximum exceeds the upper bound.
    overshoot = K.mean(K.maximum(K.max(y_pred, axis=1) - input_D_max, 0.), axis=-1)
    # Penalty for forecasts whose per-sample minimum falls below the lower bound.
    undershoot = K.mean(K.maximum(input_D_min - K.min(y_pred, axis=1), 0.), axis=-1)
    return mape_term + penalty_weight * (overshoot + undershoot)
Example #2
0
File: losses.py  Project: zhupeiru/dts
def write(op, y_true, y_pred):
    """Evaluate a suite of regression metrics on (y_true, y_pred) and emit
    one formatted line per metric through the writer callable `op`."""
    truth = y_true.astype(np.float32)
    forecast = y_pred.astype(np.float32)

    def _scalar(metric_tensor):
        # Reduce a per-sample metric tensor to one Python float.
        return K.eval(K.mean(metric_tensor))

    op('MAE {}\n'.format(_scalar(mae(truth, forecast))))
    op('MSE {}\n'.format(_scalar(mse(truth, forecast))))
    # The RMSE-family metrics take the square root *after* averaging.
    op('RMSE {}\n'.format(K.eval(K.sqrt(K.mean(mse(truth, forecast))))))
    op('NRMSE_a {}\n'.format(K.eval(K.sqrt(K.mean(nrmse_a(truth, forecast))))))
    op('NRMSE_b {}\n'.format(K.eval(K.sqrt(K.mean(nrmse_b(truth, forecast))))))
    op('MAPE {}\n'.format(_scalar(mean_absolute_percentage_error(truth, forecast))))
    op('NRMSD {}\n'.format(_scalar(nrmsd(truth, forecast))))
    op('SMAPE {}\n'.format(_scalar(smape(truth, forecast))))
    op('R2 {}\n'.format(_scalar(r2(truth, forecast))))
Example #3
0
def calc_MAPE_of_liu(lane, df_liu_results):
    """Compute MAPE and MAE between the ground truth and the Liu hybrid
    estimate for one lane, print both, and return them as a tuple.

    `df_liu_results` is expected to have a two-level column index of
    (lane, metric-name) — presumably per-lane time series; confirm with caller.
    """
    truth = df_liu_results.loc[:, (lane, 'ground-truth')].values
    estimate = df_liu_results.loc[:, (lane, 'estimated hybrid')].values
    # Flatten to 1-D vectors before handing them to the Keras metrics.
    truth = truth.reshape(truth.shape[0])
    estimate = estimate.reshape(estimate.shape[0])

    mape_value = K.eval(mean_absolute_percentage_error(truth, estimate))
    print('MAPE liu:', mape_value)

    mae_value = K.eval(mean_absolute_error(truth, estimate))
    print('MAE liu:', mae_value)

    return mape_value, mae_value
Example #4
0
def calc_MAPE_of_predictions(lane, df_predictions):
    """Compute and print MAPE/MAE for the 'queue' and 'nVehSeen' prediction
    columns of one lane.

    Returns:
        (MAPE_queue, MAE_queue, MAPE_nVehSeen, MAE_nVehSeen)
    """
    def _evaluate_pair(truth_col, pred_col, label):
        # Pull one (ground-truth, prediction) column pair, flatten to 1-D,
        # then score it with the Keras metrics and print the results.
        truth = df_predictions.loc[:, (lane, truth_col)].values
        predicted = df_predictions.loc[:, (lane, pred_col)].values
        truth = truth.reshape(truth.shape[0])
        predicted = predicted.reshape(predicted.shape[0])

        mape_value = K.eval(mean_absolute_percentage_error(truth, predicted))
        print('MAPE {}:'.format(label), mape_value)
        mae_value = K.eval(mean_absolute_error(truth, predicted))
        print('MAE {}:'.format(label), mae_value)
        return mape_value, mae_value

    MAPE_queue, MAE_queue = _evaluate_pair(
        'ground-truth queue', 'prediction queue', 'queue')
    MAPE_nVehSeen, MAE_nVehSeen = _evaluate_pair(
        'ground-truth nVehSeen', 'prediction nVehSeen', 'nVehSeen')

    return MAPE_queue, MAE_queue, MAPE_nVehSeen, MAE_nVehSeen
 def EnergyLoss(y_truth, y_predicted):
     """Return the MAPE computed on the first column only of truth vs
     prediction (the remaining columns are ignored)."""
     truth_first_col = y_truth[:, 0]
     predicted_first_col = y_predicted[:, 0]
     return mean_absolute_percentage_error(truth_first_col, predicted_first_col)
Example #6
0
def penalized_loss(y_true, y_pred):
    """MAPE plus a (lightly weighted, beta=0.1) penalty for forecasts that
    fall outside the [input_D_min, input_D_max] demand bounds, which must be
    defined at module level."""
    beta = 0.1
    base_error = mean_absolute_percentage_error(y_true, y_pred)
    # How far the forecast's per-sample max overshoots the upper bound.
    upper_violation = K.mean(
        K.maximum(K.max(y_pred, axis=1) - input_D_max, 0.), axis=-1)
    # How far the forecast's per-sample min undershoots the lower bound.
    lower_violation = K.mean(
        K.maximum(input_D_min - K.min(y_pred, axis=1), 0.), axis=-1)
    return base_error + beta * (upper_violation + lower_violation)
# Creating a data structure with 60 timesteps and 1 output
# (each test sample is the previous 60 scaled values; the target is the next value).
X_test = []
y_test = []

for i in range(3940, 5468):
    X_test.append(training_set_scaled[i - 60:i, 0])
    y_test.append(training_set_scaled[i, 0])
X_test, y_test = np.array(X_test), np.array(y_test)

# LSTM input must be 3-D: (samples, timesteps, features).
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

pred = regressor.predict(X_test)

# Undo the scaling so the metrics and plots are in original units.
# reshape(-1, 1) replaces the hard-coded 1528 (= 5468 - 3940) so this stays
# correct if the slice range above ever changes.
y_test = y_test.reshape(-1, 1)
y_test = sc.inverse_transform(y_test)
pred = sc.inverse_transform(pred)

# Visualising the results
plt.plot(y_test, color='red', label='Real')
plt.plot(pred, color='blue', label='Predicted')
plt.xlabel('Time')
plt.ylabel('Yield')
plt.legend()
plt.show()

from sklearn.metrics import mean_squared_error
print(mean_squared_error(y_test, pred))

from keras.losses import mean_absolute_percentage_error
# BUG FIX: the original rebound the name `mean_absolute_percentage_error` to
# the metric result, shadowing the imported function and making it unusable
# afterwards. Store the result under its own name instead.
mape_result = mean_absolute_percentage_error(y_test, pred)
Example #8
0
def predict():
    """Train an LSTM on 'ibermansa.csv', score it with MAPE/MSE/MAE inside a
    TF1 session, and return four plots of the predictions.

    Returns:
        tuple: (im1, im2, im3, im4) — whatever `plot_results` /
        `plot_results_multiple` return for the four comparison plots
        (presumably figure/image handles; confirm against those helpers).
    """
    global_start_time = time.time()
    epochs = 10
    seq_len = 10      # input window length fed to the LSTM
    num_predict = 5   # length of each multi-step prediction sequence

    print('> Loading data... ')

    # X_train, y_train, X_test, Y_test = lstm.load_data('sp500_2.csv', seq_len, True)
    # X_train_, y_train_, X_test_, Y_test_ = lstm.load_data('sp500_2.csv', seq_len, False)
    # Loaded twice: once normalised (trailing True) for training/prediction,
    # once raw (False) so predictions can be shifted back to original scale below.
    X_train, y_train, X_test, Y_test = lstm.load_data('ibermansa.csv', seq_len,
                                                      True)
    X_train_, y_train_, X_test_, Y_test_ = lstm.load_data(
        'ibermansa.csv', seq_len, False)

    print('> Data Loaded. Compiling...')

    model = lstm.build_model([1, seq_len, 100, 1])

    # NOTE(review): `nb_epoch` is the legacy Keras 1 argument name
    # (`epochs` in Keras 2) — confirm the pinned Keras version.
    model.fit(X_train,
              y_train,
              batch_size=100,
              nb_epoch=epochs,
              validation_split=0.40)

    # Multi-step sequence predictions plus the concatenated full trajectory.
    predictions2, full_predicted = lstm.predict_sequences_multiple(
        model, X_test, seq_len, num_predict)
    # predictions = lstm.predict_sequence_full(model, X_test, seq_len)
    predictions = lstm.predict_point_by_point(model,
                                              X_test,
                                              Y_test,
                                              batch_size=100)

    # sequence_length = seq_len + 1
    # result = []
    # for index in range(len(predictions) - sequence_length):
    #	result.append(predictions[index: index + sequence_length])
    # result = lstm.unnormalise_windows(result)
    # predictions = np.array(result)

    # result = []
    # for index in range(len(Y_test) - sequence_length):
    #	result.append(Y_test[index: index + sequence_length])
    # result = lstm.unnormalise_windows(result)
    # Y_test = np.array(result)

    # Y_test = Y_test+Y_test_.astype(np.float)
    # Y_test = Y_test.astype(np.float)[:296]
    # aux = predictions[:]+Y_test_
    # print(aux)

    # mape = mean_absolute_percentage_error(Y_test[-42:-1], np.array(predictions2)[:,0])
    # mse = mean_squared_error(Y_test[-42:-1],np.array(predictions2)[:,0])
    # mae = mean_absolute_percentage_error(Y_test[-42:-1],np.array(predictions2)[:,0])

    # Metrics are built as TF graph ops here and evaluated in the session below.
    mape = mean_absolute_percentage_error(Y_test[-2050:-1],
                                          full_predicted[0:-1])
    mse = mean_squared_error(Y_test[-2050:-1], full_predicted[0:-1])
    # NOTE(review): `mae` is computed with mean_absolute_percentage_error,
    # so it duplicates `mape` — almost certainly meant mean_absolute_error.
    mae = mean_absolute_percentage_error(Y_test[-2050:-1],
                                         full_predicted[0:-1])

    # msle = mean_squared_logarithmic_error(Y_test, predictions)

    # print(mape)

    # NOTE(review): tf.initialize_all_variables / tf.Session are TF1 APIs,
    # removed in TF2 (use tf.compat.v1 or eager evaluation).
    init_op = tf.initialize_all_variables()
    # def weighted_mape_tf(Y_test,predictions):
    #tot = tf.reduce_sum(Y_test)
    #tot = tf.clip_by_value(tot, clip_value_min=1,clip_value_max=1000)
    #wmape = tf.realdiv(tf.reduce_sum(tf.abs(tf.subtract(Y_test,predictions))),tot)*100#/tot
    #return(wmape)

    # mape = weighted_mape_tf(Y_test,predictions)

    # run the graph
    with tf.Session() as sess:
        sess.run(init_op)
        print('mape -> {} '.format(sess.run(mape)))
        print('mse -> {}'.format(sess.run(mse)))
        print('mae -> {} '.format(sess.run(mae)))
    # print ('msle -> {} %'.format(sess.run(msle)))

    print('Training duration (s) : ', time.time() - global_start_time)
    print(predictions)
    # Four views: raw point predictions, predictions shifted back to original
    # scale, the multi-sequence predictions, and the de-normalised tail of the
    # full predicted trajectory.
    im1 = plot_results(predictions, Y_test)
    im2 = plot_results(np.array(Y_test_) + np.array(predictions), Y_test_)
    im3 = plot_results_multiple(predictions2, Y_test, num_predict)
    im4 = plot_results(
        np.array(Y_test_)[-118:-1] + np.array(full_predicted)[-118:-1],
        Y_test_)
    return im1, im2, im3, im4