Example #1
def conv_mae(y_true, y_pred):
    # Mean absolute error after pd.conv converts values back to physical units
    return K.mean(K.abs(pd.conv(y_pred) - pd.conv(y_true)))
Example #2
def RMSE(y_true, y_pred):
    # Root-mean-square error in converted (physical) units
    return K.sqrt(K.mean(K.square(pd.conv(y_pred) - pd.conv(y_true))))
Example #3
def coeff_determination(y_true, y_pred):
    # R^2 (coefficient of determination) on converted values; K.epsilon()
    # guards against division by zero
    y_true = pd.conv(y_true)
    y_pred = pd.conv(y_pred)
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - SS_res / (SS_tot + K.epsilon())
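These three functions are custom Keras metrics (K is the Keras backend; pd is the project's data-preprocessing module, not pandas). A minimal sketch of how they might be attached at compile time so that 'conv_mae', 'RMSE' and 'coeff_determination' appear in the fit history used in the later examples; the architecture, optimizer, placeholder data and early-stopping settings below are assumptions, not taken from the source, and the project's pd module must be importable for the metrics to run:

import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping

x_train = np.random.rand(256, 8)   # placeholder features
y_train = np.random.rand(256, 1)   # placeholder targets

model = Sequential([Dense(64, activation='relu', input_shape=(8,)), Dense(1)])
early_stop = EarlyStopping(monitor='val_loss', patience=10)
model.compile(optimizer='adam', loss='mean_squared_error',
              metrics=[conv_mae, RMSE, coeff_determination])
mixed = model.fit(x_train, y_train, validation_split=0.2,
                  epochs=200, callbacks=[early_stop])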
Example #4
feat_array = pd.get_feats()

# This data will be saved to the CSV along with all the other attempts.
# Find the best epoch and use its metrics, since that epoch's weights are the ones saved.
min_in = mixed.history['val_loss'].index(min(mixed.history['val_loss']))

csv_data = [
    num, feat_array, early_stop.stopped_epoch,
    mixed.history['conv_mae'][min_in], mixed.history['val_conv_mae'][min_in],
    mixed.history['RMSE'][min_in], mixed.history['val_RMSE'][min_in],
    mixed.history['val_coeff_determination'][min_in]
]

dc.write_data(csv_data, r"D:\scripts\ML_Attempts\CSV_Files\Mixed_Data_r2.csv")
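dc.write_data is not shown in these excerpts; a plausible minimal version, assuming all it does is append one attempt's results as a single row to the CSV:

import csv

def write_data(row, csv_path):
    # Hypothetical stand-in for the project's dc.write_data helper:
    # append one attempt's results as a single CSV row.
    with open(csv_path, 'a', newline='') as f:
        csv.writer(f).writerow(row)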

#this data will be plotted and the graphs will be saved
# dc.plot_data(num,
# early_stop.stopped_epoch,
# mixed.history['loss'],
# mixed.history['val_loss'],
# np.asarray(mixed.history['mean_absolute_error'])*conv,
# np.asarray(mixed.history['val_mean_absolute_error'])*conv)

# Predict on the evaluation batch and generate a residual plot
x_data, true_speeds, dates = pd.eval_batch(folder, 100)
pred_speeds = model.predict(x_data)
pred_speeds_conv = pd.conv(pred_speeds)
print(pd.conv(true_speeds))
print(dates)
# dc.residual_plot(num,pd.conv(true_speeds),pred_speeds_conv,dates)
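dc.residual_plot is commented out and its implementation is not shown; a hedged matplotlib stand-in that plots the residuals (true minus predicted speed) against the sample dates:

import numpy as np
import matplotlib.pyplot as plt

def residual_plot_sketch(attempt_num, true_speeds, pred_speeds, dates):
    # Hypothetical replacement for dc.residual_plot: scatter of residuals over time.
    residuals = np.asarray(true_speeds).flatten() - np.asarray(pred_speeds).flatten()
    plt.figure(figsize=(10, 4))
    plt.scatter(dates, residuals, s=10)
    plt.axhline(0, color='red', linewidth=1)
    plt.xlabel('Date')
    plt.ylabel('Residual (true - predicted)')
    plt.title('Attempt {} residuals'.format(attempt_num))
    plt.savefig('attempt_{}_residuals.png'.format(attempt_num))
    plt.close()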
Example #5
# Save this attempt's weights after training
model.save_weights(
    'D:\\scripts\\ML_Attempts\\Weights\\Mixed_Data\\attempt_{}.h5'.format(num))

#--------------------------------------DATA COLLECTION--------------------------------------------------------------------------

new_std_dev = pd.get_dev()
# Percentage by which the final validation MAE sits below the target standard deviation
percent_from_std_dev = (
    100 - (mixed.history['val_mean_absolute_error'][-1] / new_std_dev) * 100)

feat_array = pd.get_feats()

# This data will be saved to the CSV along with all the other attempts.

csv_data = [
    num, feat_array, early_stop.stopped_epoch, mixed.history['loss'][-1],
    mixed.history['val_loss'][-1],
    pd.conv(mixed.history['mean_absolute_error'][-1]),
    pd.conv(mixed.history['val_mean_absolute_error'][-1]),
    mixed.history['val_coeff_determination'][-1]
]

dc.write_data(csv_data, r"D:\scripts\ML_Attempts\CSV_Files\Mixed_Data_r2.csv")

#this data will be plotted and the graphs will be saved
# dc.plot_data(num,
# early_stop.stopped_epoch,
# mixed.history['loss'],
# mixed.history['val_loss'],
# np.asarray(mixed.history['mean_absolute_error'])*conv,
# np.asarray(mixed.history['val_mean_absolute_error'])*conv)

# Predict data and generate a residual plot
Example #6
# Save this attempt's weights after training
model.save_weights(
    'D:\\scripts\\ML_Attempts\\Weights\\Mixed_Data\\attempt_{}.h5'.format(num))

#--------------------------------------DATA COLLECTION--------------------------------------------------------------------------

new_std_dev = pd.get_dev()
# Rough percentage by which the final loss sits below the target standard deviation
percent_from_std_dev = (100 - (mixed.history['loss'][-1] / new_std_dev) * 100)

feat_array = pd.get_feats()

# This data will be saved to the CSV along with all the other attempts.

csv_data = [
    num, feat_array, early_stop.stopped_epoch,
    pd.conv(mixed.history['mean_absolute_error'][-1]),
    pd.conv(mixed.history['val_mean_absolute_error'][-1]),
    pd.conv(mixed.history['RMSE'][-1]),
    pd.conv(mixed.history['val_RMSE'][-1]), mixed.history['val_loss'][-1] * -1
]

dc.write_data(csv_data, r"D:\scripts\ML_Attempts\CSV_Files\Mixed_Data_r2.csv")

#this data will be plotted and the graphs will be saved
# dc.plot_data(num,
# early_stop.stopped_epoch,
# mixed.history['loss'],
# mixed.history['val_loss'],
# np.asarray(mixed.history['mean_absolute_error'])*conv,
# np.asarray(mixed.history['val_mean_absolute_error'])*conv)
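Because only the weights are saved, evaluating a stored attempt later requires rebuilding the same architecture first; a minimal sketch, where build_model is a hypothetical stand-in for however the original network is constructed:

def load_attempt(num, build_model):
    # Rebuild the architecture (not shown in these excerpts) and restore the
    # weights saved for this attempt.
    model = build_model()
    model.load_weights(
        'D:\\scripts\\ML_Attempts\\Weights\\Mixed_Data\\attempt_{}.h5'.format(num))
    return model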