def print_point_statistics(data, models, externalmodels=None, externalforecasts=None, indexers=None):
    """
    Run point benchmarks on given models and data and print the results

    :param data: test data
    :param models: a list of FTS models to benchmark
    :param externalmodels: a list with benchmark models (façades for other methods)
    :param externalforecasts: a list with the forecasts produced by the external models, in the same order as externalmodels
    :param indexers: optional data indexers, forwarded to Measures.get_point_statistics
    :return:
    """
    ret = "Model & Order & RMSE & SMAPE & Theil's U \\\\ \n"
    for count, model in enumerate(models, start=0):
        _rmse, _smape, _u = Measures.get_point_statistics(data, model, indexers)
        ret += model.shortname + " & "
        ret += str(model.order) + " & "
        ret += str(_rmse) + " & "
        ret += str(_smape) + " & "
        ret += str(_u)
        #ret += str(round(Measures.TheilsInequality(np.array(data[fts.order:]), np.array(forecasts[:-1])), 4))
        ret += " \\\\ \n"
    if externalmodels is not None:
        l = len(externalmodels)
        for k in np.arange(0, l):
            ret += externalmodels[k] + " & "
            ret += " 1 & "
            ret += str(round(Measures.rmse(data, externalforecasts[k][:-1]), 2)) + " & "
            ret += str(round(Measures.smape(data, externalforecasts[k][:-1]), 2)) + " & "
            ret += str(round(Measures.UStatistic(data, externalforecasts[k][:-1]), 2))
            ret += " \\\\ \n"
    print(ret)
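# Illustrative usage sketch (not part of the original module): it exercises the
# external-model path of print_point_statistics with a naive persistence forecast, since
# that path only needs raw forecast arrays. The data and the "Naive" label are made up;
# the external forecast array is expected to be one element longer than the test data,
# because it is truncated with [:-1] before the error measures are computed.
def _example_print_point_statistics():
    import numpy as np

    test = np.random.normal(loc=10.0, scale=1.0, size=200)

    # persistence forecast: each observation is predicted by its predecessor
    naive_forecasts = np.insert(test, 0, test[0])

    print_point_statistics(test, models=[], externalmodels=["Naive"],
                           externalforecasts=[naive_forecasts])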
def forecast_params(data, train_split, method, params, plot=False):
    """
    Split the data into train/test, run the given forecasting method with the given
    parameters and print the point forecasting accuracy measures.

    :param data: DataFrame with the time series
    :param train_split: train/test split parameter passed to sampling.train_test_split
    :param method: callable with signature method(train, test, params) returning the forecasts
    :param params: dict of hyperparameters; must contain 'order' and 'output', may contain 'step'
    :param plot: if True, plot observed versus forecast values
    :return: RMSE, nRMSE, SMAPE and Theil's U statistic
    """
    train, test = sampling.train_test_split(data, train_split)

    fcst = method(train, test, params)

    _output = params['output']
    _step = params.get('step', 1)
    # the first forecast refers to the test sample at position order + step - 1
    _offset = params['order'] + _step - 1

    yobs = test[_output].iloc[_offset:].values

    if plot:
        plt.figure(figsize=(20, 10))
        plt.plot(yobs)
        plt.plot(fcst)
        plt.show()

    rmse = Measures.rmse(yobs, fcst)
    print("RMSE: ", rmse)

    nrmse = metrics.normalized_rmse(yobs, fcst)
    print("nRMSE: ", nrmse)

    smape = Measures.smape(yobs, fcst)
    print("SMAPE: ", smape)

    u = Measures.UStatistic(yobs, fcst)
    print("U Statistic: ", u)

    return rmse, nrmse, smape, u
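# Illustrative usage sketch (not part of the original module): '_persistence_baseline' is a
# hypothetical stand-in for a real forecasting method; any callable with the signature
# method(train, test, params) that returns one forecast per test sample from position
# order + step - 1 onwards will work. The DataFrame and column name are made up, and the
# module-level dependencies (sampling, metrics, Measures, plt) must be importable.
def _persistence_baseline(train, test, params):
    # forecast each target as the last known value of the output column
    order = params['order']
    step = params.get('step', 1)
    series = test[params['output']].values
    return series[order - 1:len(series) - step]


def _example_forecast_params():
    import numpy as np
    import pandas as pd

    idx = pd.date_range("2020-01-01", periods=500, freq="15min")
    df = pd.DataFrame({'load': np.random.normal(100.0, 5.0, size=len(idx))}, index=idx)

    params = {'order': 2, 'output': 'load', 'step': 1}
    forecast_params(df, 0.75, _persistence_baseline, params, plot=False)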
def forecast_best_params(data, train_split, method_id, method, space, plot=False, save=False):
    """
    Load the best hyperparameters found for an experiment (a pickled best point decoded
    with hyperopt's space_eval), run the forecasting method with them and print the point
    accuracy measures.

    :param data: DataFrame with the time series
    :param train_split: train/test split parameter passed to sampling.train_test_split
    :param method_id: experiment identifier, used to locate "best_<method_id>.pkl" and to name the results file
    :param method: callable with signature method(train, test, params) returning the forecasts
    :param space: hyperopt search space used to decode the stored best point
    :param plot: if True, plot observed versus forecast values
    :param save: if True, pickle the forecasts and accuracy measures to "results_<method_id>.pkl"
    :return: RMSE, nRMSE, SMAPE and Theil's U statistic
    """
    print("Running experiment ", method_id)

    with open("best_" + method_id + ".pkl", "rb") as f:
        best = pickle.load(f)

    train, test = sampling.train_test_split(data, train_split)

    best_params = space_eval(space, best)

    fcst = method(train, test, best_params)

    _order = best_params['order']
    _output = best_params['output']

    yobs = test[_output].iloc[_order:].values

    if plot:
        plt.figure(figsize=(20, 10))
        plt.plot(yobs)
        plt.plot(fcst)
        plt.show()

    rmse = Measures.rmse(yobs, fcst)
    print("RMSE: ", rmse)

    nrmse = metrics.normalized_rmse(yobs, fcst)
    print("nRMSE: ", nrmse)

    smape = Measures.smape(yobs, fcst)
    print("SMAPE: ", smape)

    u = Measures.UStatistic(yobs, fcst)
    print("U Statistic: ", u)

    if save:
        results = {"method_id": method_id,
                   "forecast": fcst,
                   "RMSE": rmse,
                   "SMAPE": smape,
                   "U": u}
        with open("results_" + method_id + ".pkl", "wb") as f:
            pickle.dump(results, f)

    return rmse, nrmse, smape, u
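# Illustrative usage sketch (not part of the original module): it assumes a hyperparameter
# search has already been run and its best point saved as "best_<method_id>.pkl". The
# search space, method id and '_persistence_baseline' callable are hypothetical examples;
# only 'order' and 'output' are actually required in the decoded parameters.
def _example_forecast_best_params():
    import numpy as np
    import pandas as pd
    from hyperopt import hp

    space = {
        'order': hp.choice('order', [1, 2, 3]),
        'step': hp.choice('step', [1, 2]),
        'output': 'load',
    }

    idx = pd.date_range("2020-01-01", periods=500, freq="15min")
    df = pd.DataFrame({'load': np.random.normal(100.0, 5.0, size=len(idx))}, index=idx)

    forecast_best_params(df, 0.75, "persistence", _persistence_baseline, space,
                         plot=False, save=True)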
def evaluate_individual_model(model, partitioner, train, test, window_size, time_displacement):
    """
    Train a single FTS model with the given partitioner and return a dict with its point
    accuracy measures on the test data; window_size and time_displacement are forwarded to
    the model's train/forecast calls. Errors during training or forecasting are caught and
    reported as NaN measures, so a batch evaluation can proceed. The imports are local so
    the function stays self-contained (e.g. when dispatched to remote workers).
    """
    import numpy as np
    from pyFTS.partitioners import Grid
    from pyFTS.benchmarks import Measures

    try:
        model.train(train, sets=partitioner.sets, order=model.order, parameters=window_size)
        forecasts = model.forecast(test, time_displacement=time_displacement, window_size=window_size)
        _rmse = Measures.rmse(test[model.order:], forecasts[:-1])
        _mape = Measures.mape(test[model.order:], forecasts[:-1])
        _u = Measures.UStatistic(test[model.order:], forecasts[:-1])
    except Exception as e:
        print(e)
        _rmse = np.nan
        _mape = np.nan
        _u = np.nan

    return {'model': model.shortname, 'partitions': partitioner.partitions, 'order': model.order,
            'rmse': _rmse, 'mape': _mape, 'u': _u}
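# Sketch (not part of the original module) of how evaluate_individual_model can be mapped
# over a grid of configurations and the resulting dicts collected into a DataFrame.
# 'build_partitioner' and 'build_model' are hypothetical factories: any objects exposing
# the train/forecast methods and shortname/order/partitions attributes used above will do.
def _evaluate_model_grid(build_partitioner, build_model, train, test,
                         orders=(1, 2, 3), partitions=(10, 20, 30),
                         window_size=100, time_displacement=0):
    import pandas as pd

    rows = []
    for npart in partitions:
        partitioner = build_partitioner(train, npart)
        for order in orders:
            model = build_model(order, partitioner)
            rows.append(evaluate_individual_model(model, partitioner, train, test,
                                                  window_size, time_displacement))
    return pd.DataFrame(rows)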
def rolling_window_forecast_params(data, train_percent, window_size, method, params):
    """
    Evaluate the forecasting method with a rolling window over each day of the data and
    print the point accuracy measures computed over all windows.

    :param data: DataFrame indexed by timestamps, so that data.index.date groups samples by day
    :param train_percent: fraction of each day's samples used as the training window
    :param window_size: size of the rolling window, in samples (also the increment between consecutive windows)
    :param method: callable with signature method(train, test, params) returning the forecasts
    :param params: dict of hyperparameters; must contain 'order' and 'output', may contain 'step'
    :return: RMSE, nRMSE, SMAPE and Theil's U statistic
    """
    # get the individual days present in the data
    training_days = pd.unique(data.index.date)

    fcst = []
    yobs = []

    for day in training_days:
        print("Processing :", day)
        daily_data = data[data.index.date == day]
        nsamples = len(daily_data.index)
        train_size = round(nsamples * train_percent)

        test_end = 0
        index = 0
        while test_end < nsamples:
            train_start, train_end, test_start, test_end = get_data_index(index, train_size,
                                                                          window_size, nsamples)

            # slice the current day's data (the indexes returned by get_data_index are relative to the day)
            train = daily_data[train_start:train_end]
            test = daily_data[test_start:test_end]

            index += window_size

            f = method(train, test, params)
            fcst.extend(f)

            _step = params.get('step', 1)
            _output = params['output']
            _offset = params['order'] + _step - 1

            yobs.extend(test[_output].iloc[_offset:].values)

    rmse = Measures.rmse(yobs, fcst)
    print("RMSE: ", rmse)

    nrmse = metrics.normalized_rmse(yobs, fcst)
    print("nRMSE: ", nrmse)

    smape = Measures.smape(yobs, fcst)
    print("SMAPE: ", smape)

    u = Measures.UStatistic(yobs, fcst)
    print("U Statistic: ", u)

    return rmse, nrmse, smape, u
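# Illustrative usage sketch (not part of the original module): requires a DataFrame indexed
# by timestamps so that data.index.date groups samples by day, and the get_data_index helper
# used above must be available in this module. '_persistence_baseline' is the hypothetical
# stand-in method introduced in the forecast_params sketch.
def _example_rolling_window_forecast_params():
    import numpy as np
    import pandas as pd

    idx = pd.date_range("2020-01-01", periods=3 * 96, freq="15min")  # three days of 15-minute samples
    df = pd.DataFrame({'load': np.random.normal(100.0, 5.0, size=len(idx))}, index=idx)

    params = {'order': 2, 'output': 'load', 'step': 1}
    rolling_window_forecast_params(df, train_percent=0.5, window_size=24,
                                   method=_persistence_baseline, params=params)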