def compareModelsTable(original, models_fo, models_ho):
    """Compare first-order and high-order FTS models against a reference series.

    Renders a matplotlib table (model name, order, number of partitions,
    RMSE, MAPE %) and returns the same table as a LaTeX ``tabular`` string.

    :param original: reference (observed) time series
    :param models_fo: first-order model dicts with keys 'model', 'name', 'forecasted'
    :param models_ho: high-order model dicts (same keys); forecasts are aligned
        by skipping the first ``order`` samples
    :return: LaTeX tabular source as a string
    """
    fig = plt.figure(figsize=[12, 4])
    fig.suptitle("Comparação de modelos ")
    columns = ['Modelo', 'Ordem', 'Partições', 'RMSE', 'MAPE (%)']

    rows = []
    # First-order models: forecasts align 1:1 with the original series.
    for entry in models_fo:
        fts = entry["model"]
        rmse = Measures.rmse(entry["forecasted"], original)
        mape = round(Measures.mape(entry["forecasted"], original) * 100, 2)
        rows.append([entry["name"], fts.order, len(fts.sets), rmse, mape])

    # High-order models: the first `order` samples have no forecast, so both
    # series are sliced before scoring.
    for entry in models_ho:
        fts = entry["model"]
        lag = fts.order
        rmse = Measures.rmse(entry["forecasted"][lag:], original[lag:])
        mape = round(
            Measures.mape(entry["forecasted"][lag:], original[lag:]) * 100, 2)
        rows.append([entry["name"], fts.order, len(fts.sets), rmse, mape])

    # Draw the table over the whole figure, hiding the axes decorations.
    ax = fig.add_axes([0, 0, 1, 1])  # left, bottom, width, height
    ax.set_xticks([])
    ax.set_yticks([])
    ax.table(cellText=rows, colLabels=columns, cellLoc='center',
             bbox=[0, 0, 1, 1])

    # Assemble the LaTeX source: column spec, bold header, then one row per model.
    sup = "\\begin{tabular}{"
    header = ""
    for c in columns:
        sup += "|c"
        if header:
            header += " & "
        header += "\\textbf{" + c + "} "
    sup += "|} \\hline\n"
    header += "\\\\ \\hline \n"

    body = ""
    for r in rows:
        body += " & ".join(str(c) for c in r) + "\\\\ \\hline \n"

    return sup + header + body + "\\end{tabular}"
def evaluate_individual_model(model, partitioner, train, test, window_size, time_displacement):
    """Train one FTS model on ``train`` and score its forecasts on ``test``.

    The first ``model.order`` test samples have no forecast and the last
    forecast has no matching observation, hence the ``[model.order:]`` /
    ``[:-1]`` alignment before computing the error measures.

    :param model: FTS model instance (provides ``train``/``forecast``/``order``/``shortname``)
    :param partitioner: partitioner whose ``sets`` are used for training
    :param train: training series
    :param test: test series
    :param window_size: training window parameter, forwarded to train/forecast
    :param time_displacement: forwarded to ``model.forecast``
    :return: dict with keys 'model', 'partitions', 'order', 'rmse', 'mape', 'u'

    Best-effort: any failure is printed and the metrics come back as NaN so a
    benchmark sweep over many configurations can keep running.
    """
    # Local imports keep this function self-contained for use with
    # parallel/distributed executors.  (The previously imported
    # ``pyFTS.partitioners.Grid`` was unused and has been removed.)
    import numpy as np
    from pyFTS.benchmarks import Measures
    try:
        model.train(train, sets=partitioner.sets, order=model.order,
                    parameters=window_size)
        forecasts = model.forecast(test, time_displacement=time_displacement,
                                   window_size=window_size)
        _rmse = Measures.rmse(test[model.order:], forecasts[:-1])
        _mape = Measures.mape(test[model.order:], forecasts[:-1])
        _u = Measures.UStatistic(test[model.order:], forecasts[:-1])
    except Exception as e:
        # Deliberate broad catch: report the failure and continue the sweep.
        print(e)
        _rmse = np.nan
        _mape = np.nan
        _u = np.nan
    return {'model': model.shortname, 'partitions': partitioner.partitions,
            'order': model.order, 'rmse': _rmse, 'mape': _mape, 'u': _u}
# Fit the multivariate model, then score 1-step and multi-step forecasts
# (MAPE per variable and horizon) and export the results as CSV.
model.fit(train)

one_step = model.predict(test, type='multivariate')
multi_step = model.predict(test, type='multivariate',
                           generators={'date': time_generator},
                           steps_ahead=100)

for var in ['temperature', 'load']:
    row = [order, knn, var, len(model)]
    for horizon in [1, 25, 50, 75, 100]:
        if horizon == 1:
            # One-step forecasts: align past the model order, score 10 points.
            actual = test[var].values[model.order:model.order + 10]
            predicted = one_step[var].values[:10]
        else:
            # Multi-step forecasts: score the first `horizon` points.
            actual = test[var].values[:horizon]
            predicted = multi_step[var].values[:horizon]
        row.append(Measures.mape(actual, predicted))
    print(row)
    rows.append(row)

columns = ['Order', 'knn', 'var', 'Rules'] + \
    ['h{}'.format(h) for h in [1, 25, 50, 75, 100]]

final = pd.DataFrame(rows, columns=columns)
final.to_csv('gmvfts_gefcom12.csv', sep=';', index=False)