def static_main(f_log_metrics):
    ################################
    # Load Experiment's parameters #
    ################################
    params = vars(args)
    logger.info(params)

    ################################
    #         Load Dataset         #
    ################################
    dataset_name = params['dataset']
    if dataset_name == 'gefcom':
        dataset = gefcom2014
    else:
        dataset = uci_single_households

    data = dataset.load_data(fill_nan='median',
                             preprocessing=True,
                             split_type='default',
                             is_train=params['train'],
                             detrend=True,
                             use_prebuilt=True)
    scaler, train, test, trend = data['scaler'], data['train'], data['test'], data['trend']

    #################################
    # Evaluate on Validation & Test #
    #################################
    X_test, y_test = get_rnn_inputs(test,
                                    window_size=params['input_sequence_length'],
                                    horizon=params['output_sequence_length'],
                                    shuffle=False,
                                    multivariate_output=False)

    # IMPORTANT: remember to pass the trend values through the same ops as the input values
    _, y_trend_test = get_rnn_inputs(trend[1],
                                     window_size=params['input_sequence_length'],
                                     horizon=params['output_sequence_length'],
                                     shuffle=False)
    trend = y_trend_test

    # Define lambdas to be used during eval
    fn_inverse_val = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=None)
    fn_inverse_test = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=trend)
    fn_plot = lambda x: plot(x, dataset.SAMPLES_PER_DAY, save_at=None)

    test_scores = trend_eval(y_test, trend, fn_inverse=fn_inverse_test, fn_plot=fn_plot)

    metrics_names = [m.__name__ if not isinstance(m, str) else m for m in metrics]
    return None, \
        dict(zip(metrics_names, test_scores)), \
        None
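
# For context, a minimal sketch of the windowing that get_rnn_inputs presumably performs
# (the real implementation lives in this repo's dataset utilities; the [n_timesteps, n_features]
# layout, target-in-column-0 convention, and the body below are assumptions, not repo code):
def _windowed_inputs_sketch(series, window_size, horizon, shuffle=False, multivariate_output=False):
    import numpy as np
    X, y = [], []
    for i in range(len(series) - window_size - horizon + 1):
        X.append(series[i:i + window_size])  # past window as input
        if multivariate_output:
            y.append(series[i + window_size:i + window_size + horizon])     # all features as targets
        else:
            y.append(series[i + window_size:i + window_size + horizon, 0])  # target variable only
    X, y = np.asarray(X), np.asarray(y)
    if shuffle:
        idx = np.random.permutation(len(X))
        X, y = X[idx], y[idx]
    return X, y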
def main(_run):
    ################################
    # Load Experiment's parameters #
    ################################
    params = vars(args)
    logger.info(params)

    ################################
    #         Load Dataset         #
    ################################
    dataset_name = params['dataset']
    if dataset_name == 'gefcom':
        dataset = gefcom2014
    else:
        dataset = uci_single_households

    data = dataset.load_data(fill_nan='median',
                             preprocessing=True,
                             split_type='simple',
                             is_train=params['train'],
                             detrend=params['detrend'],
                             exogenous_vars=params['exogenous'],
                             use_prebuilt=True)
    scaler, train, test, trend = data['scaler'], data['train'], data['test'], data['trend']
    if not params['detrend']:
        trend = None

    if params['recursive_forecast']:
        horizon = 1
    else:
        horizon = params['output_sequence_length']

    X_train, y_train = get_rnn_inputs(train,
                                      window_size=params['input_sequence_length'],
                                      horizon=horizon,
                                      shuffle=True,
                                      multivariate_output=params['exogenous'])

    ################################
    #     Build & Train Model      #
    ################################
    if params['ffnn_type'] == 'simple':
        ffnn = SimpleNet
    else:
        ffnn = ResNet
    ffnn = ffnn(layers=params['layers'],
                kernel_initializer='glorot_normal',
                kernel_regularizer=l2(params['l2_reg']),
                bias_regularizer=l2(params['l2_reg']),
                use_bias=False,
                recursive_forecast=params['recursive_forecast'])

    if params['exogenous']:
        exog_var_train = y_train[:, :, 1:]  # [n_samples, 1, n_features]
        y_train = y_train[:, :, 0]          # [n_samples, 1]
        conditions_shape = (exog_var_train.shape[1], exog_var_train.shape[-1])

        X_test, y_test = get_rnn_inputs(test,
                                        window_size=params['input_sequence_length'],
                                        horizon=params['output_sequence_length'],
                                        shuffle=False,
                                        multivariate_output=True)
        exog_var_test = y_test[:, :, 1:]  # [n_samples, 1, n_features]
        y_test = y_test[:, :, 0]          # [n_samples, 1]
    else:
        X_test, y_test = get_rnn_inputs(test,
                                        window_size=params['input_sequence_length'],
                                        horizon=params['output_sequence_length'],
                                        shuffle=False)
        exog_var_train = None
        exog_var_test = None
        conditions_shape = None

    # IMPORTANT: remember to pass the trend values through the same ops as the input values
    if params['detrend']:
        X_trend_test, y_trend_test = get_rnn_inputs(trend[1],
                                                    window_size=params['input_sequence_length'],
                                                    horizon=params['output_sequence_length'],
                                                    shuffle=False)
        trend = y_trend_test

    model = ffnn.build_model(input_shape=(X_train.shape[1], X_train.shape[-1]),
                             horizon=params['output_sequence_length'],
                             conditions_shape=conditions_shape)

    if params['load']:
        logger.info("Loading model's weights from disk using {}".format(params['load']))
        model.load_weights(params['load'])

    optimizer = Adam(params['learning_rate'])
    model.compile(optimizer=optimizer, loss=['mse'], metrics=metrics)
    callbacks = [EarlyStopping(patience=50, monitor='val_loss')]

    if params['exogenous']:
        history = model.fit([X_train, exog_var_train], y_train,
                            validation_split=0.1,
                            batch_size=params['batch_size'],
                            epochs=params['epochs'],
                            callbacks=callbacks,
                            verbose=2)
    else:
        history = model.fit(X_train, y_train,
                            validation_split=0.1,
                            batch_size=params['batch_size'],
                            epochs=params['epochs'],
                            callbacks=callbacks,
                            verbose=2)

    ################################
    #         Save weights         #
    ################################
    model_filepath = os.path.join(
        config['weights'],
        '{}_{}_{}'.format(params['ffnn_type'], params['dataset'], time.time()))
    model.save_weights(model_filepath)
    logger.info("Model's weights saved at {}".format(model_filepath))

    #################################
    # Evaluate on Validation & Test #
    #################################
    fn_inverse_val = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=None)
    fn_inverse_test = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=trend)
    fn_plot = lambda x: plot(x, dataset.SAMPLES_PER_DAY, save_at=None)

    if params['recursive_forecast']:
        val_scores = []
        txt = "When the FFNN is trained in recursive mode, training and inference differ: training is a " \
              "one-step forecasting problem, while inference is a multi-step forecasting problem. " \
              "Validation results are therefore not provided, as they are not comparable with test results."
        logger.warning(txt)
        _run.info['extra'] = txt
    else:
        # This filter is needed because of unexpected behaviour of history.validation_data when using ResNet.
        validation_data = list(filter(lambda x: isinstance(x, np.ndarray), history.validation_data))
        val_scores = ffnn.evaluate(validation_data[:-1], fn_inverse=fn_inverse_val)

    if params['exogenous']:
        test_scores = ffnn.evaluate([[X_test, exog_var_test], y_test],
                                    fn_inverse=fn_inverse_test,
                                    fn_plot=fn_plot)
    else:
        test_scores = ffnn.evaluate([X_test, y_test],
                                    fn_inverse=fn_inverse_test,
                                    fn_plot=fn_plot)

    metrics_names = [m.__name__ if not isinstance(m, str) else m for m in model.metrics]
    return dict(zip(metrics_names, val_scores)), \
        dict(zip(metrics_names, test_scores)), \
        model_filepath
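
# The warning above notes that recursive training (horizon=1) and multi-step inference differ.
# A hedged sketch of the feedback loop that ffnn.evaluate presumably runs in recursive mode
# (model.predict is standard Keras; the window layout and target-in-channel-0 convention are
# assumptions, not this repo's actual code):
def _recursive_forecast_sketch(model, window, horizon):
    import numpy as np
    # window: [1, input_sequence_length, n_features]
    preds = []
    window = window.copy()
    for _ in range(horizon):
        yhat = float(model.predict(window).ravel()[0])  # one-step-ahead prediction
        preds.append(yhat)
        step = window[:, -1:, :].copy()  # reuse the last step's exogenous slots, if any
        step[:, 0, 0] = yhat             # overwrite the target channel with the prediction
        window = np.concatenate([window[:, 1:, :], step], axis=1)  # slide the window forward
    return np.array(preds)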
def main(_run):
    ################################
    # Load Experiment's parameters #
    ################################
    params = vars(args)
    logger.info(params)

    ################################
    #         Load Dataset         #
    ################################
    dataset_name = params['dataset']
    if dataset_name == 'gefcom':
        dataset = gefcom2014
    else:
        dataset = uci_single_households

    data = dataset.load_data(fill_nan='median',
                             preprocessing=True,
                             split_type='simple',
                             is_train=params['train'],
                             detrend=params['detrend'],
                             exogenous_vars=params['exogenous'],
                             use_prebuilt=True)
    scaler, train, test, trend = data['scaler'], data['train'], data['test'], data['trend']
    if not params['detrend']:
        trend = None

    encoder_input_data, decoder_input_data, decoder_target_data = \
        get_seq2seq_inputs(train,
                           window_size=params['input_sequence_length'],
                           horizon=params['output_sequence_length'],
                           shuffle=True)

    ################################
    #     Build & Train Model      #
    ################################
    if params['teacher_forcing']:
        encoder_inputs_shape = (params['input_sequence_length'], encoder_input_data.shape[-1])
        decoder_inputs_shape = (params['output_sequence_length'], decoder_input_data.shape[-1])
        seq2seq = Seq2SeqTF(encoder_layers=params['units'],
                            decoder_layers=params['units'],
                            output_sequence_length=params['output_sequence_length'],
                            cell_type=params['cell'],
                            l2=params['l2'])
        model = seq2seq.build(encoder_inputs=encoder_inputs_shape,
                              decoder_inputs=decoder_inputs_shape)
        all_inputs = [encoder_input_data, decoder_input_data]
    else:
        encoder_inputs_shape = (params['input_sequence_length'], encoder_input_data.shape[-1])
        decoder_inputs_shape = (1, 1)
        if params['exogenous']:
            decoder_inputs_exog_shape = (params['output_sequence_length'],
                                         decoder_input_data.shape[-1] - 1)
            exog_input_data = decoder_input_data[:, :, 1:]      # [batch_size, output_sequence_length, n_features - 1]
            decoder_input_data = decoder_input_data[:, :1, :1]  # [batch_size, 1, 1]
            all_inputs = [encoder_input_data, decoder_input_data, exog_input_data]
        else:
            decoder_inputs_exog_shape = None
            decoder_input_data = decoder_input_data[:, :1, :1]
            all_inputs = [encoder_input_data, decoder_input_data]
        seq2seq = Seq2SeqStatic(encoder_layers=params['units'],
                                decoder_layers=params['units'],
                                output_sequence_length=params['output_sequence_length'],
                                cell_type=params['cell'])
        model = seq2seq.build(encoder_inputs=encoder_inputs_shape,
                              decoder_inputs=decoder_inputs_shape,
                              decoder_inputs_exog=decoder_inputs_exog_shape)

    if params['load'] is not None:
        logger.info("Loading model's weights from disk using {}".format(params['load']))
        model.load_weights(params['load'])

    callbacks = [EarlyStopping(patience=50, monitor='val_loss')]
    model.compile(optimizer=Adam(params['learning_rate']), loss='mse', metrics=metrics)
    history = model.fit(all_inputs, decoder_target_data,
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        validation_split=0.1,
                        callbacks=callbacks,
                        verbose=2)

    ################################
    #         Save weights         #
    ################################
    model_filepath = os.path.join(
        config['weights'],
        'seq2seq_{}_{}_{}'.format(params['cell'], params['dataset'], time.time()))
    model.save_weights(model_filepath)
    logger.info("Model's weights saved at {}".format(model_filepath))

    #################################
    # Evaluate on Validation & Test #
    #################################
    encoder_input_data, decoder_input_data, decoder_target_data = \
        get_seq2seq_inputs(test,
                           window_size=params['input_sequence_length'],
                           horizon=params['output_sequence_length'],
                           shuffle=False)

    # IMPORTANT: remember to pass the trend values through the same ops as the input values
    if params['detrend']:
        _, _, decoder_target_trend = get_seq2seq_inputs(
            trend[1],
            window_size=params['input_sequence_length'],
            horizon=params['output_sequence_length'],
            shuffle=False)
        trend = decoder_target_trend

    # Define lambdas to be used during eval
    fn_inverse_val = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=None)
    fn_inverse_test = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=trend)
    fn_plot = lambda x: plot(x, dataset.SAMPLES_PER_DAY, save_at=None)

    if params['teacher_forcing']:
        # decoder_target_data = np.squeeze(decoder_target_data)
        seq2seq.build_prediction_model((1, decoder_input_data.shape[-1]))
        if params['exogenous']:
            val_scores = seq2seq.evaluate([history.validation_data[0],
                                           history.validation_data[1][:, :, 1:],
                                           history.validation_data[2]],
                                          fn_inverse=fn_inverse_val,
                                          horizon=params['output_sequence_length'])
            test_scores = seq2seq.evaluate([encoder_input_data,
                                            decoder_input_data[:, :, 1:],
                                            decoder_target_data],
                                           fn_inverse=fn_inverse_test,
                                           horizon=params['output_sequence_length'],
                                           fn_plot=fn_plot)
        else:
            val_scores = seq2seq.evaluate([history.validation_data[0],
                                           None,
                                           history.validation_data[2]],
                                          fn_inverse=fn_inverse_val,
                                          horizon=params['output_sequence_length'])
            test_scores = seq2seq.evaluate([encoder_input_data,
                                            None,
                                            decoder_target_data],
                                           fn_inverse=fn_inverse_test,
                                           horizon=params['output_sequence_length'],
                                           fn_plot=fn_plot)
    else:
        val_scores = seq2seq.evaluate(history.validation_data[:-1], fn_inverse=fn_inverse_val)
        if params['exogenous']:
            test_scores = seq2seq.evaluate([encoder_input_data,
                                            decoder_input_data[:, :1, :1],
                                            decoder_input_data[:, :, 1:],
                                            decoder_target_data],
                                           fn_inverse=fn_inverse_test,
                                           fn_plot=fn_plot)
        else:
            test_scores = seq2seq.evaluate([encoder_input_data,
                                            decoder_input_data[:, :1, :1],
                                            decoder_target_data],
                                           fn_inverse=fn_inverse_test,
                                           fn_plot=fn_plot)

    metrics_names = [m.__name__ if not isinstance(m, str) else m for m in model.metrics]
    return dict(zip(metrics_names, val_scores)), \
        dict(zip(metrics_names, test_scores)), \
        model_filepath
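
# At inference time, the non-teacher-forced decoder above is typically unrolled one step at a
# time, feeding each prediction back as the next decoder input; build_prediction_model presumably
# wires the trained weights into such a loop. A hedged sketch with assumed encoder_model /
# decoder_model Keras inference models (not names from this repo); for LSTM cells the recurrent
# states are the pair [h, c]:
def _seq2seq_decode_sketch(encoder_model, decoder_model, encoder_input_data, horizon):
    import numpy as np
    states = encoder_model.predict(encoder_input_data)  # summarize the input window
    target = encoder_input_data[:, -1:, :1]             # seed with the last observed target value
    preds = []
    for _ in range(horizon):
        outputs = decoder_model.predict([target] + list(states))
        yhat, states = outputs[0], outputs[1:]          # prediction plus updated recurrent states
        preds.append(yhat)
        target = yhat                                   # feed the prediction back in
    return np.concatenate(preds, axis=1)                # [batch, horizon, 1]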
def main(_run):
    ################################
    # Load Experiment's parameters #
    ################################
    params = vars(args)
    logger.info(params)

    ################################
    #         Load Dataset         #
    ################################
    dataset_name = params['dataset']
    if dataset_name == 'gefcom':
        dataset = gefcom2014
    else:
        dataset = uci_single_households

    data = dataset.load_data(fill_nan='median',
                             preprocessing=True,
                             split_type='simple',
                             is_train=params['train'],
                             detrend=params['detrend'],
                             exogenous_vars=params['exogenous'],
                             use_prebuilt=True)
    scaler, train, test, trend = data['scaler'], data['train'], data['test'], data['trend']
    if not params['detrend']:
        trend = None

    if params['MIMO']:
        X_train, y_train = get_rnn_inputs(train,
                                          window_size=params['input_sequence_length'],
                                          horizon=params['output_sequence_length'],
                                          shuffle=True,
                                          multivariate_output=True)
    else:
        X_train, y_train = get_rnn_inputs(train,
                                          window_size=params['input_sequence_length'],
                                          horizon=1,
                                          shuffle=True,
                                          multivariate_output=True)

    if params['exogenous']:
        exog_var_train = y_train[:, :, 1:]
        y_train = y_train[:, :, 0]
        exogenous_shape = (exog_var_train.shape[1], exog_var_train.shape[-1])
        if params['MIMO']:
            X_train = [X_train, exog_var_train]
        X_test, y_test = get_rnn_inputs(test,
                                        window_size=params['input_sequence_length'],
                                        horizon=params['output_sequence_length'],
                                        shuffle=False,
                                        multivariate_output=True)
        exog_var_test = y_test[:, :, 1:]  # [n_samples, 1, n_features]
        y_test = y_test[:, :, 0]          # [n_samples, 1]
    else:
        y_train = y_train[:, :, 0]
        X_test, y_test = get_rnn_inputs(test,
                                        window_size=params['input_sequence_length'],
                                        horizon=params['output_sequence_length'],
                                        shuffle=False)
        exog_var_train = None
        exog_var_test = None
        exogenous_shape = None

    # IMPORTANT: remember to pass the trend values through the same ops as the input values
    if params['detrend']:
        X_trend_test, y_trend_test = get_rnn_inputs(trend[1],
                                                    window_size=params['input_sequence_length'],
                                                    horizon=params['output_sequence_length'],
                                                    shuffle=False)
        trend = y_trend_test

    ################################
    #     Build & Train Model      #
    ################################
    cell_params = dict(units=params['units'],
                       activation='tanh',
                       dropout=params['dropout'],
                       kernel_regularizer=l2(params['l2']),
                       recurrent_regularizer=l2(params['l2']),
                       kernel_initializer='lecun_uniform',
                       recurrent_initializer='lecun_uniform')

    if params['MIMO']:
        rnn = RecurrentNN_MIMO(cell_type=params['cell'],
                               layers=params['layers'],
                               cell_params=cell_params)
        if params['exogenous']:
            model = rnn.build_model(input_shape=(params['input_sequence_length'], X_train[0].shape[-1]),
                                    horizon=params['output_sequence_length'],
                                    exogenous_shape=exogenous_shape)
        else:
            model = rnn.build_model(input_shape=(params['input_sequence_length'], X_train.shape[-1]),
                                    horizon=params['output_sequence_length'])
    else:
        rnn = RecurrentNN_Rec(cell_type=params['cell'],
                              layers=params['layers'],
                              cell_params=cell_params)
        model = rnn.build_model(input_shape=(params['input_sequence_length'], X_train.shape[-1]),
                                horizon=params['output_sequence_length'])

    model.compile(optimizer=Adam(params['learning_rate']), loss='mse', metrics=metrics)
    callbacks = [EarlyStopping(patience=100, monitor='val_loss'),
                 # LambdaCallback(on_epoch_end=lambda _, logs: f_log_metrics(logs=logs))
                 ]

    if params['load']:
        logger.info("Loading model's weights from disk using {}".format(params['load']))
        model.load_weights(params['load'])

    history = model.fit(X_train, y_train,
                        validation_split=0.1,
                        batch_size=params['batch_size'],
                        # steps_per_epoch=train.shape[0] // params['batch_size'],
                        epochs=params['epochs'],
                        callbacks=callbacks,
                        verbose=2)

    ################################
    #         Save weights         #
    ################################
    model_filepath = os.path.join(
        config['weights'],
        '{}_{}_{}'.format(params['cell'], params['dataset'], time.time()))
    model.save_weights(model_filepath)
    logger.info("Model's weights saved at {}".format(model_filepath))

    #################################
    # Evaluate on Validation & Test #
    #################################
    fn_inverse_val = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=None)
    fn_inverse_test = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=trend)
    fn_plot = lambda x: plot(x, dataset.SAMPLES_PER_DAY, save_at=None)

    if params['MIMO']:
        val_scores = rnn.evaluate(history.validation_data[:-1], fn_inverse=fn_inverse_val)
    else:
        val_scores = []
        txt = "When the RNN is trained in recursive mode, training and inference differ: training is a " \
              "one-step forecasting problem, while inference is a multi-step forecasting problem. " \
              "Validation results are therefore not provided, as they are not comparable with test results."
        logger.warning(txt)
        _run.info['extra'] = txt

    if params['exogenous']:
        test_scores = rnn.evaluate([[X_test, exog_var_test], y_test],
                                   fn_inverse=fn_inverse_test,
                                   fn_plot=fn_plot)
    else:
        test_scores = rnn.evaluate([X_test, y_test],
                                   fn_inverse=fn_inverse_test,
                                   fn_plot=fn_plot)

    metrics_names = [m.__name__ if not isinstance(m, str) else m for m in model.metrics]
    return dict(zip(metrics_names, val_scores)), \
        dict(zip(metrics_names, test_scores)), \
        model_filepath
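
# RecurrentNN_MIMO / RecurrentNN_Rec are repo classes; as a rough illustration, cell_type,
# layers and cell_params plausibly translate into a stacked Keras model like the sketch
# below (the Dense MIMO head and exact layer arrangement are assumptions, not the repo's code):
def _stacked_rnn_sketch(cell_type, layers, cell_params, input_shape, horizon):
    from keras.layers import Dense, GRU, Input, LSTM, SimpleRNN
    from keras.models import Model
    cell = {'lstm': LSTM, 'gru': GRU, 'rnn': SimpleRNN}[cell_type]
    inputs = Input(shape=input_shape)
    x = inputs
    for i in range(layers):
        # every layer except the last returns full sequences so the stack composes
        x = cell(return_sequences=(i < layers - 1), **cell_params)(x)
    outputs = Dense(horizon)(x)  # MIMO head: one output unit per forecast step
    return Model(inputs, outputs)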
def main(_run):
    ################################
    # Load Experiment's parameters #
    ################################
    params = vars(args)
    logger.info(params)

    ################################
    #         Load Dataset         #
    ################################
    dataset_name = params['dataset']
    if dataset_name == 'gefcom':
        dataset = gefcom2014
    else:
        dataset = uci_single_households

    data = dataset.load_data(fill_nan='median',
                             preprocessing=True,
                             split_type='simple',
                             is_train=params['train'],
                             detrend=params['detrend'],
                             exogenous_vars=params['exogenous'],
                             use_prebuilt=True)
    scaler, train, test, trend = data['scaler'], data['train'], data['test'], data['trend']
    if not params['detrend']:
        trend = None

    X_train, y_train = get_rnn_inputs(train,
                                      window_size=params['input_sequence_length'],
                                      horizon=params['output_sequence_length'],
                                      shuffle=True,
                                      multivariate_output=params['exogenous'])

    ################################
    #     Build & Train Model      #
    ################################
    tcn = TCNModel(layers=params['layers'],
                   filters=params['out_channels'],
                   kernel_size=params['kernel_size'],
                   kernel_initializer='glorot_normal',
                   kernel_regularizer=l2(params['l2_reg']),
                   bias_regularizer=l2(params['l2_reg']),
                   dilation_rate=params['dilation'],
                   use_bias=False,
                   return_sequence=True,
                   tcn_type=params['tcn_type'])

    if params['exogenous']:
        exog_var_train = y_train[:, :, 1:]  # [n_samples, horizon, n_features]
        y_train = y_train[:, :, 0]          # [n_samples, horizon]
        conditions_shape = (exog_var_train.shape[1], exog_var_train.shape[-1])

        X_test, y_test = get_rnn_inputs(test,
                                        window_size=params['input_sequence_length'],
                                        horizon=params['output_sequence_length'],
                                        shuffle=False,
                                        multivariate_output=True)
        exog_var_test = y_test[:, :, 1:]  # [n_samples, horizon, n_features]
        y_test = y_test[:, :, 0]          # [n_samples, horizon]
    else:
        X_test, y_test = get_rnn_inputs(test,
                                        window_size=params['input_sequence_length'],
                                        horizon=params['output_sequence_length'],
                                        shuffle=False)
        exog_var_train = None
        exog_var_test = None
        conditions_shape = None

    # IMPORTANT: remember to pass the trend values through the same ops as the input values
    if params['detrend']:
        X_trend_test, y_trend_test = get_rnn_inputs(trend[1],
                                                    window_size=params['input_sequence_length'],
                                                    horizon=params['output_sequence_length'],
                                                    shuffle=False)
        trend = y_trend_test

    model = tcn.build_model(input_shape=(X_train.shape[1], X_train.shape[-1]),
                            horizon=params['output_sequence_length'],
                            conditions_shape=conditions_shape,
                            use_final_dense=True)

    if params['load'] is not None:
        logger.info("Loading model's weights from disk using {}".format(params['load']))
        model.load_weights(params['load'])

    optimizer = Adam(params['learning_rate'])
    model.compile(optimizer=optimizer, loss=['mse'], metrics=metrics)
    callbacks = [EarlyStopping(patience=50, monitor='val_loss')]

    if params['exogenous'] and params['tcn_type'] == 'conditional_tcn':
        history = model.fit([X_train, exog_var_train], y_train,
                            validation_split=0.1,
                            batch_size=params['batch_size'],
                            epochs=params['epochs'],
                            callbacks=callbacks,
                            verbose=2)
    else:
        history = model.fit(X_train, y_train,
                            validation_split=0.1,
                            batch_size=params['batch_size'],
                            epochs=params['epochs'],
                            callbacks=callbacks,
                            verbose=2)

    ################################
    #         Save weights         #
    ################################
    model_filepath = os.path.join(
        config['weights'],
        '{}_{}_{}'.format(params['tcn_type'], params['dataset'], time.time()))
    model.save_weights(model_filepath)
    logger.info("Model's weights saved at {}".format(model_filepath))

    #################################
    # Evaluate on Validation & Test #
    #################################
    fn_inverse_val = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=None)
    fn_inverse_test = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=trend)
    fn_plot = lambda x: plot(x, dataset.SAMPLES_PER_DAY, save_at=None)

    val_scores = tcn.evaluate(history.validation_data[:-1], fn_inverse=fn_inverse_val)
    if params['exogenous'] and params['tcn_type'] == 'conditional_tcn':
        test_scores = tcn.evaluate([[X_test, exog_var_test], y_test],
                                   fn_inverse=fn_inverse_test,
                                   fn_plot=fn_plot)
    else:
        test_scores = tcn.evaluate([X_test, y_test],
                                   fn_inverse=fn_inverse_test,
                                   fn_plot=fn_plot)

    metrics_names = [m.__name__ if not isinstance(m, str) else m for m in model.metrics]
    return dict(zip(metrics_names, val_scores)), \
        dict(zip(metrics_names, test_scores)), \
        model_filepath
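
# TCNModel is repo-specific; its core building block is the dilated causal convolution. A
# minimal sketch with stock Keras layers (the ReLU activations, power-of-two dilation schedule
# and last-step Dense head are assumptions; residual/skip connections are omitted):
def _dilated_causal_tcn_sketch(input_shape, filters, kernel_size, layers, horizon):
    from keras.layers import Conv1D, Dense, Input, Lambda
    from keras.models import Model
    inputs = Input(shape=input_shape)
    x = inputs
    for i in range(layers):
        # padding='causal' guarantees each output only sees past inputs; doubling the
        # dilation rate per layer grows the receptive field exponentially with depth
        x = Conv1D(filters, kernel_size, padding='causal',
                   dilation_rate=2 ** i, activation='relu')(x)
    x = Lambda(lambda t: t[:, -1, :])(x)  # keep only the last time step's features
    outputs = Dense(horizon)(x)           # map to the full forecast horizon at once
    return Model(inputs, outputs)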