示例#1
0
def get_results(data):
    """Run the ARIMA test pipeline on *data* and write its evaluation results."""
    print('|--- Test ARIMA')
    # Day length (measurement steps per day) depends on the configured dataset.
    n_steps_per_day = (Config.ABILENE_DAY_SIZE
                       if Config.DATA_NAME == Config.DATA_SETS[0]
                       else Config.GEANT_DAY_SIZE)

    # Replace non-positive traffic entries with a small epsilon.
    data[data <= 0] = 0.1

    train2d, test2d = prepare_train_test_2d(data=data,
                                            day_size=n_steps_per_day)

    if Config.DATA_NAME == Config.DATA_SETS[0]:
        print('|--- Remove last 3 days in test_set.')
        test2d = test2d[0:-n_steps_per_day * 3]

    # Data normalization: fit on the train split, apply to the test split.
    scaler = data_scalling(train2d)
    test_normalized2d = scaler.transform(test2d)

    _, _, y_true = prepare_test_set_last_5days(test2d, test_normalized2d)

    results_path = Config.RESULTS_PATH + '{}-{}-{}-{}/'.format(
        Config.DATA_NAME, Config.ALG, Config.TAG, Config.SCALER)
    results_processing(y_true, Config.ARIMA_TESTING_TIME, results_path)
示例#2
0
def test_fwbw_conv_lstm(data):
    """Evaluate a trained FW-BW Conv-LSTM model on the test split; dump CSV."""
    print('|-- Run model testing.')

    # Day length and grid size are tied to the dataset
    # (Abilene: 12x12, Geant: 23x23).
    if Config.DATA_NAME == Config.DATA_SETS[0]:
        day_size = Config.ABILENE_DAY_SIZE
        assert Config.FWBW_CONV_LSTM_HIGH == 12
        assert Config.FWBW_CONV_LSTM_WIDE == 12
    else:
        day_size = Config.GEANT_DAY_SIZE
        assert Config.FWBW_CONV_LSTM_HIGH == 23
        assert Config.FWBW_CONV_LSTM_WIDE == 23

    print('|--- Splitting train-test set.')
    train_data2d, valid_data2d, test_data2d = prepare_train_valid_test_2d(
        data=data, day_size=day_size)
    print('|--- Normalizing the train set.')

    if Config.DATA_NAME == Config.DATA_SETS[0]:
        print('|--- Remove last 3 days in test data.')
        test_data2d = test_data2d[0:-day_size * 3]

    _, _, test_data_normalized2d, scalers = data_scalling(
        train_data2d, valid_data2d, test_data2d)

    input_shape = (Config.FWBW_CONV_LSTM_STEP, Config.FWBW_CONV_LSTM_WIDE,
                   Config.FWBW_CONV_LSTM_HIGH, Config.FWBW_CONV_LSTM_CHANNEL)

    with tf.device('/device:GPU:{}'.format(Config.GPU)):
        fwbw_conv_lstm_net = load_trained_models(
            input_shape, Config.FWBW_CONV_LSTM_BEST_CHECKPOINT)

    # Build the per-experiment results directory path once and reuse it.
    results_dir = Config.RESULTS_PATH + '{}-{}-{}-{}/'.format(
        Config.DATA_NAME, Config.ALG, Config.TAG, Config.SCALER)
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)

    results_summary = pd.DataFrame(
        index=range(Config.FWBW_CONV_LSTM_TESTING_TIME),
        columns=['No.', 'err', 'r2', 'rmse', 'err_ims', 'r2_ims', 'rmse_ims'])

    results_summary = run_test(test_data2d, test_data_normalized2d,
                               fwbw_conv_lstm_net, scalers, results_summary)

    # File name encodes whether iterated multi-step (IMS) prediction was used.
    if Config.FWBW_CONV_LSTM_IMS:
        result_file_name = 'Test_results_ims_{}_{}.csv'.format(
            Config.FWBW_CONV_LSTM_IMS_STEP,
            Config.FWBW_CONV_LSTM_FLOW_SELECTION)
    else:
        result_file_name = 'Test_results_{}.csv'.format(
            Config.FWBW_CONV_LSTM_FLOW_SELECTION)

    results_summary.to_csv(results_dir + result_file_name, index=False)

    return
示例#3
0
def test_res_fwbw_lstm(data):
    """Evaluate a trained RES-FWBW-LSTM model on the test split; dump CSV."""
    print('|-- Run model testing.')

    # Day length (measurement steps per day) is dataset-specific.
    dataset = Config.DATA_NAME
    if 'Abilene' in dataset:
        day_size = Config.ABILENE_DAY_SIZE
    else:
        day_size = Config.GEANT_DAY_SIZE

    # Optionally restrict to the first NUM_DAYS days of the trace.
    if not Config.ALL_DATA:
        data = data[0:Config.NUM_DAYS * day_size]

    print('|--- Splitting train-test set.')
    train_data2d, valid_data2d, test_data2d = prepare_train_valid_test_2d(
        data=data, day_size=day_size)
    print('|--- Normalizing the train set.')

    if 'Abilene' in dataset:
        print('|--- Remove last 3 days in test data.')
        test_data2d = test_data2d[0:-day_size * 3]

    _, valid_data_normalized2d, test_data_normalized2d, scalers = data_scalling(
        train_data2d, valid_data2d, test_data2d)

    # Model input: (time steps, flattened flow features).
    input_shape = (Config.RES_FWBW_LSTM_STEP, Config.RES_FWBW_LSTM_FEATURES)

    with tf.device('/device:GPU:{}'.format(Config.GPU)):
        fwbw_net = load_trained_models(input_shape,
                                       Config.RES_FWBW_LSTM_BEST_CHECKPOINT)

    # Build the per-experiment results directory path once and reuse it.
    results_dir = Config.RESULTS_PATH + '{}-{}-{}-{}/'.format(
        Config.DATA_NAME, Config.ALG, Config.TAG, Config.SCALER)
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)

    results_summary = pd.DataFrame(
        index=range(Config.RES_FWBW_LSTM_TESTING_TIME),
        columns=['No.', 'err', 'r2', 'rmse', 'err_ims', 'r2_ims', 'rmse_ims'])

    results_summary = run_test(test_data2d, test_data_normalized2d, fwbw_net,
                               scalers, results_summary)

    results_summary.to_csv(results_dir + 'Test_results.csv', index=False)

    return
示例#4
0
def test_fwbw_convlstm(data, experiment):
    """Test a trained FW-BW ConvLSTM model; `experiment` is accepted unused."""
    print('|-- Run model testing.')

    # Hyper-parameter dict passed through to run_test for logging.
    params = Config.set_comet_params_fwbw_convlstm()

    dataset = Config.DATA_NAME
    day_size = Config.ABILENE_DAY_SIZE if 'Abilene' in dataset else Config.GEANT_DAY_SIZE

    # Optionally restrict to the first NUM_DAYS days of the trace.
    if not Config.ALL_DATA:
        data = data[0:Config.NUM_DAYS * day_size]

    print('|--- Splitting train-test set.')
    train_data2d, valid_data2d, test_data2d = prepare_train_valid_test_2d(
        data=data, day_size=day_size)
    print('|--- Normalizing the train set.')

    if 'Abilene' in dataset:
        print('|--- Remove last 3 days in test data.')
        test_data2d = test_data2d[0:-day_size * 3]

    _, valid_data_normalized2d, test_data_normalized2d, scalers = data_scalling(
        train_data2d, valid_data2d, test_data2d)

    input_shape = (Config.FWBW_CONVLSTM_STEP, Config.FWBW_CONVLSTM_WIDE,
                   Config.FWBW_CONVLSTM_HIGH, Config.FWBW_CONVLSTM_CHANNEL)

    with tf.device('/device:GPU:{}'.format(Config.GPU)):
        net = load_trained_models(input_shape,
                                  Config.FWBW_CONVLSTM_BEST_CHECKPOINT)

    # Seed the tester with the last STEP rows of the normalized valid split.
    run_test(test_data2d, test_data_normalized2d,
             valid_data_normalized2d[-Config.FWBW_CONVLSTM_STEP:],
             net, params, scalers)

    return
示例#5
0
def train_fwbw_conv_lstm(data):
    """Train the FW-BW Conv-LSTM model and evaluate it on the validation set.

    The 2D splits are normalized, reshaped into (time, WIDE, HIGH) grids to
    build offline training samples, and validation metrics are written to
    <RESULTS_PATH>/<data>-<alg>-<tag>-<scaler>/Valid_results.csv.
    """
    print('|-- Run model training.')

    # Grid size is tied to the dataset: Abilene is 12x12, Geant is 23x23.
    if Config.DATA_NAME == Config.DATA_SETS[0]:
        day_size = Config.ABILENE_DAY_SIZE
        assert Config.FWBW_CONV_LSTM_HIGH == 12
        assert Config.FWBW_CONV_LSTM_WIDE == 12
    else:
        day_size = Config.GEANT_DAY_SIZE
        assert Config.FWBW_CONV_LSTM_HIGH == 23
        assert Config.FWBW_CONV_LSTM_WIDE == 23

    print('|--- Splitting train-test set.')
    train_data2d, valid_data2d, test_data2d = prepare_train_valid_test_2d(
        data=data, day_size=day_size)
    print('|--- Normalizing the train set.')
    # Scalers are fitted on the train split; the normalized test set is unused.
    train_data_normalized2d, valid_data_normalized2d, _, scalers = data_scalling(
        train_data2d, valid_data2d, test_data2d)

    # Reshape flat flow vectors into WIDE x HIGH traffic-matrix grids.
    train_data_normalized = np.reshape(
        np.copy(train_data_normalized2d),
        newshape=(train_data_normalized2d.shape[0], Config.FWBW_CONV_LSTM_WIDE,
                  Config.FWBW_CONV_LSTM_HIGH))
    valid_data_normalized = np.reshape(
        np.copy(valid_data_normalized2d),
        newshape=(valid_data_normalized2d.shape[0], Config.FWBW_CONV_LSTM_WIDE,
                  Config.FWBW_CONV_LSTM_HIGH))

    input_shape = (Config.FWBW_CONV_LSTM_STEP, Config.FWBW_CONV_LSTM_WIDE,
                   Config.FWBW_CONV_LSTM_HIGH, Config.FWBW_CONV_LSTM_CHANNEL)

    with tf.device('/device:GPU:{}'.format(Config.GPU)):
        fwbw_conv_lstm_net = build_model(input_shape)

    # --------------------------------------------------------------------------------------------------------------

    # --------------------------------------------Training fw model-------------------------------------------------

    # Train unless VALID_TEST mode is on and the best checkpoint already exists.
    if not Config.FWBW_CONV_LSTM_VALID_TEST or \
            not os.path.isfile(
                fwbw_conv_lstm_net.checkpoints_path + 'weights-{:02d}.hdf5'.format(
                    Config.FWBW_CONV_LSTM_BEST_CHECKPOINT)):
        print('|--- Create offline train set for forward net!')

        # NOTE(review): the train-set std is passed for both splits, presumably
        # as a shared sampling/noise scale — confirm in the data helper.
        trainX, trainY_fw, trainY_bw = create_offline_fwbw_conv_lstm_data_fix_ratio(
            train_data_normalized, input_shape,
            Config.FWBW_CONV_LSTM_MON_RATIO, train_data_normalized.std(), 1)
        print('|--- Create offline valid set for forward net!')

        validX, validY_fw, validY_bw = create_offline_fwbw_conv_lstm_data_fix_ratio(
            valid_data_normalized, input_shape,
            Config.FWBW_CONV_LSTM_MON_RATIO, train_data_normalized.std(), 1)

        # Save per-epoch weights, keeping only validation-loss improvements.
        checkpoint_callback = ModelCheckpoint(
            fwbw_conv_lstm_net.checkpoints_path + "weights-{epoch:02d}.hdf5",
            monitor='val_loss',
            verbose=1,
            save_best_only=True,
            mode='auto',
            period=1)

        # Two-headed training: forward and backward reconstruction targets.
        training_history = fwbw_conv_lstm_net.model.fit(
            x=trainX,
            y=[trainY_fw, trainY_bw],
            batch_size=Config.FWBW_CONV_LSTM_BATCH_SIZE,
            epochs=Config.FWBW_CONV_LSTM_N_EPOCH,
            callbacks=[checkpoint_callback],
            validation_data=(validX, [validY_fw, validY_bw]),
            shuffle=True,
            verbose=2)

        # Plot the training history
        if training_history is not None:
            fwbw_conv_lstm_net.plot_training_history(training_history)
            # fwbw_conv_lstm_net.save_model_history(training_history)
    else:
        # Validation-only mode: load the best trained weights instead.
        fwbw_conv_lstm_net.load_model_from_check_point(
            _from_epoch=Config.FWBW_CONV_LSTM_BEST_CHECKPOINT)

    # --------------------------------------------------------------------------------------------------------------

    # Make sure the per-experiment results directory exists.
    if not os.path.exists(Config.RESULTS_PATH + '{}-{}-{}-{}/'.format(
            Config.DATA_NAME, Config.ALG, Config.TAG, Config.SCALER)):
        os.makedirs(Config.RESULTS_PATH + '{}-{}-{}-{}/'.format(
            Config.DATA_NAME, Config.ALG, Config.TAG, Config.SCALER))

    results_summary = pd.DataFrame(
        index=range(Config.FWBW_CONV_LSTM_TESTING_TIME),
        columns=['No.', 'err', 'r2', 'rmse', 'err_ims', 'r2_ims', 'rmse_ims'])

    # Evaluate on the validation split (not the test split) after training.
    results_summary = run_test(valid_data2d, valid_data_normalized2d,
                               fwbw_conv_lstm_net, scalers, results_summary)

    results_summary.to_csv(
        Config.RESULTS_PATH + '{}-{}-{}-{}/Valid_results.csv'.format(
            Config.DATA_NAME, Config.ALG, Config.TAG, Config.SCALER),
        index=False)

    return
示例#6
0
def train_res_lstm(data):
    """Train the RES-LSTM model and evaluate it on the validation split.

    Validation metrics are written to
    <RESULTS_PATH>/<data>-<alg>-<tag>-<scaler>/Valid_results.csv.
    """
    print('|-- Run model training.')

    # Day length (measurement steps per day) is dataset-specific.
    if Config.DATA_NAME == Config.DATA_SETS[0]:
        day_size = Config.ABILENE_DAY_SIZE
    else:
        day_size = Config.GEANT_DAY_SIZE

    print('|--- Splitting train-test set.')
    train_data2d, valid_data2d, test_data2d = prepare_train_valid_test_2d(
        data=data, day_size=day_size)
    print('|--- Normalizing the train set.')
    # Scalers are fitted on the train split; the normalized test set is unused.
    train_data_normalized2d, valid_data_normalized2d, _, scalers = data_scalling(
        train_data2d, valid_data2d, test_data2d)

    # Model input: (time steps, flattened flow features).
    input_shape = (Config.RES_LSTM_STEP, Config.RES_LSTM_FEATURES)

    with tf.device('/device:GPU:{}'.format(Config.GPU)):
        lstm_net = build_model(input_shape)

    # Train unless VALID_TEST mode is on and the best checkpoint already exists.
    if not Config.RES_LSTM_VALID_TEST or \
            not os.path.isfile(
                lstm_net.checkpoints_path + 'weights-{:02d}.hdf5'.format(Config.RES_LSTM_BEST_CHECKPOINT)):
        # NOTE(review): this tests for the final-epoch weights file but then
        # loads the BEST_CHECKPOINT epoch — looks intentional ("training
        # already finished"), but confirm the two constants may differ.
        if os.path.isfile(
                path=lstm_net.checkpoints_path +
                'weights-{:02d}.hdf5'.format(Config.RES_LSTM_N_EPOCH)):
            lstm_net.load_model_from_check_point(
                _from_epoch=Config.RES_LSTM_BEST_CHECKPOINT)

        else:
            print('|---Compile model. Saving path {} --- '.format(
                lstm_net.saving_path))
            # Resume from the latest checkpoint if one exists (0 = fresh run).
            from_epoch = lstm_net.load_model_from_check_point()
            # -------------------------------- Create offline training and validating dataset --------------------------
            print('|--- Create offline train set for lstm-nn!')
            trainX, trainY = create_offline_reslstm_nn_data(
                train_data_normalized2d, input_shape, Config.RES_LSTM_MON_RAIO,
                train_data_normalized2d.std())
            print('|--- Create offline valid set for lstm-nn!')
            # The train-set std is reused for the valid set, presumably so both
            # sets share the same sampling/noise scale — confirm in the helper.
            validX, validY = create_offline_reslstm_nn_data(
                valid_data_normalized2d, input_shape, Config.RES_LSTM_MON_RAIO,
                train_data_normalized2d.std())
            # ----------------------------------------------------------------------------------------------------------

            if from_epoch > 0:
                print('|--- Continue training.')
                training_history = lstm_net.model.fit(
                    x=trainX,
                    y=trainY,
                    batch_size=Config.RES_LSTM_BATCH_SIZE,
                    epochs=Config.RES_LSTM_N_EPOCH,
                    callbacks=lstm_net.callbacks_list,
                    validation_data=(validX, validY),
                    shuffle=True,
                    initial_epoch=from_epoch,
                    verbose=2)
            else:
                print('|--- Training new model.')

                training_history = lstm_net.model.fit(
                    x=trainX,
                    y=trainY,
                    batch_size=Config.RES_LSTM_BATCH_SIZE,
                    epochs=Config.RES_LSTM_N_EPOCH,
                    callbacks=lstm_net.callbacks_list,
                    validation_data=(validX, validY),
                    shuffle=True,
                    verbose=2)

            if training_history is not None:
                lstm_net.plot_training_history(training_history)
                lstm_net.save_model_history(training_history)

    else:
        # Validation-only mode: load the best trained weights instead.
        lstm_net.load_model_from_check_point(
            _from_epoch=Config.RES_LSTM_BEST_CHECKPOINT)
    print(lstm_net.model.summary())

    # Make sure the per-experiment results directory exists.
    if not os.path.exists(Config.RESULTS_PATH + '{}-{}-{}-{}/'.format(
            Config.DATA_NAME, Config.ALG, Config.TAG, Config.SCALER)):
        os.makedirs(Config.RESULTS_PATH + '{}-{}-{}-{}/'.format(
            Config.DATA_NAME, Config.ALG, Config.TAG, Config.SCALER))

    results_summary = pd.DataFrame(
        index=range(Config.RES_LSTM_TESTING_TIME),
        columns=['No.', 'err', 'r2', 'rmse', 'err_ims', 'r2_ims', 'rmse_ims'])

    # Evaluate on the validation split (not the test split) after training.
    results_summary = run_test(valid_data2d, valid_data_normalized2d, lstm_net,
                               scalers, results_summary)

    results_summary.to_csv(
        Config.RESULTS_PATH + '{}-{}-{}-{}/Valid_results.csv'.format(
            Config.DATA_NAME, Config.ALG, Config.TAG, Config.SCALER),
        index=False)

    return
示例#7
0
def train_res_fwbw_lstm(data):
    """Train the RES-FWBW-LSTM model and evaluate it on the validation split.

    Validation metrics are written to
    <RESULTS_PATH>/<data>-<alg>-<tag>-<scaler>/Valid_results.csv.
    """
    print('|-- Run model training fwbw_lstm.')

    # Day length (measurement steps per day) is dataset-specific.
    if Config.DATA_NAME == Config.DATA_SETS[0]:
        day_size = Config.ABILENE_DAY_SIZE
    else:
        day_size = Config.GEANT_DAY_SIZE

    print('|--- Splitting train-test set.')
    train_data2d, valid_data2d, test_data2d = prepare_train_valid_test_2d(
        data=data, day_size=day_size)
    print('|--- Normalizing the train set.')
    # Scalers are fitted on the train split; the normalized test set is unused.
    train_data_normalized2d, valid_data_normalized2d, _, scalers = data_scalling(
        train_data2d, valid_data2d, test_data2d)

    # Model input: (time steps, flattened flow features).
    input_shape = (Config.RES_FWBW_LSTM_STEP, Config.RES_FWBW_LSTM_FEATURES)

    with tf.device('/device:GPU:{}'.format(Config.GPU)):
        fwbw_net = build_model(input_shape)

    # --------------------------------------------------------------------------------------------------------------

    # --------------------------------------------Training fw model-------------------------------------------------

    # Train unless VALID_TEST mode is on and the best checkpoint already exists.
    if not Config.RES_FWBW_LSTM_VALID_TEST or \
            not os.path.isfile(
                fwbw_net.checkpoints_path + 'weights-{:02d}.hdf5'.format(Config.RES_FWBW_LSTM_BEST_CHECKPOINT)):
        print('|--- Compile model. Saving path %s --- ' % fwbw_net.saving_path)
        # -------------------------------- Create offline training and validating dataset --------------------------

        print('|--- Create offline train set for forward net!')

        # NOTE(review): the train-set std is passed for both splits, presumably
        # as a shared sampling/noise scale — confirm in the data helper.
        trainX_1, trainX_2, trainY_1, trainY_2 = create_offline_res_fwbw_lstm(
            train_data_normalized2d, input_shape,
            Config.RES_FWBW_LSTM_MON_RAIO, train_data_normalized2d.std())
        print('|--- Create offline valid set for forward net!')

        validX_1, validX_2, validY_1, validY_2 = create_offline_res_fwbw_lstm(
            valid_data_normalized2d, input_shape,
            Config.RES_FWBW_LSTM_MON_RAIO, train_data_normalized2d.std())

        # Load model check point
        from_epoch = fwbw_net.load_model_from_check_point()
        if from_epoch > 0:
            print('|--- Continue training forward model from epoch %i --- ' %
                  from_epoch)
            # NOTE(review): batch_size is hard-coded to 1024 here, while the
            # other trainers read it from a Config constant — confirm whether
            # a Config.RES_FWBW_LSTM_BATCH_SIZE should be used instead.
            training_fw_history = fwbw_net.model.fit(
                x=[trainX_1, trainX_2],
                y=[trainY_1, trainY_2],
                batch_size=1024,
                epochs=Config.RES_FWBW_LSTM_N_EPOCH,
                callbacks=fwbw_net.callbacks_list,
                validation_data=([validX_1, validX_2], [validY_1, validY_2]),
                shuffle=True,
                initial_epoch=from_epoch,
                verbose=2)
        else:
            print('|--- Training new forward model.')

            training_fw_history = fwbw_net.model.fit(
                x=[trainX_1, trainX_2],
                y=[trainY_1, trainY_2],
                batch_size=1024,
                epochs=Config.RES_FWBW_LSTM_N_EPOCH,
                callbacks=fwbw_net.callbacks_list,
                validation_data=([validX_1, validX_2], [validY_1, validY_2]),
                shuffle=True,
                verbose=2)

        # Plot the training history
        if training_fw_history is not None:
            fwbw_net.plot_training_history(training_fw_history)
            fwbw_net.save_model_history(training_fw_history)

    else:
        # Validation-only mode: load the best trained weights instead.
        fwbw_net.load_model_from_check_point(
            _from_epoch=Config.RES_FWBW_LSTM_BEST_CHECKPOINT)
    # --------------------------------------------------------------------------------------------------------------

    # Make sure the per-experiment results directory exists.
    if not os.path.exists(Config.RESULTS_PATH + '{}-{}-{}-{}/'.format(
            Config.DATA_NAME, Config.ALG, Config.TAG, Config.SCALER)):
        os.makedirs(Config.RESULTS_PATH + '{}-{}-{}-{}/'.format(
            Config.DATA_NAME, Config.ALG, Config.TAG, Config.SCALER))

    results_summary = pd.DataFrame(
        index=range(Config.RES_FWBW_LSTM_TESTING_TIME),
        columns=['No.', 'err', 'r2', 'rmse', 'err_ims', 'r2_ims', 'rmse_ims'])

    # Evaluate on the validation split (not the test split) after training.
    results_summary = run_test(valid_data2d, valid_data_normalized2d, fwbw_net,
                               scalers, results_summary)

    results_summary.to_csv(
        Config.RESULTS_PATH + '{}-{}-{}-{}/Valid_results.csv'.format(
            Config.DATA_NAME, Config.ALG, Config.TAG, Config.SCALER),
        index=False)

    return
示例#8
0
def train_fwbw_convlstm(data):
    """Train the FW-BW ConvLSTM model, then run a validation-set test.

    Splits and normalizes the data, reshapes flows into WIDE x HIGH grids,
    trains (or resumes/loads) the two-headed 'pred_data'/'corr_data' model,
    and finally calls run_test on the validation split.
    """
    print('|-- Run model training.')

    # Hyper-parameter dict passed through to run_test for logging.
    params = Config.set_comet_params_fwbw_convlstm()

    gpu = Config.GPU

    # Grid size is tied to the dataset: Abilene is 12x12, Geant is 23x23.
    data_name = Config.DATA_NAME
    if 'Abilene' in data_name:
        day_size = Config.ABILENE_DAY_SIZE
        assert Config.FWBW_CONVLSTM_HIGH == 12
        assert Config.FWBW_CONVLSTM_WIDE == 12
    else:
        day_size = Config.GEANT_DAY_SIZE
        assert Config.FWBW_CONVLSTM_HIGH == 23
        assert Config.FWBW_CONVLSTM_WIDE == 23

    print('|--- Splitting train-test set.')
    train_data2d, valid_data2d, test_data2d = prepare_train_valid_test_2d(data=data, day_size=day_size)
    print('|--- Normalizing the train set.')
    # Copies are passed so the raw 2D splits stay unmodified by the scaler.
    train_data_normalized2d, valid_data_normalized2d, _, scalers = data_scalling(np.copy(train_data2d),
                                                                                 np.copy(valid_data2d),
                                                                                 np.copy(test_data2d))

    # Reshape flat flow vectors into WIDE x HIGH traffic-matrix grids.
    train_data_normalized = np.reshape(np.copy(train_data_normalized2d), newshape=(train_data_normalized2d.shape[0],
                                                                                   Config.FWBW_CONVLSTM_WIDE,
                                                                                   Config.FWBW_CONVLSTM_HIGH))
    valid_data_normalized = np.reshape(np.copy(valid_data_normalized2d), newshape=(valid_data_normalized2d.shape[0],
                                                                                   Config.FWBW_CONVLSTM_WIDE,
                                                                                   Config.FWBW_CONVLSTM_HIGH))

    input_shape = (Config.FWBW_CONVLSTM_STEP,
                   Config.FWBW_CONVLSTM_WIDE, Config.FWBW_CONVLSTM_HIGH, Config.FWBW_CONVLSTM_CHANNEL)

    with tf.device('/device:GPU:{}'.format(gpu)):
        net = build_model(input_shape)

    # --------------------------------------------------------------------------------------------------------------

    # --------------------------------------------Training fw model-------------------------------------------------

    # If the final-epoch weights file exists, training is considered done;
    # load the best checkpoint instead of retraining.
    if os.path.isfile(path=net.checkpoints_path + 'weights-{:02d}.hdf5'.format(Config.FWBW_CONVLSTM_N_EPOCH)):
        print('|--- Forward model exist! Load model from epoch: {}'.format(Config.FWBW_CONVLSTM_BEST_CHECKPOINT))
        net.load_model_from_check_point(_from_epoch=Config.FWBW_CONVLSTM_BEST_CHECKPOINT)
    else:
        print('|--- Compile model. Saving path %s --- ' % net.saving_path)
        # -------------------------------- Create offline training and validating dataset ------------------------------

        print('|--- Create offline train set!')

        # NOTE(review): trailing args differ (4 for train, 1 for valid) —
        # presumably a repeat/oversampling factor; confirm in the data helper.
        trainX, trainY_1, trainY_2 = create_offline_fwbw_convlstm_data(train_data_normalized,
                                                                       input_shape, Config.FWBW_CONVLSTM_MON_RAIO,
                                                                       train_data_normalized.mean(),
                                                                       4)
        print('|--- Create offline valid set!')

        validX, validY_1, validY_2 = create_offline_fwbw_convlstm_data(valid_data_normalized,
                                                                       input_shape, Config.FWBW_CONVLSTM_MON_RAIO,
                                                                       train_data_normalized.mean(),
                                                                       1)

        # Load model check point
        from_epoch = net.load_model_from_check_point()
        if from_epoch > 0:
            print('|--- Continue training forward model from epoch %i --- ' % from_epoch)
            training_fw_history = net.model.fit(x=trainX,
                                                y={'pred_data': trainY_1, 'corr_data': trainY_2},
                                                batch_size=Config.FWBW_CONVLSTM_BATCH_SIZE,
                                                epochs=Config.FWBW_CONVLSTM_N_EPOCH,
                                                callbacks=net.callbacks_list,
                                                validation_data=(
                                                validX, {'pred_data': validY_1, 'corr_data': validY_2}),
                                                shuffle=True,
                                                initial_epoch=from_epoch,
                                                verbose=2)
        else:
            print('|--- Training new forward model.')

            training_fw_history = net.model.fit(x=trainX,
                                                y={'pred_data': trainY_1, 'corr_data': trainY_2},
                                                batch_size=Config.FWBW_CONVLSTM_BATCH_SIZE,
                                                epochs=Config.FWBW_CONVLSTM_N_EPOCH,
                                                callbacks=net.callbacks_list,
                                                validation_data=(
                                                validX, {'pred_data': validY_1, 'corr_data': validY_2}),
                                                shuffle=True,
                                                verbose=2)

        # Plot the training history
        if training_fw_history is not None:
            net.plot_training_history(training_fw_history)

    # --------------------------------------------------------------------------------------------------------------
    # Evaluate on the validation split, seeded with the tail of the train set.
    run_test(valid_data2d, valid_data_normalized2d, train_data_normalized2d[-Config.FWBW_CONVLSTM_STEP:],
             net, params, scalers)

    return