Code example #1: testing a trained FWBW-ConvLSTM model
# Assumed module-level import; Config and the data helpers
# (prepare_train_valid_test_2d, data_scalling, load_trained_models, run_test)
# are provided elsewhere in the project.
import tensorflow as tf


def test_fwbw_convlstm(data, experiment):
    print('|-- Run model testing.')
    gpu = Config.GPU

    params = Config.set_comet_params_fwbw_convlstm()

    data_name = Config.DATA_NAME
    if 'Abilene' in data_name:
        day_size = Config.ABILENE_DAY_SIZE
    else:
        day_size = Config.GEANT_DAY_SIZE

    if not Config.ALL_DATA:
        data = data[0:Config.NUM_DAYS * day_size]

    print('|--- Splitting train-test set.')
    train_data2d, valid_data2d, test_data2d = prepare_train_valid_test_2d(data=data, day_size=day_size)
    print('|--- Normalizing the train set.')

    if 'Abilene' in data_name:
        print('|--- Remove last 3 days in test data.')
        test_data2d = test_data2d[0:-day_size * 3]

    _, valid_data_normalized2d, test_data_normalized2d, scalers = data_scalling(train_data2d,
                                                                                valid_data2d,
                                                                                test_data2d)
    input_shape = (Config.FWBW_CONVLSTM_STEP,
                   Config.FWBW_CONVLSTM_WIDE, Config.FWBW_CONVLSTM_HIGH, Config.FWBW_CONVLSTM_CHANNEL)

    with tf.device('/device:GPU:{}'.format(gpu)):
        net = load_trained_models(input_shape, Config.FWBW_CONVLSTM_BEST_CHECKPOINT)

    run_test(test_data2d, test_data_normalized2d, valid_data_normalized2d[-Config.FWBW_CONVLSTM_STEP:],
             net, params, scalers)

    return
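A minimal usage sketch for the entry point above (not part of the original code): it assumes the raw traffic matrices are already stored on disk as a 2-D NumPy array of shape (timesteps, num_flows); the file name is a placeholder, and the `experiment` argument is accepted but not used inside the snippet above.

import numpy as np

if __name__ == '__main__':
    # Load the 2-D traffic-matrix series (placeholder path).
    data = np.load('abilene_tm.npy')
    # A comet_ml Experiment could be passed instead of None if Comet logging is wanted.
    test_fwbw_convlstm(data=data, experiment=None)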
Code example #2: training the FWBW-ConvLSTM model
# Assumed module-level imports; Config, build_model, create_offline_fwbw_convlstm_data
# and the other helpers are provided elsewhere in the project.
import os

import numpy as np
import tensorflow as tf


def train_fwbw_convlstm(data):
    print('|-- Run model training.')

    params = Config.set_comet_params_fwbw_convlstm()

    gpu = Config.GPU

    data_name = Config.DATA_NAME
    if 'Abilene' in data_name:
        day_size = Config.ABILENE_DAY_SIZE
        assert Config.FWBW_CONVLSTM_HIGH == 12
        assert Config.FWBW_CONVLSTM_WIDE == 12
    else:
        day_size = Config.GEANT_DAY_SIZE
        assert Config.FWBW_CONVLSTM_HIGH == 23
        assert Config.FWBW_CONVLSTM_WIDE == 23

    print('|--- Splitting train-test set.')
    train_data2d, valid_data2d, test_data2d = prepare_train_valid_test_2d(data=data, day_size=day_size)
    print('|--- Normalizing the train set.')
    train_data_normalized2d, valid_data_normalized2d, _, scalers = data_scalling(np.copy(train_data2d),
                                                                                 np.copy(valid_data2d),
                                                                                 np.copy(test_data2d))

    train_data_normalized = np.reshape(np.copy(train_data_normalized2d), newshape=(train_data_normalized2d.shape[0],
                                                                                   Config.FWBW_CONVLSTM_WIDE,
                                                                                   Config.FWBW_CONVLSTM_HIGH))
    valid_data_normalized = np.reshape(np.copy(valid_data_normalized2d), newshape=(valid_data_normalized2d.shape[0],
                                                                                   Config.FWBW_CONVLSTM_WIDE,
                                                                                   Config.FWBW_CONVLSTM_HIGH))

    input_shape = (Config.FWBW_CONVLSTM_STEP,
                   Config.FWBW_CONVLSTM_WIDE, Config.FWBW_CONVLSTM_HIGH, Config.FWBW_CONVLSTM_CHANNEL)

    with tf.device('/device:GPU:{}'.format(gpu)):
        net = build_model(input_shape)

    # --------------------------------------------------------------------------------------------------------------

    # --------------------------------------------Training fw model-------------------------------------------------

    # If the final-epoch weights already exist, reload the best checkpoint instead of retraining.
    if os.path.isfile(path=net.checkpoints_path + 'weights-{:02d}.hdf5'.format(Config.FWBW_CONVLSTM_N_EPOCH)):
        print('|--- Forward model exists! Loading model from epoch: {}'.format(Config.FWBW_CONVLSTM_BEST_CHECKPOINT))
        net.load_model_from_check_point(_from_epoch=Config.FWBW_CONVLSTM_BEST_CHECKPOINT)
    else:
        print('|--- Compile model. Saving path %s --- ' % net.saving_path)
        # -------------------------------- Create offline training and validating dataset ------------------------------

        print('|--- Create offline train set!')

        trainX, trainY_1, trainY_2 = create_offline_fwbw_convlstm_data(train_data_normalized,
                                                                       input_shape, Config.FWBW_CONVLSTM_MON_RAIO,
                                                                       train_data_normalized.mean(),
                                                                       4)
        print('|--- Create offline valid set!')

        validX, validY_1, validY_2 = create_offline_fwbw_convlstm_data(valid_data_normalized,
                                                                       input_shape, Config.FWBW_CONVLSTM_MON_RAIO,
                                                                       train_data_normalized.mean(),
                                                                       1)

        # Load model checkpoint and resume training if a previous epoch is found.
        from_epoch = net.load_model_from_check_point()
        if from_epoch > 0:
            print('|--- Continue training forward model from epoch %i --- ' % from_epoch)
        else:
            print('|--- Training new forward model.')
            from_epoch = 0

        training_fw_history = net.model.fit(x=trainX,
                                            y={'pred_data': trainY_1, 'corr_data': trainY_2},
                                            batch_size=Config.FWBW_CONVLSTM_BATCH_SIZE,
                                            epochs=Config.FWBW_CONVLSTM_N_EPOCH,
                                            callbacks=net.callbacks_list,
                                            validation_data=(validX,
                                                             {'pred_data': validY_1, 'corr_data': validY_2}),
                                            shuffle=True,
                                            initial_epoch=from_epoch,
                                            verbose=2)

        # Plot the training history
        if training_fw_history is not None:
            net.plot_training_history(training_fw_history)

    # --------------------------------------------------------------------------------------------------------------
    run_test(valid_data2d, valid_data_normalized2d, train_data_normalized2d[-Config.FWBW_CONVLSTM_STEP:],
             net, params, scalers)

    return
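For context, an illustrative sketch of the kind of sliding-window sample construction that create_offline_fwbw_convlstm_data performs. This is an assumption about the general technique (a random measurement mask drawn at the monitoring ratio, mean-filled unmeasured cells, next-step prediction target), not the project's actual implementation; the second 'corr_data' target and the trailing integer argument are omitted because their meaning is not shown in the snippet. The helper name make_sliding_windows is hypothetical.

import numpy as np

def make_sliding_windows(series, n_step, wide, high, mon_ratio, mean_val):
    """series: normalized traffic, shape (T, wide, high). Returns inputs of shape
    (samples, n_step, wide, high, 2) and next-step targets, assuming CHANNEL == 2
    (one data channel plus one measurement-mask channel)."""
    n_samples = series.shape[0] - n_step
    x = np.zeros((n_samples, n_step, wide, high, 2), dtype=np.float32)
    y_pred = np.zeros((n_samples, wide, high, 1), dtype=np.float32)
    for i in range(n_samples):
        window = series[i:i + n_step]
        # Mark a random fraction mon_ratio of cells as "measured"; unmeasured
        # cells fall back to the training-set mean.
        mask = (np.random.rand(n_step, wide, high) < mon_ratio).astype(np.float32)
        x[i, ..., 0] = window * mask + mean_val * (1.0 - mask)
        x[i, ..., 1] = mask
        y_pred[i, ..., 0] = series[i + n_step]  # the traffic matrix to predict
    return x, y_pred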