Example 1
0
def getPredictedPVValue(pvValue, timestamps, delta):
    """Predict PV production with the pre-trained LSTM model.

    Returns a tuple of (predictions inverse-scaled back to real units,
    configured look-back window, configured output size).
    """
    main_config = ForecastConfig()
    pv_config = ForecastPvConfig(main_config)

    # Step size expressed as a timedelta ("%H:%M:%S" minus midnight).
    step = (datetime.strptime(pv_config.STEP_SIZE, "%H:%M:%S")
            - datetime.strptime("00:00:00", "%H:%M:%S"))
    main_config.TIMESTAMPS = constructTimeStamps(
        datetime.strptime(pv_config.BEGIN, "20%y-%m-%d %H:%M:%S"),
        datetime.strptime(pv_config.END, "20%y-%m-%d %H:%M:%S"),
        step,
    )
    _, endValidation = get_split_indexes(main_config)
    # Compare month/day only — the year is deliberately dropped.
    requested_day = datetime.strptime(timestamps[0].strftime("%m-%d"), "%m-%d")
    validation_end_day = datetime.strptime(
        main_config.TIMESTAMPS[endValidation].strftime("%m-%d"), "%m-%d")
    assert (requested_day - validation_end_day).days >= 0

    features = addMonthOfYear(addMinutes(pvValue))
    # Normalize with the scaler persisted at training time.
    scaler = joblib.load(pv_config.MODEL_FILE_SC)
    print(scaler.data_max_)
    features = scaler.transform(features)

    # Build the sliding windows the LSTM expects:
    # (n_windows, look_back, n_features).
    look_back = pv_config.LOOK_BACK
    n_windows = len(features) - look_back
    windows = np.empty((n_windows, look_back, features.shape[1]))
    for start in range(n_windows):
        windows[start] = features[start:start + look_back, :]

    model = loadModel(pv_config)
    prediction = invertScaler(model.predict(windows), scaler)

    return prediction, look_back, pv_config.OUTPUT_SIZE
Example 2
0
def getPredictedLoadValue(loadsData, timestamps, timedelta):
    """Predict the electrical load with the pre-trained LSTM model.

    Returns a tuple of (predictions inverse-scaled back to real units,
    configured look-back window, configured output size).
    """
    config = ForecastConfig()
    loadConfig = ForecastLoadConfig()

    features = add_day_of_week(addMinutes(loadsData))

    # Step size expressed as a timedelta ("%H:%M:%S" minus midnight).
    step = (datetime.strptime(loadConfig.STEPSIZE, "%H:%M:%S")
            - datetime.strptime("00:00:00", "%H:%M:%S"))
    config.TIMESTAMPS = constructTimeStamps(
        datetime.strptime(loadConfig.BEGIN, "20%y-%m-%d %H:%M:%S"),
        datetime.strptime(loadConfig.END, "20%y-%m-%d %H:%M:%S"),
        step,
    )
    _, endValidation = get_split_indexes(config)
    # Compare month/day only — the year is deliberately dropped.
    requested_day = datetime.strptime(timestamps[0].strftime("%m-%d"), "%m-%d")
    validation_end_day = datetime.strptime(
        config.TIMESTAMPS[endValidation].strftime("%m-%d"), "%m-%d")
    assert (requested_day - validation_end_day).days >= 0

    # Append one column per configured appliance to the feature matrix.
    for appliance in loadConfig.APPLIANCES:
        appliance_series = getPecanstreetData(
            loadConfig.DATA_FILE,
            loadConfig.TIME_HEADER,
            loadConfig.DATAID,
            appliance,
            timestamps,
            timedelta,
        )
        features = pd.concat([features, appliance_series], axis=1)

    # Normalize with the scaler persisted at training time.
    scaler = joblib.load(loadConfig.MODEL_FILE_SC)
    features = scaler.transform(features)

    # Build the sliding windows the LSTM expects:
    # (n_windows, look_back, n_features).
    look_back = loadConfig.LOOK_BACK
    n_windows = len(features) - look_back
    windows = np.empty((n_windows, look_back, features.shape[1]))
    for start in range(n_windows):
        windows[start] = features[start:start + look_back, :]

    model = loadModel(loadConfig)
    prediction = invertScaler(model.predict(windows), scaler)
    return prediction, look_back, loadConfig.OUTPUT_SIZE
Example 3
0
def main(argv):
    """Seed the RNG, prepare the output folder, and run the PV forecast."""
    global outputFolder

    main_cfg = ForecastConfig()
    pv_cfg = ForecastPvConfig(main_cfg)
    np.random.seed(main_cfg.SEED)

    # Module-level code reads this global, so it must be set here.
    outputFolder = pv_cfg.OUTPUT_FOLDER
    if not os.path.isdir(outputFolder):
        os.makedirs(outputFolder)

    forecasting(main_cfg, pv_cfg)
Example 4
0
    def __init__(self):
        """Configuration for the load-forecast LSTM model.

        Time-range fields mirror the shared ForecastConfig so both
        configurations stay in sync.
        """
        # Fix: instantiate the shared config once instead of four times,
        # so all copied fields are guaranteed to come from the same object.
        base_config = ForecastConfig()
        self.BEGIN = base_config.BEGIN
        self.END = base_config.END
        self.STEPSIZE = base_config.STEPSIZE
        self.TIMESTAMPS = base_config.TIMESTAMPS

        # Input data: Pecan Street 15-minute load data for one household.
        self.DATA_FILE = "./data/15minute_data_newyork_1222.csv"
        self.TIME_HEADER = "local_15min"
        self.DATAID = 1222

        # When True, load a previously saved model instead of training.
        self.LOAD_MODEL = False
        self.NB_PLOT = 4

        # Network hyper-parameters.
        self.OUTPUT_SIZE = 48
        self.LOOK_BACK = 48
        self.BATCH_SIZE = 100
        self.DROPOUT = [0.1, 0.1, 0.1]
        self.NEURONS = [256, 256, 256]
        self.LEARNING_RATE = 0.0003
        self.APPLIANCES = ["heater1", "waterheater1", "drye1"]

        # Unique identifier encoding the hyper-parameters; used to name
        # the output folder and all persisted model artifacts.
        self.MODEL_ID = "ts30_out{}_lb{}_bs{}_lay{}_do1{}_neu1{}_lr{}_appl{}".format(
            self.OUTPUT_SIZE,
            self.LOOK_BACK,
            self.BATCH_SIZE,
            len(self.NEURONS),
            self.DROPOUT[0],
            self.NEURONS[0],
            self.LEARNING_RATE,
            len(self.APPLIANCES),
        )
        self.OUTPUT_FOLDER = "./output/forecast/load/" + self.MODEL_ID + "/"
        self.MODEL_FILE = self.OUTPUT_FOLDER + "model_" + self.MODEL_ID + ".json"
        self.MODEL_FILE_H5 = self.OUTPUT_FOLDER + "model_" + self.MODEL_ID + ".h5"
        self.MODEL_FILE_SC = self.OUTPUT_FOLDER + "model_" + self.MODEL_ID + ".save"

        # Training parameters (early stopping via PATIENCE / MIN_DELTA).
        self.EPOCHS = 100
        self.PATIENCE = 10
        self.MIN_DELTA = 0.00001
        self.ACTIVATION_FUNCTION = "relu"
        self.LOSS_FUNCTION = "mean_squared_error"
        self.OPTIMIZE_FUNCTION = "adam"
Example 5
0
def main(argv):
    """Evaluate baseline predictors and the trained LSTM on the load data."""
    config = ForecastConfig()
    loadConfig = ForecastLoadConfig()

    train, validation, test, scaler = getNormalizedParts(
        config, loadConfig, config.TIMESTAMPS)

    # Column 0 holds the load signal the baselines operate on.
    baseline_train = train[:, 0]
    baseline_validation = validation[:, 0]
    baseline_test = test[:, 0]

    _, end_validation = get_split_indexes(config)  # currently unused

    print("Validation:")
    one_step_persistence_model(baseline_validation)
    mean_baseline_one_step(config, baseline_train, baseline_validation)
    predict_zero_one_day(config, baseline_validation)
    predict_zero_one_step(baseline_validation)

    print("Test:")
    one_step_persistence_model(baseline_test)
    mean_baseline_one_step(config, baseline_train, baseline_test)
    mean_baseline_one_day(config, baseline_train, baseline_test)

    print("Train on test and predict for Test:")
    mean_baseline_one_step(config, baseline_test, baseline_test)
    # NOTE(review): unlike the one-step call above, this still uses
    # baseline_train — confirm whether baseline_test was intended here.
    mean_baseline_one_day(config, baseline_train, baseline_test)
    predict_zero_one_day(config, baseline_test)
    predict_zero_one_step(baseline_test)

    test_x, test_y = buildSet(test, loadConfig.LOOK_BACK,
                              loadConfig.OUTPUT_SIZE)
    model = loadModel(loadConfig)
    test_predict = model.predict(test_x)

    # Fix: the original wrapped this call in `if True:` followed by an
    # unreachable `elif loadConfig.OUTPUT_SIZE == 1:` branch (dead code
    # that would have plotted the first 48 one-step predictions). The
    # always-taken path is kept, so runtime behavior is unchanged.
    plotLSTM_Base_Real(loadConfig, baseline_train, test_predict[24],
                       "mean", test_y[24])
Example 6
0
                result.append(os.path.join(root, name))
    return result


# Collect every saved load-forecast model architecture file on disk.
model_paths = find("model_ts30*.json", "./output/forecast/load/")
print(model_paths)

# Track the best model found so far (lower score is better; 1.0 acts as
# the initial "worst" sentinel).
best_model = ""
best_value = 1.0
best_test = 1.0

for model_path in model_paths:
    # Keras models are stored as a .json architecture + .h5 weights pair.
    # NOTE(review): str.replace swaps every "json" occurrence in the path,
    # not just the extension — fine as long as no folder is named "json".
    model_path_h5 = model_path.replace("json", "h5")
    model = load_model(model_path, model_path_h5)

    config = ForecastConfig()
    loadConfig = ForecastLoadConfig()

    # Recover the hyper-parameters from the model's input shape,
    # which is (batch, look_back, n_features).
    loadConfig.LOOK_BACK = model.layers[0].input_shape[1]
    # >= 6 input features: model was trained with the three per-appliance
    # series; <= 5: trained without them. Adjust the config to match.
    if model.layers[0].input_shape[2] >= 6 and len(loadConfig.APPLIANCES) == 0:
        loadConfig.APPLIANCES = ["heater1", "waterheater1", "drye1"]
    if model.layers[0].input_shape[2] <= 5 and len(loadConfig.APPLIANCES) == 3:
        loadConfig.APPLIANCES = []

    # Rebuild validation/test windows under the recovered configuration.
    _, validation_part, test_part, scaler = getNormalizedParts(
        config, loadConfig, config.TIMESTAMPS
    )
    validation_x, validation_y = buildSet(
        validation_part, loadConfig.LOOK_BACK, loadConfig.OUTPUT_SIZE
    )
    test_x, test_y = buildSet(test_part, loadConfig.LOOK_BACK, loadConfig.OUTPUT_SIZE)
Example 7
0
def main(argv):
    """Evaluate the trained PV model and baseline predictors on the PV data."""
    config = ForecastConfig()
    pvConfig = ForecastPvConfig(config)

    config.OUTPUT_FOLDER = pvConfig.OUTPUT_FOLDER
    # Step size expressed as a timedelta ("%H:%M:%S" minus midnight).
    step = (datetime.strptime(pvConfig.STEP_SIZE, "%H:%M:%S")
            - datetime.strptime("00:00:00", "%H:%M:%S"))
    timestamps = constructTimeStamps(
        datetime.strptime(pvConfig.BEGIN, "20%y-%m-%d %H:%M:%S"),
        datetime.strptime(pvConfig.END, "20%y-%m-%d %H:%M:%S"),
        step,
    )
    config.TIMESTAMPS = timestamps

    # Uncontrollable resource: solar production from the Pecan Street data.
    data = getPecanstreetData(pvConfig.DATA_FILE, pvConfig.TIME_HEADER,
                              pvConfig.DATAID, "solar", timestamps)
    data = addMonthOfYear(addMinutes(data))

    train_set, validation_set, test_set = splitData(config, data)
    print(timestamps[len(validation_set) + len(train_set)])

    # Fit the scaler on the training split only, then normalize all splits.
    scaler = MinMaxScaler()
    scaler.fit(train_set)
    train_set = scaler.transform(train_set)
    validation_set = scaler.transform(validation_set)
    test_set = scaler.transform(test_set)

    X, y = buildSet(test_set, pvConfig.LOOK_BACK, pvConfig.OUTPUT_SIZE)

    # Keep only column 0 (the PV signal) for plotting and the baselines.
    train_series = np.array(train_set[:, 0])
    validation_series = np.array(validation_set[:, 0])
    test_series = np.array(test_set[:, 0])

    model = loadModel(pvConfig)
    testPredictY = model.predict(X)

    import matplotlib.pyplot as plt

    plt.plot(test_series[:100])
    plt.show()
    plt.plot(y[0])
    plt.show()

    plotLSTM_Base_Real(config, train_series, testPredictY[72], "mean", y[72])

    print("Validation:")
    one_step_persistence_model(validation_series)
    print("Test:")
    one_step_persistence_model(test_series)

    print("Validation:")
    mean_baseline_one_day(config, train_series, validation_series)
    print("Test:")
    mean_baseline_one_day(config, train_series, test_series)
    print("Train on test and predict for Test:")
    mean_baseline_one_day(config, test_series, test_series)

    print("Validation:")
    predict_zero_one_day(config, validation_series)
    print("Test:")
    predict_zero_one_day(config, test_series)

    print("Validation:")
    predict_zero_one_step(validation_series)
    print("Test:")
    predict_zero_one_step(test_series)
Example 8
0
from data import getPecanstreetData
from forecast import splitData, loadModel, buildSet, addMinutes, addMonthOfYear
from forecast_baseline import (
    one_step_persistence_model,
    predict_zero_one_day,
    predict_zero_one_step,
    plotLSTM_Base_Real,
    mean_baseline_one_day,
)
from forecast_conf import ForecastConfig
from forecast_pv_conf import ForecastPvConfig
from sklearn.preprocessing import MinMaxScaler
from tensorflow import set_random_seed
from util import constructTimeStamps

# Seed both TensorFlow and NumPy at import time so runs are reproducible.
set_random_seed(ForecastConfig().SEED)
np.random.seed(ForecastConfig().SEED)


def main(argv):
    config = ForecastConfig()
    pvConfig = ForecastPvConfig(config)

    config.OUTPUT_FOLDER = pvConfig.OUTPUT_FOLDER
    timestamps = constructTimeStamps(
        datetime.strptime(pvConfig.BEGIN, "20%y-%m-%d %H:%M:%S"),
        datetime.strptime(pvConfig.END, "20%y-%m-%d %H:%M:%S"),
        datetime.strptime(pvConfig.STEP_SIZE, "%H:%M:%S") -
        datetime.strptime("00:00:00", "%H:%M:%S"),
    )
Example 9
0
def main(argv):
    """Train (or load) the load-forecast LSTM and plot its predictions.

    With LOAD_MODEL False: creates the output folder, snapshots the config
    files, trains a new model, and evaluates it on the validation split.
    With LOAD_MODEL True: loads the saved model and evaluates it on the
    test split.
    """
    config = ForecastConfig()
    loadConfig = ForecastLoadConfig()

    if not loadConfig.LOAD_MODEL:
        # Refuse to overwrite a previous run's artifacts.
        # NOTE(review): `assert` is stripped under `python -O`; an explicit
        # check-and-raise would be safer for input validation.
        assert not os.path.isdir(loadConfig.OUTPUT_FOLDER)
        os.makedirs(loadConfig.OUTPUT_FOLDER)

    train_part, validation_part, test_part, scaler = getNormalizedParts(
        config, loadConfig, config.TIMESTAMPS)

    # Build (window, target) pairs for each split.
    train_x, train_y = buildSet(train_part, loadConfig.LOOK_BACK,
                                loadConfig.OUTPUT_SIZE)
    validation_x, validation_y = buildSet(validation_part,
                                          loadConfig.LOOK_BACK,
                                          loadConfig.OUTPUT_SIZE)
    test_x, test_y = buildSet(test_part, loadConfig.LOOK_BACK,
                              loadConfig.OUTPUT_SIZE)

    # Indexes into config.TIMESTAMPS where train and validation end.
    end_train, end_validation = get_split_indexes(config)

    if not loadConfig.LOAD_MODEL:
        # Snapshot both config files next to the model for reproducibility.
        copyfile("./code/forecast_conf.py",
                 loadConfig.OUTPUT_FOLDER + "forecast_conf.py")
        copyfile(
            "./code/forecast_load_conf.py",
            loadConfig.OUTPUT_FOLDER + "forecast_load_conf.py",
        )
        model = buildModel(loadConfig, train_x.shape)
        history = train(loadConfig, model, train_x, train_y, validation_x,
                        validation_y)
        saveModel(loadConfig, model)
        plotHistory(loadConfig, history)

        # Timestamps aligned with the validation targets: the first
        # LOOK_BACK steps have no target, so they are dropped.
        validation_timestamps = config.TIMESTAMPS[end_train:end_validation]
        validation_y_timestamps = validation_timestamps[loadConfig.LOOK_BACK:]
        assert (len(validation_y_timestamps) == len(validation_y) +
                loadConfig.OUTPUT_SIZE)
        validation_prediction = model.predict(validation_x)
        validation_mse = mean_squared_error(validation_y,
                                            validation_prediction)
        print("validation mse: ", validation_mse)
        # Plot the second window (index 1) of the validation set.
        plotPredictionPart(
            loadConfig,
            validation_y[1, :],
            validation_prediction[1, :],
            "1st day of validation set",
            validation_y_timestamps[:loadConfig.OUTPUT_SIZE],
            "paramtuning1",
        )
    else:
        model = loadModel(loadConfig)
        test_prediction = model.predict(test_x)
        test_mse = mean_squared_error(test_y, test_prediction)
        print("test mse: ", test_mse)

        # Timestamps aligned with the test targets (same LOOK_BACK offset
        # logic as the validation branch above).
        test_timestamps = config.TIMESTAMPS[end_validation:end_validation +
                                            len(test_part)]
        test_y_timestamps = test_timestamps[loadConfig.LOOK_BACK:]
        assert len(test_y_timestamps) == len(test_y) + loadConfig.OUTPUT_SIZE

        plot_multiple_days(config, loadConfig, test_part[:, 0],
                           test_prediction, test_timestamps)

        # Overview plot across all three splits.
        plotPrediction(
            train_y,
            model.predict(train_x),
            validation_y,
            model.predict(validation_x),
            test_y,
            test_prediction,
            config.TIMESTAMPS,
            loadConfig,
        )

        # Plot the second window (index 1) of the test set.
        plotPredictionPart(
            loadConfig,
            test_y[1, :],
            test_prediction[1, :],
            "1st day of test set",
            test_y_timestamps[:loadConfig.OUTPUT_SIZE],
            "paramtuning2",
        )