Example #1
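All of the snippets on this page come from a single forecasting project. They rely on shared third-party imports (a plausible block is sketched below; whether copyfile comes from shutil and where mean_absolute_percentage_error comes from are assumptions) and on project helpers such as ForecastConfig, ForecastPvConfig, ForecastLoadConfig, constructTimeStamps, buildSet, loadModel and the plotting functions, which are not reproduced here.

from datetime import datetime
import os
from shutil import copyfile

import joblib
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error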
def getPredictedPVValue(pvValue, timestamps, delta):
    config_main = ForecastConfig()
    config_pv = ForecastPvConfig(config_main)

    config_main.TIMESTAMPS = constructTimeStamps(
        datetime.strptime(config_pv.BEGIN, "20%y-%m-%d %H:%M:%S"),
        datetime.strptime(config_pv.END, "20%y-%m-%d %H:%M:%S"),
        datetime.strptime(config_pv.STEP_SIZE, "%H:%M:%S") -
        datetime.strptime("00:00:00", "%H:%M:%S"),
    )
    _, endValidation = get_split_indexes(config_main)
    # compare month and day only (the year is dropped)
    a = datetime.strptime(timestamps[0].strftime("%m-%d"), "%m-%d")
    b = datetime.strptime(
        config_main.TIMESTAMPS[endValidation].strftime("%m-%d"), "%m-%d")
    assert (a - b).days >= 0

    df = addMinutes(pvValue)
    df = addMonthOfYear(df)  # , timestamps)
    # the data is normalized with the previously fitted scaler
    scaler = joblib.load(config_pv.MODEL_FILE_SC)
    print(scaler.data_max_)  # debug output: per-feature maxima seen by the scaler
    df = scaler.transform(df)

    x = np.empty(
        (len(df) - config_pv.LOOK_BACK, config_pv.LOOK_BACK, df.shape[1]))
    for i in range(len(df) - config_pv.LOOK_BACK):
        x[i] = df[i:i + config_pv.LOOK_BACK, :]

    model = loadModel(config_pv)
    res = model.predict(x)
    res = invertScaler(res, scaler)

    return res, config_pv.LOOK_BACK, config_pv.OUTPUT_SIZE
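Note: constructTimeStamps, used above and again in the later examples, is a project helper whose implementation is not shown. A minimal sketch, assuming it produces an inclusive list of evenly spaced datetimes between begin and end, could look like this:

from datetime import datetime


def constructTimeStamps(begin, end, stepsize):
    # Hypothetical sketch: inclusive range of timestamps from begin to end,
    # spaced by the timedelta stepsize (as passed in the calls above).
    timestamps = []
    current = begin
    while current <= end:
        timestamps.append(current)
        current += stepsize
    return timestamps


# usage mirroring the snippets: the step size is parsed as a timedelta
step = datetime.strptime("01:00:00", "%H:%M:%S") - datetime.strptime("00:00:00", "%H:%M:%S")
stamps = constructTimeStamps(datetime(2019, 1, 1), datetime(2019, 1, 2), step)
assert len(stamps) == 25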
Example #2
def getPredictedLoadValue(loadsData, timestamps, timedelta):
    config = ForecastConfig()
    loadConfig = ForecastLoadConfig()
    input_data = addMinutes(loadsData)
    input_data = add_day_of_week(input_data)

    config.TIMESTAMPS = constructTimeStamps(
        datetime.strptime(loadConfig.BEGIN, "20%y-%m-%d %H:%M:%S"),
        datetime.strptime(loadConfig.END, "20%y-%m-%d %H:%M:%S"),
        datetime.strptime(loadConfig.STEPSIZE, "%H:%M:%S") -
        datetime.strptime("00:00:00", "%H:%M:%S"),
    )
    _, endValidation = get_split_indexes(config)
    # compare month and day only (the year is dropped)
    a = datetime.strptime(timestamps[0].strftime("%m-%d"), "%m-%d")
    b = datetime.strptime(config.TIMESTAMPS[endValidation].strftime("%m-%d"),
                          "%m-%d")
    assert (a - b).days >= 0

    for load in loadConfig.APPLIANCES:
        appliance_data = getPecanstreetData(
            loadConfig.DATA_FILE,
            loadConfig.TIME_HEADER,
            loadConfig.DATAID,
            load,
            timestamps,
            timedelta,
        )
        input_data = pd.concat([input_data, appliance_data], axis=1)

    scaler = joblib.load(loadConfig.MODEL_FILE_SC)
    input_data = scaler.transform(input_data)

    x = np.empty((
        len(input_data) - loadConfig.LOOK_BACK,
        loadConfig.LOOK_BACK,
        input_data.shape[1],
    ))
    for i in range(len(input_data) - loadConfig.LOOK_BACK):
        x[i] = input_data[i:i + loadConfig.LOOK_BACK, :]

    model = loadModel(loadConfig)
    res = model.predict(x)
    res = invertScaler(res, scaler)
    return res, loadConfig.LOOK_BACK, loadConfig.OUTPUT_SIZE
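invertScaler, used in Examples #1 and #2, is also a project helper. Since the MinMaxScaler is fitted on the full feature matrix while the model predicts only the first (target) column, a plausible sketch inverts just that column's scaling; this is an assumption, not the project's actual code:

import numpy as np


def invertScaler(values, scaler):
    # Hypothetical sketch: undo MinMax scaling for the first (target) column only.
    # MinMaxScaler transforms as scaled = X * scale_ + min_ (per column),
    # so the inverse for column 0 is (scaled - min_[0]) / scale_[0].
    values = np.asarray(values)
    return (values - scaler.min_[0]) / scaler.scale_[0]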
Example #3
def main(argv):
    config = ForecastConfig()
    loadConfig = ForecastLoadConfig()

    train, validation, test, scaler = getNormalizedParts(
        config, loadConfig, config.TIMESTAMPS)

    baseline_train = train[:, 0]
    baseline_validation = validation[:, 0]
    baseline_test = test[:, 0]

    _, end_validation = get_split_indexes(config)

    print("Validation:")
    one_step_persistence_model(baseline_validation)
    mean_baseline_one_step(config, baseline_train, baseline_validation)
    predict_zero_one_day(config, baseline_validation)
    predict_zero_one_step(baseline_validation)

    print("Test:")
    one_step_persistence_model(baseline_test)
    mean_baseline_one_step(config, baseline_train, baseline_test)
    mean_baseline_one_day(config, baseline_train, baseline_test)

    print("Train on test and predict for Test:")
    mean_baseline_one_step(config, baseline_test, baseline_test)
    mean_baseline_one_day(config, baseline_train, baseline_test)
    predict_zero_one_day(config, baseline_test)
    predict_zero_one_step(baseline_test)

    test_x, test_y = buildSet(test, loadConfig.LOOK_BACK,
                              loadConfig.OUTPUT_SIZE)
    model = loadModel(loadConfig)
    test_predict = model.predict(test_x)

    # NOTE: the first branch is forced on here; the OUTPUT_SIZE == 1 elif below is unreachable
    if True:
        plotLSTM_Base_Real(loadConfig, baseline_train, test_predict[24],
                           "mean", test_y[24])
    elif loadConfig.OUTPUT_SIZE == 1:
        plotLSTM_Base_Real(loadConfig, baseline_train, test_predict[:48], "",
                           test_y[:48])
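buildSet, used from Example #3 onward, pairs sliding-window inputs with multi-step targets. A sketch consistent with the manual window loops in Examples #1 and #2; the exact target layout is an assumption:

import numpy as np


def buildSet(data, look_back, output_size):
    # Hypothetical sketch: for each position i, the input is the previous
    # look_back rows (all features) and the target is the next output_size
    # values of the first column.
    x, y = [], []
    for i in range(len(data) - look_back - output_size):
        x.append(data[i:i + look_back, :])
        y.append(data[i + look_back:i + look_back + output_size, 0])
    return np.array(x), np.array(y)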
Example #4
def main(argv):
    config = ForecastConfig()
    pvConfig = ForecastPvConfig(config)

    config.OUTPUT_FOLDER = pvConfig.OUTPUT_FOLDER
    timestamps = constructTimeStamps(
        datetime.strptime(pvConfig.BEGIN, "20%y-%m-%d %H:%M:%S"),
        datetime.strptime(pvConfig.END, "20%y-%m-%d %H:%M:%S"),
        datetime.strptime(pvConfig.STEP_SIZE, "%H:%M:%S") -
        datetime.strptime("00:00:00", "%H:%M:%S"),
    )

    config.TIMESTAMPS = timestamps

    # input data: uncontrollable resource (solar production)
    df = getPecanstreetData(pvConfig.DATA_FILE, pvConfig.TIME_HEADER,
                            pvConfig.DATAID, "solar", timestamps)
    df = addMinutes(df)
    df = addMonthOfYear(df)

    df_train, df_validation, df_test = splitData(config, df)
    print(timestamps[len(df_validation) + len(df_train)])  # first timestamp of the test split
    # the data is normalized with a scaler fitted on the training split only
    scaler = MinMaxScaler()
    scaler.fit(df_train)
    df_train = scaler.transform(df_train)
    df_validation = scaler.transform(df_validation)
    df_test = scaler.transform(df_test)

    X, y = buildSet(df_test, pvConfig.LOOK_BACK, pvConfig.OUTPUT_SIZE)

    # keep only the target (first) column for plotting and the baselines
    df_train = df_train[:, 0]
    df_validation = df_validation[:, 0]
    df_test = df_test[:, 0]

    model = loadModel(pvConfig)
    testPredictY = model.predict(X)

    import matplotlib.pyplot as plt

    plt.plot(df_test[:100])
    plt.show()
    plt.plot(y[0])
    plt.show()

    # plot_baselines(config, df_train, df_test[:96], timestamps[len(df_train):len(df_train) + 96])
    plotLSTM_Base_Real(config, df_train, testPredictY[72], "mean", y[72])
    # plotLSTM_Base_Real(config, df_train, testPredictY[0], "1step", y[0])

    print("Validation:")
    one_step_persistence_model(df_validation)
    print("Test:")
    one_step_persistence_model(df_test)

    print("Validation:")
    mean_baseline_one_day(config, df_train, df_validation)
    print("Test:")
    mean_baseline_one_day(config, df_train, df_test)
    print("Train on test and predict for Test:")
    mean_baseline_one_day(config, df_test, df_test)

    print("Validation:")
    predict_zero_one_day(config, df_validation)
    print("Test:")
    predict_zero_one_day(config, df_test)

    print("Validation:")
    predict_zero_one_step(df_validation)
    print("Test:")
    predict_zero_one_step(df_test)
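The baseline helpers (one_step_persistence_model, mean_baseline_one_step/one_day, predict_zero_*) are not shown either. As an illustration of the simplest one, a one-step persistence baseline predicts each value with its predecessor and reports the error; a minimal sketch, assuming the input is a 1-D NumPy array:

import numpy as np
from sklearn.metrics import mean_squared_error


def one_step_persistence_model(series):
    # Hypothetical sketch: predict each value with the immediately preceding one.
    predictions = series[:-1]
    targets = series[1:]
    mse = mean_squared_error(targets, predictions)
    print("one-step persistence MSE:", mse)
    return mse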
Example #5
def main(argv):
    config = ForecastConfig()
    loadConfig = ForecastLoadConfig()

    if not loadConfig.LOAD_MODEL:
        assert not os.path.isdir(loadConfig.OUTPUT_FOLDER)
        os.makedirs(loadConfig.OUTPUT_FOLDER)

    train_part, validation_part, test_part, scaler = getNormalizedParts(
        config, loadConfig, config.TIMESTAMPS)

    train_x, train_y = buildSet(train_part, loadConfig.LOOK_BACK,
                                loadConfig.OUTPUT_SIZE)
    validation_x, validation_y = buildSet(validation_part,
                                          loadConfig.LOOK_BACK,
                                          loadConfig.OUTPUT_SIZE)
    test_x, test_y = buildSet(test_part, loadConfig.LOOK_BACK,
                              loadConfig.OUTPUT_SIZE)

    end_train, end_validation = get_split_indexes(config)

    if not loadConfig.LOAD_MODEL:
        copyfile("./code/forecast_conf.py",
                 loadConfig.OUTPUT_FOLDER + "forecast_conf.py")
        copyfile(
            "./code/forecast_load_conf.py",
            loadConfig.OUTPUT_FOLDER + "forecast_load_conf.py",
        )
        model = buildModel(loadConfig, train_x.shape)
        history = train(loadConfig, model, train_x, train_y, validation_x,
                        validation_y)
        saveModel(loadConfig, model)
        plotHistory(loadConfig, history)

        validation_timestamps = config.TIMESTAMPS[end_train:end_validation]
        validation_y_timestamps = validation_timestamps[loadConfig.LOOK_BACK:]
        assert (len(validation_y_timestamps) == len(validation_y) +
                loadConfig.OUTPUT_SIZE)
        validation_prediction = model.predict(validation_x)
        validation_mse = mean_squared_error(validation_y,
                                            validation_prediction)
        print("validation mse: ", validation_mse)
        plotPredictionPart(
            loadConfig,
            validation_y[1, :],
            validation_prediction[1, :],
            "1st day of validation set",
            validation_y_timestamps[:loadConfig.OUTPUT_SIZE],
            "paramtuning1",
        )
    else:
        model = loadModel(loadConfig)
        test_prediction = model.predict(test_x)
        test_mse = mean_squared_error(test_y, test_prediction)
        print("test mse: ", test_mse)

        test_timestamps = config.TIMESTAMPS[end_validation:end_validation +
                                            len(test_part)]
        test_y_timestamps = test_timestamps[loadConfig.LOOK_BACK:]
        assert len(test_y_timestamps) == len(test_y) + loadConfig.OUTPUT_SIZE

        plot_multiple_days(config, loadConfig, test_part[:, 0],
                           test_prediction, test_timestamps)

        plotPrediction(
            train_y,
            model.predict(train_x),
            validation_y,
            model.predict(validation_x),
            test_y,
            test_prediction,
            config.TIMESTAMPS,
            loadConfig,
        )

        plotPredictionPart(
            loadConfig,
            test_y[1, :],
            test_prediction[1, :],
            "1st day of test set",
            test_y_timestamps[:loadConfig.OUTPUT_SIZE],
            "paramtuning2",
        )
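get_split_indexes, used in several of these examples, returns the boundaries of the train and validation splits within config.TIMESTAMPS. A sketch under the assumption that the config carries fractional split ratios; the attribute names TRAIN_FRACTION and VALIDATION_FRACTION are hypothetical:

def get_split_indexes(config):
    # Hypothetical sketch: derive split boundaries from fractions of the
    # timestamp range; the real config attributes may differ.
    n = len(config.TIMESTAMPS)
    end_train = int(n * config.TRAIN_FRACTION)
    end_validation = int(n * (config.TRAIN_FRACTION + config.VALIDATION_FRACTION))
    return end_train, end_validation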
Example #6
def forecasting(config_main, config_pv):
    df, timestamps = dataImport(config_main, config_pv)

    config_main.TIMESTAMPS = timestamps

    df_train, df_validation, df_test, scaler = getParts(
        df, config_main, config_pv)

    # from here on we work with NumPy arrays
    trainX, trainY = buildSet(np.array(df_train), config_pv.LOOK_BACK,
                              config_pv.OUTPUT_SIZE)
    validationX, validationY = buildSet(np.array(df_validation),
                                        config_pv.LOOK_BACK,
                                        config_pv.OUTPUT_SIZE)
    testX, testY = buildSet(np.array(df_test), config_pv.LOOK_BACK,
                            config_pv.OUTPUT_SIZE)

    # plotInputDay(timestamps, trainY[:, 0], config_pv)

    if config_pv.LOAD_MODEL:
        model = loadModel(config_pv)
        history = None
    else:
        model, history = buildModelPv(trainX, trainY, validationX, validationY,
                                      config_pv)

    evalModel(model, testX, testY)

    # plotting
    trainPrediction = model.predict(trainX)
    testPrediction = model.predict(testX)
    valPrediction = model.predict(validationX)

    if history is not None:
        plotHistory(config_pv, history)

    # note: the argument order here differs from the plotPrediction call in Example #5
    plotPrediction(
        trainY,
        trainPrediction,
        testY,
        validationY,
        valPrediction,
        testPrediction,
        timestamps,
        config_pv,
    )
    plotPredictionPart(
        config_pv,
        trainY[24],
        trainPrediction[24],
        "1st day of train set",
        timestamps[24:config_pv.TIME_PER_DAY + 24],
        "train",
    )
    plotPredictionPart(
        config_pv,
        validationY[24],
        valPrediction[24],
        "3rd day of validation set",
        timestamps[len(trainX) + 24:len(trainX) + 24 + config_pv.TIME_PER_DAY],
        "validation",
    )
    plotPredictionPart(
        config_pv,
        testY[24],
        testPrediction[24],
        "1st day of test set",
        timestamps[len(trainX) + len(validationX) + 24:len(trainX) + 24 +
                   len(validationX) + config_pv.TIME_PER_DAY],
        "test",
    )
    # plotPredictionPartMult(
    #     config_pv,
    #     testY[0],
    #     testPrediction,
    #     "1st day of test set",
    #     timestamps[len(trainX) + len(validationX): len(trainX) + len(validationX) + config_pv.TIME_PER_DAY],
    #     "test"
    # )

    plotEcart(
        trainY,
        trainPrediction,
        validationY,
        valPrediction,
        testY,
        testPrediction,
        timestamps,
        config_pv,
    )
    # print error metrics
    print("training\tMSE :\t{}".format(
        mean_squared_error(np.array(trainY), np.array(trainPrediction))))
    print("validation\t\tMSE :\t{}".format(
        mean_squared_error(np.array(validationY), np.array(valPrediction))))
    print("testing\t\tMSE :\t{}".format(
        mean_squared_error(np.array(testY), np.array(testPrediction))))

    print("training\tMAE :\t{}".format(
        mean_absolute_error(np.array(trainY), np.array(trainPrediction))))
    print("validation\t\tMAE :\t{}".format(
        mean_absolute_error(np.array(validationY), np.array(valPrediction))))
    print("testing\t\tMAE :\t{}".format(
        mean_absolute_error(np.array(testY), np.array(testPrediction))))

    print("training\tMAPE :\t{} %".format(
        mean_absolute_percentage_error(np.array(trainY), np.array(trainPrediction))))
    print("validation\t\tMAPE :\t{} %".format(
        mean_absolute_percentage_error(np.array(validationY), np.array(valPrediction))))
    print("testing\t\tMAPE :\t{} %".format(
        mean_absolute_percentage_error(np.array(testY), np.array(testPrediction))))
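mean_absolute_percentage_error in Example #6 is either sklearn's implementation (available from 0.24 on, which returns a ratio rather than a percentage) or a local utility. A sketch of the common percentage definition, with zero targets masked to avoid division by zero; an assumption, not necessarily the project's version:

import numpy as np


def mean_absolute_percentage_error(y_true, y_pred):
    # Hypothetical sketch of the usual MAPE definition, in percent.
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    mask = y_true != 0  # skip zero targets to avoid division by zero
    return np.mean(np.abs((y_true[mask] - y_pred[mask]) / y_true[mask])) * 100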