Code example #1
File: main.py — Project: afcarl/sandbox-1
def main(args):
    """Train a forecaster on the hourly origin/destination dataset, then save
    its forecast, the held-out truth window, and a normalized log probability
    to ``args.forecast_filename``.

    Args:
        args: parsed command-line namespace; reads ``seed``, ``tiny``,
            ``truncate``, ``batch_size``, ``forecast_hours``, ``num_samples``
            and ``forecast_filename``.
    """
    # BUG FIX: the original compared version *strings* ("0.10.0" < "0.4.1"
    # lexicographically), which wrongly rejects newer releases. Compare the
    # numeric components instead.
    import re
    version = tuple(int(x) for x in re.findall(r"\d+", pyro.__version__)[:3])
    assert version >= (0, 4, 1), \
        "pyro>=0.4.1 required, found {}".format(pyro.__version__)
    pyro.enable_validation(__debug__)  # extra checks unless run with -O
    pyro.set_rng_seed(args.seed)
    dataset = load_hourly_od(args)
    if args.tiny:
        # Shrink to the first `tiny` stations for quick debugging runs.
        dataset["stations"] = dataset["stations"][:args.tiny]
        dataset["counts"] = dataset["counts"][:, :args.tiny, :args.tiny]
    forecaster = train(args, dataset)
    if forecaster is None:
        # train() may return None — presumably when it decides there is
        # nothing to forecast; TODO confirm against train().
        return

    # Condition on up to `batch_size` hours ending at the truncation point,
    # then forecast the following `forecast_hours`.
    window_begin = max(0, args.truncate - args.batch_size)
    window_end = args.truncate
    truth = dataset['counts'][window_end:window_end + args.forecast_hours]
    forecast = forecaster(window_begin,
                          window_end,
                          args.forecast_hours,
                          num_samples=args.num_samples)
    assert forecast.shape == (args.num_samples, ) + truth.shape
    # Normalize by element count so log_prob is comparable across window sizes.
    log_prob = forecaster.log_prob(window_begin, window_end,
                                   truth) / truth.numel()
    torch.save({
        'forecast': forecast,
        'truth': truth,
        'log_prob': log_prob,
    }, args.forecast_filename)
Code example #2
File: main.py — Project: w4nderlust/sandbox
def main(args):
    """Smoke-test the forecaster: train on hourly data, then forecast the 24
    hours following the first week, with and without a sample dimension.

    Args:
        args: parsed command-line namespace; reads ``seed``.
    Returns:
        The multi-sample forecast; the assertions below pin its shape to
        ``(num_samples, 24) + counts.shape[-2:]``.
    """
    # BUG FIX: the original compared version *strings* ("0.10.0" < "0.4.1"
    # lexicographically), which wrongly rejects newer releases. Compare the
    # numeric components instead.
    import re
    version = tuple(int(x) for x in re.findall(r"\d+", pyro.__version__)[:3])
    assert version >= (0, 4, 1), \
        "pyro>=0.4.1 required, found {}".format(pyro.__version__)
    pyro.enable_validation(__debug__)  # extra checks unless run with -O
    pyro.set_rng_seed(args.seed)
    dataset = load_hourly_od(args)
    forecaster = train(args, dataset)

    num_samples = 10
    # Single-sample call: condition on hours [0, 24*7), forecast 24 hours.
    forecast = forecaster(0, 24 * 7, 24)
    assert forecast.shape == (24, ) + dataset["counts"].shape[-2:]
    # num_samples adds a leading sample dimension.
    forecast = forecaster(0, 24 * 7, 24, num_samples=num_samples)
    assert forecast.shape == (num_samples, 24) + dataset["counts"].shape[-2:]
    return forecast
Code example #3
def main(argv):
    """Train (or load) the load-forecasting model and report/plot its error.

    With ``loadConfig.LOAD_MODEL`` False: create a fresh output folder,
    snapshot the config files into it, build and train a model, and plot
    validation results.  With ``LOAD_MODEL`` True: load a saved model and
    plot test results instead.

    Note: ``argv`` is accepted for a script-style entry point but is never
    read inside this function.
    """
    config = ForecastConfig()
    loadConfig = ForecastLoadConfig()

    # Fresh training run: refuse to overwrite results from a previous run.
    if not loadConfig.LOAD_MODEL:
        assert not os.path.isdir(loadConfig.OUTPUT_FOLDER)
        os.makedirs(loadConfig.OUTPUT_FOLDER)

    # Normalized train/validation/test splits plus the fitted scaler
    # (the scaler is unused below but is part of getNormalizedParts's return).
    train_part, validation_part, test_part, scaler = getNormalizedParts(
        config, loadConfig, config.TIMESTAMPS)

    # Window each series into (X, y) pairs — presumably LOOK_BACK input steps
    # and OUTPUT_SIZE target steps per sample; confirm against buildSet.
    train_x, train_y = buildSet(train_part, loadConfig.LOOK_BACK,
                                loadConfig.OUTPUT_SIZE)
    validation_x, validation_y = buildSet(validation_part,
                                          loadConfig.LOOK_BACK,
                                          loadConfig.OUTPUT_SIZE)
    test_x, test_y = buildSet(test_part, loadConfig.LOOK_BACK,
                              loadConfig.OUTPUT_SIZE)

    # Indexes into config.TIMESTAMPS where the train and validation splits end.
    end_train, end_validation = get_split_indexes(config)

    if not loadConfig.LOAD_MODEL:
        # Snapshot the exact configuration used for this run alongside its
        # outputs, for reproducibility.
        # NOTE(review): OUTPUT_FOLDER is concatenated directly with the file
        # name — assumes it ends with a path separator; confirm.
        copyfile("./code/forecast_conf.py",
                 loadConfig.OUTPUT_FOLDER + "forecast_conf.py")
        copyfile(
            "./code/forecast_load_conf.py",
            loadConfig.OUTPUT_FOLDER + "forecast_load_conf.py",
        )
        model = buildModel(loadConfig, train_x.shape)
        history = train(loadConfig, model, train_x, train_y, validation_x,
                        validation_y)
        saveModel(loadConfig, model)
        plotHistory(loadConfig, history)

        # Timestamps aligned with the validation targets: the first LOOK_BACK
        # entries are consumed as model input, so drop them.
        validation_timestamps = config.TIMESTAMPS[end_train:end_validation]
        validation_y_timestamps = validation_timestamps[loadConfig.LOOK_BACK:]
        assert (len(validation_y_timestamps) == len(validation_y) +
                loadConfig.OUTPUT_SIZE)
        validation_prediction = model.predict(validation_x)
        validation_mse = mean_squared_error(validation_y,
                                            validation_prediction)
        print("validation mse: ", validation_mse)
        # Plot a single window of the validation set.
        # NOTE(review): index 1 is the *second* window while the label says
        # "1st day" — confirm the intended index.
        plotPredictionPart(
            loadConfig,
            validation_y[1, :],
            validation_prediction[1, :],
            "1st day of validation set",
            validation_y_timestamps[:loadConfig.OUTPUT_SIZE],
            "paramtuning1",
        )
    else:
        # Evaluation path: reuse a previously saved model on the test split.
        model = loadModel(loadConfig)
        test_prediction = model.predict(test_x)
        test_mse = mean_squared_error(test_y, test_prediction)
        print("test mse: ", test_mse)

        # Timestamps covering the test split, aligned the same way as above.
        test_timestamps = config.TIMESTAMPS[end_validation:end_validation +
                                            len(test_part)]
        test_y_timestamps = test_timestamps[loadConfig.LOOK_BACK:]
        assert len(test_y_timestamps) == len(test_y) + loadConfig.OUTPUT_SIZE

        plot_multiple_days(config, loadConfig, test_part[:, 0],
                           test_prediction, test_timestamps)

        # Overview plot across all three splits.
        plotPrediction(
            train_y,
            model.predict(train_x),
            validation_y,
            model.predict(validation_x),
            test_y,
            test_prediction,
            config.TIMESTAMPS,
            loadConfig,
        )

        # Single-window plot of the test set (same index-1 caveat as above).
        plotPredictionPart(
            loadConfig,
            test_y[1, :],
            test_prediction[1, :],
            "1st day of test set",
            test_y_timestamps[:loadConfig.OUTPUT_SIZE],
            "paramtuning2",
        )
Code example #4
def buildModelPv(trainX, trainY, valX, valY, config_pv):
    """Build, train, and persist a PV forecasting model.

    Returns:
        A ``(model, history)`` tuple: the trained model and the history
        object produced by the project's ``train`` helper.
    """
    pv_model = buildModel(config_pv, trainX.shape)
    training_history = train(config_pv, pv_model, trainX, trainY, valX, valY)
    # Persist before returning so a saved copy exists even if the caller
    # discards the result.
    saveModel(config_pv, pv_model)
    return pv_model, training_history