Example #1
    experiment_name = 'ptf_dcc_MLP_1010'
    dataset_name = 'PTF'
    baseline_model = 'dcc'
    hidden_layer_dim = 32
    batch_size = 16
    total_epochs = 30000
    plot_points = 30

    # Data from .csv
    dsf = DatasetFactory.get(name=dataset_name)
    # Train test split
    train_time = dsf.split_train_test(sequential=True, test_size=0.3)
    # Feature computation
    dsf.withColumn('y', *T.standardize(*dsf['y']))
    dsf.withColumn('crosses', *T.elements_cross(*dsf['y']))
    dsf.withColumn('x', *T.lagify(*dsf['crosses'], lag=6, collapse=True))
    # The baseline model
    baseline_estimator = {
        'dcc': T.dcc,
        'lw': T.LedoitWolf,
    }[baseline_model]
    baseline_model_line = f'{baseline_model}_line'
    dsf.withColumn(baseline_model, *baseline_estimator(*dsf['y']))
    states_line = [
        M.mu_vcv_linearize(**states) for states in dsf[baseline_model]
    ]
    dsf.withColumn(baseline_model_line, *states_line)
    # Drop rows with NA (empty features)
    dsf.dropna()

    models = {
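
The *_line column above is built by applying M.mu_vcv_linearize to per-period states that carry a mean vector and a covariance matrix (the 'mu'/'vcv' keys also seen in Example #2). As a rough illustration only, a hypothetical linearization might flatten the state into a single feature vector, e.g. the mean entries followed by the lower triangle of the covariance; the helper below is a sketch under that assumption, not the actual M implementation.

import numpy as np

def mu_vcv_linearize_sketch(mu, vcv):
    # Hypothetical: concatenate the mean with the lower triangle of the
    # covariance matrix, yielding one flat vector per period.
    vcv = np.asarray(vcv)
    tril = vcv[np.tril_indices(vcv.shape[0])]
    return np.concatenate([np.asarray(mu).ravel(), tril])

# A 3-series state becomes a 3 + 6 = 9 element vector.
print(mu_vcv_linearize_sketch(np.zeros(3), np.eye(3)).shape)  # (9,)
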
Example #2

if __name__ == '__main__':
    from experiment_econometrics import DatasetFactory
    import timeseries_transformations as T
    # tf / tfp are needed below for the baseline density evaluation
    import tensorflow as tf
    import tensorflow_probability as tfp

    x_col = 'x'
    y_col = 'y'
    states0_col = 'ledoitWolf_static'
    dsf = DatasetFactory.get(name='FRED_raw')
    # Train test split
    train_time = dsf.split_train_test(sequential=True, test_size=0.3)

    # Feature computation
    dsf.withColumn('y', *T.standardize(*dsf['y']))
    dsf.withColumn('x', *T.lagify(*dsf['y'], lag=12, collapse=True))
    dsf.withColumn('ledoitWolf_static', *T.staticLedoitWolf(*dsf['y']))
    dsf.dropna()
    x, y, states0 = dsf.select([x_col, y_col, states0_col],
                               train=True,
                               test=False)
    x_test, y_test, states0_test = dsf.select([x_col, y_col, states0_col],
                                              train=False,
                                              test=True)
    model = HDE_mu_vcv(dsf, x_col, y_col, states0_col, learning_rate=1e-1)
    llkls = []
    for i in range(10):
        print(i)
        pdf = tfp.distributions.MultivariateNormalFullCovariance(
            states0['mu'][:, tf.newaxis, :],
            states0['vcv'][:, tf.newaxis, :, :])
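
For reference, a self-contained sketch of how a log-likelihood under that multivariate normal baseline can be evaluated with TensorFlow Probability; the shapes below are purely illustrative and not taken from the FRED data.

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

# Illustrative shapes: 5 time steps, 3 series.
mu = np.zeros((5, 3), dtype=np.float32)
vcv = np.stack([np.eye(3, dtype=np.float32)] * 5)
y_obs = np.random.randn(5, 3).astype(np.float32)

pdf = tfp.distributions.MultivariateNormalFullCovariance(
    loc=mu, covariance_matrix=vcv)
# One log-density per time step, averaged into a scalar score.
avg_llkl = tf.reduce_mean(pdf.log_prob(y_obs))
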
Example #3
if __name__ == '__main__':
    data_config = {
        'name': 'FRED'
    }
    train_test_config = {
        'sequential': True,
        'test_size': 0.3
    }
    dsf = DatasetFactory.get(**data_config)
    train_time = dsf.split_train_test(**train_test_config)

    # Test all functions
    # dsf.withColumn('dcc', *T.dcc(*dsf['y']))
    dsf.withColumn('crosses', *T.elements_cross(*dsf['y']))
    dsf.withColumn('x_timesteps', *T.lagify(*dsf['y'], collapse=False))
    dsf.withColumn('x', *T.lagify(*dsf['y'], collapse=True))
    dsf.withColumn('ledoitWolf_static', *T.staticLedoitWolf(*dsf['y']))

    dsf.dropna()

    # Test the concrete model wrappers

    # Bayesian MLP
    m = CholeskyMLPBayesian(dsf, 'x', 'y', 'ledoitWolf_static')
    m.fit(dsf, 10, 8)
    states, theta, llkl, p0, log_p = m.density_forecast(dsf)
    m.density_forecast_multi_particles(dsf)

    # DCC
    m = DCC_HVI('x_timesteps', 'y', n=dsf.df.shape[1])
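
The x and x_timesteps columns in this example come from the same lagify transform with collapse toggled. A plausible reading, sketched below with a hypothetical helper (not the actual timeseries_transformations code), is that collapse=True flattens each lag window into one feature vector while collapse=False keeps the (lag, n_series) time-step layout that a recurrent/DCC-style model expects.

import numpy as np

def lagify_sketch(y, lag=3, collapse=True):
    # Hypothetical lag-window builder: one sample for each t >= lag.
    windows = [y[t - lag:t] for t in range(lag, len(y))]  # each (lag, n)
    x = np.stack(windows)                                 # (T - lag, lag, n)
    return x.reshape(len(x), -1) if collapse else x       # flatten if collapsed

y_demo = np.random.randn(20, 4)
print(lagify_sketch(y_demo, lag=3, collapse=True).shape)   # (17, 12)
print(lagify_sketch(y_demo, lag=3, collapse=False).shape)  # (17, 3, 4)
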
Example #4
    dataset_name = 'FRED'
    baseline_model = 'dcc'
    hidden_layer_dim = 32
    batch_size = 16
    total_epochs = 30000
    plot_points = 30

    # Data from .csv
    dsf = DatasetFactory.get(name=dataset_name)
    # Train test split
    train_time = dsf.split_train_test(sequential=True, test_size=0.3)
    # Feature computation
    dsf.withColumn('y', *T.standardize(*dsf['y']))
    dsf.withColumn('crosses', *T.elements_cross(*dsf['y']))
    dsf.withColumn('x_timesteps',
                   *T.lagify(*dsf['crosses'], lag=6, collapse=False))
    # The baseline model
    baseline_estimator = {
        'dcc': T.dcc,
        'lw': T.LedoitWolf,
    }[baseline_model]
    baseline_model_line = f'{baseline_model}_line'
    dsf.withColumn(baseline_model, *baseline_estimator(*dsf['y']))
    states_line = [
        M.mu_vcv_linearize(**states) for states in dsf[baseline_model]
    ]
    dsf.withColumn(baseline_model_line, *states_line)
    # Drop rows with NA (empty features)
    dsf.dropna()

    models = {
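
All four examples share the same split_train_test(sequential=True, test_size=0.3) call, which presumably keeps the time ordering and holds out the final 30% of rows as the test period. A minimal sketch of that kind of split, assuming a pandas DataFrame and not the actual DatasetFactory implementation:

import numpy as np
import pandas as pd

def sequential_split_sketch(df, test_size=0.3):
    # Hypothetical time-ordered split: the first (1 - test_size) share of
    # rows is the training set, the remaining tail is the test set.
    cut = int(np.floor(len(df) * (1.0 - test_size)))
    return df.iloc[:cut], df.iloc[cut:]

df_demo = pd.DataFrame({'y': range(10)})
train_df, test_df = sequential_split_sketch(df_demo)
print(len(train_df), len(test_df))  # 7 3
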