Example #1
    def __init__(self,
                 dsf,
                 x_col,
                 y_col,
                 states0_col,
                 layers=1,
                 init_scale=1e-2,
                 learning_rate=1e-4,
                 beta_1=0.1):
        # Select the training slices of the features, targets and initial states.
        x, y, states0 = dsf.select([x_col, y_col, states0_col],
                                   train=True,
                                   test=False)
        n = y.shape[1]
        self.n = n
        self.x_col = x_col
        self.y_col = y_col
        self.states0_col = states0_col
        # A is centered at the identity; init_scale sets the initial spread
        # of A and b.
        self.center_A = np.eye(n, dtype=np.float32)[np.newaxis]
        self.scale_A = init_scale
        self.scale_b = init_scale
        # One MLP per layer, each emitting n*n + n outputs
        # (an n-by-n matrix plus an n-vector).
        self.x2theta = [mlp(n * n + n) for _ in range(layers)]
        self.log_prior = prior_standard_gaussian
        self.optim = O.optimizer_adam(learning_rate=learning_rate,
                                      beta_1=beta_1)
        self.y_memory = y
        self.x_memory = x
        self.states_memory = states0
        self.n_particles_forecast = 1
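
A minimal usage sketch for this constructor; the wrapping class name (TVPAffineForecaster), the dataset selector dsf, and the column names are all assumptions for illustration:

    # Hypothetical instantiation; dsf must expose select(cols, train=, test=).
    forecaster = TVPAffineForecaster(dsf,
                                     x_col="features",
                                     y_col="returns",
                                     states0_col="states0",
                                     layers=2,
                                     learning_rate=1e-3)
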
Example #2
    def __init__(self, x_col, y_col, n, p=1, q=1,
                 learning_rate=1e-2, beta_1=0.9, **sampler_config):
        # Merge user overrides into the default HVI sampler configuration.
        sampler_config = asymmetric_mix_dict(hvi_config, sampler_config)
        # DCC(p, q) correlation model over n series.
        self.model = DCC(n=n, p=p, q=q)
        self.sampler_global = hierarchical_sampler(self.model.dim_all,
                                                   **sampler_config)
        self.inferer = O.optimizer_adam(learning_rate=learning_rate,
                                        beta_1=beta_1)
        self.x_col = x_col
        self.y_col = y_col
        self.p = p
        self.q = q
        self.n_particles_forecast = 200
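
A sketch of how this DCC wrapper might be constructed; the class name DCCForecaster and the column names are hypothetical, and any extra keyword arguments are merged into the hierarchical sampler configuration:

    # Hypothetical; unknown kwargs are forwarded to the HVI sampler config.
    forecaster = DCCForecaster("features", "returns", n=5, p=1, q=1,
                               learning_rate=1e-3)
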
Example #3
    def __init__(self, dsf, x_col, y_col, states0_col,
                 init_scale=1e-2, learning_rate=1e-4, beta_1=0.1):
        x, y, states0 = dsf.select([x_col, y_col, states0_col],
                                   train=True, test=False)
        n = y.shape[1]
        # d packs a Cholesky factor plus a mean vector: n(n+1)/2 + n entries.
        d = int(n * (n + 1) / 2 + n)
        model = G.gaussian_timeseries_centered_model(states0)
        shaper = S.transformer_mlp_Bayesian(example=x, out_dim=d,
                                            init_scale=init_scale)
        inferer = O.optimizer_adam(learning_rate=learning_rate, beta_1=beta_1)
        sampler_global = naive_sampler_global(model.global_dim)
        self.n_particles_forecast = 200
        WrapperAbstract.__init__(self, model, shaper, inferer,
                                 x_col, y_col, states0_col, sampler_global)
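
The state dimension d counts the n(n+1)/2 entries of a lower-triangular Cholesky factor plus the n entries of a mean vector; for n = 3 that is 6 + 3 = 9. A quick self-contained check in plain NumPy:

    import numpy as np

    n = 3
    d = int(n * (n + 1) / 2 + n)                # 9
    assert d == np.tril_indices(n)[0].size + n  # 6 triangular entries + 3 means
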
Example #4
    def __init__(self,
                 dsf,
                 x_col,
                 y_col,
                 states0_line_col,
                 recurrent_dim=5,
                 mlp_post_lstm_layers_dim=[10, 10],
                 empirical_prior=True,
                 gaussian_posterior=False,
                 init_scale=1e-2,
                 learning_rate=1e-4,
                 beta_1=0.1):
        x, y, states0_line = dsf.select([x_col, y_col, states0_line_col],
                                        train=True,
                                        test=False)
        n = y.shape[1]
        d = states0_line.shape[1]
        assert d == int(n * (n + 1) / 2 + n)

        # Creates the networks for the estimates:
        lstm = S.LSTM(x.shape[1],
                      recurrent_dim,
                      mlp_post_lstm_layers_dim,
                      d,
                      gaussian_posterior=gaussian_posterior,
                      init_scale=init_scale)
        shaper = lstm

        optimizer = O.optimizer_adam(learning_rate=learning_rate,
                                     beta_1=beta_1)

        if empirical_prior:
            prior = M.empirical_tvp_prior(states0_line, scale=3.0)
        else:
            prior = M.no_prior()

        self.shaper = shaper
        if gaussian_posterior:
            n_particles_forecast = 300
            jacobian_penalty = True
        else:
            n_particles_forecast = 1
            jacobian_penalty = False
        AbstractCholeskyForecaster.__init__(
            self,
            shaper,
            prior,
            optimizer,
            x_col,
            y_col,
            states0_line_col,
            jacobian_penalty=jacobian_penalty,
            n_particles_forecast=n_particles_forecast)
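
A hedged instantiation sketch for this LSTM variant; the class name LSTMCholeskyForecaster and the column names are assumptions:

    # Hypothetical; gaussian_posterior=True switches on particle forecasting
    # (300 particles) and the Jacobian penalty.
    forecaster = LSTMCholeskyForecaster(dsf,
                                        x_col="features",
                                        y_col="returns",
                                        states0_line_col="chol_line",
                                        recurrent_dim=8,
                                        gaussian_posterior=True)
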
Example #5
    def __init__(self,
                 dsf,
                 x_col,
                 y_col,
                 states0_line_col,
                 hidden_layers_dim=[10, 10],
                 gaussian_posterior=False,
                 empirical_prior=True,
                 init_scale=1e-2,
                 learning_rate=1e-4,
                 beta_1=0.1):
        x, y, states0_line = dsf.select([x_col, y_col, states0_line_col],
                                        train=True,
                                        test=False)
        n = y.shape[1]
        d = states0_line.shape[1]
        assert d == int(n * (n + 1) / 2 + n)

        # Creates the network for the estimates
        if gaussian_posterior:
            linear_projector = S.LinearProjectorGaussianPosterior
            n_particles_forecast = 300
            jacobian_penalty = True
        else:
            linear_projector = S.LinearProjector
            n_particles_forecast = 1
            jacobian_penalty = False
        mlp, _ = S.MLP(x.shape[1],
                       hidden_layers_dim,
                       states0_line.shape[1],
                       init_scale=init_scale,
                       linear_projector=linear_projector)
        optimizer = O.optimizer_adam(learning_rate=learning_rate,
                                     beta_1=beta_1)
        if empirical_prior:
            prior = M.empirical_tvp_prior(states0_line, scale=3.0)
        else:
            prior = M.no_prior()

        # Build the whole learner
        self.shaper = mlp
        AbstractCholeskyForecaster.__init__(
            self,
            mlp,
            prior,
            optimizer,
            x_col,
            y_col,
            states0_line_col,
            jacobian_penalty=jacobian_penalty,
            n_particles_forecast=n_particles_forecast)
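
The MLP variant follows the same pattern; a sketch with a hypothetical class name (MLPCholeskyForecaster) and illustrative columns:

    # Hypothetical; the defaults give a deterministic point forecaster
    # (gaussian_posterior=False: one particle, no Jacobian penalty).
    forecaster = MLPCholeskyForecaster(dsf,
                                       x_col="features",
                                       y_col="returns",
                                       states0_line_col="chol_line",
                                       hidden_layers_dim=[20, 20])
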
Example #6
    def __init__(self,
                 dsf,
                 y_col,
                 states0_line_col,
                 hvi_layers_num=3,
                 empirical_prior=True,
                 init_scale=1e-2,
                 learning_rate=1e-4,
                 beta_1=0.1):
        y, states0_line = dsf.select([y_col, states0_line_col],
                                     train=True,
                                     test=False)
        n = y.shape[1]
        d = states0_line.shape[1]
        assert d == int(n * (n + 1) / 2 + n)

        # Creates the networks for the estimates:
        sampler = hierarchical_sampler(d,
                                       layers=hvi_layers_num,
                                       init_scale=init_scale)

        def shaper(x, regularization_penalty=0.0, variables=None):
            # A mutable default ([]) would be shared across calls and
            # accumulate variables, so use None and build a fresh list.
            variables = [] if variables is None else variables
            y, logJ, v = sampler.sample(1)
            regularization_penalty -= logJ
            variables = variables + v
            return y, regularization_penalty, variables

        optimizer = O.optimizer_adam(learning_rate=learning_rate,
                                     beta_1=beta_1)

        if empirical_prior:
            prior = M.empirical_tvp_prior(states0_line, scale=3.0)
        else:
            prior = M.no_prior()

        # Stochastic shaper: forecast with particles and penalize the Jacobian.
        n_particles_forecast = 100
        jacobian_penalty = True

        AbstractCholeskyForecaster.__init__(
            self,
            shaper,
            prior,
            optimizer,
            y_col,  # no exogenous features here, so y_col stands in for x_col
            y_col,
            states0_line_col,
            jacobian_penalty=jacobian_penalty,
            n_particles_forecast=n_particles_forecast)
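
The shaper here ignores its input and simply draws from the hierarchical sampler. The same closure pattern, with the safe None default, can be written on its own; a sketch with illustrative names:

    def make_shaper(sampler):
        # Build a closure matching the shaper(x, penalty, variables) contract;
        # x is ignored because the sample does not depend on features.
        def shaper(x, regularization_penalty=0.0, variables=None):
            variables = [] if variables is None else list(variables)
            y, logJ, v = sampler.sample(1)
            return y, regularization_penalty - logJ, variables + v
        return shaper
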
Example #7
    def __init__(self,
                 dsf,
                 x_col,
                 y_col,
                 states0_line_col,
                 encoder_layers_dim=[10, 10],
                 encode_dim=5,
                 decoder_layers_dim=[10, 10],
                 forecaster_layers_dim=[10, 10],
                 variational_reparametrization=False,
                 empirical_prior=True,
                 PCA_center=False,
                 init_scale=1e-2,
                 learning_rate=1e-4,
                 beta_1=0.1):
        x, y, states0_line = dsf.select([x_col, y_col, states0_line_col],
                                        train=True,
                                        test=False)
        n = y.shape[1]
        d = states0_line.shape[1]
        assert d == int(n * (n + 1) / 2 + n)

        # Creates the networks for the estimates:
        # 1- The Autoencoder
        if variational_reparametrization:
            auto_encoder_class = S.VariationalAutoEncoder
            linear_projector = S.LinearProjectorGaussianPosterior
            n_particles_forecast = 300
            jacobian_penalty = True
        else:
            auto_encoder_class = S.AutoEncoder
            linear_projector = S.LinearProjector
            n_particles_forecast = 1
            jacobian_penalty = False
        if PCA_center:
            # Center the autoencoder on a PCA fit of the inputs
            # (PCA.fit takes only X; its y argument is ignored).
            pca = PCA(n_components=encode_dim)
            pca.fit(x)
            encoder_center = pca.transform
            decoder_center = pca.inverse_transform
        else:
            encoder_center = None
            decoder_center = None
        AE = auto_encoder_class(x.shape[1],
                                encoder_layers_dim,
                                encode_dim,
                                decoder_layers_dim,
                                encoder_center=encoder_center,
                                decoder_center=decoder_center)
        # 2- The forecaster
        mlp, _ = S.MLP(encode_dim,
                       forecaster_layers_dim,
                       states0_line.shape[1],
                       init_scale=init_scale,
                       linear_projector=linear_projector)

        optimizer = O.optimizer_adam(learning_rate=learning_rate,
                                     beta_1=beta_1)
        if empirical_prior:
            prior = M.empirical_tvp_prior(states0_line, scale=3.0)
        else:
            prior = M.no_prior()
        # 3- Composition of both
        shaper = S.compose(AE, mlp)
        self.shaper = shaper

        AbstractCholeskyForecaster.__init__(
            self,
            shaper,
            prior,
            optimizer,
            x_col,
            y_col,
            states0_line_col,
            jacobian_penalty=jacobian_penalty,
            n_particles_forecast=n_particles_forecast)
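
A small, runnable sketch of the PCA centering used above, assuming PCA is sklearn.decomposition.PCA; the data here is synthetic:

    import numpy as np
    from sklearn.decomposition import PCA

    x = np.random.randn(100, 12).astype(np.float32)
    pca = PCA(n_components=5)
    pca.fit(x)
    z = pca.transform(x)              # encoder-style projection: (100, 5)
    x_hat = pca.inverse_transform(z)  # decoder-style reconstruction: (100, 12)
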