Example #1
    def constrain_stationary_multivariate(unconstrained,
                                          variance,
                                          transform_variance=False,
                                          prefix=None):

        use_list = isinstance(unconstrained, list)
        if use_list:
            unconstrained = np.concatenate(unconstrained, axis=1)

        k_endog, order = unconstrained.shape
        order //= k_endog

        if order < 1:
            raise ValueError('Must have order at least 1')
        if k_endog < 1:
            raise ValueError('Must have at least 1 endogenous variable')

        if prefix is None:
            prefix, dtype, _ = find_best_blas_type([unconstrained, variance])
        dtype = prefix_dtype_map[prefix]

        unconstrained = np.asfortranarray(unconstrained, dtype=dtype)
        variance = np.asfortranarray(variance, dtype=dtype)

        # Step 1: convert from arbitrary matrices to those with singular values
        # less than one.
        # sv_constrained = _constrain_sv_less_than_one(unconstrained, order,
        #                                              k_endog, prefix)
        sv_constrained = prefix_sv_map[prefix](unconstrained, order, k_endog)

        # Step 2: convert matrices from our "partial autocorrelation matrix"
        # space (matrices with singular values less than one) to the space of
        # stationary coefficient matrices
        constrained, variance = prefix_pacf_map[prefix](sv_constrained,
                                                        variance,
                                                        transform_variance,
                                                        order, k_endog)

        constrained = np.array(constrained, dtype=dtype)
        variance = np.array(variance, dtype=dtype)

        if use_list:
            constrained = [
                constrained[:k_endog, i * k_endog:(i + 1) * k_endog]
                for i in range(order)
            ]

        return constrained, variance
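
For context, here is a minimal usage sketch of the transform defined above. It assumes the public wrapper `statsmodels.tsa.statespace.tools.constrain_stationary_multivariate` exposes the same signature as this snippet; treat the exact import path as an assumption.

import numpy as np
from statsmodels.tsa.statespace import tools  # assumed location of the public wrapper

# One unconstrained 2x2 coefficient matrix (a VAR(1)) plus an error covariance.
unconstrained = [np.array([[1.5, 0.2],
                           [0.1, 1.2]])]
error_variance = np.eye(2)

# Returns coefficient matrices constrained to define a stationary VAR(1),
# along with the (optionally transformed) error variance.
constrained, variance = tools.constrain_stationary_multivariate(
    unconstrained, error_variance)
print(constrained[0])
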
Example #2
    def init_filter(self):
        # Use the appropriate Statespace model
        prefix = find_best_blas_type((self.obs, ))
        cls = prefix_statespace_map[prefix[0]]

        # Instantiate the statespace model
        self.model = cls(self.obs, self.design, self.obs_intercept,
                         self.obs_cov, self.transition, self.state_intercept,
                         self.selection, self.state_cov)
        self.model.initialize_known(self.initial_state, self.initial_state_cov)

        # Initialize the appropriate Kalman filter
        cls = prefix_kalman_filter_map[prefix[0]]
        self.filter = cls(self.model,
                          conserve_memory=self.conserve_memory,
                          loglikelihood_burn=self.loglikelihood_burn)
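
The pattern in this `init_filter` snippet (and the similar ones below) is: let `find_best_blas_type` pick a BLAS prefix from the input arrays, then use that prefix to look up a matching dtype and Cython class. A self-contained sketch of the dispatch idea follows; the `prefix_dtype_map` dict here is written inline for illustration, mirroring (but not importing) the statsmodels object of the same name.

import numpy as np
from scipy.linalg.blas import find_best_blas_type

# Illustrative prefix -> dtype mapping; the snippets on this page import a
# similar map (prefix_dtype_map) from statsmodels instead of defining it inline.
prefix_dtype_map = {'s': np.float32, 'd': np.float64,
                    'c': np.complex64, 'z': np.complex128}

obs = np.zeros((1, 10), dtype=np.float64)
prefix, dtype, prefer_fortran = find_best_blas_type((obs,))

print(prefix)                    # 'd' for double-precision input
print(prefix_dtype_map[prefix])  # <class 'numpy.float64'>
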
Example #3
def arma_loglikeobs(endog,
                    ar_params=None,
                    ma_params=None,
                    sigma2=1,
                    prefix=None):
    """
    Compute loglikelihood for each observation assuming an ARMA process

    Parameters
    ----------
    endog : ndarray
        The observed time-series process.
    ar_params : ndarray, optional
        Autoregressive parameters.
    ma_params : ndarray, optional
        Moving average parameters.
    sigma2 : ndarray, optional
        The ARMA innovation variance. Default is 1.
    prefix : str, optional
        The BLAS prefix associated with the datatype. Default is to find the
        best datatype based on given input. This argument is typically only
        used internally.

    Returns
    -------
    loglikeobs : array of numeric
        Array of loglikelihood values for each observation.

    """
    endog = np.array(endog)
    ar_params = np.atleast_1d([] if ar_params is None else ar_params)
    ma_params = np.atleast_1d([] if ma_params is None else ma_params)

    if prefix is None:
        prefix, dtype, _ = find_best_blas_type(
            [endog, ar_params, ma_params,
             np.array(sigma2)])
    dtype = prefix_dtype_map[prefix]

    endog = np.ascontiguousarray(endog, dtype=dtype)
    ar_params = np.asfortranarray(ar_params, dtype=dtype)
    ma_params = np.asfortranarray(ma_params, dtype=dtype)
    sigma2 = dtype(sigma2).item()

    func = getattr(_arma_innovations, prefix + 'arma_loglikeobs_fast')
    return func(endog, ar_params, ma_params, sigma2)
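
As a quick illustration of the API documented above, the following sketch calls the function on a simulated AR(1) series. It assumes this is the `arma_loglikeobs` shipped in `statsmodels.tsa.innovations.arma_innovations`; the import path is an assumption based on the `_arma_innovations` extension module used here.

import numpy as np
from statsmodels.tsa.innovations.arma_innovations import arma_loglikeobs  # assumed path

np.random.seed(0)

# Simulate a short AR(1) series with coefficient 0.5 and unit innovation variance.
nobs = 100
x = np.zeros(nobs)
eps = np.random.randn(nobs)
for t in range(1, nobs):
    x[t] = 0.5 * x[t - 1] + eps[t]

llobs = np.asarray(arma_loglikeobs(x, ar_params=[0.5], sigma2=1.0))
print(llobs.shape)  # (100,): one loglikelihood value per observation
print(llobs.sum())  # joint loglikelihood of the sample
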
Example #4
    def init_filter(self):
        # Use the appropriate Statespace model
        prefix = find_best_blas_type((self.obs,))
        cls = prefix_statespace_map[prefix[0]]

        # Instantiate the statespace model
        self.model = cls(
            self.obs, self.design, self.obs_intercept, self.obs_cov,
            self.transition, self.state_intercept, self.selection,
            self.state_cov
        )
        self.model.initialize_known(self.initial_state, self.initial_state_cov)

        # Initialize the appropriate Kalman filter
        cls = prefix_kalman_filter_map[prefix[0]]
        self.filter = cls(self.model, conserve_memory=self.conserve_memory,
                          loglikelihood_burn=self.loglikelihood_burn)
Example #5
    def constrain_stationary_multivariate(unconstrained, variance,
                                          transform_variance=False,
                                          prefix=None):

        use_list = isinstance(unconstrained, list)
        if use_list:
            unconstrained = np.concatenate(unconstrained, axis=1)

        k_endog, order = unconstrained.shape
        order //= k_endog

        if order < 1:
            raise ValueError('Must have order at least 1')
        if k_endog < 1:
            raise ValueError('Must have at least 1 endogenous variable')

        if prefix is None:
            prefix, dtype, _ = find_best_blas_type(
                [unconstrained, variance])
        dtype = prefix_dtype_map[prefix]

        unconstrained = np.asfortranarray(unconstrained, dtype=dtype)
        variance = np.asfortranarray(variance, dtype=dtype)

        # Step 1: convert from arbitrary matrices to those with singular values
        # less than one.
        # sv_constrained = _constrain_sv_less_than_one(unconstrained, order,
        #                                              k_endog, prefix)
        sv_constrained = prefix_sv_map[prefix](unconstrained, order, k_endog)

        # Step 2: convert matrices from our "partial autocorrelation matrix"
        # space (matrices with singular values less than one) to the space of
        # stationary coefficient matrices
        constrained, variance = prefix_pacf_map[prefix](
            sv_constrained, variance, transform_variance, order, k_endog)

        constrained = np.array(constrained, dtype=dtype)
        variance = np.array(variance, dtype=dtype)

        if use_list:
            constrained = [
                constrained[:k_endog, i*k_endog:(i+1)*k_endog]
                for i in range(order)
            ]

        return constrained, variance
Example #6
    def init_filter(cls):
        # Use the appropriate Statespace model
        prefix = find_best_blas_type((cls.obs, ))
        klass = prefix_statespace_map[prefix[0]]

        # Instantiate the statespace model
        model = klass(cls.obs, cls.design, cls.obs_intercept, cls.obs_cov,
                      cls.transition, cls.state_intercept, cls.selection,
                      cls.state_cov)
        model.initialize_known(cls.initial_state, cls.initial_state_cov)

        # Initialize the appropriate Kalman filter
        klass = prefix_kalman_filter_map[prefix[0]]
        kfilter = klass(model,
                        conserve_memory=cls.conserve_memory,
                        loglikelihood_burn=cls.loglikelihood_burn)

        return model, kfilter
Example #7
    def init_filter(cls):
        # Use the appropriate Statespace model
        prefix = find_best_blas_type((cls.obs,))
        klass = prefix_statespace_map[prefix[0]]

        # Instantiate the statespace model
        model = klass(
            cls.obs, cls.design, cls.obs_intercept, cls.obs_cov,
            cls.transition, cls.state_intercept, cls.selection,
            cls.state_cov
        )
        model.initialize_known(cls.initial_state, cls.initial_state_cov)

        # Initialize the appropriate Kalman filter
        klass = prefix_kalman_filter_map[prefix[0]]
        kfilter = klass(model, conserve_memory=cls.conserve_memory,
                        loglikelihood_burn=cls.loglikelihood_burn)

        return model, kfilter
Example #8
def arma_loglikeobs(endog, ar_params=None, ma_params=None, sigma2=1,
                    prefix=None):
    """
    Compute loglikelihood for each observation assuming an ARMA process

    Parameters
    ----------
    endog : ndarray
        The observed time-series process.
    ar_params : ndarray, optional
        Autoregressive parameters.
    ma_params : ndarray, optional
        Moving average parameters.
    sigma2 : ndarray, optional
        The ARMA innovation variance. Default is 1.
    prefix : str, optional
        The BLAS prefix associated with the datatype. Default is to find the
        best datatype based on given input. This argument is typically only
        used internally.

    Returns
    -------
    loglikeobs : array of numeric
        Array of loglikelihood values for each observation.

    """
    endog = np.array(endog)
    ar_params = np.atleast_1d([] if ar_params is None else ar_params)
    ma_params = np.atleast_1d([] if ma_params is None else ma_params)

    if prefix is None:
        prefix, dtype, _ = find_best_blas_type(
            [endog, ar_params, ma_params, np.array(sigma2)])
    dtype = prefix_dtype_map[prefix]

    endog = np.ascontiguousarray(endog, dtype=dtype)
    ar_params = np.asfortranarray(ar_params, dtype=dtype)
    ma_params = np.asfortranarray(ma_params, dtype=dtype)
    sigma2 = dtype(sigma2).item()

    func = getattr(_arma_innovations, prefix + 'arma_loglikeobs_fast')
    return func(endog, ar_params, ma_params, sigma2)
Example #9
def arma_innovations(endog,
                     ar_params=None,
                     ma_params=None,
                     sigma2=1,
                     normalize=False,
                     prefix=None):
    """
    Compute innovations using a given ARMA process

    Parameters
    ----------
    endog : ndarray
        The observed time-series process; it may be univariate or multivariate.
    ar_params : ndarray, optional
        Autoregressive parameters.
    ma_params : ndarray, optional
        Moving average parameters.
    sigma2 : ndarray, optional
        The ARMA innovation variance. Default is 1.
    normalize : boolean, optional
        Whether or not to normalize the returned innovations. Default is False.
    prefix : str, optional
        The BLAS prefix associated with the datatype. Default is to find the
        best datatype based on given input. This argument is typically only
        used internally.

    Returns
    -------
    innovations : ndarray
        Innovations (one-step-ahead prediction errors) for the given `endog`
        series with predictions based on the given ARMA process. If
        `normalize=True`, then the returned innovations have been "whitened" by
        dividing through by the square root of the mean square error.
    innovations_mse : ndarray
        Mean square error for the innovations.

    """
    # Parameters
    endog = np.array(endog)
    squeezed = endog.ndim == 1
    if squeezed:
        endog = endog[:, None]

    ar_params = np.atleast_1d([] if ar_params is None else ar_params)
    ma_params = np.atleast_1d([] if ma_params is None else ma_params)

    nobs, k_endog = endog.shape
    ar = np.r_[1, -ar_params]
    ma = np.r_[1, ma_params]

    # Get BLAS prefix
    if prefix is None:
        prefix, dtype, _ = find_best_blas_type(
            [endog, ar_params, ma_params,
             np.array(sigma2)])
    dtype = prefix_dtype_map[prefix]

    # Make arrays contiguous for BLAS calls
    endog = np.asfortranarray(endog, dtype=dtype)
    ar_params = np.asfortranarray(ar_params, dtype=dtype)
    ma_params = np.asfortranarray(ma_params, dtype=dtype)
    sigma2 = dtype(sigma2).item()

    # Get the appropriate functions
    arma_transformed_acovf_fast = getattr(
        _arma_innovations, prefix + 'arma_transformed_acovf_fast')
    arma_innovations_algo_fast = getattr(_arma_innovations,
                                         prefix + 'arma_innovations_algo_fast')
    arma_innovations_filter = getattr(_arma_innovations,
                                      prefix + 'arma_innovations_filter')

    # Run the innovations algorithm for ARMA coefficients
    arma_acovf = arima_process.arma_acovf(ar, ma, sigma2=sigma2,
                                          nobs=nobs) / sigma2
    acovf, acovf2 = arma_transformed_acovf_fast(ar, ma, arma_acovf)
    theta, v = arma_innovations_algo_fast(nobs, ar_params, ma_params, acovf,
                                          acovf2)
    v = np.array(v)
    if normalize:
        v05 = v**0.5

    # Run the innovations filter across each series
    u = []
    for i in range(k_endog):
        u_i = np.array(
            arma_innovations_filter(endog[:, i], ar_params, ma_params, theta))
        u.append(u_i / v05 if normalize else u_i)
    u = np.vstack(u).T

    # Post-processing
    if squeezed:
        u = u.squeeze()

    return u, v
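
A short usage sketch for the function above; as with `arma_loglikeobs`, the import path `statsmodels.tsa.innovations.arma_innovations` is an assumption.

import numpy as np
from statsmodels.tsa.innovations.arma_innovations import arma_innovations  # assumed path

np.random.seed(0)
y = np.random.randn(50)  # univariate series, modeled here as an MA(1) with coefficient 0.3

u, v = arma_innovations(y, ma_params=[0.3], sigma2=1.0)
print(u.shape, v.shape)  # u: one-step prediction errors, v: their mean square errors
# With normalize=True the innovations are divided by sqrt(v), i.e. "whitened".
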
Example #10
    def time_find_best_blas_type(self, dtype1, dtype2, dtype1_ord,
                                 dtype2_ord, size):
        prefix, dtype, prefer_fortran = bla.find_best_blas_type(
            (self.arr1, self.arr2))
Example #11
    def time_find_best_blas_type(self, arr1, arr2):
        prefix, dtype, prefer_fortran = bla.find_best_blas_type((arr1, arr2))
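
These benchmark methods simply time `scipy.linalg.blas.find_best_blas_type`. For reference, a small sketch of what the call is expected to return for a few dtype combinations:

import numpy as np
from scipy.linalg import blas as bla

single = np.zeros((3, 3), dtype=np.float32)
double = np.zeros((3, 3), dtype=np.float64)
cplx = np.zeros((3, 3), dtype=np.complex128)

prefix, dtype, prefer_fortran = bla.find_best_blas_type((single,))
print(prefix, dtype)  # 's' float32

prefix, dtype, _ = bla.find_best_blas_type((single, double))
print(prefix, dtype)  # 'd' float64: mixed inputs promote to the most generic dtype

prefix, dtype, _ = bla.find_best_blas_type((double, cplx))
print(prefix, dtype)  # 'z' complex128
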
Example #12
def arma_innovations(endog, ar_params=None, ma_params=None, sigma2=1,
                     normalize=False, prefix=None):
    """
    Compute innovations using a given ARMA process

    Parameters
    ----------
    endog : ndarray
        The observed time-series process; it may be univariate or multivariate.
    ar_params : ndarray, optional
        Autoregressive parameters.
    ma_params : ndarray, optional
        Moving average parameters.
    sigma2 : ndarray, optional
        The ARMA innovation variance. Default is 1.
    normalize : boolean, optional
        Whether or not to normalize the returned innovations. Default is False.
    prefix : str, optional
        The BLAS prefix associated with the datatype. Default is to find the
        best datatype based on given input. This argument is typically only
        used internally.

    Returns
    -------
    innovations : ndarray
        Innovations (one-step-ahead prediction errors) for the given `endog`
        series with predictions based on the given ARMA process. If
        `normalize=True`, then the returned innovations have been "whitened" by
        dividing through by the square root of the mean square error.
    innovations_mse : ndarray
        Mean square error for the innovations.

    """
    # Parameters
    endog = np.array(endog)
    squeezed = endog.ndim == 1
    if squeezed:
        endog = endog[:, None]

    ar_params = np.atleast_1d([] if ar_params is None else ar_params)
    ma_params = np.atleast_1d([] if ma_params is None else ma_params)

    nobs, k_endog = endog.shape
    ar = np.r_[1, -ar_params]
    ma = np.r_[1, ma_params]

    # Get BLAS prefix
    if prefix is None:
        prefix, dtype, _ = find_best_blas_type(
            [endog, ar_params, ma_params, np.array(sigma2)])
    dtype = prefix_dtype_map[prefix]

    # Make arrays contiguous for BLAS calls
    endog = np.asfortranarray(endog, dtype=dtype)
    ar_params = np.asfortranarray(ar_params, dtype=dtype)
    ma_params = np.asfortranarray(ma_params, dtype=dtype)
    sigma2 = dtype(sigma2).item()

    # Get the appropriate functions
    arma_transformed_acovf_fast = getattr(
        _arma_innovations, prefix + 'arma_transformed_acovf_fast')
    arma_innovations_algo_fast = getattr(
        _arma_innovations, prefix + 'arma_innovations_algo_fast')
    arma_innovations_filter = getattr(
        _arma_innovations, prefix + 'arma_innovations_filter')

    # Run the innovations algorithm for ARMA coefficients
    arma_acovf = arima_process.arma_acovf(ar, ma,
                                          sigma2=sigma2, nobs=nobs) / sigma2
    acovf, acovf2 = arma_transformed_acovf_fast(ar, ma, arma_acovf)
    theta, v = arma_innovations_algo_fast(nobs, ar_params, ma_params,
                                          acovf, acovf2)
    v = np.array(v)
    if normalize:
        v05 = v**0.5

    # Run the innovations filter across each series
    u = []
    for i in range(k_endog):
        u_i = np.array(arma_innovations_filter(endog[:, i], ar_params,
                                               ma_params, theta))
        u.append(u_i / v05 if normalize else u_i)
    u = np.vstack(u).T

    # Post-processing
    if squeezed:
        u = u.squeeze()

    return u, v