Code Example #1
def fit_t_fp(x, p=None):
    """For details, see here.

    Parameters
    ----------
        x : array, shape (t_,)
        p : array, shape (t_,), optional

    Returns
    -------
        nu : float
        mu : float
        sigma2 : float

    """
    t_ = x.shape[0]
    if p is None:
        p = np.ones(t_) / t_

    # Step 1: Define the negative log-likelihood function
    def llh(params):
        nu, mu, sigma = params
        return -p @ t.logpdf(x, nu, mu, sigma)

    # Step 2: Minimize the negative log-likelihood over (nu, mu, sigma)
    m, s2 = meancov_sp(x, p)
    param0 = [10, m, np.sqrt(s2)]
    bnds = [(1e-20, None), (None, None), (1e-20, None)]
    nu, mu, sigma = minimize(llh, param0, bounds=bnds)['x']
    sigma2 = sigma**2

    return nu, mu, sigma2
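
A minimal call sketch on hypothetical data (it assumes scipy.stats.t, scipy.optimize.minimize and the arpm helper meancov_sp, which fit_t_fp uses internally, are in scope):

import numpy as np

# hypothetical sample: 500 Student t draws with location 0.1 and scale 0.2
x = 0.1 + 0.2 * np.random.standard_t(df=5, size=500)
nu_hat, mu_hat, sigma2_hat = fit_t_fp(x)         # uniform probabilities
p = 0.99 ** np.arange(500)[::-1]                 # exponential-decay weights
nu_w, mu_w, sigma2_w = fit_t_fp(x, p / p.sum())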
Code Example #2
File: ewm_meancov.py Project: s0ap/arpmRes
def ewm_meancov(x, tau_hl, t=None, w=None):
    """For details, see here.

    Parameters
    ----------
        x : array, shape (t_, n_) if n_>1 or (t_,) for n=1
        tau_hl: scalar
        t : int
        w : int

    Returns
    -------
        ewma_t_x : array, shape (n_,)
        ewm_cv_t_x : array, shape (n_, n_)

    """
    t_ = x.shape[0]
    x = x.reshape(t_, -1)

    if t is None:
        t = t_
    if w is None:
        w = t_

    assert (t >= w), "t must be greater than or equal to w."

    p_w = np.exp(-np.log(2) / tau_hl * np.arange(0, w))[::-1].reshape(-1)
    gamma_w = np.sum(p_w)
    ewma_t_x, ewm_cv_t_x = meancov_sp(x[t - w:t, :], p_w / gamma_w)

    return np.squeeze(ewma_t_x), np.squeeze(ewm_cv_t_x)
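
A short usage sketch with made-up returns (assumes the arpm helper meancov_sp, used inside ewm_meancov, is importable):

import numpy as np

dx = 0.01 * np.random.randn(1000, 3)   # hypothetical daily returns, t_=1000, n_=3
# EWM mean and covariance with a 250-observation half-life over the full history
ewma, ewm_cv = ewm_meancov(dx, tau_hl=250)
# the same statistics at time t=800, using only the trailing w=500 observations
ewma_800, ewm_cv_800 = ewm_meancov(dx, 250, t=800, w=500)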
Code Example #3
def fit_factor_analysis(x, k_, p=None, method='PrincAxFact'):
    """For details, see here.

    Parameters
    ----------
        x : array, shape (t_, n_) if n_>1 or (t_, ) for n_=1
        k_ : scalar
        p : array, shape (t_,), optional
        method : string, optional

    Returns
    -------
        alpha_hat : array, shape (n_,)
        beta_hat : array, shape (n_, k_) if k_>1 or (n_, ) for k_=1
        delta2 : array, shape (n_,)
        z_reg : array, shape (t_, k_) if k_>1 or (t_,) for k_=1

    """
    t_ = x.shape[0]

    if len(x.shape) == 1:
        x = x.reshape((t_, 1))

    if p is None:
        p = np.ones(t_) / t_

    # Step 1: Compute HFP mean and covariance of X

    m_x_hat_hfp, s2_x_hat_hfp = meancov_sp(x, p)

    # Step 2: Estimate alpha

    alpha_hat = m_x_hat_hfp

    # Step 3: Decompose covariance matrix

    if method == 'PrincAxFact' or method.lower() == 'paf':
        beta_hat, delta2_hat = factor_analysis_paf(s2_x_hat_hfp, k_)
    else:
        beta_hat, delta2_hat = factor_analysis_mlf(s2_x_hat_hfp, k_)
    if k_ == 1:
        beta_hat = beta_hat.reshape(-1, 1)

    # Step 4: Compute factor analysis covariance matrix

    s2_x_hat_fa = beta_hat @ beta_hat.T + np.diagflat(delta2_hat)

    # Step 5: Approximate hidden factor via regression

    if np.all(delta2_hat != 0):
        omega2 = np.diag(1 / delta2_hat)
        z_reg = beta_hat.T @ \
            (omega2-omega2@beta_hat@
             np.linalg.inv(beta_hat.T@omega2@beta_hat + np.eye(k_))@
             beta_hat.T@omega2)@(x-m_x_hat_hfp).T
    else:
        z_reg = beta_hat.T @ np.linalg.inv(s2_x_hat_fa) @ (x - m_x_hat_hfp).T

    return alpha_hat, np.squeeze(beta_hat), delta2_hat, np.squeeze(z_reg.T)
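
A hypothetical-data sketch (assumes the arpm helpers meancov_sp, factor_analysis_paf and factor_analysis_mlf are in scope):

import numpy as np

x = np.random.randn(500, 8)   # hypothetical panel, t_=500, n_=8
# one hidden factor via principal-axis factoring (the default method)
alpha, beta, delta2, z = fit_factor_analysis(x, k_=1)
# two hidden factors via the maximum-likelihood branch
alpha2, beta2, delta2_2, z2 = fit_factor_analysis(x, k_=2, method='mlf')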
Code Example #4
File: fit_lfm_lasso.py Project: s0ap/arpmRes
def fit_lfm_lasso(x, z, p=None, lam=1e-2, fit_intercept=True):
    """For details, see here.

    Parameters
    ----------
        x : array, shape (t_, n_)
        z : array, shape (t_, k_)
        p : array, optional, shape (t_,)
        lam : float, optional
        fit_intercept : bool, optional

    Returns
    -------
        alpha : array, shape (n_,)
        beta : array, shape (n_, k_)
        s2_u : array, shape (n_, n_)
        u : array, shape (t_, n_)

    """

    if len(x.shape) == 1:
        x = x.reshape(-1, 1)

    if len(z.shape) == 1:
        z = z.reshape(-1, 1)

    t_, n_ = x.shape
    k_ = z.shape[1]

    if p is None:
        p = np.ones(t_) / t_

    if lam == 0:
        alpha, beta, s2_u, u = fit_lfm_ols(x, z, p, fit_intercept)
    else:
        if fit_intercept is True:
            m_x = p @ x
            m_z = p @ z
        else:
            m_x = np.zeros(n_, )
            m_z = np.zeros(k_, )

        x_p = ((x - m_x).T * np.sqrt(p)).T
        z_p = ((z - m_z).T * np.sqrt(p)).T

        clf = Lasso(alpha=lam / (2. * t_), fit_intercept=False)
        clf.fit(z_p, x_p)
        beta = clf.coef_

        if k_ == 1:
            alpha = m_x - beta * m_z
            u = x - alpha - z * beta
        else:
            alpha = m_x - beta @ m_z
            u = x - alpha - z @ np.atleast_2d(beta).T

        _, s2_u = meancov_sp(u, p)

    return alpha, beta, s2_u, u
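
A minimal sketch on simulated factors (assumes sklearn.linear_model.Lasso and the arpm helpers fit_lfm_ols and meancov_sp are importable):

import numpy as np

t_, n_, k_ = 300, 5, 2
z = np.random.randn(t_, k_)                             # hypothetical factors
x = z @ np.random.randn(k_, n_) + 0.1 * np.random.randn(t_, n_)
alpha, beta, s2_u, u = fit_lfm_lasso(x, z, lam=1e-2)    # sparse loadings
alpha0, beta0, s2_u0, u0 = fit_lfm_lasso(x, z, lam=0.)  # falls back to fit_lfm_ols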
Code Example #5
def twist_scenarios_mom_match(x, m_, s2_, p=None, method='Riccati', d=None):
    """For details, see here.

    Parameters
    ----------
        x : array, shape (j_,n_) if n_>1 or (j_,) for n_=1
        m_ : array, shape (n_,)
        s2_ : array, shape (n_,n_)
        p : array, optional, shape (j_,)
        method : string, optional
        d : array, shape (k_, n_), optional

    Returns
    -------
        x : array, shape (j_, n_) if n_>1 or (j_,) for n_=1

    """

    if np.ndim(m_) == 0:
        m_ = np.reshape(m_, 1).copy()
    else:
        m_ = np.array(m_).copy()
    if np.ndim(s2_) == 0:
        s2_ = np.reshape(s2_, (1, 1))
    else:
        s2_ = np.array(s2_).copy()
    if len(x.shape) == 1:
        x = x.reshape(-1, 1).copy()

    if p is None:
        j_ = x.shape[0]
        p = np.ones(j_) / j_  # uniform probabilities as default value

    # Step 1. Original moments

    m_x, s2_x = meancov_sp(x, p)

    # Step 2. Transpose-square-root of s2_x

    r_x = transpose_square_root(s2_x, method, d)

    # Step 3. Transpose-square-root of s2_

    r_ = transpose_square_root(s2_, method, d)

    # Step 4. Twist matrix

    b = r_ @ np.linalg.inv(r_x)

    # Step 5. Shift vector

    a = m_.reshape(-1, 1) - b @ m_x.reshape(-1, 1)

    # Step 6. Twisted scenarios

    x_ = (a + b @ x.T).T

    return np.squeeze(x_)
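
A usage sketch on hypothetical scenarios (assumes the arpm helpers meancov_sp and transpose_square_root are in scope):

import numpy as np

x = np.random.randn(10000, 2)               # hypothetical scenarios
m_ = np.array([0.05, -0.02])                # target mean
s2_ = np.array([[1.0, 0.3], [0.3, 2.0]])    # target covariance
x_ = twist_scenarios_mom_match(x, m_, s2_)  # Riccati root by default
# meancov_sp(x_) now matches (m_, s2_) up to numerical error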
Code Example #6
File: fit_lfm_pcfp.py Project: s0ap/arpmRes
def fit_lfm_pcfp(x, p, sig2, k_):
    """For details, see here.

    Parameters
    ----------
        x : array, shape (t_, n_)
        p : array, shape (t_,)
        sig2 : array, shape (n_, n_)
        k_ : scalar

    Returns
    -------
        alpha_PC : array, shape (n_,)
        beta_PC : array, shape (n_, k_)
        gamma_PC : array, shape(n_, k_)
        s2_PC : array, shape(n_, n_)

    """

    t_, n_ = x.shape

    # Step 1: Compute HFP-expectation and covariance of x
    m_x, s2_x = meancov_sp(x, p)

    # Step 2: Compute the Cholesky root of sig2
    sig = np.linalg.cholesky(sig2)

    # Step 3: Perform spectral decomposition
    s2_tmp = np.linalg.solve(sig, (s2_x.dot(np.linalg.pinv(sig))))
    e, lambda2 = pca_cov(s2_tmp)

    # Step 4: Compute optimal loadings for PC LFM
    beta_PC = sig @ e[:, :k_]

    # Step 5: Compute factor extraction matrix for PC LFM
    gamma_PC = (np.linalg.solve(sig, np.eye(n_))) @ e[:, :k_]

    # Step 6: Compute shifting term for PC LFM
    alpha_PC = (np.eye(n_) - beta_PC @ gamma_PC.T) @ m_x

    # Step 7: Compute the covariance of residuals
    s2_PC = sig @ e[:, k_:n_] * lambda2[k_:n_] @ e[:, k_:n_].T @ sig.T
    return alpha_PC, beta_PC, gamma_PC, s2_PC
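
A minimal sketch with an identity scale matrix (assumes the arpm helpers meancov_sp and pca_cov are in scope):

import numpy as np

t_, n_, k_ = 400, 6, 2
x = np.random.randn(t_, n_)   # hypothetical panel
p = np.ones(t_) / t_          # uniform flexible probabilities
sig2 = np.eye(n_)             # identity scale matrix as a simple choice
alpha_PC, beta_PC, gamma_PC, s2_PC = fit_lfm_pcfp(x, p, sig2, k_)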
Code Example #7
def fit_lfm_roblasso(x,
                     z,
                     p=None,
                     nu=1e9,
                     lambda_beta=0.,
                     lambda_phi=0.,
                     tol=1e-3,
                     fit_intercept=True,
                     maxiter=500,
                     print_iter=False,
                     rescale=False):
    """For details, see here.

    Parameters
    ----------
        x : array, shape(t_, n_)
        z : array, shape(t_, k_)
        p : array, optional, shape(t_)
        nu : scalar, optional
        lambda_beta : scalar, optional
        lambda_phi : scalar, optional
        tol : float, optional
        fit_intercept: bool, optional
        maxiter : scalar, optional
        print_iter : bool, optional
        rescale : bool, optional

    Returns
    -------
       alpha_RMLFP : array, shape(n_,)
       beta_RMLFP : array, shape(n_,k_)
       sig2_RMLFP : array, shape(n_,n_)

    """

    if len(x.shape) == 1:
        x = x.reshape(-1, 1)

    if len(z.shape) == 1:
        z = z.reshape(-1, 1)

    t_, n_ = x.shape

    if p is None:
        p = np.ones(t_) / t_

    # rescale the variables
    if rescale is True:
        _, sigma2_x = meancov_sp(x, p)
        sigma_x = np.sqrt(np.diag(sigma2_x))
        x = x / sigma_x

        _, sigma2_z = meancov_sp(z, p)
        sigma_z = np.sqrt(np.diag(sigma2_z))
        z = z / sigma_z

    # Step 0: Set initial values using method of moments

    alpha, beta, sigma2, u = fit_lfm_lasso(x,
                                           z,
                                           p,
                                           lambda_beta,
                                           fit_intercept=fit_intercept)
    mu_u = np.zeros(n_)

    for i in range(maxiter):

        # Step 1: Update the weights

        if nu >= 1e3 and np.linalg.det(sigma2) < 1e-13:
            w = np.ones(t_)
        else:
            w = (nu + n_) / (nu + mahalanobis_dist(u, mu_u, sigma2)**2)
        q = w * p
        q = q / np.sum(q)

        # Step 2: Update location and dispersion parameters

        alpha_old, beta_old = alpha, beta
        alpha, beta, sigma2, u = fit_lfm_lasso(x,
                                               z,
                                               q,
                                               lambda_beta,
                                               fit_intercept=fit_intercept)
        sigma2, _ = graphical_lasso((w @ p) * sigma2, lambda_phi)

        # Step 3: Check convergence

        errors = [
            np.linalg.norm(alpha - alpha_old, ord=np.inf) /
            max(np.linalg.norm(alpha_old, ord=np.inf), 1e-20),
            np.linalg.norm(beta - beta_old, ord=np.inf) /
            max(np.linalg.norm(beta_old, ord=np.inf), 1e-20)
        ]

        # print the loglikelihood and the error
        if print_iter is True:
            print('Iter: %i; Loglikelihood: %.5f; Errors: %.5f' %
                  (i, p @ mvt_logpdf(u, mu_u, sigma2, nu) -
                   lambda_beta * np.linalg.norm(beta, ord=1), max(errors)))

        if max(errors) <= tol:
            break

    if rescale is True:
        alpha = alpha * sigma_x
        beta = ((beta / sigma_z).T * sigma_x).T
        sigma2 = (sigma2.T * sigma_x).T * sigma_x

    return alpha, beta, sigma2
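
A hypothetical-data sketch with fat-tailed residuals (assumes the arpm helpers fit_lfm_lasso, meancov_sp, mahalanobis_dist and mvt_logpdf, plus sklearn.covariance.graphical_lasso, are importable):

import numpy as np

t_, n_, k_ = 300, 4, 2
z = np.random.randn(t_, k_)   # hypothetical factors
x = z @ np.random.randn(k_, n_) + np.random.standard_t(5, (t_, n_))
alpha, beta, sig2 = fit_lfm_roblasso(x, z, nu=5., lambda_beta=1e-2,
                                     lambda_phi=1e-2)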
Code Example #8
def fit_lfm_mlfp(x,
                 z,
                 p=None,
                 nu=4,
                 tol=1e-3,
                 fit_intercept=True,
                 maxiter=500,
                 print_iter=False,
                 rescale=False):
    """For details, see here.

    Parameters
    ----------
        x : array, shape (t_, n_) if n_>1 or (t_, ) for n_=1
        z : array, shape (t_, k_) if k_>1 or (t_, ) for k_=1
        p : array, optional, shape (t_,)
        nu : scalar, optional
        tol : float, optional
        fit_intercept: bool, optional
        maxiter : scalar, optional
        print_iter : bool, optional
        rescale : bool, optional

    Returns
    -------
       alpha : array, shape (n_,)
       beta : array, shape (n_, k_) if k_>1 or (n_, ) for k_=1
       sigma2 : array, shape (n_, n_)
       u : shape (t_, n_) if n_>1 or (t_, ) for n_=1

    """

    if np.ndim(x) < 2:
        x = x.reshape(-1, 1).copy()
    if np.ndim(z) < 2:
        z = z.reshape(-1, 1).copy()
    t_, n_ = x.shape
    k_ = z.shape[1]

    if p is None:
        p = np.ones(t_) / t_

    # rescale the variables
    if rescale is True:
        _, sigma2_x = meancov_sp(x, p)
        sigma_x = np.sqrt(np.diag(sigma2_x))
        x = x.copy() / sigma_x

        _, sigma2_z = meancov_sp(z, p)
        sigma_z = np.sqrt(np.diag(sigma2_z))
        z = z.copy() / sigma_z

    # Step 0: Set initial values using method of moments

    alpha, beta, sigma2, u = fit_lfm_ols(x, z, p, fit_intercept=fit_intercept)
    alpha, beta, sigma2, u = \
        alpha.reshape((n_, 1)), beta.reshape((n_, k_)), \
        sigma2.reshape((n_, n_)), u.reshape((t_, n_))

    if nu > 2.:
        # if nu <=2, then the covariance is not defined
        sigma2 = (nu - 2.) / nu * sigma2

    mu_u = np.zeros(n_)

    for i in range(maxiter):

        # Step 1: Update the weights and historical flexible probabilities

        if nu >= 1e3 and np.linalg.det(sigma2) < 1e-13:
            w = np.ones(t_)
        else:
            w = (nu + n_) / (nu + mahalanobis_dist(u, mu_u, sigma2)**2)
        q = w * p
        q = q / np.sum(q)

        # Step 2: Update shift parameters, factor loadings and covariance

        alpha_old, beta_old, sigma2_old = alpha, beta, sigma2
        alpha, beta, sigma2, u = fit_lfm_ols(x,
                                             z,
                                             q,
                                             fit_intercept=fit_intercept)
        alpha, beta, sigma2, u = \
            alpha.reshape((n_, 1)), beta.reshape((n_, k_)), \
            sigma2.reshape((n_, n_)), u.reshape((t_, n_))
        sigma2 = (w @ p) * sigma2

        # Step 3: Check convergence
        beta_tilde_old = np.column_stack((alpha_old, beta_old))
        beta_tilde = np.column_stack((alpha, beta))
        errors = [
            np.linalg.norm(beta_tilde - beta_tilde_old, ord=np.inf) /
            np.linalg.norm(beta_tilde_old, ord=np.inf),
            np.linalg.norm(sigma2 - sigma2_old, ord=np.inf) /
            np.linalg.norm(sigma2_old, ord=np.inf)
        ]

        # print the loglikelihood and the error
        if print_iter is True:
            print('Iter: %i; Loglikelihood: %.5f; Errors: %.3e' %
                  (i, p @ mvt_logpdf(u, mu_u, sigma2, nu), max(errors)))

        if max(errors) < tol:
            break

    if rescale is True:
        alpha = alpha * sigma_x
        beta = ((beta / sigma_z).T * sigma_x).T
        sigma2 = (sigma2.T * sigma_x).T * sigma_x

    return np.squeeze(alpha), np.squeeze(beta), np.squeeze(sigma2), np.squeeze(
        u)
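
A minimal sketch with a single factor (assumes the arpm helpers fit_lfm_ols, meancov_sp, mahalanobis_dist and mvt_logpdf are in scope):

import numpy as np

t_ = 500
z = np.random.randn(t_)                           # hypothetical factor, k_=1
x = 0.5 + 1.2 * z + np.random.standard_t(4, t_)   # fat-tailed residuals
alpha, beta, sigma2, u = fit_lfm_mlfp(x, z, nu=4)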
Code Example #9
def invariance_test_ellipsoid(epsi,
                              l_,
                              *,
                              conf_lev=0.95,
                              fit=0,
                              r=2,
                              title='Invariance test',
                              bl=None,
                              bu=None,
                              plot_test=True):
    """For details, see here.

    Parameters
    ----------
        epsi : array, shape(t_)
        l_ : scalar
        conf_lev : scalar, optional
        fit : scalar, optional
        r : scalar, optional
        title : string, optional
        bl : scalar, optional
        bu : scalar, optional
        plot_test : boolean, optional

    Returns
    -------
        rho : array, shape(l_)
        conf_int : array, shape(2)

    """

    if len(epsi.shape) == 2:
        epsi = epsi.reshape(-1)

    if bl is None:
        bl = np.percentile(epsi, 0.25)
    if bu is None:
        bu = np.percentile(epsi, 99.75)

    # Settings
    np.seterr(invalid='ignore')
    sns.set_style('white')
    nb = int(np.round(10 * np.log(epsi.shape[0])))  # number of bins for histograms

    # Step 1: compute the sample autocorrelations

    rho = np.array([
        st.pearsonr(epsi[:-k] - meancov_sp(epsi[:-k])[0],
                    epsi[k:] - meancov_sp(epsi[k:])[0])[0]
        for k in range(1, l_ + 1)
    ])

    # Step 2: compute confidence interval

    alpha = 1 - conf_lev
    z_alpha_half = st.norm.ppf(1 - alpha / 2) / np.sqrt(epsi.shape[0])
    conf_int = np.array([-z_alpha_half, z_alpha_half])

    # Step 3: plot the ellipse, if requested

    if plot_test:
        plt.style.use('arpm')
        # Ellipsoid test: location-dispersion parameters
        x = epsi[:-l_]
        epsi = epsi[l_:]
        z = np.concatenate((x.reshape((-1, 1)), epsi.reshape((-1, 1))), axis=1)

        # Compute the sample mean and sample covariance, and generate the figure

        mu_hat, sigma2_hat = meancov_sp(z)

        f = plt.figure()
        f.set_size_inches(16, 9)
        gs = plt.GridSpec(9, 16, hspace=1.2, wspace=1.2)

        # max and min value of the first reference axis settings,
        # for the scatter and histogram plots

        # scatter plot (with ellipsoid)

        xx = x.copy()
        yy = epsi.copy()
        xx[x < bl] = np.nan
        xx[x > bu] = np.nan
        yy[epsi < bl] = np.nan
        yy[epsi > bu] = np.nan
        ax_scatter = f.add_subplot(gs[1:6, 4:9])
        ax_scatter.scatter(xx, yy, marker='.', s=10)
        ax_scatter.ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))
        plt.xlabel('obs', fontsize=17)
        plt.ylabel('lagged obs.', fontsize=17)
        plot_ellipse(mu_hat,
                     sigma2_hat,
                     r=r,
                     plot_axes=False,
                     plot_tang_box=False,
                     color='orange',
                     line_width=2,
                     display_ellipse=True)
        plt.suptitle(title, fontsize=20)
        plt.xticks(fontsize=14)
        plt.yticks(fontsize=14)
        ax_scatter.set_xlim(np.array([bl, bu]))
        ax_scatter.set_ylim(np.array([bl, bu]))

        ax = f.add_subplot(gs[7:, 4:9])

        # histogram plot of observations
        xxx = x[~np.isnan(xx)]
        px = np.ones(xxx.shape[0]) / xxx.shape[0]
        nx, cx = histogram_sp(xxx, p=px, k_=nb)
        hist_kws = {'weights': px.flatten(), 'edgecolor': 'k'}
        fit_kws = {'color': 'orange', 'cut': 0}
        if fit == 1:  # normal
            sns.distplot(xxx, hist_kws=hist_kws, kde=False, fit=st.norm, ax=ax)
            plt.legend(['Normal fit', 'Marginal distr'], fontsize=14)
        elif fit == 2 and sum(x < 0) == 0:  # exponential
            sns.distplot(xxx,
                         hist_kws=hist_kws,
                         fit_kws=fit_kws,
                         kde=False,
                         fit=st.expon,
                         ax=ax)
            plt.legend(['Exponential fit', 'Marginal distr'], fontsize=14)
            plt.xticks(fontsize=14)
            plt.yticks(fontsize=14)
        elif fit == 3 and sum(x < 0) == 0:  # Poisson
            ax.bar(cx,
                   nx,
                   cx[1] - cx[0],
                   facecolor=[0.8, 0.8, 0.8],
                   edgecolor='k')
            k = np.arange(x.max() + 1)
            mlest = x.mean()
            plt.plot(k,
                     st.poisson.pmf(k, mlest),
                     'o',
                     linestyle='-',
                     lw=1,
                     markersize=3,
                     color='orange')
            plt.legend(['Poisson fit', 'Marginal distr.'], loc=1, fontsize=14)
            plt.xticks(fontsize=14)
            plt.yticks(fontsize=14)
        else:
            ax.bar(cx,
                   nx,
                   cx[1] - cx[0],
                   facecolor=[0.8, 0.8, 0.8],
                   edgecolor='k')
            plt.xticks(fontsize=14)
            plt.yticks(fontsize=14)
        ax.get_xaxis().set_visible(False)
        ax.set_xlim(np.array([bl, bu]))
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.invert_yaxis()

        ax = f.add_subplot(gs[1:6, 0:3])
        # histogram plot of lagged observations
        yyy = epsi[~np.isnan(yy)]
        py = np.ones(yyy.shape[0]) / yyy.shape[0]
        hist_kws = {'weights': py.flatten(), 'edgecolor': 'k'}
        fit_kws = {'color': 'orange', 'cut': 0}
        ny, cy = histogram_sp(yyy, p=py, k_=nb)
        if fit == 1:
            sns.distplot(yyy,
                         hist_kws=hist_kws,
                         kde=False,
                         fit=st.norm,
                         vertical=True,
                         ax=ax)
            plt.xticks(fontsize=14)
            plt.yticks(fontsize=14)
        elif fit == 2 and sum(epsi < 0) == 0:
            sns.distplot(yyy,
                         hist_kws=hist_kws,
                         fit_kws=fit_kws,
                         kde=False,
                         fit=st.expon,
                         vertical=True,
                         ax=ax)
            plt.xticks(fontsize=14)
            plt.yticks(fontsize=14)
        elif fit == 3 and sum(epsi < 0) == 0:
            ax.barh(cy,
                    ny,
                    cy[1] - cy[0],
                    facecolor=[0.8, 0.8, 0.8],
                    edgecolor='k')
            mlest = epsi.mean()
            k = np.arange(epsi.max() + 1)
            plt.plot(st.poisson.pmf(k, mlest),
                     k,
                     'o',
                     linestyle='-',
                     lw=1,
                     markersize=3,
                     color='orange')
            plt.xticks(fontsize=14)
            plt.yticks(fontsize=14)
        else:
            ax.barh(cy,
                    ny,
                    cy[1] - cy[0],
                    facecolor=[0.8, 0.8, 0.8],
                    edgecolor='k')
            plt.xticks(fontsize=14)
            plt.yticks(fontsize=14)
        ax.get_yaxis().set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.set_ylim(np.array([bl, bu]))
        ax.invert_xaxis()

        # autocorrelation plot
        ax = f.add_subplot(gs[1:6, 10:])
        xx = np.arange(1, l_ + 1)
        xxticks = xx
        if len(xx) > 15:
            xxticks = np.linspace(1, l_ + 1, 10, dtype=int)
        plt.bar(xx, rho[:l_], 0.5, facecolor=[.8, .8, .8], edgecolor='k')
        plt.bar(xx[l_ - 1],
                rho[l_ - 1],
                0.5,
                facecolor='orange',
                edgecolor='k')  # highlighting the last bar
        plt.plot([0, xx[-1] + 1], [conf_int[0], conf_int[0]], ':k')
        plt.plot([0, xx[-1] + 1], [-conf_int[0], -conf_int[0]], ':k')
        plt.xlabel('lag', fontsize=17)
        plt.ylabel('Autocorrelation', fontsize=17)
        plt.axis([0.5, l_ + 0.5, -1, 1])
        plt.xticks(xxticks)
        plt.xticks(fontsize=14)
        plt.yticks(fontsize=14)
        plt.grid(False)
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
    return rho, conf_int
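
A quick sketch on i.i.d. draws, which should pass the test; plot_test=False skips the matplotlib/seaborn figure (and the 'arpm' plot style), so only the arpm helper meancov_sp is needed:

import numpy as np

epsi = np.random.randn(1000)   # hypothetical invariants
rho, conf_int = invariance_test_ellipsoid(epsi, l_=10, plot_test=False)
print(np.abs(rho).max() < conf_int[1])   # True for most samples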
Code Example #10
def fit_locdisp_mlfp(epsi,
                     *,
                     p=None,
                     nu=1000,
                     threshold=1e-3,
                     maxiter=1000,
                     print_iter=False):
    """For details, see here.

    Parameters
    ----------
        epsi : array, shape (t_, i_)
        p : array, shape (t_,), optional
        nu: float, optional
        threshold : float, optional
        maxiter : int, optional
        print_iter : bool

    Returns
    -------
        mu : array, shape (i_,)
        sigma2 : array, shape (i_, i_)

    """

    if len(epsi.shape) == 1:
        epsi = epsi.reshape(-1, 1)

    t_, i_ = epsi.shape

    if p is None:
        p = np.ones(t_) / t_

    # Step 0: Set initial values using method of moments

    mu, sigma2 = meancov_sp(epsi, p)

    if nu > 2.:
        # if nu <=2, then the covariance is not defined
        sigma2 = (nu - 2.) / nu * sigma2

    for i in range(maxiter):

        # Step 1: Update the weights

        if nu >= 1e3 and np.linalg.det(sigma2) < 1e-13:
            w = np.ones(t_)
        else:
            w = (nu + i_) / (nu + mahalanobis_dist(epsi, mu, sigma2)**2)
        q = w * p
        q = q / np.sum(q)

        # Step 2: Update location and dispersion parameters

        mu_old, sigma2_old = mu, sigma2
        mu, sigma2 = meancov_sp(epsi, q)
        sigma2 = (w @ p) * sigma2

        # Step 3: Check convergence

        err = max(
            np.linalg.norm(mu - mu_old, ord=np.inf) /
            np.linalg.norm(mu_old, ord=np.inf),
            np.linalg.norm(sigma2 - sigma2_old, ord=np.inf) /
            np.linalg.norm(sigma2_old, ord=np.inf))

        if print_iter is True:
            print('Iter: %i; Loglikelihood: %.5f; Error: %.5f' %
                  (i, p @ mvt_logpdf(epsi, mu, sigma2, nu), err))

        if err <= threshold:
            break
    return np.squeeze(mu), np.squeeze(sigma2)
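
A minimal call sketch (assumes the arpm helpers meancov_sp, mahalanobis_dist and mvt_logpdf are importable):

import numpy as np

epsi = np.random.standard_t(5., size=(1000, 2))   # hypothetical invariants
mu_hat, sigma2_hat = fit_locdisp_mlfp(epsi, nu=5., print_iter=True)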
Code Example #11
File: fit_garch_fp.py Project: dpopadic/arpmRes
def fit_garch_fp(dx,
                 p=None,
                 sigma2_0=None,
                 param0=None,
                 g=0.95,
                 rescale=False):
    """For details, see here.

    Parameters
    ----------
        dx : array, shape(t_,)
        p : array, optional, shape(t_)
        sigma2_0 : scalar, optional
        param0 : list or array, optional, shape(4,)
        g : scalar, optional
        rescale : bool, optional

    Returns
    -------
        param : list
        sigma2 : array, shape(t_,)
        epsi : array, shape(t_,)

    """

    t_ = dx.shape[0]

    # flexible probabilities
    if p is None:
        p = np.ones(t_) / t_

    # sample mean and variance
    if (param0 is None) or (rescale is True):
        m, s2 = meancov_sp(dx, p)

    if param0 is None:
        param0 = [0.01, g - 0.01, s2 * (1. - g), m]  # initial parameters

    # Step 0: Set the default starting variance, if not provided

    if sigma2_0 is None:
        p_tau = exp_decay_fp(t_, t_ / 3, 0)
        _, sigma2_0 = meancov_sp(dx, p_tau)

    # Step 1: Standardize the data, if requested

    if rescale is True:
        dx = (dx - m) / np.sqrt(s2)
        param0[2] = param0[2] / s2
        param0[3] = param0[3] - m
        sigma2_0 = sigma2_0 / s2

    # Step 2: Compute negative log-likelihood of GARCH

    def theta(param):
        a, b, c, mu = param
        sigma2 = sigma2_0
        theta = 0.0
        for t in range(t_):
            # if statement added because of overflow when sigma2 is too low
            if np.abs(sigma2) > 1e-128:
                theta = theta + (
                    (dx[t] - mu)**2 / sigma2 + np.log(sigma2)) * p[t]
            sigma2 = c + a * (dx[t] - mu)**2 + b * sigma2

        return theta

    # Step 3: Minimize the negative log-likelihood

    # parameter boundaries
    bnds = ((1e-20, 1.), (1e-20, 1.), (1e-20, None), (None, None))
    # stationary constraints
    cons = {'type': 'ineq', 'fun': lambda param: g - param[0] - param[1]}
    a_hat, b_hat, c_hat, mu_hat = \
        minimize(theta, param0, bounds=bnds, constraints=cons)['x']

    # Step 4: Compute realized variance and invariants

    sigma2_hat = np.full(t_, sigma2_0)
    for t in range(t_ - 1):
        sigma2_hat[t + 1] = c_hat + a_hat * (dx[t] - mu_hat) ** 2 + \
                            b_hat * sigma2_hat[t]
    epsi = (dx - mu_hat) / np.sqrt(sigma2_hat)

    # Step 5: Revert the standardization of Step 1, if requested

    if rescale is True:
        c_hat = c_hat * s2
        mu_hat = mu_hat * np.sqrt(s2) + m
        sigma2_hat = sigma2_hat * s2

    return np.array([a_hat, b_hat, c_hat,
                     mu_hat]), sigma2_hat, np.squeeze(epsi)
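
A usage sketch on simulated increments (assumes scipy.optimize.minimize and the arpm helpers meancov_sp and exp_decay_fp are in scope):

import numpy as np

dx = 0.01 * np.random.randn(1000)     # hypothetical daily increments
param, sigma2, epsi = fit_garch_fp(dx)
a_hat, b_hat, c_hat, mu_hat = param   # fitted GARCH(1, 1) parameters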
Code Example #12
def cointegration_fp(x, p=None, *, b_threshold=0.99):
    """For details, see here.

    Parameters
    ----------
         x : array, shape(t_, d_)
         p : array, optional, shape (t_,)
         b_threshold : scalar

    Returns
    -------
        c_hat : array, shape(d_, l_)
        b_hat : array, shape (l_,)

    """

    t_ = x.shape[0]
    if len(x.shape) == 1:
        x = x.reshape((t_, 1))
        d_ = 1
    else:
        d_ = x.shape[1]

    if p is None:
        p = np.ones(t_) / t_

    # Step 1: estimate HFP covariance matrix

    _, sigma2_hat = meancov_sp(x, p)

    # Step 2: find eigenvectors

    e_hat, _ = pca_cov(sigma2_hat)

    # Step 3: detect cointegration vectors

    c_hat = []
    b_hat = []
    p = p[:-1]

    for d in np.arange(0, d_):

        # Step 4: Define series

        y_t = e_hat[:, d] @ x.T

        # Step 5: fit AR(1)

        yt = y_t[1:].reshape((-1, 1))
        ytm1 = y_t[:-1].reshape((-1, 1))
        _, b, _, _ = fit_lfm_mlfp(yt, ytm1, p / np.sum(p))
        if np.ndim(b) < 2:
            b = np.array(b).reshape(-1, 1)

        # Step 6: check stationarity

        if abs(b[0, 0]) <= b_threshold:
            c_hat.append(list(e_hat[:, d]))
            b_hat.append(b[0, 0])

    # Output

    c_hat = np.array(c_hat).T
    b_hat = np.array(b_hat)

    # Step 7: Sort according to the AR(1) parameters beta_hat

    c_hat = c_hat[:, np.argsort(b_hat)]
    b_hat = np.sort(b_hat)

    return c_hat, b_hat
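
A sketch on two series sharing a common random walk (assumes the arpm helpers meancov_sp, pca_cov and fit_lfm_mlfp are importable):

import numpy as np

t_ = 500
w = np.cumsum(np.random.randn(t_))              # common random walk
x = np.column_stack([w + np.random.randn(t_),   # two cointegrated series
                     w + np.random.randn(t_)])
c_hat, b_hat = cointegration_fp(x)              # each column of c_hat is a
                                                # detected cointegration vector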
Code Example #13
File: fit_lfm_ols.py Project: s0ap/arpmRes
def fit_lfm_ols(x_t, z_t, p_t=None, fit_intercept=True):
    """For details, see here.

    Parameters
    ----------
        x_t : array, shape (t_, n_) if n_>1 or (t_, ) for n_=1
        z_t : array, shape (t_, k_) if k_>1 or (t_, ) for k_=1
        p_t : array, optional, shape (t_,)
        fit_intercept : bool

    Returns
    -------
        alpha_hat_olsfp : array, shape (n_,)
        beta_hat_olsfp : array, shape (n_, k_) if k_>1 or (n_, ) for k_=1
        s2_u_hat_olsfp : array, shape (n_, n_)
        u_t : array, shape (t_, n_) if n_>1 or (t_, ) for n_=1

    """
    t_ = x_t.shape[0]

    if len(z_t.shape) < 2:
        z_t = z_t.reshape((t_, 1)).copy()
        k_ = 1
    else:
        k_ = z_t.shape[1]

    if len(x_t.shape) < 2:
        x_t = x_t.reshape((t_, 1)).copy()
        n_ = 1
    else:
        n_ = x_t.shape[1]

    if p_t is None:
        p_t = np.ones(t_) / t_

    # Step 1: Compute HFP mean and covariance of (X,Z)'

    if fit_intercept is True:
        m_xz_hat_hfp, s2_xz_hat_hfp = meancov_sp(np.c_[x_t, z_t], p_t)
    else:
        m_xz_hat_hfp = np.zeros(n_ + k_)
        s2_xz_hat_hfp = p_t * np.c_[x_t, z_t].T @ np.c_[x_t, z_t]

    # Step 2: Compute the OLSFP estimates

    s2_z_hat_hfp = s2_xz_hat_hfp[n_:, n_:]
    s_x_z_hat_hfp = s2_xz_hat_hfp[:n_, n_:]
    m_xz_hat_hfp = m_xz_hat_hfp.reshape(-1)
    m_z_hat_hfp = m_xz_hat_hfp[n_:].reshape(-1, 1)
    m_x_hat_hfp = m_xz_hat_hfp[:n_].reshape(-1, 1)

    beta_hat_olsfp = s_x_z_hat_hfp @ np.linalg.inv(s2_z_hat_hfp)
    alpha_hat_olsfp = m_x_hat_hfp - beta_hat_olsfp @ m_z_hat_hfp

    # Step 3: Compute residuals and OLSFP estimate of covariance of U

    u_t = (x_t.T - alpha_hat_olsfp - beta_hat_olsfp @ z_t.T).T
    _, s2_u_hat_olsfp = meancov_sp(u_t, p_t)

    return alpha_hat_olsfp[:, 0], np.squeeze(beta_hat_olsfp),\
        np.squeeze(s2_u_hat_olsfp), np.squeeze(u_t)
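
A minimal sketch with a univariate target (assumes the arpm helper meancov_sp is in scope):

import numpy as np

t_ = 400
z_t = np.random.randn(t_, 3)   # hypothetical factors, k_=3
x_t = z_t @ np.array([1., -0.5, 0.2]) + 0.1 * np.random.randn(t_)
alpha, beta, s2_u, u = fit_lfm_ols(x_t, z_t)   # n_=1 target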
Code Example #14
def fit_dcc_t(dx, p=None, *, rho2=None, param0=None, g=0.99):
    """For details, see here.

    Parameters
    ----------
        dx : array, shape(t_, i_)
        p : array, optional, shape(t_)
        rho2 : array, shape(i_, i_)
        param0 : list or array, shape(2,)
        g : scalar, optional

    Returns
    -------
        params : list, shape(3,)
        r2_t : array, shape(t_, i_, i_)
        epsi : array, shape(t_, i_)
        q2_t_ : array, shape(i_, i_)

    """

    # Step 0: Setup default values

    t_, i_ = dx.shape

    # flexible probabilities
    if p is None:
        p = np.ones(t_) / t_

    # target correlation
    if rho2 is None:
        _, rho2 = meancov_sp(dx, p)
        rho2, _ = cov_2_corr(rho2)

    # initial parameters
    if param0 is None:
        param0 = [0.01, g - 0.01]  # initial parameters

    # Step 1: Compute negative log-likelihood of the DCC model

    def llh(params):
        a, b = params
        mu = np.zeros(i_)
        q2_t = rho2.copy()
        r2_t, _ = cov_2_corr(q2_t)
        llh = 0.0
        for t in range(t_):
            llh = llh - p[t] * multivariate_normal.logpdf(dx[t, :], mu, r2_t)
            q2_t = rho2 * (1 - a - b) + \
                a * np.outer(dx[t, :], dx[t, :]) + b * q2_t
            r2_t, _ = cov_2_corr(q2_t)

        return llh

    # Step 2: Minimize the negative log-likelihood

    # parameter boundaries
    bnds = ((1e-20, 1.), (1e-20, 1.))
    # stationary constraints
    cons = {'type': 'ineq', 'fun': lambda param: g - param[0] - param[1]}
    a, b = minimize(llh, param0, bounds=bnds, constraints=cons)['x']

    # Step 3: Compute realized correlations and residuals

    q2_t = rho2.copy()
    r2_t = np.zeros((t_, i_, i_))
    r2_t[0, :, :], _ = cov_2_corr(q2_t)

    for t in range(t_ - 1):
        q2_t = rho2 * (1 - a - b) + \
            a * np.outer(dx[t, :], dx[t, :]) + b * q2_t
        r2_t[t + 1, :, :], _ = cov_2_corr(q2_t)

    l_t = np.linalg.cholesky(r2_t)
    epsi = np.linalg.solve(l_t, dx)

    return [1. - a - b, a, b], r2_t, epsi, q2_t
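
A usage sketch on hypothetical joint returns (assumes scipy.stats.multivariate_normal, scipy.optimize.minimize and the arpm helpers meancov_sp and cov_2_corr are importable):

import numpy as np

dx = 0.01 * np.random.randn(750, 3)    # hypothetical joint returns
params, r2_t, epsi, q2_t = fit_dcc_t(dx)
print(params)                          # [1 - a - b, a, b]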