Example #1
def k_2_p_2_sphere_line_bundle():
    Psi = [(2, 1)]
    k_zero = 1
    k = sum([a[1] for a in Psi]) + k_zero
    p = Psi[0][0]
    T = 1000
    n = T + p
    for ii in range(10):
        D = np.arange(k) * 0.2 + 1
        OO = random_orthogonal(k)
        try:
            es = varx_minimal_estimator(Psi, m=k)
            stable, H, F, G, Phi = gen_random_stable(es, k)
            Y, e = VAR_sim(Phi, n, D, OO)
            Y = Y.T
            e = e.T
            X = Y.copy()
            cov_res, cov_xlag, cov_y_xlag = calc_extended_covariance(Y, X, p)
        except Exception:
            continue

        es_G = varx_minimal_estimator(Psi, m=k)
        es_G.set_covs(cov_res, cov_xlag)
        es_G.calc_states(G)

        opt = es.simple_fit(Y, X)
        if not opt['success']:
            print("failed with opt=")
            print(opt)
        else:
            print('orig_llk=%s est_llk=%s' %
                  (es_G.neg_log_llk, es.neg_log_llk))

            print(Phi.PolynomialMatrix_to_3darray())
            print(es.Phi)
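
The examples on this page all rely on the same helpers (random_orthogonal, VAR_sim, calc_extended_covariance) whose definitions are not shown. As a rough orientation, below is a minimal NumPy-only sketch of a VAR(p) simulator in the spirit of VAR_sim; the name simulate_var is hypothetical, and reading D and OO as the eigenvalues and eigenvectors of the innovation covariance is an assumption, not something the examples confirm.

import numpy as np

def simulate_var(Phi_arr, n, D, OO, burn_in=100):
    # Illustrative stand-in for VAR_sim, not the library's implementation.
    # Phi_arr: (k, k, p) lag coefficients; the lag-1 matrix is assumed to be
    # the last slice, matching the indexing used in Example #7 below.
    # D, OO: assumed to define the innovation covariance OO @ diag(D) @ OO.T.
    k, _, p = Phi_arr.shape
    scale = OO @ np.diag(np.sqrt(D))
    e = np.random.randn(n + burn_in, k) @ scale.T
    Y = np.zeros((n + burn_in, k))
    for t in range(p, n + burn_in):
        for lag in range(1, p + 1):
            Y[t] += Phi_arr[:, :, p - lag] @ Y[t - lag]
        Y[t] += e[t]
    return Y[burn_in:], e[burn_in:]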
Example #2
def test_random_stable():
    Psi = [(2, 1), (1, 2)]
    k_zero = 2
    k = sum([a[1] for a in Psi]) + k_zero

    es = varx_minimal_estimator(Psi, m=k)
    stable, H, F, G, Phi = gen_random_stable(es, k)

    p = Psi[0][0]
    T = 1000
    n = T + p
    D = np.arange(k) * 0.2 + 1
    OO = random_orthogonal(k)
    Y, e = VAR_sim(Phi, n, D, OO)
    Y = Y.T
    e = e.T
    X = Y.copy()
    print(Y[:, -5:])
    print(X[:, -5:])
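
random_orthogonal(k) appears in every example but is not defined here. A standard NumPy recipe for a uniformly distributed (Haar) random orthogonal matrix, which could serve as a stand-in, is sketched below; random_orthogonal_sketch is a hypothetical name, not the library's function.

import numpy as np

def random_orthogonal_sketch(k):
    # QR of a Gaussian matrix; fixing the signs by diag(R) makes Q Haar-distributed.
    A = np.random.randn(k, k)
    Q, R = np.linalg.qr(A)
    return Q * np.sign(np.diag(R))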
Example #3
def k_8_p3_generic():
    Psi = [(3, 1), (2, 1), (1, 2)]
    k_zero = 4
    k = sum([a[1] for a in Psi]) + k_zero
    p = Psi[0][0]
    T = 1000
    n = T + p
    for ii in range(10):
        D = np.arange(k) * 0.2 + 1
        OO = random_orthogonal(k)
        try:
            es = varx_minimal_estimator(Psi, m=k)
            stable, H, F, G, Phi = gen_random_stable(es,
                                                     k,
                                                     scale_range=(0.3, .6))
            Y, e = VAR_sim(Phi, n, D, OO)
            Y = Y.T
            e = e.T
            X = Y.copy()
            cov_res, cov_xlag, cov_y_xlag = calc_extended_covariance(Y, X, p)
        except Exception:
            continue

        es_G = varx_minimal_estimator(Psi, m=k)
        es_G.set_covs(cov_res, cov_xlag)
        es_G.calc_states(G)

        # simple_fit does not always succeed on this configuration; retry a few times
        found = False
        cnt = 0
        while (not found) and (cnt < 11):
            opt = es.simple_fit(Y, X)
            if opt['success']:
                print(Phi.PolynomialMatrix_to_3darray())
                print(es.Phi)
                print(es_G.neg_log_llk)
                print(es.neg_log_llk)
                found = True
            else:
                print(opt)
                cnt += 1
Example #4
def test_manifold_fit():
    from numpy.linalg import solve
    Psi = [(2, 2), (1, 1)]
    k_zero = 0
    k = sum([a[1] for a in Psi]) + k_zero
    p = Psi[0][0]
    T = 1000
    n = T + p

    for ii in range(1):
        D = np.arange(k) * 0.2 + 1
        OO = random_orthogonal(k)
        try:
            es = varx_minimal_estimator(Psi, m=k)
            stable, H, F, G, Phi = gen_random_stable(es, k)
            Y, e = VAR_sim(Phi, n, D, OO)
            Y = Y.T
            e = e.T
            X = Y.copy()
            cov_res, cov_xlag, cov_y_xlag = calc_extended_covariance(Y, X, p)
        except Exception:
            continue
        es_G = varx_minimal_estimator(Psi, m=k)
        es_G.set_covs(cov_res, cov_xlag)
        es_G.calc_states(G)

        es1 = varx_minimal_estimator(Psi, m=k)
        es1.set_covs(cov_res, cov_xlag)

        es2 = varx_minimal_estimator(Psi, m=k)
        es2.set_covs(cov_res, cov_xlag)
        h_opt = es2.hessian_fit(Y, X)
        print(h_opt)
        # opt = es.gradient_fit(Y, X)
        # self = es1

        opt = es1.manifold_fit(Y, X)
        print(es1.neg_log_llk)
Example #5
def test_hessian_fit():
    from numpy.linalg import solve
    Psi = [(3, 1), (2, 2), (1, 1)]
    k_zero = 0
    k = sum([a[1] for a in Psi]) + k_zero
    p = Psi[0][0]
    T = 1000
    n = T + p
    for ii in range(1):
        D = np.arange(k) * 0.2 + 1
        OO = random_orthogonal(k)
        try:
            es = varx_minimal_estimator(Psi, m=k)
            stable, H, F, G, Phi = gen_random_stable(es, k)
            Y, e = VAR_sim(Phi, n, D, OO)
            Y = Y.T
            e = e.T
            X = Y.copy()
            cov_res, cov_xlag, cov_y_xlag = calc_extended_covariance(Y, X, p)
        except Exception:
            continue
        es_G = varx_minimal_estimator(Psi, m=k)
        es_G.set_covs(cov_res, cov_xlag)
        es_G.calc_states(G)

        es1 = varx_minimal_estimator(Psi, m=k)
        es1.set_covs(cov_res, cov_xlag)

        opt = es.gradient_fit(Y, X)
        opt = es1.hessian_fit(Y, X)

        if not opt['success']:
            print("failed with opt=")
            print(opt)
        else:
            print('orig_llk=%s est_llk=%s' % (es_G.neg_log_llk, opt['fun']))

            print(Phi.PolynomialMatrix_to_3darray())
            print(es1.Phi)
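
In this example gradient_fit is run before hessian_fit on the same data, presumably to warm-start the second-order solver. The general pattern (independent of this library) is that cheap first-order steps move the iterate into the region where Newton steps converge rapidly; a toy, self-contained illustration on a smooth scalar objective:

import numpy as np

# Minimize f(x) = log(cosh(x)) + 0.5 * x**2 (minimum at x = 0):
# a few gradient steps, then Newton steps once close to the optimum.
f_grad = lambda x: np.tanh(x) + x
f_hess = lambda x: (1.0 - np.tanh(x) ** 2) + 1.0

x = 3.0
for _ in range(20):               # first-order phase: robust but slow
    x -= 0.1 * f_grad(x)
for _ in range(5):                # second-order phase: fast near the optimum
    x -= f_grad(x) / f_hess(x)
print(x)                          # ~0 to machine precision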
Example #6
def k_5_p_2_all_psi():
    k = 5
    m = k
    p = 2
    Psi0 = [(2, 2), (1, 2)]

    T = 1000
    n = T + p
    D = np.arange(k) * 0.2 + 1
    OO = random_orthogonal(k)

    try:
        es = varx_minimal_estimator(Psi0, m=k)
        stable, H, F, G, Phi = gen_random_stable(es, k)
        Y, e = VAR_sim(Phi, n, D, OO)
        Y = Y.T
        e = e.T
        X = Y.copy()
        cov_res, cov_xlag, cov_y_xlag = calc_extended_covariance(Y, X, p)
        es_G = varx_minimal_estimator(Psi0, m=k)
        es_G.set_covs(cov_res, cov_xlag)
        es_G.calc_states(G)
    except Exception:
        pass

    all_psi = list_all_psi_hat(m=m, p=p)
    for Psi_ in all_psi:
        Psi = psi_hat_to_psi(Psi_)
        es = varx_minimal_estimator(Psi, m=k)
        opt = es.simple_fit(Y, X)
        print('Psi=%s' % Psi)
        if not opt['success']:
            print("failed with opt=")
            print(opt)
        else:
            print('orig_llk=%s est_llk=%s' %
                  (es_G.neg_log_llk, es.neg_log_llk))
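
This example fits every candidate Psi structure and prints its likelihood next to the one obtained with the generating structure. A natural, hypothetical follow-up (reusing the names from the function above) is to collect the successful fits and rank them by neg_log_llk; note that raw likelihood tends to favour richer structures, so in practice one would add a penalty such as an information criterion.

# Hypothetical follow-up to the loop above: rank candidate structures.
results = []
for Psi_ in all_psi:
    Psi = psi_hat_to_psi(Psi_)
    es = varx_minimal_estimator(Psi, m=k)
    opt = es.simple_fit(Y, X)
    if opt['success']:
        results.append((Psi, es.neg_log_llk))

results.sort(key=lambda item: item[1])      # smallest neg_log_llk first
for Psi, llk in results[:5]:
    print('Psi=%s neg_log_llk=%s' % (Psi, llk))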
Example #7
def k_5_p_3_x_not_autogressive():
    np.random.seed(0)
    k = 5
    m = 7
    p = 2
    Psi = [(2, 2), (1, 2)]
    # first generate X

    es = varx_minimal_estimator(Psi, m=m)
    stable, _, _, _, PhiX = gen_random_stable(es, m)
    Dx = np.arange(m) * 0.2 + 1
    OOx = random_orthogonal(m)
    T = 1000
    n = T + p
    X, ex = VAR_sim(PhiX, n, Dx, OOx)
    # next generate Y
    stable, _, F, G, _ = gen_random_stable(es, m)
    H = np.random.randn(k, es.mm_degree)
    from polynomial_matrix import state_to_Phi
    from utils import random_innovation_series
    Phi = state_to_Phi(H, F, G, Psi)
    Phi_arr = Phi.PolynomialMatrix_to_3darray()
    D = np.arange(k) * 0.2 + 1
    OO = random_orthogonal(k)
    e = random_innovation_series(D, OO, n)
    Y = np.zeros_like(e)

    for j in range(0, n):
        for i in range(min(j, p)):
            Y[j, :] += Phi_arr[:, :, p - i - 1] @ X[j - i - 1, :]
        Y[j, :] += e[j, :]
    Y = Y.T
    X = X.T
    es_G = varx_minimal_estimator(Psi, m=m)
    es_G.set_covs(es._cov_numerator, es._cov_denominator)
    es_G.calc_states(G)

    opt = es.simple_fit(Y, X)
    if not opt['success']:
        print("failed with opt=")
        print(opt)
    else:
        print('orig_llk=%s est_llk=%s' % (es_G.neg_log_llk, es.neg_log_llk))
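
The generation loop above computes Y_t = sum over lag = 1..p of Phi_lag @ X_{t-lag} + e_t, with the lag-`lag` coefficient stored in Phi_arr[:, :, p - lag]. As a sanity check, the same construction can be written without the inner loop; here X_rows is a hypothetical name for X oriented with one time point per row (the orientation the loop indexes), and e is assumed to have shape (n, k):

# Hypothetical vectorized equivalent of the generation loop above.
Y2 = e.copy()
for lag in range(1, p + 1):
    Y2[lag:, :] += X_rows[:-lag, :] @ Phi_arr[:, :, p - lag].T
# Y2.T should match the Y produced by the loop (Y was transposed afterwards).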
Example #8
def test_hessian_calc():
    from numpy.linalg import solve
    Psi = [(3, 1), (2, 1), (1, 1)]
    k_zero = 1
    k = sum([a[1] for a in Psi]) + k_zero
    p = Psi[0][0]
    T = 1000
    n = T + p
    for ii in range(1):
        D = np.arange(k) * 0.2 + 1
        OO = random_orthogonal(k)
        try:
            es = varx_minimal_estimator(Psi, m=k)
            stable, H, F, G, Phi = gen_random_stable(es, k)
            Y, e = VAR_sim(Phi, n, D, OO)
            Y = Y.T
            e = e.T
            X = Y.copy()
            cov_res, cov_xlag, cov_y_xlag = calc_extended_covariance(Y, X, p)
        except Exception:
            continue
        """
        es_G = varx_minimal_estimator(Psi, m=k)
        es_G.set_covs(cov_res, cov_xlag)
        es_G.calc_states(G)
        """

        es1 = varx_minimal_estimator(Psi, m=k)
        es1.set_covs(cov_res, cov_xlag)
        m = k
        c = np.random.randn(es1.mm_degree - es1.agg_rnk, m - es1.agg_rnk)
        OO = random_orthogonal(m)
        from minimal_varx import make_normalized_G
        G_test = make_normalized_G(es1, m, OO, c)

        es1.calc_states(G_test)

        # h = 1e-6
        # self = es1

        es2 = varx_minimal_estimator(Psi, m=k)
        es2.set_covs(cov_res, cov_xlag)

        h = 1e-6
        self = es1
        # eta = np.random.randn(*G.shape)
        # G_1 = G_test.reshape(-1).copy()
        # G_1 += h * eta.reshape(-1)
        # es2.calc_states(G_1.reshape(self.G.shape))
        # diff = (es2._gradient_tensor - es1._gradient_tensor) / h
        # hs = hessian_prod(self, eta)
        # print(diff)
        g_size = self._gradient_tensor.shape[0]
        appx_Hess_matrix = np.zeros((g_size, g_size))

        for i in range(g_size):
            G_1 = es1.G.reshape(-1).copy()
            G_1[i] += h
            es2.calc_states(G_1.reshape(self.G.shape))
            appx_Hess_matrix[i, :] = (es2._gradient_tensor -
                                      es1._gradient_tensor) / h
        print(appx_Hess_matrix)

        def hessian_prod(self, eta):
            hessp = np.zeros(eta.reshape(-1).shape[0])

            for i in range(hessp.shape[0]):
                a_i = self.kappa_tensor[:, i].reshape(-1, self.p * self.m)
                # numerator_mat
                kappa_num_a_i = self._kappa_cov_numerator @ a_i.T
                a_i_num = a_i @ self._cov_numerator
                kappa_eta_T = self.calc_kappa(eta).T
                a_i_num_eta = a_i_num @ kappa_eta_T
                s1 = solve(self._numerator_mat,
                           kappa_num_a_i + kappa_num_a_i.T)
                s2 = solve(self._numerator_mat,
                           self._kappa_cov_numerator @ kappa_eta_T)
                first_part_num = -s1 @ s2
                second_part_num = solve(self._numerator_mat, a_i_num_eta)

                hess_num = first_part_num + second_part_num

                # denominator
                kappa_denom_a_i = self._kappa_cov_denominator @ a_i.T
                a_i_denom = a_i @ self._cov_denominator
                a_i_denom_eta = a_i_denom @ kappa_eta_T

                sd1 = solve(self._denominator_mat,
                            kappa_denom_a_i + kappa_denom_a_i.T)
                sd2 = solve(self._denominator_mat,
                            self._kappa_cov_denominator @ kappa_eta_T)
                first_part_denom = -sd1 @ sd2
                second_part_denom = solve(self._denominator_mat, a_i_denom_eta)

                hess_denom = first_part_denom + second_part_denom
                hessp[i] = 2 * np.sum(np.diagonal(hess_num - hess_denom))
            return hessp

        exact_Hess = np.zeros((g_size, g_size))
        for jj in range(g_size):
            eta_ = np.zeros(g_size)
            eta_[jj] += 1
            eta = eta_.reshape(G_test.shape)
            exact_Hess[jj, :] = hessian_prod(self, eta)

        print(exact_Hess)
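
The finite-difference block above approximates the Hessian row by row as (gradient(G + h * e_i) - gradient(G)) / h and compares it with the exact hessian_prod. The same pattern can be verified in isolation on a function whose Hessian is known; a self-contained NumPy sketch, unrelated to the library:

import numpy as np

# Check the finite-difference pattern on f(x) = 0.5 * x.T @ A @ x, whose exact
# Hessian is the symmetric matrix A: row i of the approximation is
# (grad(x + h * e_i) - grad(x)) / h.
rng = np.random.default_rng(0)
n = 4
A = rng.standard_normal((n, n))
A = 0.5 * (A + A.T)                  # symmetrize so the Hessian is exactly A

grad = lambda x: A @ x               # gradient of 0.5 * x.T @ A @ x
x0 = rng.standard_normal(n)
h = 1e-6

appx_Hess = np.zeros((n, n))
for i in range(n):
    x1 = x0.copy()
    x1[i] += h
    appx_Hess[i, :] = (grad(x1) - grad(x0)) / h

print(np.max(np.abs(appx_Hess - A)))  # tiny, limited only by float rounding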