Code example #1
def map_driver(q1, f, bet, t, dt):
    # Warp a single function f so that its SRSF is optimally aligned to q1
    # (the SRSF of the regression function bet), then return the inner product
    # of bet with the warped f, approximated as a Riemann sum with step dt.
    q2 = uf.f_to_srsf(f, t)
    gam = uf.optimum_reparam(q1, t, q2)
    fn = uf.warp_f_gamma(t, f, gam)
    tmp = bet * fn
    y = tmp.sum() * dt

    return y
Code example #2
def MapC_to_y(n, c, B, t, f, parallel):
    # Map basis coefficients c to the n predicted responses y by integrating
    # the regression function bet(t) = B @ c against each warped function f[:, k].

    # average grid spacing used for the Riemann-sum integral
    dt = np.diff(t)
    dt = dt.mean()

    y = np.zeros(n)

    # bet and its SRSF do not depend on k, so compute them once
    bet = np.dot(B, c)
    q1 = uf.f_to_srsf(bet, t)

    if parallel:
        y = Parallel(n_jobs=-1)(delayed(map_driver)(q1, f[:, k], bet, t, dt)
                                for k in range(n))
        y = np.asarray(y)  # joblib returns a list; match the serial return type
    else:
        for k in range(0, n):
            # align f[:, k] to bet via its SRSF, then integrate bet * (f o gam)
            q2 = uf.f_to_srsf(f[:, k], t)
            gam = uf.optimum_reparam(q1, t, q2)
            fn = uf.warp_f_gamma(t, f[:, k], gam)
            tmp = bet * fn
            y[k] = tmp.sum() * dt

    return y
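
A minimal usage sketch for MapC_to_y above: the data, basis matrix, and coefficient vector are made up for illustration, and the snippet assumes numpy is available as np and fdasrsf's utility module as uf, as in the code above.

import numpy as np

M, N, p = 101, 20, 4                            # grid size, number of curves, basis dimension
t = np.linspace(0, 1, M)                        # common time grid
f = np.random.rand(M, N)                        # N observed functions, one per column
B = np.column_stack([t**j for j in range(p)])   # simple polynomial basis (illustrative only)
c = 0.1 * np.random.randn(p)                    # coefficients of the regression function bet(t) = B @ c
y = MapC_to_y(N, c, B, t, f, parallel=False)    # one predicted response per curve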
Code example #3
def pairwise_align_bayes(f1i, f2i, time, mcmcopts=None):
    """
    This function aligns two functions using a Bayesian framework. It will align
    f2 to f1. It is based on mapping warping functions to a hypersphere, and a
    subsequent exponential mapping to a tangent space. In the tangent space,
    the Z-mixture pCN algorithm is used to explore both local and global
    structure in the posterior distribution.

    The Z-mixture pCN algorithm uses a mixture distribution for the proposal
    distribution, controlled by the input parameter zpcn. The zpcn["betas"] must be
    between 0 and 1, and are the coefficients of the mixture components, with
    larger coefficients corresponding to larger shifts in parameter space. The
    zpcn["probs"] give the probability of each shift size.
   
    Usage:  out = pairwise_align_bayes(f1i, f2i, time)
            out = pairwise_align_bayes(f1i, f2i, time, mcmcopts)
    
    :param f1i: vector defining M samples of function 1
    :param f2i: vector defining M samples of function 2
    :param time: time vector of length M
    :param mcmcopts: dict of mcmc parameters
    :type mcmcopts: dict
  
    default mcmc options:
    tmp = {"betas":np.array([0.5,0.5,0.005,0.0001]),"probs":np.array([0.1,0.1,0.7,0.1])}
    mcmcopts = {"iter":2*(10**4) ,"burnin":np.minimum(5*(10**3),2*(10**4)//2),
                "alpha0":0.1, "beta0":0.1,"zpcn":tmp,"propvar":1,
                "initcoef":np.repeat(0,20), "npoints":200, "extrainfo":True}
   
    :rtype: collection containing
    :return f2_warped: aligned f2
    :return gamma: warping function
    :return g_coef: final g_coef
    :return psi: final psi
    :return sigma1: final sigma
    
    if extrainfo
    :return accept: acceptance indicator for the psi samples
    :return betas_ind: index of the zpcn mixture component used at each iteration
    :return logl: log likelihood
    :return gamma_mat: posterior gammas
    :return gamma_stats: posterior gamma stats
    :return xdist: phase distance posterior
    :return ydist: amplitude distance posterior
    """

    if mcmcopts is None:
        tmp = {
            "betas": np.array([0.5, 0.5, 0.005, 0.0001]),
            "probs": np.array([0.1, 0.1, 0.7, 0.1])
        }
        mcmcopts = {
            "iter": 2 * (10**4),
            "burnin": np.minimum(5 * (10**3), 2 * (10**4) // 2),
            "alpha0": 0.1,
            "beta0": 0.1,
            "zpcn": tmp,
            "propvar": 1,
            "initcoef": np.repeat(0, 20),
            "npoints": 200,
            "extrainfo": True
        }

    if f1i.shape[0] != f2i.shape[0]:
        raise Exception('Length of f1 and f2 must be equal')

    if f1i.shape[0] != time.shape[0]:
        raise Exception('Length of f1 and time must be equal')

    if mcmcopts["zpcn"]["betas"].shape[0] != mcmcopts["zpcn"]["probs"].shape[0]:
        raise Exception('In zpcn, betas must equal length of probs')

    if np.mod(mcmcopts["initcoef"].shape[0], 2) != 0:
        raise Exception('Length of mcmcopts.initcoef must be even')

    # Number of sig figs to report in gamma_mat
    SIG_GAM = 13
    iter = mcmcopts["iter"]

    # parameter settings
    pw_sim_global_burnin = mcmcopts["burnin"]
    valid_index = np.arange(pw_sim_global_burnin - 1, iter)
    pw_sim_global_Mg = mcmcopts["initcoef"].shape[0] // 2
    g_coef_ini = mcmcopts["initcoef"]
    numSimPoints = mcmcopts["npoints"]
    pw_sim_global_domain_par = np.linspace(0, 1, numSimPoints)
    g_basis = uf.basis_fourier(pw_sim_global_domain_par, pw_sim_global_Mg, 1)
    sigma1_ini = 1
    zpcn = mcmcopts["zpcn"]
    pw_sim_global_sigma_g = mcmcopts["propvar"]

    # Z-mixture pCN proposal: draw a mixture component (shift size) according to
    # zpcn["probs"], then blend the current coefficients with a fresh Gaussian
    # draw scaled by the corresponding coefficient in zpcn["betas"].
    def propose_g_coef(g_coef_curr):
        pCN_beta = zpcn["betas"]
        pCN_prob = zpcn["probs"]
        probm = np.insert(np.cumsum(pCN_prob), 0, 0)
        z = np.random.rand()
        result = {"prop": g_coef_curr, "ind": 1}
        for i in range(0, pCN_beta.shape[0]):
            if z <= probm[i + 1] and z > probm[i]:
                g_coef_new = normal(
                    0, pw_sim_global_sigma_g /
                    np.repeat(np.arange(1, pw_sim_global_Mg + 1), 2))
                result["prop"] = np.sqrt(
                    1 -
                    pCN_beta[i]**2) * g_coef_curr + pCN_beta[i] * g_coef_new
                result["ind"] = i

        return result

    # normalize time to [0,1]
    time = (time - time.min()) / (time.max() - time.min())
    timet = np.linspace(0, 1, numSimPoints)
    f1 = uf.f_predictfunction(f1i, timet, 0)
    f2 = uf.f_predictfunction(f2i, timet, 0)

    # srsf transformation
    q1 = uf.f_to_srsf(f1, timet)
    q1i = uf.f_to_srsf(f1i, time)
    q2 = uf.f_to_srsf(f2, timet)

    tmp = uf.f_exp1(uf.f_basistofunction(g_basis["x"], 0, g_coef_ini, g_basis))

    if tmp.min() < 0:
        raise Exception("Invalid initial value of g")

    # result vectors
    g_coef = np.zeros((iter, g_coef_ini.shape[0]))
    sigma1 = np.zeros(iter)
    logl = np.zeros(iter)
    SSE = np.zeros(iter)
    accept = np.zeros(iter, dtype=bool)
    accept_betas = np.zeros(iter)

    # init
    g_coef_curr = g_coef_ini
    sigma1_curr = sigma1_ini
    SSE_curr = f_SSEg_pw(
        uf.f_basistofunction(g_basis["x"], 0, g_coef_ini, g_basis), q1, q2)
    logl_curr = f_logl_pw(
        uf.f_basistofunction(g_basis["x"], 0, g_coef_ini, g_basis), q1, q2,
        sigma1_ini**2, SSE_curr)

    g_coef[0, :] = g_coef_ini
    sigma1[0] = sigma1_ini
    SSE[0] = SSE_curr
    logl[0] = logl_curr

    # update the chain for iter-1 times
    for m in tqdm(range(1, iter)):
        # update g
        g_coef_curr, tmp, SSE_curr, accepti, zpcnInd = f_updateg_pw(
            g_coef_curr, g_basis, sigma1_curr**2, q1, q2, SSE_curr,
            propose_g_coef)

        # update sigma1
        newshape = q1.shape[0] / 2 + mcmcopts["alpha0"]
        newscale = 1 / 2 * SSE_curr + mcmcopts["beta0"]
        sigma1_curr = np.sqrt(1 / np.random.gamma(newshape, 1 / newscale))
        logl_curr = f_logl_pw(
            uf.f_basistofunction(g_basis["x"], 0, g_coef_curr, g_basis), q1,
            q2, sigma1_curr**2, SSE_curr)

        # save updates to results
        g_coef[m, :] = g_coef_curr
        sigma1[m] = sigma1_curr
        SSE[m] = SSE_curr
        if mcmcopts["extrainfo"]:
            logl[m] = logl_curr
            accept[m] = accepti
            accept_betas[m] = zpcnInd

    # calculate posterior mean of psi
    pw_sim_est_psi_matrix = np.zeros((numSimPoints, valid_index.shape[0]))
    for k in range(0, valid_index.shape[0]):
        g_temp = uf.f_basistofunction(g_basis["x"], 0,
                                      g_coef[valid_index[k], :], g_basis)
        psi_temp = uf.f_exp1(g_temp)
        pw_sim_est_psi_matrix[:, k] = psi_temp

    result_posterior_psi_simDomain = uf.f_psimean(pw_sim_global_domain_par,
                                                  pw_sim_est_psi_matrix)

    # resample to same number of points as the input f1 and f2
    interp = interp1d(np.linspace(0, 1,
                                  result_posterior_psi_simDomain.shape[0]),
                      result_posterior_psi_simDomain,
                      fill_value="extrapolate")
    result_posterior_psi = interp(np.linspace(0, 1, f1i.shape[0]))

    # transform posterior mean of psi to gamma
    result_posterior_gamma = uf.f_phiinv(result_posterior_psi)
    result_posterior_gamma = uf.norm_gam(result_posterior_gamma)

    # warped f2
    f2_warped = uf.warp_f_gamma(time, f2i, result_posterior_gamma)

    if mcmcopts["extrainfo"]:
        M, N = pw_sim_est_psi_matrix.shape
        gamma_mat = np.zeros((time.shape[0], N))
        one_v = np.ones(M)
        Dx = np.zeros(N)
        Dy = np.zeros(N)  # allocate separately; "Dy = Dx" would alias the two arrays
        for ii in range(0, N):
            interp = interp1d(np.linspace(
                0, 1, result_posterior_psi_simDomain.shape[0]),
                              pw_sim_est_psi_matrix[:, ii],
                              fill_value="extrapolate")
            result_i = interp(time)
            tmp = uf.f_phiinv(result_i)
            gamma_mat[:, ii] = uf.norm_gam(tmp)
            v, theta = geo.inv_exp_map(one_v, pw_sim_est_psi_matrix[:, ii])
            Dx[ii] = np.sqrt(trapz(v**2, pw_sim_global_domain_par))
            q2warp = uf.warp_q_gamma(pw_sim_global_domain_par, q2,
                                     gamma_mat[:, ii])
            Dy[ii] = np.sqrt(trapz((q1i - q2warp)**2, time))

        gamma_stats = uf.statsFun(gamma_mat)
    else:
        # keep the returned fields defined when extrainfo is disabled
        gamma_mat = gamma_stats = Dx = Dy = None

    results_o = collections.namedtuple('align_bayes', [
        'f2_warped', 'gamma', 'g_coef', 'psi', 'sigma1', 'accept', 'betas_ind',
        'logl', 'gamma_mat', 'gamma_stats', 'xdist', 'ydist'
    ])

    out = results_o(f2_warped, result_posterior_gamma, g_coef,
                    result_posterior_psi, sigma1, accept[1:], accept_betas[1:],
                    logl, gamma_mat, gamma_stats, Dx, Dy)

    return out
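
A minimal usage sketch for pairwise_align_bayes, assuming the function is exposed at the top level of the fdasrsf package; the two test signals are synthetic.

import numpy as np
import fdasrsf as fs

time = np.linspace(0, 1, 101)
f1 = np.sin(2 * np.pi * time)
f2 = np.sin(2 * np.pi * time**1.5)          # a phase-distorted copy of f1
out = fs.pairwise_align_bayes(f1, f2, time)
# out.f2_warped is f2 aligned to f1, out.gamma the posterior-mean warping function,
# and out.xdist / out.ydist hold the phase and amplitude distance posteriors.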
Code example #4
    def joint_gauss_model(self, n=1, no=3):
        """
        This function models the functional data using a joint Gaussian model
        extracted from the principal components of the SRSFs.

        :param n: number of random samples
        :param no: number of principal components (default = 3)
        :type n: integer
        :type no: integer
        """

        # Parameters
        fn = self.fn
        time = self.time
        qn = self.qn
        gam = self.gam

        M = time.size

        # Perform PCA
        jfpca = fpca.fdajpca(self)
        jfpca.calc_fpca(no=no)
        s = jfpca.latent
        U = jfpca.U
        C = jfpca.C
        mu_psi = jfpca.mu_psi

        # compute mean and covariance
        mq_new = qn.mean(axis=1)
        mididx = jfpca.id
        m_new = np.sign(fn[mididx, :]) * np.sqrt(np.abs(fn[mididx, :]))
        mqn = np.append(mq_new, m_new.mean())

        # generate random samples
        vals = np.random.multivariate_normal(np.zeros(s.shape), np.diag(s), n)

        tmp = np.matmul(U, np.transpose(vals))
        qhat = np.tile(mqn.T, (n, 1)).T + tmp[0:M + 1, :]
        tmp = np.matmul(U, np.transpose(vals) / C)
        vechat = tmp[(M + 1):, :]
        psihat = np.zeros((M, n))
        gamhat = np.zeros((M, n))
        for ii in range(n):
            psihat[:, ii] = geo.exp_map(mu_psi, vechat[:, ii])
            gam_tmp = cumtrapz(psihat[:, ii]**2,
                               np.linspace(0, 1, M),
                               initial=0.0)
            gamhat[:, ii] = (gam_tmp - gam_tmp.min()) / (gam_tmp.max() -
                                                         gam_tmp.min())

        ft = np.zeros((M, n))
        fhat = np.zeros((M, n))
        for ii in range(n):
            fhat[:, ii] = uf.cumtrapzmid(
                time, qhat[0:M, ii] * np.fabs(qhat[0:M, ii]),
                np.sign(qhat[M, ii]) * (qhat[M, ii] * qhat[M, ii]), mididx)
            ft[:, ii] = uf.warp_f_gamma(np.linspace(0, 1, M), fhat[:, ii],
                                        gamhat[:, ii])

        self.rsamps = True
        self.fs = fhat
        self.gams = gamhat
        self.ft = ft
        self.qs = qhat[0:M, :]

        return
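
A minimal usage sketch, assuming joint_gauss_model is a method of fdasrsf's fdawarp object and is called after alignment; the input curves below are synthetic.

import numpy as np
import fdasrsf as fs

time = np.linspace(0, 1, 101)
f = np.column_stack([np.sin(2 * np.pi * (time - 0.05 * k)) for k in range(21)])
obj = fs.fdawarp(f, time)
obj.srsf_align()                    # align the curves (populates fn, qn, gam)
obj.joint_gauss_model(n=50, no=3)   # draw 50 random curves from the joint model
# obj.ft holds the sampled functions and obj.gams the sampled warping functions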
Code example #5
    def predict(self, newdata=None):
        """
        This function performs prediction with the regression model on new data,
        if provided, or on the data currently stored in the object.
        Usage:  obj.predict()
                obj.predict(newdata)

        :param newdata: dict containing new data for prediction (needs the keys below, if None predicts on training data)
        :type newdata: dict
        :param f: (M,N) matrix of functions
        :param time: vector of time points
        :param y: truth if available
        :param smooth: smooth data if needed
        :param sparam: number of times to run filter
        """

        omethod = self.warp_data.method
        lam = self.warp_data.lam
        m = self.n_classes
        M = self.time.shape[0]

        if newdata is not None:
            f = newdata['f']
            time = newdata['time']
            y = newdata['y']
            sparam = newdata['sparam']
            if newdata['smooth']:
                f = fs.smooth_data(f, sparam)

            q1 = fs.f_to_srsf(f, time)
            n = q1.shape[1]
            self.y_pred = np.zeros((n, m))
            mq = self.warp_data.mqn
            fn = np.zeros((M, n))
            qn = np.zeros((M, n))
            gam = np.zeros((M, n))
            for ii in range(0, n):
                gam[:, ii] = uf.optimum_reparam(mq, time, q1[:, ii], omethod)
                fn[:, ii] = uf.warp_f_gamma(time, f[:, ii], gam[:, ii])
                qn[:, ii] = uf.f_to_srsf(fn[:, ii], time)

            m_new = np.sign(fn[self.pca.id, :]) * np.sqrt(
                np.abs(fn[self.pca.id, :]))
            qn1 = np.vstack((qn, m_new))
            U = self.pca.U
            no = U.shape[1]

            if self.pca.__class__.__name__ == 'fdajpca':
                C = self.pca.C
                TT = self.time.shape[0]
                mu_g = self.pca.mu_g
                mu_psi = self.pca.mu_psi
                vec = np.zeros((M, n))
                psi = np.zeros((TT, n))
                binsize = np.mean(np.diff(self.time))
                for i in range(0, n):
                    psi[:, i] = np.sqrt(np.gradient(gam[:, i], binsize))
                    # inv_exp_map returns (vector, theta); keep only the tangent vector
                    vec[:, i], theta = geo.inv_exp_map(mu_psi, psi[:, i])

                g = np.vstack((qn1, C * vec))
                a = np.zeros((n, no))
                for i in range(0, n):
                    for j in range(0, no):
                        tmp = (g[:, i] - mu_g)
                        a[i, j] = np.dot(tmp.T, U[:, j])

            elif self.pca.__class__.__name__ == 'fdavpca':
                a = np.zeros((n, no))
                for i in range(0, n):
                    for j in range(0, no):
                        tmp = (qn1[:, i] - self.pca.mqn)
                        a[i, j] = np.dot(tmp.T, U[:, j])

            elif self.pca.__class__.__name__ == 'fdahpca':
                a = np.zeros((n, no))
                mu_psi = self.pca.psi_mu
                vec = np.zeros((M, n))
                TT = self.time.shape[0]
                psi = np.zeros((TT, n))
                binsize = np.mean(np.diff(self.time))
                for i in range(0, n):
                    psi[:, i] = np.sqrt(np.gradient(gam[:, i], binsize))
                    # inv_exp_map returns (vector, theta); keep only the tangent vector
                    vec[:, i], theta = geo.inv_exp_map(mu_psi, psi[:, i])

                vm = self.pca.vec.mean(axis=1)

                for i in range(0, n):
                    for j in range(0, no):
                        a[i, j] = np.sum(np.dot(vec[:, i] - vm, U[:, j]))
            else:
                raise Exception('Invalid fPCA Method')

            for ii in range(0, n):
                for jj in range(0, m):
                    self.y_pred[ii, jj] = self.alpha[jj] + np.sum(
                        a[ii, :] * self.b[:, jj])

            if y is None:
                self.y_pred = rg.phi(self.y_pred.reshape((1, n * m)))
                self.y_pred = self.y_pred.reshape((n, m))
                self.y_labels = np.argmax(self.y_pred, axis=1)
                self.PC = np.nan
            else:
                self.y_pred = rg.phi(self.y_pred.reshape((1, n * m)))
                self.y_pred = self.y_pred.reshape((n, m))
                self.y_labels = np.argmax(self.y_pred, axis=1)
                self.PC = np.zeros(m)
                cls_set = np.arange(0, m)
                for ii in range(0, m):
                    cls_sub = np.setdiff1d(cls_set, ii)
                    TP = np.sum(y[self.y_labels == ii] == ii)
                    FP = np.sum(y[np.in1d(self.y_labels, cls_sub)] == ii)
                    TN = np.sum(y[np.in1d(self.y_labels, cls_sub)] ==
                                self.y_labels[np.in1d(self.y_labels, cls_sub)])
                    FN = np.sum(np.in1d(y[self.y_labels == ii], cls_sub))
                    self.PC[ii] = (TP + TN) / (TP + FP + FN + TN)

                self.PCo = np.sum(y == self.y_labels) / self.y_labels.shape[0]
        else:
            n = self.pca.coef.shape[1]
            self.y_pred = np.zeros((n, m))
            for ii in range(0, n):
                for jj in range(0, m):
                    self.y_pred[ii, jj] = self.alpha[jj] + np.sum(
                        self.pca.coef[ii, :] * self.b[:, jj])

            self.y_pred = rg.phi(self.y_pred.reshape((1, n * m)))
            self.y_pred = self.y_pred.reshape((n, m))
            self.y_labels = np.argmax(self.y_pred, axis=1)
            self.PC = np.zeros(m)
            cls_set = np.arange(0, m)
            for ii in range(0, m):
                cls_sub = np.setdiff1d(cls_set, ii)
                TP = np.sum(self.y[self.y_labels == ii] == ii)
                FP = np.sum(self.y[np.in1d(self.y_labels, cls_sub)] == ii)
                TN = np.sum(self.y[np.in1d(self.y_labels, cls_sub)] ==
                            self.y_labels[np.in1d(self.y_labels, cls_sub)])
                FN = np.sum(np.in1d(self.y[self.y_labels == ii], cls_sub))
                self.PC[ii] = (TP + TN) / (TP + FP + FN + TN)

            self.PCo = np.sum(self.y == self.y_labels) / self.y_labels.shape[0]

        return
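
A minimal sketch of the newdata dict this predict method expects, based on the docstring above; the fitted classifier object (called model here) and the array shapes are assumptions for illustration.

import numpy as np

M, N = 101, 10
newdata = {
    "f": np.random.rand(M, N),       # (M,N) matrix of new functions
    "time": np.linspace(0, 1, M),    # time vector of length M
    "y": None,                       # true labels if available, else None
    "smooth": False,                 # smooth the data before computing SRSFs?
    "sparam": 25,                    # number of filter passes when smoothing
}
# model.predict(newdata)   # predictions end up in model.y_pred and model.y_labels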
Code example #6
File: fPCA.py  Project: jdtuck/fdasrsf_python
def jointfPCA(fn, time, qn, q0, gam, no=2, showplot=True):
    """
    This function calculates joint functional principal component analysis
    on aligned data

    :param fn: numpy ndarray of shape (M,N) of N aligned functions with M
               samples
    :param time: vector of size M describing the sample points
    :param qn: numpy ndarray of shape (M,N) of N aligned SRSFs with M samples
    :param q0: numpy ndarray of shape (M,N) of N unaligned SRSFs with M samples
    :param no: number of components to extract (default = 2)
    :param showplot: Shows plots of results using matplotlib (default = True)
    :type showplot: bool
    :type no: int

    :rtype: tuple of numpy ndarray
    :return q_pca: srsf principal directions
    :return f_pca: functional principal directions
    :return latent: latent values
    :return coef: coefficients
    :return U: eigenvectors

    """
    coef = np.arange(-1., 2.)
    Nstd = coef.shape[0]

    # set up for fPCA in q-space
    mq_new = qn.mean(axis=1)
    M = time.shape[0]
    mididx = int(np.round(M / 2))
    m_new = np.sign(fn[mididx, :]) * np.sqrt(np.abs(fn[mididx, :]))
    mqn = np.append(mq_new, m_new.mean())
    qn2 = np.vstack((qn, m_new))

    # calculate vector space of warping functions
    mu_psi, gam_mu, psi, vec = uf.SqrtMean(gam)

    # joint fPCA
    C = fminbound(find_C, 0, 1e4, (qn2, vec, q0, no, mu_psi))
    qhat, gamhat, a, U, s, mu_g = jointfPCAd(qn2, vec, C, no, mu_psi)

    # geodesic paths
    q_pca = np.ndarray(shape=(M, Nstd, no), dtype=float)
    f_pca = np.ndarray(shape=(M, Nstd, no), dtype=float)
    
    for k in range(0, no):
        for l in range(0, Nstd):
            qhat = mqn + np.dot(U[0:(M + 1), k], coef[l] * np.sqrt(s[k]))
            vechat = np.dot(U[(M + 1):, k], (coef[l] * np.sqrt(s[k])) / C)
            psihat = geo.exp_map(mu_psi, vechat)
            gamhat = cumtrapz(psihat * psihat, np.linspace(0, 1, M), initial=0)
            gamhat = (gamhat - gamhat.min()) / (gamhat.max() - gamhat.min())
            if sum(vechat) == 0:
                gamhat = np.linspace(0, 1, M)

            fhat = uf.cumtrapzmid(time, qhat[0:M] * np.fabs(qhat[0:M]),
                                  np.sign(qhat[M]) * (qhat[M] * qhat[M]), mididx)
            f_pca[:, l, k] = uf.warp_f_gamma(np.linspace(0, 1, M), fhat, gamhat)
            q_pca[:, l, k] = uf.warp_q_gamma(np.linspace(0, 1, M), qhat[0:M],
                                             gamhat)

    jfpca_results = collections.namedtuple('jfpca', ['q_pca', 'f_pca', 'latent', 'coef', 'U'])
    jfpca = jfpca_results(q_pca, f_pca, s, a, U)

    if showplot:
        CBcdict = {
            'Bl': (0, 0, 0),
            'Or': (.9, .6, 0),
            'SB': (.35, .7, .9),
            'bG': (0, .6, .5),
            'Ye': (.95, .9, .25),
            'Bu': (0, .45, .7),
            'Ve': (.8, .4, 0),
            'rP': (.8, .6, .7),
        }
        cl = sorted(CBcdict.keys())
        fig, ax = plt.subplots(2, no)
        for k in range(0, no):
            axt = ax[0, k]
            for l in range(0, Nstd):
                axt.plot(time, q_pca[0:M, l, k], color=CBcdict[cl[l]])

            axt.set_title('q domain: PD %d' % (k + 1))
            axt = ax[1, k]
            for l in range(0, Nstd):
                axt.plot(time, f_pca[:, l, k], color=CBcdict[cl[l]])

            axt.set_title('f domain: PD %d' % (k + 1))
        fig.set_tight_layout(True)

        cumm_coef = 100 * np.cumsum(s) / sum(s)
        idx = np.arange(0, s.shape[0]) + 1
        plot.f_plot(idx, cumm_coef, "Coefficient Cumulative Percentage")
        plt.xlabel("Percentage")
        plt.ylabel("Index")
        plt.show()

    return jfpca
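
A minimal usage sketch for jointfPCA, assuming the aligned inputs come from fdasrsf's fdawarp object; the data are synthetic and showplot is disabled to keep the call non-interactive.

import numpy as np
import fdasrsf as fs

time = np.linspace(0, 1, 101)
f = np.column_stack([np.sin(2 * np.pi * (time - 0.05 * k)) for k in range(21)])
obj = fs.fdawarp(f, time)
obj.srsf_align()
res = jointfPCA(obj.fn, time, obj.qn, obj.q0, obj.gam, no=2, showplot=False)
# res.q_pca and res.f_pca hold the principal geodesic paths, res.latent the eigenvalues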
Code example #7
    def predict(self, newdata=None):
        """
        This function performs prediction with the regression model on new data,
        if provided, or on the data currently stored in the object.
        Usage:  obj.predict()
                obj.predict(newdata)

        :param newdata: dict containing new data for prediction (needs the keys below, if None predicts on training data)
        :type newdata: dict
        :param f: (M,N) matrix of functions
        :param time: vector of time points
        :param y: truth if available
        :param smooth: smooth data if needed
        :param sparam: number of times to run filter
        """

        omethod = self.warp_data.method
        lam = self.warp_data.lam
        M = self.time.shape[0]

        if newdata is not None:
            f = newdata['f']
            time = newdata['time']
            y = newdata['y']
            if newdata['smooth']:
                sparam = newdata['sparam']
                f = fs.smooth_data(f,sparam)
            
            q1 = fs.f_to_srsf(f,time)
            n = q1.shape[1]
            self.y_pred = np.zeros(n)
            mq = self.warp_data.mqn
            fn = np.zeros((M,n))
            qn = np.zeros((M,n))
            gam = np.zeros((M,n))
            for ii in range(0,n):
                gam[:,ii] = uf.optimum_reparam(mq,time,q1[:,ii],omethod,lam)
                fn[:,ii] = uf.warp_f_gamma(time,f[:,ii],gam[:,ii])
                qn[:,ii] = uf.f_to_srsf(fn[:,ii],time)
            
            U = self.pca.U
            no = U.shape[1]

            if self.pca.__class__.__name__ == 'fdajpca':
                m_new = np.sign(fn[self.pca.id,:])*np.sqrt(np.abs(fn[self.pca.id,:]))
                qn1 = np.vstack((qn, m_new))
                C = self.pca.C
                TT = self.time.shape[0]
                mu_g = self.pca.mu_g
                mu_psi = self.pca.mu_psi
                vec = np.zeros((M,n))
                psi = np.zeros((TT,n))
                binsize = np.mean(np.diff(self.time))
                for i in range(0,n):
                    psi[:,i] = np.sqrt(np.gradient(gam[:,i],binsize))
                    out, theta = geo.inv_exp_map(mu_psi, psi[:,i])
                    vec[:,i] = out
                
                g = np.vstack((qn1, C*vec))
                a = np.zeros((n,no))
                for i in range(0,n):
                    for j in range(0,no):
                        tmp = (g[:,i]-mu_g)
                        a[i,j] = np.dot(tmp.T, U[:,j])

            elif self.pca.__class__.__name__ == 'fdavpca':
                m_new = np.sign(fn[self.pca.id,:])*np.sqrt(np.abs(fn[self.pca.id,:]))
                qn1 = np.vstack((qn, m_new))
                a = np.zeros((n,no))
                for i in range(0,n):
                    for j in range(0,no):
                        tmp = (qn1[:,i]-self.pca.mqn)
                        a[i,j] = np.dot(tmp.T, U[:,j])

            elif self.pca.__class__.__name__ == 'fdahpca':
                a = np.zeros((n,no))
                mu_psi = self.pca.psi_mu
                vec = np.zeros((M,n))
                TT = self.time.shape[0]
                psi = np.zeros((TT,n))
                binsize = np.mean(np.diff(self.time))
                for i in range(0,n):
                    psi[:,i] = np.sqrt(np.gradient(gam[:,i],binsize))
                    out, theta = geo.inv_exp_map(mu_psi, psi[:,i])
                    vec[:,i] = out
                
                vm = self.pca.vec.mean(axis=1)

                for i in range(0,n):
                    for j in range(0,no):
                        a[i,j] = np.sum(np.dot(vec[:,i]-vm,U[:,j]))
            else: 
                raise Exception('Invalid fPCA Method')

            for ii in range(0,n):
                self.y_pred[ii] = self.alpha + np.dot(a[ii,:],self.b)
            
            if y is None:
                self.SSE = np.nan
            else:
                self.SSE = np.sum((y-self.y_pred)**2)
        else:
            n = self.pca.coef.shape[0]
            self.y_pred = np.zeros(n)
            for ii in range(0,n):
                self.y_pred[ii] = self.alpha + np.dot(self.pca.coef[ii,:],self.b)
            
            self.SSE = np.sum((self.y-self.y_pred)**2)

        return
Code example #8
    def calc_fpca(self,
                  no=3,
                  stds=np.arange(-1., 2.),
                  id=None,
                  parallel=False,
                  cores=-1):
        """
        This function calculates joint functional principal component analysis
        on aligned data

        :param no: number of components to extract (default = 3)
        :param id: point to use for f(0) (default = midpoint)
        :param stds: number of standard deviations along geodesic to compute (default = -1,0,1)
        :param parallel: run in parallel (default = False)
        :param cores: number of cores for parallel (default = -1 (all))
        :type no: int
        :type id: int
        :type parallel: bool
        :type cores: int

        :rtype: fdajpca object containing numpy ndarrays
        :return q_pca: srsf principal directions
        :return f_pca: functional principal directions
        :return latent: latent values
        :return coef: coefficients
        :return U: eigenvectors

        """
        fn = self.warp_data.fn
        time = self.warp_data.time
        qn = self.warp_data.qn
        q0 = self.warp_data.q0
        gam = self.warp_data.gam

        M = time.shape[0]
        if id is None:
            mididx = int(np.round(M / 2))
        else:
            mididx = id

        Nstd = stds.shape[0]

        # set up for fPCA in q-space
        mq_new = qn.mean(axis=1)
        m_new = np.sign(fn[mididx, :]) * np.sqrt(np.abs(fn[mididx, :]))
        mqn = np.append(mq_new, m_new.mean())
        qn2 = np.vstack((qn, m_new))

        # calculate vector space of warping functions
        mu_psi, gam_mu, psi, vec = uf.SqrtMean(gam, parallel, cores)

        # joint fPCA
        C = fminbound(find_C, 0, 1e4,
                      (qn2, vec, q0, no, mu_psi, parallel, cores))
        qhat, gamhat, a, U, s, mu_g, g, cov = jointfPCAd(
            qn2, vec, C, no, mu_psi, parallel, cores)

        # geodesic paths
        q_pca = np.ndarray(shape=(M, Nstd, no), dtype=float)
        f_pca = np.ndarray(shape=(M, Nstd, no), dtype=float)

        for k in range(0, no):
            for l in range(0, Nstd):
                qhat = mqn + np.dot(U[0:(M + 1), k], stds[l] * np.sqrt(s[k]))
                vechat = np.dot(U[(M + 1):, k], (stds[l] * np.sqrt(s[k])) / C)
                psihat = geo.exp_map(mu_psi, vechat)
                gamhat = cumtrapz(psihat * psihat,
                                  np.linspace(0, 1, M),
                                  initial=0)
                gamhat = (gamhat - gamhat.min()) / (gamhat.max() -
                                                    gamhat.min())
                if (sum(vechat) == 0):
                    gamhat = np.linspace(0, 1, M)

                fhat = uf.cumtrapzmid(time, qhat[0:M] * np.fabs(qhat[0:M]),
                                      np.sign(qhat[M]) * (qhat[M] * qhat[M]),
                                      mididx)
                f_pca[:, l, k] = uf.warp_f_gamma(np.linspace(0, 1, M), fhat,
                                                 gamhat)
                q_pca[:, l, k] = uf.warp_q_gamma(np.linspace(0, 1, M),
                                                 qhat[0:M], gamhat)

        self.q_pca = q_pca
        self.f_pca = f_pca
        self.latent = s[0:no]
        self.coef = a
        self.U = U[:, 0:no]
        self.mu_psi = mu_psi
        self.mu_g = mu_g
        self.id = mididx
        self.C = C
        self.time = time
        self.g = g
        self.cov = cov
        self.no = no
        self.stds = stds

        return
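
A minimal usage sketch for the class-based interface above, assuming fdajpca is the fdasrsf class that wraps an aligned fdawarp object (as in code example #4); the data are synthetic.

import numpy as np
import fdasrsf as fs

time = np.linspace(0, 1, 101)
f = np.column_stack([np.sin(2 * np.pi * (time - 0.05 * k)) for k in range(21)])
obj = fs.fdawarp(f, time)
obj.srsf_align()
jfpca = fs.fdajpca(obj)     # wrap the aligned object
jfpca.calc_fpca(no=3)
# jfpca.f_pca, jfpca.q_pca, jfpca.latent, jfpca.coef, and jfpca.U are now populated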